From fc0a5921561c71be2c334a335c1680f7930434d7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 20 Dec 2013 22:41:07 +0100 Subject: reset: Add of_reset_control_get In some cases, you might need to deassert from reset an hardware block that doesn't associated to a struct device (CPUs, timers, etc.). Add a small helper to retrieve the reset controller from the device tree without the need to pass a struct device. Signed-off-by: Maxime Ripard Signed-off-by: Philipp Zabel --- include/linux/reset.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/reset.h b/include/linux/reset.h index 6082247feab1..a398025d1138 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,6 +1,8 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ +#include + struct device; struct reset_control; @@ -8,6 +10,8 @@ int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id); struct reset_control *reset_control_get(struct device *dev, const char *id); void reset_control_put(struct reset_control *rstc); struct reset_control *devm_reset_control_get(struct device *dev, const char *id); -- cgit v1.2.3 From a028c6da34d434e35ba8322568c756ea97ff3c18 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Sat, 14 Dec 2013 16:23:51 +0100 Subject: ARM: shmobile: wait for MSTP clock status to toggle, when enabling it On r-/sh-mobile SoCs MSTP clocks are used by the runtime PM to dynamically enable and disable peripheral clocks. To make sure the clock has really started we have to read back its status register until it confirms success. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Simon Horman --- drivers/sh/clk/cpg.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/sh_clk.h | 19 ++++++++++++------- 2 files changed, 50 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 1ebe67cd1833..7442bc130055 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -36,9 +36,47 @@ static void sh_clk_write(int value, struct clk *clk) iowrite32(value, clk->mapped_reg); } +static unsigned int r8(const void __iomem *addr) +{ + return ioread8(addr); +} + +static unsigned int r16(const void __iomem *addr) +{ + return ioread16(addr); +} + +static unsigned int r32(const void __iomem *addr) +{ + return ioread32(addr); +} + static int sh_clk_mstp_enable(struct clk *clk) { sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); + if (clk->status_reg) { + unsigned int (*read)(const void __iomem *addr); + int i; + void __iomem *mapped_status = (phys_addr_t)clk->status_reg - + (phys_addr_t)clk->enable_reg + clk->mapped_reg; + + if (clk->flags & CLK_ENABLE_REG_8BIT) + read = r8; + else if (clk->flags & CLK_ENABLE_REG_16BIT) + read = r16; + else + read = r32; + + for (i = 1000; + (read(mapped_status) & (1 << clk->enable_bit)) && i; + i--) + cpu_relax(); + if (!i) { + pr_err("cpg: failed to enable %p[%d]\n", + clk->enable_reg, clk->enable_bit); + return -ETIMEDOUT; + } + } return 0; } diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 60c72395ec6b..1f208b2a1ed6 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h @@ -52,6 +52,7 @@ struct clk { unsigned long flags; void __iomem *enable_reg; + void __iomem *status_reg; unsigned int enable_bit; void __iomem *mapped_reg; @@ -116,22 +117,26 @@ long 
clk_round_parent(struct clk *clk, unsigned long target, unsigned long *best_freq, unsigned long *parent_freq, unsigned int div_min, unsigned int div_max); -#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags) \ +#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \ { \ .parent = _parent, \ .enable_reg = (void __iomem *)_enable_reg, \ .enable_bit = _enable_bit, \ + .status_reg = _status_reg, \ .flags = _flags, \ } -#define SH_CLK_MSTP32(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT) +#define SH_CLK_MSTP32(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT) -#define SH_CLK_MSTP16(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT) +#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f) \ + SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT) -#define SH_CLK_MSTP8(_p, _r, _b, _f) \ - SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT) +#define SH_CLK_MSTP16(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_16BIT) + +#define SH_CLK_MSTP8(_p, _r, _b, _f) \ + SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT) int sh_clk_mstp_register(struct clk *clks, int nr); -- cgit v1.2.3 From 006e983bbc805431c44e2135e13841f66059a045 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Tue, 3 Dec 2013 15:57:22 +0530 Subject: DRIVERS: IRQCHIP: IRQ-GIC: Add support for routable irqs In some socs the gic can be preceded by a crossbar IP which routes the peripheral interrupts to the gic inputs. The peripheral interrupts are associated with a fixed crossbar input line and the crossbar routes that to one of the free gic input line. The DT entries for peripherals provides the fixed crossbar input line as its interrupt number and the mapping code should associate this with a free gic input line. This patch adds the support inside the gic irqchip to handle such routable irqs. The routable irqs are registered in a linear domain. The registered routable domain's callback should be implemented to get a free irq and to configure the IP to route it. Cc: Thomas Gleixner Cc: Linus Walleij Cc: Santosh Shilimkar Cc: Russell King Cc: Tony Lindgren Cc: Rajendra Nayak Cc: Marc Zyngier Cc: Grant Likely Cc: Rob Herring Signed-off-by: Sricharan R Reviewed-by: Thomas Gleixner Acked-by: Santosh Shilimkar Acked-by: Linus Walleij --- Documentation/devicetree/bindings/arm/gic.txt | 6 ++ drivers/irqchip/irq-gic.c | 82 +++++++++++++++++++++++---- include/linux/irqchip/arm-gic.h | 7 ++- 3 files changed, 84 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt index bae0d87a38b2..5573c08d3180 100644 --- a/Documentation/devicetree/bindings/arm/gic.txt +++ b/Documentation/devicetree/bindings/arm/gic.txt @@ -50,6 +50,11 @@ Optional regions, used when the GIC doesn't have banked registers. The offset is cpu-offset * cpu-nr. +- arm,routable-irqs : Total number of gic irq inputs which are not directly + connected from the peripherals, but are routed dynamically + by a crossbar/multiplexer preceding the GIC. The GIC irq + input line is assigned dynamically when the corresponding + peripheral's crossbar line is mapped. 
Example: intc: interrupt-controller@fff11000 { @@ -57,6 +62,7 @@ Example: #interrupt-cells = <3>; #address-cells = <1>; interrupt-controller; + arm,routable-irqs = <160>; reg = <0xfff11000 0x1000>, <0xfff10100 0x100>; }; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 341c6016812d..07a7050841ec 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -824,16 +824,25 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + + gic_routable_irq_domain_ops->map(d, irq, hw); } irq_set_chip_data(irq, d->host_data); return 0; } +static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + gic_routable_irq_domain_ops->unmap(d, irq); +} + static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *controller, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type) { + unsigned long ret = 0; + if (d->of_node != controller) return -EINVAL; if (intsize < 3) @@ -843,11 +852,20 @@ static int gic_irq_domain_xlate(struct irq_domain *d, *out_hwirq = intspec[1] + 16; /* For SPIs, we need to add 16 more to get the GIC irq ID number */ - if (!intspec[0]) - *out_hwirq += 16; + if (!intspec[0]) { + ret = gic_routable_irq_domain_ops->xlate(d, controller, + intspec, + intsize, + out_hwirq, + out_type); + + if (IS_ERR_VALUE(ret)) + return ret; + } *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; - return 0; + + return ret; } #ifdef CONFIG_SMP @@ -871,9 +889,41 @@ static struct notifier_block gic_cpu_notifier = { const struct irq_domain_ops gic_irq_domain_ops = { .map = gic_irq_domain_map, + .unmap = gic_irq_domain_unmap, .xlate = gic_irq_domain_xlate, }; +/* Default functions for routable irq domain */ +static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + return 0; +} + +static void gic_routable_irq_domain_unmap(struct irq_domain *d, + unsigned int irq) +{ +} + +static int gic_routable_irq_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + *out_hwirq += 16; + return 0; +} + +const struct irq_domain_ops gic_default_routable_irq_domain_ops = { + .map = gic_routable_irq_domain_map, + .unmap = gic_routable_irq_domain_unmap, + .xlate = gic_routable_irq_domain_xlate, +}; + +const struct irq_domain_ops *gic_routable_irq_domain_ops = + &gic_default_routable_irq_domain_ops; + void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __iomem *dist_base, void __iomem *cpu_base, u32 percpu_offset, struct device_node *node) @@ -881,6 +931,7 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, irq_hw_number_t hwirq_base; struct gic_chip_data *gic; int gic_irqs, irq_base, i; + int nr_routable_irqs; BUG_ON(gic_nr >= MAX_GIC_NR); @@ -946,14 +997,25 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic->gic_irqs = gic_irqs; gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; + + if (of_property_read_u32(node, "arm,routable-irqs", + &nr_routable_irqs)) { + irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, + numa_node_id()); + if (IS_ERR_VALUE(irq_base)) { + WARN(1, "Cannot 
allocate irq_descs @ IRQ%d, assuming pre-allocated\n", + irq_start); + irq_base = irq_start; + } + + gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, + hwirq_base, &gic_irq_domain_ops, gic); + } else { + gic->domain = irq_domain_add_linear(node, nr_routable_irqs, + &gic_irq_domain_ops, + gic); } - gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); + if (WARN_ON(!gic->domain)) return; diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 0ceb389dba6c..7ed92d0560d5 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -93,6 +93,11 @@ int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); unsigned long gic_get_sgir_physaddr(void); +extern const struct irq_domain_ops *gic_routable_irq_domain_ops; +static inline void __init register_routable_domain_ops + (const struct irq_domain_ops *ops) +{ + gic_routable_irq_domain_ops = ops; +} #endif /* __ASSEMBLY */ - #endif -- cgit v1.2.3 From 96ca848ef7ea1be7e92d1cceb34ef3aa86053828 Mon Sep 17 00:00:00 2001 From: Sricharan R Date: Tue, 3 Dec 2013 15:57:23 +0530 Subject: DRIVERS: IRQCHIP: CROSSBAR: Add support for Crossbar IP Some socs have a large number of interrupts requests to service the needs of its many peripherals and subsystems. All of the interrupt lines from the subsystems are not needed at the same time, so they have to be muxed to the irq-controller appropriately. In such places a interrupt controllers are preceded by an CROSSBAR that provides flexibility in muxing the device requests to the controller inputs. This driver takes care a allocating a free irq and then configuring the crossbar IP as a part of the mpu's irqchip callbacks. crossbar_init should be called right before the irqchip_init, so that it is setup to handle the irqchip callbacks. Cc: Thomas Gleixner Cc: Linus Walleij Cc: Santosh Shilimkar Cc: Russell King Cc: Tony Lindgren Cc: Rajendra Nayak Cc: Marc Zyngier Cc: Grant Likely Cc: Rob Herring Signed-off-by: Sricharan R Acked-by: Kumar Gala (for DT binding portion) Acked-by: Santosh Shilimkar Acked-by: Linus Walleij Acked-by: Thomas Gleixner --- .../devicetree/bindings/arm/omap/crossbar.txt | 27 +++ drivers/irqchip/Kconfig | 8 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-crossbar.c | 208 +++++++++++++++++++++ include/linux/irqchip/irq-crossbar.h | 11 ++ 5 files changed, 255 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/omap/crossbar.txt create mode 100644 drivers/irqchip/irq-crossbar.c create mode 100644 include/linux/irqchip/irq-crossbar.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/arm/omap/crossbar.txt b/Documentation/devicetree/bindings/arm/omap/crossbar.txt new file mode 100644 index 000000000000..fb88585cfb93 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/omap/crossbar.txt @@ -0,0 +1,27 @@ +Some socs have a large number of interrupts requests to service +the needs of its many peripherals and subsystems. All of the +interrupt lines from the subsystems are not needed at the same +time, so they have to be muxed to the irq-controller appropriately. +In such places a interrupt controllers are preceded by an CROSSBAR +that provides flexibility in muxing the device requests to the controller +inputs. + +Required properties: +- compatible : Should be "ti,irq-crossbar" +- reg: Base address and the size of the crossbar registers. +- ti,max-irqs: Total number of irqs available at the interrupt controller. 
+- ti,reg-size: Size of a individual register in bytes. Every individual + register is assumed to be of same size. Valid sizes are 1, 2, 4. +- ti,irqs-reserved: List of the reserved irq lines that are not muxed using + crossbar. These interrupt lines are reserved in the soc, + so crossbar bar driver should not consider them as free + lines. + +Examples: + crossbar_mpu: @4a020000 { + compatible = "ti,irq-crossbar"; + reg = <0x4a002a48 0x130>; + ti,max-irqs = <160>; + ti,reg-size = <2>; + ti,irqs-reserved = <0 1 2 3 5 6 131 132 139 140>; + }; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 61ffdca96e25..111068782da4 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -69,3 +69,11 @@ config VERSATILE_FPGA_IRQ_NR config XTENSA_MX bool select IRQ_DOMAIN + +config IRQ_CROSSBAR + bool + help + Support for a CROSSBAR ip that preceeds the main interrupt controller. + The primary irqchip invokes the crossbar's callback which inturn allocates + a free irq and configures the IP. Thus the peripheral interrupts are + routed to one of the free irqchip interrupt lines. diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 86b484cb3ec2..3e776cb8dd46 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o +obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c new file mode 100644 index 000000000000..fc817d28d1fe --- /dev/null +++ b/drivers/irqchip/irq-crossbar.c @@ -0,0 +1,208 @@ +/* + * drivers/irqchip/irq-crossbar.c + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * Author: Sricharan R + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include +#include +#include +#include +#include +#include + +#define IRQ_FREE -1 +#define GIC_IRQ_START 32 + +/* + * @int_max: maximum number of supported interrupts + * @irq_map: array of interrupts to crossbar number mapping + * @crossbar_base: crossbar base address + * @register_offsets: offsets for each irq number + */ +struct crossbar_device { + uint int_max; + uint *irq_map; + void __iomem *crossbar_base; + int *register_offsets; + void (*write) (int, int); +}; + +static struct crossbar_device *cb; + +static inline void crossbar_writel(int irq_no, int cb_no) +{ + writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writew(int irq_no, int cb_no) +{ + writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline void crossbar_writeb(int irq_no, int cb_no) +{ + writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); +} + +static inline int allocate_free_irq(int cb_no) +{ + int i; + + for (i = 0; i < cb->int_max; i++) { + if (cb->irq_map[i] == IRQ_FREE) { + cb->irq_map[i] = cb_no; + return i; + } + } + + return -ENODEV; +} + +static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]); + return 0; +} + +static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq) +{ + irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq; + + if (hw > GIC_IRQ_START) + cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE; +} + +static int crossbar_domain_xlate(struct irq_domain *d, + struct device_node *controller, + const u32 *intspec, unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned long ret; + + ret = allocate_free_irq(intspec[1]); + + if (IS_ERR_VALUE(ret)) + return ret; + + *out_hwirq = ret + GIC_IRQ_START; + return 0; +} + +const struct irq_domain_ops routable_irq_domain_ops = { + .map = crossbar_domain_map, + .unmap = crossbar_domain_unmap, + .xlate = crossbar_domain_xlate +}; + +static int __init crossbar_of_init(struct device_node *node) +{ + int i, size, max, reserved = 0, entry; + const __be32 *irqsr; + + cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); + + if (!cb) + return -ENOMEM; + + cb->crossbar_base = of_iomap(node, 0); + if (!cb->crossbar_base) + goto err1; + + of_property_read_u32(node, "ti,max-irqs", &max); + cb->irq_map = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->irq_map) + goto err2; + + cb->int_max = max; + + for (i = 0; i < max; i++) + cb->irq_map[i] = IRQ_FREE; + + /* Get and mark reserved irqs */ + irqsr = of_get_property(node, "ti,irqs-reserved", &size); + if (irqsr) { + size /= sizeof(__be32); + + for (i = 0; i < size; i++) { + of_property_read_u32_index(node, + "ti,irqs-reserved", + i, &entry); + if (entry > max) { + pr_err("Invalid reserved entry\n"); + goto err3; + } + cb->irq_map[entry] = 0; + } + } + + cb->register_offsets = kzalloc(max * sizeof(int), GFP_KERNEL); + if (!cb->register_offsets) + goto err3; + + of_property_read_u32(node, "ti,reg-size", &size); + + switch (size) { + case 1: + cb->write = crossbar_writeb; + break; + case 2: + cb->write = crossbar_writew; + break; + case 4: + cb->write = crossbar_writel; + break; + default: + pr_err("Invalid reg-size property\n"); + goto err4; + break; + } + + /* + * Register offsets are not linear because of the + * reserved irqs. so find and store the offsets once. 
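+ *
+ * For example, with ti,reg-size = <2> and irq lines 0 and 1 marked
+ * reserved, line 2's crossbar register ends up at offset 0, line 3's
+ * at offset 2, and so on; reserved lines get no offset of their own.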
+ */ + for (i = 0; i < max; i++) { + if (!cb->irq_map[i]) + continue; + + cb->register_offsets[i] = reserved; + reserved += size; + } + + register_routable_domain_ops(&routable_irq_domain_ops); + return 0; + +err4: + kfree(cb->register_offsets); +err3: + kfree(cb->irq_map); +err2: + iounmap(cb->crossbar_base); +err1: + kfree(cb); + return -ENOMEM; +} + +static const struct of_device_id crossbar_match[] __initconst = { + { .compatible = "ti,irq-crossbar" }, + {} +}; + +int __init irqcrossbar_init(void) +{ + struct device_node *np; + np = of_find_matching_node(NULL, crossbar_match); + if (!np) + return -ENODEV; + + crossbar_of_init(np); + return 0; +} diff --git a/include/linux/irqchip/irq-crossbar.h b/include/linux/irqchip/irq-crossbar.h new file mode 100644 index 000000000000..e5537b81df8d --- /dev/null +++ b/include/linux/irqchip/irq-crossbar.h @@ -0,0 +1,11 @@ +/* + * drivers/irqchip/irq-crossbar.h + * + * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +int irqcrossbar_init(void); -- cgit v1.2.3 From 09c978bc7bdcfc3db91801454273a4330e1933bf Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 10 Jan 2014 15:57:27 +0100 Subject: ARM: integrator: switch to fetch clocks from device tree This atomic commit changes the Integrator clock implementation and the machines to register clocks from the device tree and use these instead of the previous hard-coded clocks. In the clock implementation all hard-coded clocks and the special initialization function call goes away, and is replaced by two compatible strings for the two clocks available on the core module. Cc: Mike Turquette Signed-off-by: Linus Walleij --- .../devicetree/bindings/clock/arm-integrator.txt | 34 +++++++++ arch/arm/mach-integrator/integrator_ap.c | 19 +++-- arch/arm/mach-integrator/integrator_cp.c | 6 -- drivers/clk/versatile/clk-integrator.c | 80 +++++++++------------- include/linux/platform_data/clk-integrator.h | 1 - 5 files changed, 80 insertions(+), 60 deletions(-) create mode 100644 Documentation/devicetree/bindings/clock/arm-integrator.txt (limited to 'include') diff --git a/Documentation/devicetree/bindings/clock/arm-integrator.txt b/Documentation/devicetree/bindings/clock/arm-integrator.txt new file mode 100644 index 000000000000..652914b17b95 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/arm-integrator.txt @@ -0,0 +1,34 @@ +Clock bindings for ARM Integrator Core Module clocks + +Auxilary Oscillator Clock + +This is a configurable clock fed from a 24 MHz chrystal, +used for generating e.g. video clocks. It is located on the +core module and there is only one of these. + +This clock node *must* be a subnode of the core module, since +it obtains the base address for it's address range from its +parent node. 
+ + +Required properties: +- compatible: must be "arm,integrator-cm-auxosc" +- #clock-cells: must be <0> + +Optional properties: +- clocks: parent clock(s) + +Example: + +core-module@10000000 { + xtal24mhz: xtal24mhz@24M { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <24000000>; + }; + auxosc: cm_aux_osc@25M { + #clock-cells = <0>; + compatible = "arm,integrator-cm-auxosc"; + clocks = <&xtal24mhz>; + }; +}; diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 17c0fe627435..fedcd2fab094 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -402,10 +403,7 @@ static void __init ap_of_timer_init(void) struct clk *clk; unsigned long rate; - clk = clk_get_sys("ap_timer", NULL); - BUG_ON(IS_ERR(clk)); - clk_prepare_enable(clk); - rate = clk_get_rate(clk); + of_clk_init(NULL); err = of_property_read_string(of_aliases, "arm,timer-primary", &path); @@ -415,6 +413,12 @@ static void __init ap_of_timer_init(void) base = of_iomap(node, 0); if (WARN_ON(!base)) return; + + clk = of_clk_get(node, 0); + BUG_ON(IS_ERR(clk)); + clk_prepare_enable(clk); + rate = clk_get_rate(clk); + writel(0, base + TIMER_CTRL); integrator_clocksource_init(rate, base); @@ -427,6 +431,12 @@ static void __init ap_of_timer_init(void) if (WARN_ON(!base)) return; irq = irq_of_parse_and_map(node, 0); + + clk = of_clk_get(node, 0); + BUG_ON(IS_ERR(clk)); + clk_prepare_enable(clk); + rate = clk_get_rate(clk); + writel(0, base + TIMER_CTRL); integrator_clockevent_init(rate, base, irq); } @@ -440,7 +450,6 @@ static void __init ap_init_irq_of(void) { cm_init(); of_irq_init(fpga_irq_of_match); - integrator_clk_init(false); } /* For the Device Tree, add in the UART callbacks as AUXDATA */ diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index a3ef961e4a93..0ad5f60598c8 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -33,8 +32,6 @@ #include #include #include -#include -#include #include @@ -43,8 +40,6 @@ #include #include -#include - #include #include @@ -250,7 +245,6 @@ static void __init intcp_init_irq_of(void) { cm_init(); of_irq_init(fpga_irq_of_match); - integrator_clk_init(true); } /* diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c index bda8967e09c2..19864b5690e9 100644 --- a/drivers/clk/versatile/clk-integrator.c +++ b/drivers/clk/versatile/clk-integrator.c @@ -10,20 +10,17 @@ #include #include #include -#include - -#include -#include +#include +#include #include "clk-icst.h" -/* - * Implementation of the ARM Integrator/AP and Integrator/CP clock tree. 
- * Inspired by portions of: - * plat-versatile/clock.c and plat-versatile/include/plat/clock.h - */ +#define INTEGRATOR_HDR_LOCK_OFFSET 0x14 -static const struct icst_params cp_auxvco_params = { +/* Base offset for the core module */ +static void __iomem *cm_base; + +static const struct icst_params cp_auxosc_params = { .ref = 24000000, .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, @@ -35,50 +32,37 @@ static const struct icst_params cp_auxvco_params = { .idx2s = icst525_idx2s, }; -static const struct clk_icst_desc __initdata cp_icst_desc = { - .params = &cp_auxvco_params, +static const struct clk_icst_desc __initdata cm_auxosc_desc = { + .params = &cp_auxosc_params, .vco_offset = 0x1c, .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, }; -/* - * integrator_clk_init() - set up the integrator clock tree - * @is_cp: pass true if it's the Integrator/CP else AP is assumed - */ -void __init integrator_clk_init(bool is_cp) +static void __init of_integrator_cm_osc_setup(struct device_node *np) { - struct clk *clk; - - /* APB clock dummy */ - clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0); - clk_register_clkdev(clk, "apb_pclk", NULL); - - /* UART reference clock */ - clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT, - 14745600); - clk_register_clkdev(clk, NULL, "uart0"); - clk_register_clkdev(clk, NULL, "uart1"); - if (is_cp) - clk_register_clkdev(clk, NULL, "mmci"); - - /* 24 MHz clock */ - clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT, - 24000000); - clk_register_clkdev(clk, NULL, "kmi0"); - clk_register_clkdev(clk, NULL, "kmi1"); - if (!is_cp) - clk_register_clkdev(clk, NULL, "ap_timer"); + struct clk *clk = ERR_PTR(-EINVAL); + const char *clk_name = np->name; + const struct clk_icst_desc *desc = &cm_auxosc_desc; - if (!is_cp) - return; + if (!cm_base) { + /* Remap the core module base if not done yet */ + struct device_node *parent; - /* 1 MHz clock */ - clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT, - 1000000); - clk_register_clkdev(clk, NULL, "sp804"); + parent = of_get_parent(np); + if (!np) { + pr_err("no parent on core module clock\n"); + return; + } + cm_base = of_iomap(parent, 0); + if (!cm_base) { + pr_err("could not remap core module base\n"); + return; + } + } - /* ICST VCO clock used on the Integrator/CP CLCD */ - clk = icst_clk_register(NULL, &cp_icst_desc, "icst", - __io_address(INTEGRATOR_HDR_BASE)); - clk_register_clkdev(clk, NULL, "clcd"); + clk = icst_clk_register(NULL, desc, clk_name, cm_base); + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); } +CLK_OF_DECLARE(integrator_cm_auxosc_clk, + "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup); diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h index 280edac9d0a5..addd48cac625 100644 --- a/include/linux/platform_data/clk-integrator.h +++ b/include/linux/platform_data/clk-integrator.h @@ -1,3 +1,2 @@ -void integrator_clk_init(bool is_cp); void integrator_impd1_clk_init(void __iomem *base, unsigned int id); void integrator_impd1_clk_exit(unsigned int id); -- cgit v1.2.3 From e641b987c20832dfaaa51d7792ed928c2b2d2dbf Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 21 Nov 2013 23:11:29 +0100 Subject: irqchip: support cascaded VICs This adds support for a VIC to be cascaded off another IRQ. On the Integrator/AP logical module IM-PD1 there is a VIC cascaded off the central FPGA IRQ controller so this is needed for that to work out. 
In order for the plug-in board to be able to register all the devices with their IRQs relative to the offset of the base obtained for the cascaded VIC, the base IRQ number is passed back to the caller. Cc: Thomas Gleixner Signed-off-by: Linus Walleij --- arch/arm/mach-versatile/core.c | 2 +- drivers/irqchip/irq-vic.c | 53 ++++++++++++++++++++++++++++++++++++----- include/linux/irqchip/arm-vic.h | 6 +++-- 3 files changed, 52 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index a335126ae18f..f2c89fb8fca9 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -108,7 +108,7 @@ void __init versatile_init_irq(void) np = of_find_matching_node_by_address(NULL, vic_of_match, VERSATILE_VIC_BASE); - __vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0, np); + __vic_init(VA_VIC_BASE, 0, IRQ_VIC_START, ~0, 0, np); writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR); diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c index 70108c1491bc..6002942a231c 100644 --- a/drivers/irqchip/irq-vic.c +++ b/drivers/irqchip/irq-vic.c @@ -57,6 +57,7 @@ /** * struct vic_device - VIC PM device + * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0. * @irq: The IRQ number for the base of the VIC. * @base: The register base for the VIC. * @valid_sources: A bitmask of valid interrupts @@ -224,6 +225,17 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) return handled; } +static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) +{ + u32 stat, hwirq; + struct vic_device *vic = irq_desc_get_handler_data(desc); + + while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { + hwirq = ffs(stat) - 1; + generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); + } +} + /* * Keep iterating over all registered VIC's until there are no pending * interrupts. @@ -246,6 +258,7 @@ static struct irq_domain_ops vic_irqdomain_ops = { /** * vic_register() - Register a VIC. * @base: The base address of the VIC. + * @parent_irq: The parent IRQ if cascaded, else 0. * @irq: The base IRQ for the VIC. * @valid_sources: bitmask of valid interrupts * @resume_sources: bitmask of interrupts allowed for resume sources. @@ -257,7 +270,8 @@ static struct irq_domain_ops vic_irqdomain_ops = { * * This also configures the IRQ domain for the VIC. 
*/ -static void __init vic_register(void __iomem *base, unsigned int irq, +static void __init vic_register(void __iomem *base, unsigned int parent_irq, + unsigned int irq, u32 valid_sources, u32 resume_sources, struct device_node *node) { @@ -275,6 +289,12 @@ static void __init vic_register(void __iomem *base, unsigned int irq, v->resume_sources = resume_sources; set_handle_irq(vic_handle_irq); vic_id++; + + if (parent_irq) { + irq_set_handler_data(parent_irq, v); + irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded); + } + v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, &vic_irqdomain_ops, v); /* create an IRQ mapping for each valid IRQ */ @@ -413,10 +433,10 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start, writel(32, base + VIC_PL190_DEF_VECT_ADDR); } - vic_register(base, irq_start, vic_sources, 0, node); + vic_register(base, 0, irq_start, vic_sources, 0, node); } -void __init __vic_init(void __iomem *base, int irq_start, +void __init __vic_init(void __iomem *base, int parent_irq, int irq_start, u32 vic_sources, u32 resume_sources, struct device_node *node) { @@ -453,7 +473,7 @@ void __init __vic_init(void __iomem *base, int irq_start, vic_init2(base); - vic_register(base, irq_start, vic_sources, resume_sources, node); + vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node); } /** @@ -466,7 +486,28 @@ void __init __vic_init(void __iomem *base, int irq_start, void __init vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources) { - __vic_init(base, irq_start, vic_sources, resume_sources, NULL); + __vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL); +} + +/** + * vic_init_cascaded() - initialise a cascaded vectored interrupt controller + * @base: iomem base address + * @parent_irq: the parent IRQ we're cascaded off + * @irq_start: starting interrupt number, must be muliple of 32 + * @vic_sources: bitmask of interrupt sources to allow + * @resume_sources: bitmask of interrupt sources to allow for resume + * + * This returns the base for the new interrupts or negative on error. 
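+ *
+ * A board cascading a VIC off another controller (such as the IM-PD1
+ * module this was written for) can add the returned base to its local
+ * interrupt numbers when registering its devices.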
+ */ +int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq, + u32 vic_sources, u32 resume_sources) +{ + struct vic_device *v; + + v = &vic_devices[vic_id]; + __vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL); + /* Return out acquired base */ + return v->irq; } #ifdef CONFIG_OF @@ -489,7 +530,7 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent) /* * Passing 0 as first IRQ makes the simple domain allocate descriptors */ - __vic_init(regs, 0, interrupt_mask, wakeup_mask, node); + __vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node); return 0; } diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index e3c82dc95756..ba46c794b4e5 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -29,8 +29,10 @@ struct device_node; struct pt_regs; -void __vic_init(void __iomem *base, int irq_start, u32 vic_sources, - u32 resume_sources, struct device_node *node); +void __vic_init(void __iomem *base, int parent_irq, int irq_start, + u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); +int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, + u32 vic_sources, u32 resume_sources); #endif -- cgit v1.2.3 From 67f5185cad24b3c3d9ab07508dfcab55cdab02de Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 30 Jan 2014 13:03:40 +0200 Subject: ARM: davinci: aemif: get rid of davinci-nand driver dependency on aemif The problem that the set timings code contains the call of Davinci platform function davinci_aemif_setup_timing() which is not accessible if kernel is built for another platform like Keystone. The Keysone platform is going to use TI AEMIF driver. If TI AEMIF is used we don't need to set timings and bus width. It is done by AEMIF driver. To get rid of davinci-nand driver dependency on aemif platform code we moved aemif code to davinci platform. The platform AEMIF code (aemif.c) has to be removed once Davinci will be converted to DT and use ti-aemif.c driver. Acked-by: Brian Norris Signed-off-by: Ivan Khoronzhuk [nsekhar@ti.com: fixed checkpatch error and a build breakage due to missing include, rebased onto l2-mtd/master] Signed-off-by: Sekhar Nori --- arch/arm/mach-davinci/aemif.c | 107 +++++++++++++++++++++--- arch/arm/mach-davinci/board-da830-evm.c | 3 + arch/arm/mach-davinci/board-da850-evm.c | 3 + arch/arm/mach-davinci/board-dm644x-evm.c | 5 ++ arch/arm/mach-davinci/board-dm646x-evm.c | 3 + arch/arm/mach-davinci/board-mityomapl138.c | 4 + drivers/mtd/nand/davinci_nand.c | 22 ----- include/linux/platform_data/mtd-davinci-aemif.h | 5 +- 8 files changed, 117 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-davinci/aemif.c b/arch/arm/mach-davinci/aemif.c index f091a9010c2f..ff8b7e76b6e9 100644 --- a/arch/arm/mach-davinci/aemif.c +++ b/arch/arm/mach-davinci/aemif.c @@ -16,6 +16,7 @@ #include #include +#include /* Timing value configuration */ @@ -43,6 +44,17 @@ WSTROBE(WSTROBE_MAX) | \ WSETUP(WSETUP_MAX)) +static inline unsigned int davinci_aemif_readl(void __iomem *base, int offset) +{ + return readl_relaxed(base + offset); +} + +static inline void davinci_aemif_writel(void __iomem *base, + int offset, unsigned long value) +{ + writel_relaxed(value, base + offset); +} + /* * aemif_calc_rate - calculate timing data. * @wanted: The cycle time needed in nanoseconds. 
@@ -76,6 +88,7 @@ static int aemif_calc_rate(int wanted, unsigned long clk, int max) * @t: timing values to be progammed * @base: The virtual base address of the AEMIF interface * @cs: chip-select to program the timing values for + * @clkrate: the AEMIF clkrate * * This function programs the given timing values (in real clock) into the * AEMIF registers taking the AEMIF clock into account. @@ -86,24 +99,17 @@ static int aemif_calc_rate(int wanted, unsigned long clk, int max) * * Returns 0 on success, else negative errno. */ -int davinci_aemif_setup_timing(struct davinci_aemif_timing *t, - void __iomem *base, unsigned cs) +static int davinci_aemif_setup_timing(struct davinci_aemif_timing *t, + void __iomem *base, unsigned cs, + unsigned long clkrate) { unsigned set, val; int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup; unsigned offset = A1CR_OFFSET + cs * 4; - struct clk *aemif_clk; - unsigned long clkrate; if (!t) return 0; /* Nothing to do */ - aemif_clk = clk_get(NULL, "aemif"); - if (IS_ERR(aemif_clk)) - return PTR_ERR(aemif_clk); - - clkrate = clk_get_rate(aemif_clk); - clkrate /= 1000; /* turn clock into kHz for ease of use */ ta = aemif_calc_rate(t->ta, clkrate, TA_MAX); @@ -130,4 +136,83 @@ int davinci_aemif_setup_timing(struct davinci_aemif_timing *t, return 0; } -EXPORT_SYMBOL(davinci_aemif_setup_timing); + +/** + * davinci_aemif_setup - setup AEMIF interface by davinci_nand_pdata + * @pdev - link to platform device to setup settings for + * + * This function does not use any locking while programming the AEMIF + * because it is expected that there is only one user of a given + * chip-select. + * + * Returns 0 on success, else negative errno. + */ +int davinci_aemif_setup(struct platform_device *pdev) +{ + struct davinci_nand_pdata *pdata = dev_get_platdata(&pdev->dev); + uint32_t val; + unsigned long clkrate; + struct resource *res; + void __iomem *base; + struct clk *clk; + int ret = 0; + + clk = clk_get(&pdev->dev, "aemif"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(clk); + if (ret < 0) { + dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n", + ret); + goto err_put; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); + ret = -ENOMEM; + goto err; + } + + base = ioremap(res->start, resource_size(res)); + if (!base) { + dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res); + ret = -ENOMEM; + goto err; + } + + /* + * Setup Async configuration register in case we did not boot + * from NAND and so bootloader did not bother to set it up. 
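+ *
+ * This mirrors the setup the davinci_nand driver used to do in its
+ * probe routine before that code was moved here.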
+ */ + val = davinci_aemif_readl(base, A1CR_OFFSET + pdev->id * 4); + /* + * Extended Wait is not valid and Select Strobe mode is not + * used + */ + val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK); + if (pdata->options & NAND_BUSWIDTH_16) + val |= 0x1; + + davinci_aemif_writel(base, A1CR_OFFSET + pdev->id * 4, val); + + clkrate = clk_get_rate(clk); + + if (pdata->timing) + ret = davinci_aemif_setup_timing(pdata->timing, base, pdev->id, + clkrate); + + if (ret < 0) + dev_dbg(&pdev->dev, "NAND timing values setup fail\n"); + + iounmap(base); +err: + clk_disable_unprepare(clk); +err_put: + clk_put(clk); + return ret; +} diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index d1f45af7a530..5623131c4f0b 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -419,6 +419,9 @@ static inline void da830_evm_init_nand(int mux_mode) if (ret) pr_warning("da830_evm_init: NAND device not registered.\n"); + if (davinci_aemif_setup(&da830_evm_nand_device)) + pr_warn("%s: Cannot configure AEMIF.\n", __func__); + gpio_direction_output(mux_mode, 1); } #else diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index e0af0eccde8f..234c5bb091f5 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -358,6 +358,9 @@ static inline void da850_evm_setup_nor_nand(void) platform_add_devices(da850_evm_devices, ARRAY_SIZE(da850_evm_devices)); + + if (davinci_aemif_setup(&da850_evm_nandflash_device)) + pr_warn("%s: Cannot configure AEMIF.\n", __func__); } } diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 987605b78556..5602957b67d7 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -778,6 +778,11 @@ static __init void davinci_evm_init(void) /* only one device will be jumpered and detected */ if (HAS_NAND) { platform_device_register(&davinci_evm_nandflash_device); + + if (davinci_aemif_setup(&davinci_evm_nandflash_device)) + pr_warn("%s: Cannot configure AEMIF.\n", + __func__); + evm_leds[7].default_trigger = "nand-disk"; if (HAS_NOR) pr_warning("WARNING: both NAND and NOR flash " diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 13d0801fd6b1..ae129bc49273 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -805,6 +805,9 @@ static __init void evm_init(void) platform_device_register(&davinci_nand_device); + if (davinci_aemif_setup(&davinci_nand_device)) + pr_warn("%s: Cannot configure AEMIF.\n", __func__); + dm646x_init_edma(dm646x_edma_rsv); if (HAS_ATA) diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 7aa105b1fd0f..96fc00a167f5 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -432,6 +433,9 @@ static void __init mityomapl138_setup_nand(void) { platform_add_devices(mityomapl138_devices, ARRAY_SIZE(mityomapl138_devices)); + + if (davinci_aemif_setup(&mityomapl138_nandflash_device)) + pr_warn("%s: Cannot configure AEMIF.\n", __func__); } static const short mityomap_mii_pins[] = { diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index a4989ec6292e..8eb6a36f125a 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ 
b/drivers/mtd/nand/davinci_nand.c @@ -746,28 +746,6 @@ static int nand_davinci_probe(struct platform_device *pdev) goto err_clk_enable; } - /* - * Setup Async configuration register in case we did not boot from - * NAND and so bootloader did not bother to set it up. - */ - val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4); - - /* Extended Wait is not valid and Select Strobe mode is not used */ - val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK); - if (info->chip.options & NAND_BUSWIDTH_16) - val |= 0x1; - - davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val); - - ret = 0; - if (info->timing) - ret = davinci_aemif_setup_timing(info->timing, info->base, - info->core_chipsel); - if (ret < 0) { - dev_dbg(&pdev->dev, "NAND timing values setup fail\n"); - goto err; - } - spin_lock_irq(&davinci_nand_lock); /* put CSxNAND into NAND mode */ diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index 05b293443097..97948ac2bb9b 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -10,6 +10,8 @@ #ifndef _MACH_DAVINCI_AEMIF_H #define _MACH_DAVINCI_AEMIF_H +#include + #define NRCSR_OFFSET 0x00 #define AWCCR_OFFSET 0x04 #define A1CR_OFFSET 0x10 @@ -31,6 +33,5 @@ struct davinci_aemif_timing { u8 ta; }; -int davinci_aemif_setup_timing(struct davinci_aemif_timing *t, - void __iomem *base, unsigned cs); +int davinci_aemif_setup(struct platform_device *pdev); #endif -- cgit v1.2.3 From b998da0541e64da5bd857c09347c0fa954c03432 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Thu, 6 Feb 2014 09:25:01 +0900 Subject: ARM: shmobile: r8a7790: Correct SYS DMAC clock defines This brings the implementation into line with the documentation. This problem was introduced when SYS DMAC clock defines were added by ac991dce6498b5fc ("ARM: shmobile: r8a7790: Add clock index macros for DT sources") in v3.13-rc2. I do not believe this results in any problems as these defines do not appear to be used anywhere yet. Acked-by: Laurent Pinchart Signed-off-by: Simon Horman --- include/dt-bindings/clock/r8a7790-clock.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h index 859e9be511d9..6548a5fbcf4a 100644 --- a/include/dt-bindings/clock/r8a7790-clock.h +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -46,8 +46,8 @@ #define R8A7790_CLK_MSIOF1 8 #define R8A7790_CLK_MSIOF3 15 #define R8A7790_CLK_SCIFB2 16 -#define R8A7790_CLK_SYS_DMAC0 18 -#define R8A7790_CLK_SYS_DMAC1 19 +#define R8A7790_CLK_SYS_DMAC1 18 +#define R8A7790_CLK_SYS_DMAC0 19 /* MSTP3 */ #define R8A7790_CLK_TPU0 4 -- cgit v1.2.3 From 1f27f15258bfee2ae85240e9505186dd959d2892 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 14 Feb 2014 12:29:18 -0600 Subject: clk: bcm281xx: add initial clock framework support Add code for device tree support of clocks in the BCM281xx family of SoCs. Machines in this family use peripheral clocks implemented by "Kona" clock control units (CCUs). (Other Broadcom SoC families use Kona style CCUs as well, but support for them is not yet upstream.) A BCM281xx SoC has multiple CCUs, each of which manages a set of clocks on the SoC. A Kona peripheral clock is composite clock that may include a gate, a parent clock multiplexor, and zero, one or two dividers. 
There is a variety of gate types, and many gates implement hardware-managed gating (often called "auto-gating"). Most dividers divide their input clock signal by an integer value (one or more). There are also "fractional" dividers which allow division by non-integer values. To accomodate such dividers, clock rates and dividers are generally maintained by the code in "scaled" form, which allows integer and fractional dividers to be handled in a uniform way. If present, the gate for a Kona peripheral clock must be enabled when a change is made to its multiplexor or one of its dividers. Additionally, dividers and multiplexors have trigger registers which must be used whenever the divider value or selected parent clock is changed. The same trigger is often used for a divider and multiplexor, and a BCM281xx peripheral clock occasionally has two triggers. The gate, dividers, and parent clock selector are treated in this code as "components" of a peripheral clock. Their functionality is implemented directly--e.g. the common clock framework gate implementation is not used for a Kona peripheral clock gate. (This has being considered though, and the intention is to evolve this code to leverage common code as much as possible.) The source code is divided into three general portions: drivers/clk/bcm/clk-kona.h drivers/clk/bcm/clk-kona.c These implement the basic Kona clock functionality, including the clk_ops methods and various routines to manipulate registers and interpret their values. This includes some functions used to set clocks to a desired initial state (though this feature is only partially implemented here). drivers/clk/bcm/clk-kona-setup.c This contains generic run-time initialization code for data structures representing Kona CCUs and clocks. This encapsulates the clock structure initialization that can't be done statically. Note that there is a great deal of validity-checking code here, making explicit certain assumptions in the code. This is mostly useful for adding new clock definitions and could possibly be disabled for production use. drivers/clk/bcm/clk-bcm281xx.c This file defines the specific CCUs used by BCM281XX family SoCs, as well as the specific clocks implemented by each. It declares a device tree clock match entry for each CCU defined. include/dt-bindings/clock/bcm281xx.h This file defines the selector (index) values used to identify a particular clock provided by a CCU. It consists entirely of C preprocessor constants, to be used by both the C source and device tree source files. 
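As an illustration (not something this patch adds), the same selector
constant from that header is used both when the CCU code registers the
clock and when a device tree node consumes it; the consumer node, its
address and the slave_ccu label below are made up for the example:

    /* C side, from bcm281xx_slave_ccu_clks_setup() below */
    PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb);

    /* Device tree side: a hypothetical peripheral fed by the slave CCU */
    uart@3e000000 {
            clocks = <&slave_ccu BCM281XX_SLAVE_CCU_UARTB>;
    };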
Signed-off-by: Alex Elder Reviewed-by: Tim Kryger Reviewed-by: Matt Porter Acked-by: Mike Turquette Signed-off-by: Matt Porter --- drivers/clk/Kconfig | 1 + drivers/clk/Makefile | 1 + drivers/clk/bcm/Kconfig | 9 + drivers/clk/bcm/Makefile | 3 + drivers/clk/bcm/clk-bcm281xx.c | 416 ++++++++++++++ drivers/clk/bcm/clk-kona-setup.c | 769 +++++++++++++++++++++++++ drivers/clk/bcm/clk-kona.c | 1033 ++++++++++++++++++++++++++++++++++ drivers/clk/bcm/clk-kona.h | 410 ++++++++++++++ include/dt-bindings/clock/bcm281xx.h | 65 +++ 9 files changed, 2707 insertions(+) create mode 100644 drivers/clk/bcm/Kconfig create mode 100644 drivers/clk/bcm/Makefile create mode 100644 drivers/clk/bcm/clk-bcm281xx.c create mode 100644 drivers/clk/bcm/clk-kona-setup.c create mode 100644 drivers/clk/bcm/clk-kona.c create mode 100644 drivers/clk/bcm/clk-kona.h create mode 100644 include/dt-bindings/clock/bcm281xx.h (limited to 'include') diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 7641965d208d..f9f605695e40 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -111,4 +111,5 @@ source "drivers/clk/qcom/Kconfig" endmenu +source "drivers/clk/bcm/Kconfig" source "drivers/clk/mvebu/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index a367a9831717..88af4a399d6c 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o obj-$(CONFIG_COMMON_CLK_AT91) += at91/ +obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/ obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ ifeq ($(CONFIG_COMMON_CLK), y) diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig new file mode 100644 index 000000000000..a7262fb8ce55 --- /dev/null +++ b/drivers/clk/bcm/Kconfig @@ -0,0 +1,9 @@ +config CLK_BCM_KONA + bool "Broadcom Kona CCU clock support" + depends on ARCH_BCM_MOBILE + depends on COMMON_CLK + default y + help + Enable common clock framework support for Broadcom SoCs + using "Kona" style clock control units, including those + in the BCM281xx family. diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile new file mode 100644 index 000000000000..cf93359aa862 --- /dev/null +++ b/drivers/clk/bcm/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o +obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o +obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c new file mode 100644 index 000000000000..3c66de696aeb --- /dev/null +++ b/drivers/clk/bcm/clk-bcm281xx.c @@ -0,0 +1,416 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "clk-kona.h" +#include "dt-bindings/clock/bcm281xx.h" + +/* bcm11351 CCU device tree "compatible" strings */ +#define BCM11351_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu" +#define BCM11351_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu" +#define BCM11351_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu" +#define BCM11351_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu" +#define BCM11351_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu" + +/* Root CCU clocks */ + +static struct peri_clk_data frac_1m_data = { + .gate = HW_SW_GATE(0x214, 16, 0, 1), + .trig = TRIGGER(0x0e04, 0), + .div = FRAC_DIVIDER(0x0e00, 0, 22, 16), + .clocks = CLOCKS("ref_crystal"), +}; + +/* AON CCU clocks */ + +static struct peri_clk_data hub_timer_data = { + .gate = HW_SW_GATE(0x0414, 16, 0, 1), + .clocks = CLOCKS("bbl_32k", + "frac_1m", + "dft_19_5m"), + .sel = SELECTOR(0x0a10, 0, 2), + .trig = TRIGGER(0x0a40, 4), +}; + +static struct peri_clk_data pmu_bsc_data = { + .gate = HW_SW_GATE(0x0418, 16, 0, 1), + .clocks = CLOCKS("ref_crystal", + "pmu_bsc_var", + "bbl_32k"), + .sel = SELECTOR(0x0a04, 0, 2), + .div = DIVIDER(0x0a04, 3, 4), + .trig = TRIGGER(0x0a40, 0), +}; + +static struct peri_clk_data pmu_bsc_var_data = { + .clocks = CLOCKS("var_312m", + "ref_312m"), + .sel = SELECTOR(0x0a00, 0, 2), + .div = DIVIDER(0x0a00, 4, 5), + .trig = TRIGGER(0x0a40, 2), +}; + +/* Hub CCU clocks */ + +static struct peri_clk_data tmon_1m_data = { + .gate = HW_SW_GATE(0x04a4, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "frac_1m"), + .sel = SELECTOR(0x0e74, 0, 2), + .trig = TRIGGER(0x0e84, 1), +}; + +/* Master CCU clocks */ + +static struct peri_clk_data sdio1_data = { + .gate = HW_SW_GATE(0x0358, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a28, 0, 3), + .div = DIVIDER(0x0a28, 4, 14), + .trig = TRIGGER(0x0afc, 9), +}; + +static struct peri_clk_data sdio2_data = { + .gate = HW_SW_GATE(0x035c, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a2c, 0, 3), + .div = DIVIDER(0x0a2c, 4, 14), + .trig = TRIGGER(0x0afc, 10), +}; + +static struct peri_clk_data sdio3_data = { + .gate = HW_SW_GATE(0x0364, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a34, 0, 3), + .div = DIVIDER(0x0a34, 4, 14), + .trig = TRIGGER(0x0afc, 12), +}; + +static struct peri_clk_data sdio4_data = { + .gate = HW_SW_GATE(0x0360, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a30, 0, 3), + .div = DIVIDER(0x0a30, 4, 14), + .trig = TRIGGER(0x0afc, 11), +}; + +static struct peri_clk_data usb_ic_data = { + .gate = HW_SW_GATE(0x0354, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .div = FIXED_DIVIDER(2), + .sel = SELECTOR(0x0a24, 0, 2), + .trig = TRIGGER(0x0afc, 7), +}; + +/* also called usbh_48m */ +static struct peri_clk_data hsic2_48m_data = { + .gate = HW_SW_GATE(0x0370, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a38, 0, 2), + .div = FIXED_DIVIDER(2), + .trig = TRIGGER(0x0afc, 5), +}; + +/* also called usbh_12m */ +static struct peri_clk_data hsic2_12m_data = { + .gate = HW_SW_GATE(0x0370, 20, 4, 5), + .div = DIVIDER(0x0a38, 12, 2), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .pre_div = FIXED_DIVIDER(2), + .sel = SELECTOR(0x0a38, 0, 2), + .trig = TRIGGER(0x0afc, 5), +}; + +/* Slave CCU clocks */ + 
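+
+/*
+ * As with the CCU sections above, each peri_clk_data entry simply
+ * names the registers making up one peripheral clock: its gate, its
+ * parent selector, its (possibly fractional) divider and the trigger
+ * used when the selector or divider is changed.  uartb, for instance,
+ * is gated at offset 0x0400, selects among ref_crystal, var_156m and
+ * ref_156m at 0x0a10, and uses bit 2 of the trigger at 0x0afc.
+ */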
+static struct peri_clk_data uartb_data = { + .gate = HW_SW_GATE(0x0400, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a10, 0, 2), + .div = FRAC_DIVIDER(0x0a10, 4, 12, 8), + .trig = TRIGGER(0x0afc, 2), +}; + +static struct peri_clk_data uartb2_data = { + .gate = HW_SW_GATE(0x0404, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a14, 0, 2), + .div = FRAC_DIVIDER(0x0a14, 4, 12, 8), + .trig = TRIGGER(0x0afc, 3), +}; + +static struct peri_clk_data uartb3_data = { + .gate = HW_SW_GATE(0x0408, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a18, 0, 2), + .div = FRAC_DIVIDER(0x0a18, 4, 12, 8), + .trig = TRIGGER(0x0afc, 4), +}; + +static struct peri_clk_data uartb4_data = { + .gate = HW_SW_GATE(0x0408, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a1c, 0, 2), + .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8), + .trig = TRIGGER(0x0afc, 5), +}; + +static struct peri_clk_data ssp0_data = { + .gate = HW_SW_GATE(0x0410, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a20, 0, 3), + .div = DIVIDER(0x0a20, 4, 14), + .trig = TRIGGER(0x0afc, 6), +}; + +static struct peri_clk_data ssp2_data = { + .gate = HW_SW_GATE(0x0418, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a28, 0, 3), + .div = DIVIDER(0x0a28, 4, 14), + .trig = TRIGGER(0x0afc, 8), +}; + +static struct peri_clk_data bsc1_data = { + .gate = HW_SW_GATE(0x0458, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a64, 0, 3), + .trig = TRIGGER(0x0afc, 23), +}; + +static struct peri_clk_data bsc2_data = { + .gate = HW_SW_GATE(0x045c, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a68, 0, 3), + .trig = TRIGGER(0x0afc, 24), +}; + +static struct peri_clk_data bsc3_data = { + .gate = HW_SW_GATE(0x0484, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a84, 0, 3), + .trig = TRIGGER(0x0b00, 2), +}; + +static struct peri_clk_data pwm_data = { + .gate = HW_SW_GATE(0x0468, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m"), + .sel = SELECTOR(0x0a70, 0, 2), + .div = DIVIDER(0x0a70, 4, 3), + .trig = TRIGGER(0x0afc, 15), +}; + +/* + * CCU setup routines + * + * These are called from kona_dt_ccu_setup() to initialize the array + * of clocks provided by the CCU. Once allocated, the entries in + * the array are initialized by calling kona_clk_setup() with the + * initialization data for each clock. They return 0 if successful + * or an error code otherwise. 
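+ *
+ * Each of them follows the same pattern: allocate the clks array,
+ * record it and its size in ccu->data, then call PERI_CLK_SETUP()
+ * once for every clock index this CCU provides.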
+ */ +static int __init bcm281xx_root_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_ROOT_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate root clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_ROOT_CCU_FRAC_1M, frac_1m); + + return 0; +} + +static int __init bcm281xx_aon_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_AON_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate aon clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_HUB_TIMER, hub_timer); + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC, pmu_bsc); + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC_VAR, pmu_bsc_var); + + return 0; +} + +static int __init bcm281xx_hub_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_HUB_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate hub clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_HUB_CCU_TMON_1M, tmon_1m); + + return 0; +} + +static int __init bcm281xx_master_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_MASTER_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate master clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO1, sdio1); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO2, sdio2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO3, sdio3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO4, sdio4); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_USB_IC, usb_ic); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_48M, hsic2_48m); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_12M, hsic2_12m); + + return 0; +} + +static int __init bcm281xx_slave_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_SLAVE_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate slave clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB2, uartb2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB3, uartb3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB4, uartb4); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP0, ssp0); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP2, ssp2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC1, bsc1); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC2, bsc2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC3, bsc3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm); + + return 0; +} + +/* Device tree match table callback functions */ + +static void __init kona_dt_root_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_root_ccu_clks_setup); +} + +static void __init kona_dt_aon_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, 
bcm281xx_aon_ccu_clks_setup); +} + +static void __init kona_dt_hub_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_hub_ccu_clks_setup); +} + +static void __init kona_dt_master_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_master_ccu_clks_setup); +} + +static void __init kona_dt_slave_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_slave_ccu_clks_setup); +} + +CLK_OF_DECLARE(bcm11351_root_ccu, BCM11351_DT_ROOT_CCU_COMPAT, + kona_dt_root_ccu_setup); +CLK_OF_DECLARE(bcm11351_aon_ccu, BCM11351_DT_AON_CCU_COMPAT, + kona_dt_aon_ccu_setup); +CLK_OF_DECLARE(bcm11351_hub_ccu, BCM11351_DT_HUB_CCU_COMPAT, + kona_dt_hub_ccu_setup); +CLK_OF_DECLARE(bcm11351_master_ccu, BCM11351_DT_MASTER_CCU_COMPAT, + kona_dt_master_ccu_setup); +CLK_OF_DECLARE(bcm11351_slave_ccu, BCM11351_DT_SLAVE_CCU_COMPAT, + kona_dt_slave_ccu_setup); diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c new file mode 100644 index 000000000000..f1e88fe6bb4c --- /dev/null +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -0,0 +1,769 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "clk-kona.h" + +/* These are used when a selector or trigger is found to be unneeded */ +#define selector_clear_exists(sel) ((sel)->width = 0) +#define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS) + +LIST_HEAD(ccu_list); /* The list of set up CCUs */ + +/* Validity checking */ + +static bool clk_requires_trigger(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri = bcm_clk->peri; + struct bcm_clk_sel *sel; + struct bcm_clk_div *div; + + if (bcm_clk->type != bcm_clk_peri) + return false; + + sel = &peri->sel; + if (sel->parent_count && selector_exists(sel)) + return true; + + div = &peri->div; + if (!divider_exists(div)) + return false; + + /* Fixed dividers don't need triggers */ + if (!divider_is_fixed(div)) + return true; + + div = &peri->pre_div; + + return divider_exists(div) && !divider_is_fixed(div); +} + +static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri; + struct bcm_clk_gate *gate; + struct bcm_clk_div *div; + struct bcm_clk_sel *sel; + struct bcm_clk_trig *trig; + const char *name; + u32 range; + u32 limit; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + peri = bcm_clk->peri; + name = bcm_clk->name; + range = bcm_clk->ccu->range; + + limit = range - sizeof(u32); + limit = round_down(limit, sizeof(u32)); + + gate = &peri->gate; + if (gate_exists(gate)) { + if (gate->offset > limit) { + pr_err("%s: bad gate offset for %s (%u > %u)\n", + __func__, name, gate->offset, limit); + return false; + } + } + + div = &peri->div; + if (divider_exists(div)) { + if (div->offset > limit) { + pr_err("%s: bad divider offset for %s (%u > %u)\n", + __func__, name, div->offset, limit); + return false; + } + } + + div = &peri->pre_div; + if (divider_exists(div)) { + if (div->offset > limit) { + pr_err("%s: bad pre-divider offset for %s " + "(%u > %u)\n", + __func__, name, div->offset, 
limit); + return false; + } + } + + sel = &peri->sel; + if (selector_exists(sel)) { + if (sel->offset > limit) { + pr_err("%s: bad selector offset for %s (%u > %u)\n", + __func__, name, sel->offset, limit); + return false; + } + } + + trig = &peri->trig; + if (trigger_exists(trig)) { + if (trig->offset > limit) { + pr_err("%s: bad trigger offset for %s (%u > %u)\n", + __func__, name, trig->offset, limit); + return false; + } + } + + trig = &peri->pre_trig; + if (trigger_exists(trig)) { + if (trig->offset > limit) { + pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n", + __func__, name, trig->offset, limit); + return false; + } + } + + return true; +} + +/* A bit position must be less than the number of bits in a 32-bit register. */ +static bool bit_posn_valid(u32 bit_posn, const char *field_name, + const char *clock_name) +{ + u32 limit = BITS_PER_BYTE * sizeof(u32) - 1; + + if (bit_posn > limit) { + pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__, + field_name, clock_name, bit_posn, limit); + return false; + } + return true; +} + +/* + * A bitfield must be at least 1 bit wide. Both the low-order and + * high-order bits must lie within a 32-bit register. We require + * fields to be less than 32 bits wide, mainly because we use + * shifting to produce field masks, and shifting a full word width + * is not well-defined by the C standard. + */ +static bool bitfield_valid(u32 shift, u32 width, const char *field_name, + const char *clock_name) +{ + u32 limit = BITS_PER_BYTE * sizeof(u32); + + if (!width) { + pr_err("%s: bad %s field width 0 for %s\n", __func__, + field_name, clock_name); + return false; + } + if (shift + width > limit) { + pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__, + field_name, clock_name, shift, width, limit); + return false; + } + return true; +} + +/* + * All gates, if defined, have a status bit, and for hardware-only + * gates, that's it. Gates that can be software controlled also + * have an enable bit. And a gate that can be hardware or software + * controlled will have a hardware/software select bit. + */ +static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name, + const char *clock_name) +{ + if (!bit_posn_valid(gate->status_bit, "gate status", clock_name)) + return false; + + if (gate_is_sw_controllable(gate)) { + if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name)) + return false; + + if (gate_is_hw_controllable(gate)) { + if (!bit_posn_valid(gate->hw_sw_sel_bit, + "gate hw/sw select", + clock_name)) + return false; + } + } else { + BUG_ON(!gate_is_hw_controllable(gate)); + } + + return true; +} + +/* + * A selector bitfield must be valid. Its parent_sel array must + * also be reasonable for the field. + */ +static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name, + const char *clock_name) +{ + if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name)) + return false; + + if (sel->parent_count) { + u32 max_sel; + u32 limit; + + /* + * Make sure the selector field can hold all the + * selector values we expect to be able to use. A + * clock only needs to have a selector defined if it + * has more than one parent. And in that case the + * highest selector value will be in the last entry + * in the array. 
+                 */
+                max_sel = sel->parent_sel[sel->parent_count - 1];
+                limit = (1 << sel->width) - 1;
+                if (max_sel > limit) {
+                        pr_err("%s: bad selector for %s "
+                                "(%u needs > %u bits)\n",
+                                __func__, clock_name, max_sel,
+                                sel->width);
+                        return false;
+                }
+        } else {
+                pr_warn("%s: ignoring selector for %s (no parents)\n",
+                        __func__, clock_name);
+                selector_clear_exists(sel);
+                kfree(sel->parent_sel);
+                sel->parent_sel = NULL;
+        }
+
+        return true;
+}
+
+/*
+ * A fixed divider just needs to be non-zero.  A variable divider
+ * has to have a valid divider bitfield, and if it has a fraction,
+ * the width of the fraction must be no more than the width of
+ * the divider as a whole.
+ */
+static bool div_valid(struct bcm_clk_div *div, const char *field_name,
+                        const char *clock_name)
+{
+        if (divider_is_fixed(div)) {
+                /* Any fixed divider value but 0 is OK */
+                if (div->fixed == 0) {
+                        pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
+                                field_name, clock_name);
+                        return false;
+                }
+                return true;
+        }
+        if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+                return false;
+
+        if (divider_has_fraction(div))
+                if (div->frac_width > div->width) {
+                        pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
+                                __func__, field_name, clock_name,
+                                div->frac_width, div->width);
+                        return false;
+                }
+
+        return true;
+}
+
+/*
+ * If a clock has two dividers, the combined number of fractional
+ * bits must be representable in a 32-bit unsigned value.  This
+ * is because we scale up a dividend using both dividers before
+ * dividing to improve accuracy, and we need to avoid overflow.
+ */
+static bool kona_dividers_valid(struct kona_clk *bcm_clk)
+{
+        struct peri_clk_data *peri = bcm_clk->peri;
+        struct bcm_clk_div *div;
+        struct bcm_clk_div *pre_div;
+        u32 limit;
+
+        BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+        if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
+                return true;
+
+        div = &peri->div;
+        pre_div = &peri->pre_div;
+        if (divider_is_fixed(div) || divider_is_fixed(pre_div))
+                return true;
+
+        limit = BITS_PER_BYTE * sizeof(u32);
+
+        return div->frac_width + pre_div->frac_width <= limit;
+}
+
+
+/* A trigger just needs to represent a valid bit position */
+static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
+                        const char *clock_name)
+{
+        return bit_posn_valid(trig->bit, field_name, clock_name);
+}
+
+/* Determine whether the set of peripheral clock registers is valid. */
+static bool
+peri_clk_data_valid(struct kona_clk *bcm_clk)
+{
+        struct peri_clk_data *peri;
+        struct bcm_clk_gate *gate;
+        struct bcm_clk_sel *sel;
+        struct bcm_clk_div *div;
+        struct bcm_clk_div *pre_div;
+        struct bcm_clk_trig *trig;
+        const char *name;
+
+        BUG_ON(bcm_clk->type != bcm_clk_peri);
+
+        /*
+         * First validate register offsets.  This is the only place
+         * where we need something from the ccu, so we do these
+         * together.
+         */
+        if (!peri_clk_data_offsets_valid(bcm_clk))
+                return false;
+
+        peri = bcm_clk->peri;
+        name = bcm_clk->name;
+        gate = &peri->gate;
+        if (gate_exists(gate) && !gate_valid(gate, "gate", name))
+                return false;
+
+        sel = &peri->sel;
+        if (selector_exists(sel)) {
+                if (!sel_valid(sel, "selector", name))
+                        return false;
+
+        } else if (sel->parent_count > 1) {
+                pr_err("%s: multiple parents but no selector for %s\n",
+                        __func__, name);
+
+                return false;
+        }
+
+        div = &peri->div;
+        pre_div = &peri->pre_div;
+        if (divider_exists(div)) {
+                if (!div_valid(div, "divider", name))
+                        return false;
+
+                if (divider_exists(pre_div))
+                        if (!div_valid(pre_div, "pre-divider", name))
+                                return false;
+        } else if (divider_exists(pre_div)) {
+                pr_err("%s: pre-divider but no divider for %s\n", __func__,
+                        name);
+                return false;
+        }
+
+        trig = &peri->trig;
+        if (trigger_exists(trig)) {
+                if (!trig_valid(trig, "trigger", name))
+                        return false;
+
+                if (trigger_exists(&peri->pre_trig)) {
+                        if (!trig_valid(&peri->pre_trig, "pre-trigger", name)) {
+                                return false;
+                        }
+                }
+                if (!clk_requires_trigger(bcm_clk)) {
+                        pr_warn("%s: ignoring trigger for %s (not needed)\n",
+                                __func__, name);
+                        trigger_clear_exists(trig);
+                }
+        } else if (trigger_exists(&peri->pre_trig)) {
+                pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
+                        name);
+                return false;
+        } else if (clk_requires_trigger(bcm_clk)) {
+                pr_err("%s: required trigger missing for %s\n", __func__,
+                        name);
+                return false;
+        }
+
+        return kona_dividers_valid(bcm_clk);
+}
+
+static bool kona_clk_valid(struct kona_clk *bcm_clk)
+{
+        switch (bcm_clk->type) {
+        case bcm_clk_peri:
+                if (!peri_clk_data_valid(bcm_clk))
+                        return false;
+                break;
+        default:
+                pr_err("%s: unrecognized clock type (%d)\n", __func__,
+                        (int)bcm_clk->type);
+                return false;
+        }
+        return true;
+}
+
+/*
+ * Scan an array of parent clock names to determine whether there
+ * are any entries containing BAD_CLK_NAME.  Such entries are
+ * placeholders for non-supported clocks.  Keep track of the
+ * position of each clock name in the original array.
+ *
+ * Allocates an array of pointers to hold the names of all
+ * non-null entries in the original array, and returns a pointer to
+ * that array in *names.  This will be used for registering the
+ * clock with the common clock code.  On successful return,
+ * *count indicates how many entries are in that names array.
+ *
+ * If there is more than one entry in the resulting names array,
+ * another array is allocated to record the parent selector value
+ * for each (defined) parent clock.  This is the value that
+ * represents this parent clock in the clock's source selector
+ * register.  The position of the clock in the original parent array
+ * defines that selector value.  The number of entries in this array
+ * is the same as the number of entries in the parent names array.
+ *
+ * The array of selector values is returned.  If the clock has no
+ * parents, no selector is required and a null pointer is returned.
+ *
+ * Returns a null pointer if the clock names array supplied was
+ * null.  (This is not an error.)
+ *
+ * Returns a pointer-coded error if an error occurs.
+ */ +static u32 *parent_process(const char *clocks[], + u32 *count, const char ***names) +{ + static const char **parent_names; + static u32 *parent_sel; + const char **clock; + u32 parent_count; + u32 bad_count = 0; + u32 orig_count; + u32 i; + u32 j; + + *count = 0; /* In case of early return */ + *names = NULL; + if (!clocks) + return NULL; + + /* + * Count the number of names in the null-terminated array, + * and find out how many of those are actually clock names. + */ + for (clock = clocks; *clock; clock++) + if (*clock == BAD_CLK_NAME) + bad_count++; + orig_count = (u32)(clock - clocks); + parent_count = orig_count - bad_count; + + /* If all clocks are unsupported, we treat it as no clock */ + if (!parent_count) + return NULL; + + /* Avoid exceeding our parent clock limit */ + if (parent_count > PARENT_COUNT_MAX) { + pr_err("%s: too many parents (%u > %u)\n", __func__, + parent_count, PARENT_COUNT_MAX); + return ERR_PTR(-EINVAL); + } + + /* + * There is one parent name for each defined parent clock. + * We also maintain an array containing the selector value + * for each defined clock. If there's only one clock, the + * selector is not required, but we allocate space for the + * array anyway to keep things simple. + */ + parent_names = kmalloc(parent_count * sizeof(parent_names), GFP_KERNEL); + if (!parent_names) { + pr_err("%s: error allocating %u parent names\n", __func__, + parent_count); + return ERR_PTR(-ENOMEM); + } + + /* There is at least one parent, so allocate a selector array */ + + parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL); + if (!parent_sel) { + pr_err("%s: error allocating %u parent selectors\n", __func__, + parent_count); + kfree(parent_names); + + return ERR_PTR(-ENOMEM); + } + + /* Now fill in the parent names and selector arrays */ + for (i = 0, j = 0; i < orig_count; i++) { + if (clocks[i] != BAD_CLK_NAME) { + parent_names[j] = clocks[i]; + parent_sel[j] = i; + j++; + } + } + *names = parent_names; + *count = parent_count; + + return parent_sel; +} + +static int +clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel, + struct clk_init_data *init_data) +{ + const char **parent_names = NULL; + u32 parent_count = 0; + u32 *parent_sel; + + /* + * If a peripheral clock has multiple parents, the value + * used by the hardware to select that parent is represented + * by the parent clock's position in the "clocks" list. Some + * values don't have defined or supported clocks; these will + * have BAD_CLK_NAME entries in the parents[] array. The + * list is terminated by a NULL entry. + * + * We need to supply (only) the names of defined parent + * clocks when registering a clock though, so we use an + * array of parent selector values to map between the + * indexes the common clock code uses and the selector + * values we need. 
+ */ + parent_sel = parent_process(clocks, &parent_count, &parent_names); + if (IS_ERR(parent_sel)) { + int ret = PTR_ERR(parent_sel); + + pr_err("%s: error processing parent clocks for %s (%d)\n", + __func__, init_data->name, ret); + + return ret; + } + + init_data->parent_names = parent_names; + init_data->num_parents = parent_count; + + sel->parent_count = parent_count; + sel->parent_sel = parent_sel; + + return 0; +} + +static void clk_sel_teardown(struct bcm_clk_sel *sel, + struct clk_init_data *init_data) +{ + kfree(sel->parent_sel); + sel->parent_sel = NULL; + sel->parent_count = 0; + + init_data->num_parents = 0; + kfree(init_data->parent_names); + init_data->parent_names = NULL; +} + +static void peri_clk_teardown(struct peri_clk_data *data, + struct clk_init_data *init_data) +{ + clk_sel_teardown(&data->sel, init_data); + init_data->ops = NULL; +} + +/* + * Caller is responsible for freeing the parent_names[] and + * parent_sel[] arrays in the peripheral clock's "data" structure + * that can be assigned if the clock has one or more parent clocks + * associated with it. + */ +static int peri_clk_setup(struct ccu_data *ccu, struct peri_clk_data *data, + struct clk_init_data *init_data) +{ + init_data->ops = &kona_peri_clk_ops; + init_data->flags = 0; + + return clk_sel_setup(data->clocks, &data->sel, init_data); +} + +static void bcm_clk_teardown(struct kona_clk *bcm_clk) +{ + switch (bcm_clk->type) { + case bcm_clk_peri: + peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); + break; + default: + break; + } + bcm_clk->data = NULL; + bcm_clk->type = bcm_clk_none; +} + +static void kona_clk_teardown(struct clk *clk) +{ + struct clk_hw *hw; + struct kona_clk *bcm_clk; + + if (!clk) + return; + + hw = __clk_get_hw(clk); + if (!hw) { + pr_err("%s: clk %p has null hw pointer\n", __func__, clk); + return; + } + clk_unregister(clk); + + bcm_clk = to_kona_clk(hw); + bcm_clk_teardown(bcm_clk); +} + +struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, + enum bcm_clk_type type, void *data) +{ + struct kona_clk *bcm_clk; + struct clk_init_data *init_data; + struct clk *clk = NULL; + + bcm_clk = kzalloc(sizeof(*bcm_clk), GFP_KERNEL); + if (!bcm_clk) { + pr_err("%s: failed to allocate bcm_clk for %s\n", __func__, + name); + return NULL; + } + bcm_clk->ccu = ccu; + bcm_clk->name = name; + + init_data = &bcm_clk->init_data; + init_data->name = name; + switch (type) { + case bcm_clk_peri: + if (peri_clk_setup(ccu, data, init_data)) + goto out_free; + break; + default: + data = NULL; + break; + } + bcm_clk->type = type; + bcm_clk->data = data; + + /* Make sure everything makes sense before we set it up */ + if (!kona_clk_valid(bcm_clk)) { + pr_err("%s: clock data invalid for %s\n", __func__, name); + goto out_teardown; + } + + bcm_clk->hw.init = init_data; + clk = clk_register(NULL, &bcm_clk->hw); + if (IS_ERR(clk)) { + pr_err("%s: error registering clock %s (%ld)\n", __func__, + name, PTR_ERR(clk)); + goto out_teardown; + } + BUG_ON(!clk); + + return clk; +out_teardown: + bcm_clk_teardown(bcm_clk); +out_free: + kfree(bcm_clk); + + return NULL; +} + +static void ccu_clks_teardown(struct ccu_data *ccu) +{ + u32 i; + + for (i = 0; i < ccu->data.clk_num; i++) + kona_clk_teardown(ccu->data.clks[i]); + kfree(ccu->data.clks); +} + +static void kona_ccu_teardown(struct ccu_data *ccu) +{ + if (!ccu) + return; + + if (!ccu->base) + goto done; + + of_clk_del_provider(ccu->node); /* safe if never added */ + ccu_clks_teardown(ccu); + list_del(&ccu->links); + of_node_put(ccu->node); + 
iounmap(ccu->base); +done: + kfree(ccu->name); + kfree(ccu); +} + +/* + * Set up a CCU. Call the provided ccu_clks_setup callback to + * initialize the array of clocks provided by the CCU. + */ +void __init kona_dt_ccu_setup(struct device_node *node, + int (*ccu_clks_setup)(struct ccu_data *)) +{ + struct ccu_data *ccu; + struct resource res = { 0 }; + resource_size_t range; + int ret; + + ccu = kzalloc(sizeof(*ccu), GFP_KERNEL); + if (ccu) + ccu->name = kstrdup(node->name, GFP_KERNEL); + if (!ccu || !ccu->name) { + pr_err("%s: unable to allocate CCU struct for %s\n", + __func__, node->name); + kfree(ccu); + + return; + } + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("%s: no valid CCU registers found for %s\n", __func__, + node->name); + goto out_err; + } + + range = resource_size(&res); + if (range > (resource_size_t)U32_MAX) { + pr_err("%s: address range too large for %s\n", __func__, + node->name); + goto out_err; + } + + ccu->range = (u32)range; + ccu->base = ioremap(res.start, ccu->range); + if (!ccu->base) { + pr_err("%s: unable to map CCU registers for %s\n", __func__, + node->name); + goto out_err; + } + + spin_lock_init(&ccu->lock); + INIT_LIST_HEAD(&ccu->links); + ccu->node = of_node_get(node); + + list_add_tail(&ccu->links, &ccu_list); + + /* Set up clocks array (in ccu->data) */ + if (ccu_clks_setup(ccu)) + goto out_err; + + ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data); + if (ret) { + pr_err("%s: error adding ccu %s as provider (%d)\n", __func__, + node->name, ret); + goto out_err; + } + + if (!kona_ccu_init(ccu)) + pr_err("Broadcom %s initialization had errors\n", node->name); + + return; +out_err: + kona_ccu_teardown(ccu); + pr_err("Broadcom %s setup aborted\n", node->name); +} diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c new file mode 100644 index 000000000000..e3d339e08309 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.c @@ -0,0 +1,1033 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "clk-kona.h" + +#include + +#define CCU_ACCESS_PASSWORD 0xA5A500 +#define CLK_GATE_DELAY_LOOP 2000 + +/* Bitfield operations */ + +/* Produces a mask of set bits covering a range of a 32-bit value */ +static inline u32 bitfield_mask(u32 shift, u32 width) +{ + return ((1 << width) - 1) << shift; +} + +/* Extract the value of a bitfield found within a given register value */ +static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width) +{ + return (reg_val & bitfield_mask(shift, width)) >> shift; +} + +/* Replace the value of a bitfield found within a given register value */ +static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val) +{ + u32 mask = bitfield_mask(shift, width); + + return (reg_val & ~mask) | (val << shift); +} + +/* Divider and scaling helpers */ + +/* + * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values + * unsigned. Note that unlike do_div(), the remainder is discarded + * and the return value is the quotient (not the remainder). 
+ */ +u64 do_div_round_closest(u64 dividend, unsigned long divisor) +{ + u64 result; + + result = dividend + ((u64)divisor >> 1); + (void)do_div(result, divisor); + + return result; +} + +/* Convert a divider into the scaled divisor value it represents. */ +static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) +{ + return (u64)reg_div + ((u64)1 << div->frac_width); +} + +/* + * Build a scaled divider value as close as possible to the + * given whole part (div_value) and fractional part (expressed + * in billionths). + */ +u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) +{ + u64 combined; + + BUG_ON(!div_value); + BUG_ON(billionths >= BILLION); + + combined = (u64)div_value * BILLION + billionths; + combined <<= div->frac_width; + + return do_div_round_closest(combined, BILLION); +} + +/* The scaled minimum divisor representable by a divider */ +static inline u64 +scaled_div_min(struct bcm_clk_div *div) +{ + if (divider_is_fixed(div)) + return (u64)div->fixed; + + return scaled_div_value(div, 0); +} + +/* The scaled maximum divisor representable by a divider */ +u64 scaled_div_max(struct bcm_clk_div *div) +{ + u32 reg_div; + + if (divider_is_fixed(div)) + return (u64)div->fixed; + + reg_div = ((u32)1 << div->width) - 1; + + return scaled_div_value(div, reg_div); +} + +/* + * Convert a scaled divisor into its divider representation as + * stored in a divider register field. + */ +static inline u32 +divider(struct bcm_clk_div *div, u64 scaled_div) +{ + BUG_ON(scaled_div < scaled_div_min(div)); + BUG_ON(scaled_div > scaled_div_max(div)); + + return (u32)(scaled_div - ((u64)1 << div->frac_width)); +} + +/* Return a rate scaled for use when dividing by a scaled divisor. */ +static inline u64 +scale_rate(struct bcm_clk_div *div, u32 rate) +{ + if (divider_is_fixed(div)) + return (u64)rate; + + return (u64)rate << div->frac_width; +} + +/* CCU access */ + +/* Read a 32-bit register value from a CCU's address space. */ +static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset) +{ + return readl(ccu->base + reg_offset); +} + +/* Write a 32-bit register value into a CCU's address space. */ +static inline void +__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val) +{ + writel(reg_val, ccu->base + reg_offset); +} + +static inline unsigned long ccu_lock(struct ccu_data *ccu) +{ + unsigned long flags; + + spin_lock_irqsave(&ccu->lock, flags); + + return flags; +} +static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags) +{ + spin_unlock_irqrestore(&ccu->lock, flags); +} + +/* + * Enable/disable write access to CCU protected registers. The + * WR_ACCESS register for all CCUs is at offset 0. + */ +static inline void __ccu_write_enable(struct ccu_data *ccu) +{ + if (ccu->write_enabled) { + pr_err("%s: access already enabled for %s\n", __func__, + ccu->name); + return; + } + ccu->write_enabled = true; + __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1); +} + +static inline void __ccu_write_disable(struct ccu_data *ccu) +{ + if (!ccu->write_enabled) { + pr_err("%s: access wasn't enabled for %s\n", __func__, + ccu->name); + return; + } + + __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD); + ccu->write_enabled = false; +} + +/* + * Poll a register in a CCU's address space, returning when the + * specified bit in that register's value is set (or clear). Delay + * a microsecond after each read of the register. Returns true if + * successful, or false if we gave up trying. + * + * Caller must ensure the CCU lock is held. 
+ */ +static inline bool +__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want) +{ + unsigned int tries; + u32 bit_mask = 1 << bit; + + for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) { + u32 val; + bool bit_val; + + val = __ccu_read(ccu, reg_offset); + bit_val = (val & bit_mask) != 0; + if (bit_val == want) + return true; + udelay(1); + } + return false; +} + +/* Gate operations */ + +/* Determine whether a clock is gated. CCU lock must be held. */ +static bool +__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + u32 bit_mask; + u32 reg_val; + + /* If there is no gate we can assume it's enabled. */ + if (!gate_exists(gate)) + return true; + + bit_mask = 1 << gate->status_bit; + reg_val = __ccu_read(ccu, gate->offset); + + return (reg_val & bit_mask) != 0; +} + +/* Determine whether a clock is gated. */ +static bool +is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + long flags; + bool ret; + + /* Avoid taking the lock if we can */ + if (!gate_exists(gate)) + return true; + + flags = ccu_lock(ccu); + ret = __is_clk_gate_enabled(ccu, gate); + ccu_unlock(ccu, flags); + + return ret; +} + +/* + * Commit our desired gate state to the hardware. + * Returns true if successful, false otherwise. + */ +static bool +__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + u32 reg_val; + u32 mask; + bool enabled = false; + + BUG_ON(!gate_exists(gate)); + if (!gate_is_sw_controllable(gate)) + return true; /* Nothing we can change */ + + reg_val = __ccu_read(ccu, gate->offset); + + /* For a hardware/software gate, set which is in control */ + if (gate_is_hw_controllable(gate)) { + mask = (u32)1 << gate->hw_sw_sel_bit; + if (gate_is_sw_managed(gate)) + reg_val |= mask; + else + reg_val &= ~mask; + } + + /* + * If software is in control, enable or disable the gate. + * If hardware is, clear the enabled bit for good measure. + * If a software controlled gate can't be disabled, we're + * required to write a 0 into the enable bit (but the gate + * will be enabled). + */ + mask = (u32)1 << gate->en_bit; + if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) && + !gate_is_no_disable(gate)) + reg_val |= mask; + else + reg_val &= ~mask; + + __ccu_write(ccu, gate->offset, reg_val); + + /* For a hardware controlled gate, we're done */ + if (!gate_is_sw_managed(gate)) + return true; + + /* Otherwise wait for the gate to be in desired state */ + return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); +} + +/* + * Initialize a gate. Our desired state (hardware/software select, + * and if software, its enable state) is committed to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + if (!gate_exists(gate)) + return true; + return __gate_commit(ccu, gate); +} + +/* + * Set a gate to enabled or disabled state. Does nothing if the + * gate is not currently under software control, or if it is already + * in the requested state. Returns true if successful, false + * otherwise. CCU lock must be held. 
+ */ +static bool +__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) +{ + bool ret; + + if (!gate_exists(gate) || !gate_is_sw_managed(gate)) + return true; /* Nothing to do */ + + if (!enable && gate_is_no_disable(gate)) { + pr_warn("%s: invalid gate disable request (ignoring)\n", + __func__); + return true; + } + + if (enable == gate_is_enabled(gate)) + return true; /* No change */ + + gate_flip_enabled(gate); + ret = __gate_commit(ccu, gate); + if (!ret) + gate_flip_enabled(gate); /* Revert the change */ + + return ret; +} + +/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */ +static int clk_gate(struct ccu_data *ccu, const char *name, + struct bcm_clk_gate *gate, bool enable) +{ + unsigned long flags; + bool success; + + /* + * Avoid taking the lock if we can. We quietly ignore + * requests to change state that don't make sense. + */ + if (!gate_exists(gate) || !gate_is_sw_managed(gate)) + return 0; + if (!enable && gate_is_no_disable(gate)) + return 0; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + success = __clk_gate(ccu, gate, enable); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (success) + return 0; + + pr_err("%s: failed to %s gate for %s\n", __func__, + enable ? "enable" : "disable", name); + + return -EIO; +} + +/* Trigger operations */ + +/* + * Caller must ensure CCU lock is held and access is enabled. + * Returns true if successful, false otherwise. + */ +static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig) +{ + /* Trigger the clock and wait for it to finish */ + __ccu_write(ccu, trig->offset, 1 << trig->bit); + + return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); +} + +/* Divider operations */ + +/* Read a divider value and return the scaled divisor it represents. */ +static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) +{ + unsigned long flags; + u32 reg_val; + u32 reg_div; + + if (divider_is_fixed(div)) + return (u64)div->fixed; + + flags = ccu_lock(ccu); + reg_val = __ccu_read(ccu, div->offset); + ccu_unlock(ccu, flags); + + /* Extract the full divider field from the register value */ + reg_div = bitfield_extract(reg_val, div->shift, div->width); + + /* Return the scaled divisor value it represents */ + return scaled_div_value(div, reg_div); +} + +/* + * Convert a divider's scaled divisor value into its recorded form + * and commit it into the hardware divider register. + * + * Returns 0 on success. Returns -EINVAL for invalid arguments. + * Returns -ENXIO if gating failed, and -EIO if a trigger failed. + */ +static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig) +{ + bool enabled; + u32 reg_div; + u32 reg_val; + int ret = 0; + + BUG_ON(divider_is_fixed(div)); + + /* + * If we're just initializing the divider, and no initial + * state was defined in the device tree, we just find out + * what its current value is rather than updating it. 
+ */ + if (div->scaled_div == BAD_SCALED_DIV_VALUE) { + reg_val = __ccu_read(ccu, div->offset); + reg_div = bitfield_extract(reg_val, div->shift, div->width); + div->scaled_div = scaled_div_value(div, reg_div); + + return 0; + } + + /* Convert the scaled divisor to the value we need to record */ + reg_div = divider(div, div->scaled_div); + + /* Clock needs to be enabled before changing the rate */ + enabled = __is_clk_gate_enabled(ccu, gate); + if (!enabled && !__clk_gate(ccu, gate, true)) { + ret = -ENXIO; + goto out; + } + + /* Replace the divider value and record the result */ + reg_val = __ccu_read(ccu, div->offset); + reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); + __ccu_write(ccu, div->offset, reg_val); + + /* If the trigger fails we still want to disable the gate */ + if (!__clk_trigger(ccu, trig)) + ret = -EIO; + + /* Disable the clock again if it was disabled to begin with */ + if (!enabled && !__clk_gate(ccu, gate, false)) + ret = ret ? ret : -ENXIO; /* return first error */ +out: + return ret; +} + +/* + * Initialize a divider by committing our desired state to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig) +{ + if (!divider_exists(div) || divider_is_fixed(div)) + return true; + return !__div_commit(ccu, gate, div, trig); +} + +static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig, + u64 scaled_div) +{ + unsigned long flags; + u64 previous; + int ret; + + BUG_ON(divider_is_fixed(div)); + + previous = div->scaled_div; + if (previous == scaled_div) + return 0; /* No change */ + + div->scaled_div = scaled_div; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + ret = __div_commit(ccu, gate, div, trig); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (ret) + div->scaled_div = previous; /* Revert the change */ + + return ret; + +} + +/* Common clock rate helpers */ + +/* + * Implement the common clock framework recalc_rate method, taking + * into account a divider and an optional pre-divider. The + * pre-divider register pointer may be NULL. + */ +static unsigned long clk_recalc_rate(struct ccu_data *ccu, + struct bcm_clk_div *div, struct bcm_clk_div *pre_div, + unsigned long parent_rate) +{ + u64 scaled_parent_rate; + u64 scaled_div; + u64 result; + + if (!divider_exists(div)) + return parent_rate; + + if (parent_rate > (unsigned long)LONG_MAX) + return 0; /* actually this would be a caller bug */ + + /* + * If there is a pre-divider, divide the scaled parent rate + * by the pre-divider value first. In this case--to improve + * accuracy--scale the parent rate by *both* the pre-divider + * value and the divider before actually computing the + * result of the pre-divider. + * + * If there's only one divider, just scale the parent rate. + */ + if (pre_div && divider_exists(pre_div)) { + u64 scaled_rate; + + scaled_rate = scale_rate(pre_div, parent_rate); + scaled_rate = scale_rate(div, scaled_rate); + scaled_div = divider_read_scaled(ccu, pre_div); + scaled_parent_rate = do_div_round_closest(scaled_rate, + scaled_div); + } else { + scaled_parent_rate = scale_rate(div, parent_rate); + } + + /* + * Get the scaled divisor value, and divide the scaled + * parent rate by that to determine this clock's resulting + * rate. 
+ */ + scaled_div = divider_read_scaled(ccu, div); + result = do_div_round_closest(scaled_parent_rate, scaled_div); + + return (unsigned long)result; +} + +/* + * Compute the output rate produced when a given parent rate is fed + * into two dividers. The pre-divider can be NULL, and even if it's + * non-null it may be nonexistent. It's also OK for the divider to + * be nonexistent, and in that case the pre-divider is also ignored. + * + * If scaled_div is non-null, it is used to return the scaled divisor + * value used by the (downstream) divider to produce that rate. + */ +static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, + struct bcm_clk_div *pre_div, + unsigned long rate, unsigned long parent_rate, + u64 *scaled_div) +{ + u64 scaled_parent_rate; + u64 min_scaled_div; + u64 max_scaled_div; + u64 best_scaled_div; + u64 result; + + BUG_ON(!divider_exists(div)); + BUG_ON(!rate); + BUG_ON(parent_rate > (u64)LONG_MAX); + + /* + * If there is a pre-divider, divide the scaled parent rate + * by the pre-divider value first. In this case--to improve + * accuracy--scale the parent rate by *both* the pre-divider + * value and the divider before actually computing the + * result of the pre-divider. + * + * If there's only one divider, just scale the parent rate. + * + * For simplicity we treat the pre-divider as fixed (for now). + */ + if (divider_exists(pre_div)) { + u64 scaled_rate; + u64 scaled_pre_div; + + scaled_rate = scale_rate(pre_div, parent_rate); + scaled_rate = scale_rate(div, scaled_rate); + scaled_pre_div = divider_read_scaled(ccu, pre_div); + scaled_parent_rate = do_div_round_closest(scaled_rate, + scaled_pre_div); + } else { + scaled_parent_rate = scale_rate(div, parent_rate); + } + + /* + * Compute the best possible divider and ensure it is in + * range. A fixed divider can't be changed, so just report + * the best we can do. + */ + if (!divider_is_fixed(div)) { + best_scaled_div = do_div_round_closest(scaled_parent_rate, + rate); + min_scaled_div = scaled_div_min(div); + max_scaled_div = scaled_div_max(div); + if (best_scaled_div > max_scaled_div) + best_scaled_div = max_scaled_div; + else if (best_scaled_div < min_scaled_div) + best_scaled_div = min_scaled_div; + } else { + best_scaled_div = divider_read_scaled(ccu, div); + } + + /* OK, figure out the resulting rate */ + result = do_div_round_closest(scaled_parent_rate, best_scaled_div); + + if (scaled_div) + *scaled_div = best_scaled_div; + + return (long)result; +} + +/* Common clock parent helpers */ + +/* + * For a given parent selector (register field) value, find the + * index into a selector's parent_sel array that contains it. + * Returns the index, or BAD_CLK_INDEX if it's not found. + */ +static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel) +{ + u8 i; + + BUG_ON(sel->parent_count > (u32)U8_MAX); + for (i = 0; i < sel->parent_count; i++) + if (sel->parent_sel[i] == parent_sel) + return i; + return BAD_CLK_INDEX; +} + +/* + * Fetch the current value of the selector, and translate that into + * its corresponding index in the parent array we registered with + * the clock framework. + * + * Returns parent array index that corresponds with the value found, + * or BAD_CLK_INDEX if the found value is out of range. 
+ */ +static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) +{ + unsigned long flags; + u32 reg_val; + u32 parent_sel; + u8 index; + + /* If there's no selector, there's only one parent */ + if (!selector_exists(sel)) + return 0; + + /* Get the value in the selector register */ + flags = ccu_lock(ccu); + reg_val = __ccu_read(ccu, sel->offset); + ccu_unlock(ccu, flags); + + parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); + + /* Look up that selector's parent array index and return it */ + index = parent_index(sel, parent_sel); + if (index == BAD_CLK_INDEX) + pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", + __func__, parent_sel, ccu->name, sel->offset); + + return index; +} + +/* + * Commit our desired selector value to the hardware. + * + * Returns 0 on success. Returns -EINVAL for invalid arguments. + * Returns -ENXIO if gating failed, and -EIO if a trigger failed. + */ +static int +__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) +{ + u32 parent_sel; + u32 reg_val; + bool enabled; + int ret = 0; + + BUG_ON(!selector_exists(sel)); + + /* + * If we're just initializing the selector, and no initial + * state was defined in the device tree, we just find out + * what its current value is rather than updating it. + */ + if (sel->clk_index == BAD_CLK_INDEX) { + u8 index; + + reg_val = __ccu_read(ccu, sel->offset); + parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); + index = parent_index(sel, parent_sel); + if (index == BAD_CLK_INDEX) + return -EINVAL; + sel->clk_index = index; + + return 0; + } + + BUG_ON((u32)sel->clk_index >= sel->parent_count); + parent_sel = sel->parent_sel[sel->clk_index]; + + /* Clock needs to be enabled before changing the parent */ + enabled = __is_clk_gate_enabled(ccu, gate); + if (!enabled && !__clk_gate(ccu, gate, true)) + return -ENXIO; + + /* Replace the selector value and record the result */ + reg_val = __ccu_read(ccu, sel->offset); + reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel); + __ccu_write(ccu, sel->offset, reg_val); + + /* If the trigger fails we still want to disable the gate */ + if (!__clk_trigger(ccu, trig)) + ret = -EIO; + + /* Disable the clock again if it was disabled to begin with */ + if (!enabled && !__clk_gate(ccu, gate, false)) + ret = ret ? ret : -ENXIO; /* return first error */ + + return ret; +} + +/* + * Initialize a selector by committing our desired state to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) +{ + if (!selector_exists(sel)) + return true; + return !__sel_commit(ccu, gate, sel, trig); +} + +/* + * Write a new value into a selector register to switch to a + * different parent clock. Returns 0 on success, or an error code + * (from __sel_commit()) otherwise. 
+ */ +static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig, + u8 index) +{ + unsigned long flags; + u8 previous; + int ret; + + previous = sel->clk_index; + if (previous == index) + return 0; /* No change */ + + sel->clk_index = index; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + ret = __sel_commit(ccu, gate, sel, trig); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (ret) + sel->clk_index = previous; /* Revert the change */ + + return ret; +} + +/* Clock operations */ + +static int kona_peri_clk_enable(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); +} + +static void kona_peri_clk_disable(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); +} + +static int kona_peri_clk_is_enabled(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; +} + +static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + + return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, + parent_rate); +} + +static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_div *div = &bcm_clk->peri->div; + + if (!divider_exists(div)) + return __clk_get_rate(hw->clk); + + /* Quietly avoid a zero rate */ + return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, + rate ? rate : 1, *parent_rate, NULL); +} + +static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + struct bcm_clk_sel *sel = &data->sel; + struct bcm_clk_trig *trig; + int ret; + + BUG_ON(index >= sel->parent_count); + + /* If there's only one parent we don't require a selector */ + if (!selector_exists(sel)) + return 0; + + /* + * The regular trigger is used by default, but if there's a + * pre-trigger we want to use that instead. + */ + trig = trigger_exists(&data->pre_trig) ? &data->pre_trig + : &data->trig; + + ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); + if (ret == -ENXIO) { + pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); + ret = -EIO; /* Don't proliferate weird errors */ + } else if (ret == -EIO) { + pr_err("%s: %strigger failed for %s\n", __func__, + trig == &data->pre_trig ? "pre-" : "", + bcm_clk->name); + } + + return ret; +} + +static u8 kona_peri_clk_get_parent(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + u8 index; + + index = selector_read_index(bcm_clk->ccu, &data->sel); + + /* Not all callers would handle an out-of-range value gracefully */ + return index == BAD_CLK_INDEX ? 
0 : index; +} + +static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + struct bcm_clk_div *div = &data->div; + u64 scaled_div = 0; + int ret; + + if (parent_rate > (unsigned long)LONG_MAX) + return -EINVAL; + + if (rate == __clk_get_rate(hw->clk)) + return 0; + + if (!divider_exists(div)) + return rate == parent_rate ? 0 : -EINVAL; + + /* + * A fixed divider can't be changed. (Nor can a fixed + * pre-divider be, but for now we never actually try to + * change that.) Tolerate a request for a no-op change. + */ + if (divider_is_fixed(&data->div)) + return rate == parent_rate ? 0 : -EINVAL; + + /* + * Get the scaled divisor value needed to achieve a clock + * rate as close as possible to what was requested, given + * the parent clock rate supplied. + */ + (void)round_rate(bcm_clk->ccu, div, &data->pre_div, + rate ? rate : 1, parent_rate, &scaled_div); + + /* + * We aren't updating any pre-divider at this point, so + * we'll use the regular trigger. + */ + ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, + &data->trig, scaled_div); + if (ret == -ENXIO) { + pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); + ret = -EIO; /* Don't proliferate weird errors */ + } else if (ret == -EIO) { + pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name); + } + + return ret; +} + +struct clk_ops kona_peri_clk_ops = { + .enable = kona_peri_clk_enable, + .disable = kona_peri_clk_disable, + .is_enabled = kona_peri_clk_is_enabled, + .recalc_rate = kona_peri_clk_recalc_rate, + .round_rate = kona_peri_clk_round_rate, + .set_parent = kona_peri_clk_set_parent, + .get_parent = kona_peri_clk_get_parent, + .set_rate = kona_peri_clk_set_rate, +}; + +/* Put a peripheral clock into its initial state */ +static bool __peri_clk_init(struct kona_clk *bcm_clk) +{ + struct ccu_data *ccu = bcm_clk->ccu; + struct peri_clk_data *peri = bcm_clk->peri; + const char *name = bcm_clk->name; + struct bcm_clk_trig *trig; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + + if (!gate_init(ccu, &peri->gate)) { + pr_err("%s: error initializing gate for %s\n", __func__, name); + return false; + } + if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { + pr_err("%s: error initializing divider for %s\n", __func__, + name); + return false; + } + + /* + * For the pre-divider and selector, the pre-trigger is used + * if it's present, otherwise we just use the regular trigger. + */ + trig = trigger_exists(&peri->pre_trig) ? 
&peri->pre_trig + : &peri->trig; + + if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { + pr_err("%s: error initializing pre-divider for %s\n", __func__, + name); + return false; + } + + if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { + pr_err("%s: error initializing selector for %s\n", __func__, + name); + return false; + } + + return true; +} + +static bool __kona_clk_init(struct kona_clk *bcm_clk) +{ + switch (bcm_clk->type) { + case bcm_clk_peri: + return __peri_clk_init(bcm_clk); + default: + BUG(); + } + return -EINVAL; +} + +/* Set a CCU and all its clocks into their desired initial state */ +bool __init kona_ccu_init(struct ccu_data *ccu) +{ + unsigned long flags; + unsigned int which; + struct clk **clks = ccu->data.clks; + bool success = true; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + for (which = 0; which < ccu->data.clk_num; which++) { + struct kona_clk *bcm_clk; + + if (!clks[which]) + continue; + bcm_clk = to_kona_clk(__clk_get_hw(clks[which])); + success &= __kona_clk_init(bcm_clk); + } + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + return success; +} diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h new file mode 100644 index 000000000000..5e139adc3dc5 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.h @@ -0,0 +1,410 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CLK_KONA_H +#define _CLK_KONA_H + +#include +#include +#include +#include +#include +#include +#include + +#define BILLION 1000000000 + +/* The common clock framework uses u8 to represent a parent index */ +#define PARENT_COUNT_MAX ((u32)U8_MAX) + +#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */ +#define BAD_CLK_NAME ((const char *)-1) + +#define BAD_SCALED_DIV_VALUE U64_MAX + +/* + * Utility macros for object flag management. If possible, flags + * should be defined such that 0 is the desired default value. 
+ */ +#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag +#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag)) +#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag))) +#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag)) +#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag))) + +/* Clock field state tests */ + +#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS) +#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED) +#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW) +#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW) +#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED) +#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE) + +#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED) + +#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) +#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) +#define divider_has_fraction(div) (!divider_is_fixed(div) && \ + (div)->frac_width > 0) + +#define selector_exists(sel) ((sel)->width != 0) +#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) + +/* Clock type, used to tell common block what it's part of */ +enum bcm_clk_type { + bcm_clk_none, /* undefined clock type */ + bcm_clk_bus, + bcm_clk_core, + bcm_clk_peri +}; + +/* + * Each CCU defines a mapped area of memory containing registers + * used to manage clocks implemented by the CCU. Access to memory + * within the CCU's space is serialized by a spinlock. Before any + * (other) address can be written, a special access "password" value + * must be written to its WR_ACCESS register (located at the base + * address of the range). We keep track of the name of each CCU as + * it is set up, and maintain them in a list. + */ +struct ccu_data { + void __iomem *base; /* base of mapped address space */ + spinlock_t lock; /* serialization lock */ + bool write_enabled; /* write access is currently enabled */ + struct list_head links; /* for ccu_list */ + struct device_node *node; + struct clk_onecell_data data; + const char *name; + u32 range; /* byte range of address space */ +}; + +/* + * Gating control and status is managed by a 32-bit gate register. + * + * There are several types of gating available: + * - (no gate) + * A clock with no gate is assumed to be always enabled. + * - hardware-only gating (auto-gating) + * Enabling or disabling clocks with this type of gate is + * managed automatically by the hardware. Such clocks can be + * considered by the software to be enabled. The current status + * of auto-gated clocks can be read from the gate status bit. + * - software-only gating + * Auto-gating is not available for this type of clock. + * Instead, software manages whether it's enabled by setting or + * clearing the enable bit. The current gate status of a gate + * under software control can be read from the gate status bit. + * To ensure a change to the gating status is complete, the + * status bit can be polled to verify that the gate has entered + * the desired state. + * - selectable hardware or software gating + * Gating for this type of clock can be configured to be either + * under software or hardware control. Which type is in use is + * determined by the hw_sw_sel bit of the gate register. 
+ */
+struct bcm_clk_gate {
+	u32 offset;		/* gate register offset */
+	u32 status_bit;		/* 0: gate is disabled; 1: gate is enabled */
+	u32 en_bit;		/* 0: disable; 1: enable */
+	u32 hw_sw_sel_bit;	/* 0: hardware gating; 1: software gating */
+	u32 flags;		/* BCM_CLK_GATE_FLAGS_* below */
+};
+
+/*
+ * Gate flags:
+ *   HW means this gate can be auto-gated
+ *   SW means the state of this gate can be software controlled
+ *   NO_DISABLE means this gate is (only) enabled if under software control
+ *   SW_MANAGED means the status of this gate is under software control
+ *   ENABLED means this software-managed gate is *supposed* to be enabled
+ */
+#define BCM_CLK_GATE_FLAGS_EXISTS	((u32)1 << 0)	/* Gate is valid */
+#define BCM_CLK_GATE_FLAGS_HW		((u32)1 << 1)	/* Can auto-gate */
+#define BCM_CLK_GATE_FLAGS_SW		((u32)1 << 2)	/* Software control */
+#define BCM_CLK_GATE_FLAGS_NO_DISABLE	((u32)1 << 3)	/* HW or enabled */
+#define BCM_CLK_GATE_FLAGS_SW_MANAGED	((u32)1 << 4)	/* SW now in control */
+#define BCM_CLK_GATE_FLAGS_ENABLED	((u32)1 << 5)	/* If SW_MANAGED */
+
+/*
+ * Gate initialization macros.
+ *
+ * Any gate initially under software control will be enabled.
+ */
+
+/* A hardware/software gate initially under software control */
+#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
+	{								\
+		.offset = (_offset),					\
+		.status_bit = (_status_bit),				\
+		.en_bit = (_en_bit),					\
+		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
+		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
+			FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)|	\
+			FLAG(GATE, EXISTS),				\
+	}
+
+/* A hardware/software gate initially under hardware control */
+#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
+	{								\
+		.offset = (_offset),					\
+		.status_bit = (_status_bit),				\
+		.en_bit = (_en_bit),					\
+		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
+		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
+			FLAG(GATE, EXISTS),				\
+	}
+
+/* A hardware-or-enabled gate (enabled if not under hardware control) */
+#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit)	\
+	{								\
+		.offset = (_offset),					\
+		.status_bit = (_status_bit),				\
+		.en_bit = (_en_bit),					\
+		.hw_sw_sel_bit = (_hw_sw_sel_bit),			\
+		.flags = FLAG(GATE, HW)|FLAG(GATE, SW)|			\
+			FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS),	\
+	}
+
+/* A software-only gate */
+#define SW_ONLY_GATE(_offset, _status_bit, _en_bit)			\
+	{								\
+		.offset = (_offset),					\
+		.status_bit = (_status_bit),				\
+		.en_bit = (_en_bit),					\
+		.flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)|		\
+			FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS),		\
+	}
+
+/* A hardware-only gate */
+#define HW_ONLY_GATE(_offset, _status_bit)				\
+	{								\
+		.offset = (_offset),					\
+		.status_bit = (_status_bit),				\
+		.flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS),		\
+	}
+
+/*
+ * Each clock can have zero, one, or two dividers which change the
+ * output rate of the clock.  Each divider can be either fixed or
+ * variable.  If there are two dividers, they are the "pre-divider"
+ * and the "regular" or "downstream" divider.  If there is only one,
+ * there is no pre-divider.
+ *
+ * A fixed divider is any non-zero (positive) value, and it
+ * indicates how the input rate is affected by the divider.
+ *
+ * The value of a variable divider is maintained in a sub-field of a
+ * 32-bit divider register.  The position of the field in the
+ * register is defined by its offset and width.  The value recorded
+ * in this field is always 1 less than the value it represents.
+ * + * In addition, a variable divider can indicate that some subset + * of its bits represent a "fractional" part of the divider. Such + * bits comprise the low-order portion of the divider field, and can + * be viewed as representing the portion of the divider that lies to + * the right of the decimal point. Most variable dividers have zero + * fractional bits. Variable dividers with non-zero fraction width + * still record a value 1 less than the value they represent; the + * added 1 does *not* affect the low-order bit in this case, it + * affects the bits above the fractional part only. (Often in this + * code a divider field value is distinguished from the value it + * represents by referring to the latter as a "divisor".) + * + * In order to avoid dealing with fractions, divider arithmetic is + * performed using "scaled" values. A scaled value is one that's + * been left-shifted by the fractional width of a divider. Dividing + * a scaled value by a scaled divisor produces the desired quotient + * without loss of precision and without any other special handling + * for fractions. + * + * The recorded value of a variable divider can be modified. To + * modify either divider (or both), a clock must be enabled (i.e., + * using its gate). In addition, a trigger register (described + * below) must be used to commit the change, and polled to verify + * the change is complete. + */ +struct bcm_clk_div { + union { + struct { /* variable divider */ + u32 offset; /* divider register offset */ + u32 shift; /* field shift */ + u32 width; /* field width */ + u32 frac_width; /* field fraction width */ + + u64 scaled_div; /* scaled divider value */ + }; + u32 fixed; /* non-zero fixed divider value */ + }; + u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ +}; + +/* + * Divider flags: + * EXISTS means this divider exists + * FIXED means it is a fixed-rate divider + */ +#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */ +#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */ + +/* Divider initialization macros */ + +/* A fixed (non-zero) divider */ +#define FIXED_DIVIDER(_value) \ + { \ + .fixed = (_value), \ + .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ + } + +/* A divider with an integral divisor */ +#define DIVIDER(_offset, _shift, _width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .scaled_div = BAD_SCALED_DIV_VALUE, \ + .flags = FLAG(DIV, EXISTS), \ + } + +/* A divider whose divisor has an integer and fractional part */ +#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .frac_width = (_frac_width), \ + .scaled_div = BAD_SCALED_DIV_VALUE, \ + .flags = FLAG(DIV, EXISTS), \ + } + +/* + * Clocks may have multiple "parent" clocks. If there is more than + * one, a selector must be specified to define which of the parent + * clocks is currently in use. The selected clock is indicated in a + * sub-field of a 32-bit selector register. The range of + * representable selector values typically exceeds the number of + * available parent clocks. Occasionally the reset value of a + * selector field is explicitly set to a (specific) value that does + * not correspond to a defined input clock. + * + * We register all known parent clocks with the common clock code + * using a packed array (i.e., no empty slots) of (parent) clock + * names, and refer to them later using indexes into that array. 
+ * We maintain an array of selector values indexed by common clock + * index values in order to map between these common clock indexes + * and the selector values used by the hardware. + * + * Like dividers, a selector can be modified, but to do so a clock + * must be enabled, and a trigger must be used to commit the change. + */ +struct bcm_clk_sel { + u32 offset; /* selector register offset */ + u32 shift; /* field shift */ + u32 width; /* field width */ + + u32 parent_count; /* number of entries in parent_sel[] */ + u32 *parent_sel; /* array of parent selector values */ + u8 clk_index; /* current selected index in parent_sel[] */ +}; + +/* Selector initialization macro */ +#define SELECTOR(_offset, _shift, _width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .clk_index = BAD_CLK_INDEX, \ + } + +/* + * Making changes to a variable divider or a selector for a clock + * requires the use of a trigger. A trigger is defined by a single + * bit within a register. To signal a change, a 1 is written into + * that bit. To determine when the change has been completed, that + * trigger bit is polled; the read value will be 1 while the change + * is in progress, and 0 when it is complete. + * + * Occasionally a clock will have more than one trigger. In this + * case, the "pre-trigger" will be used when changing a clock's + * selector and/or its pre-divider. + */ +struct bcm_clk_trig { + u32 offset; /* trigger register offset */ + u32 bit; /* trigger bit */ + u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */ +}; + +/* + * Trigger flags: + * EXISTS means this trigger exists + */ +#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */ + +/* Trigger initialization macro */ +#define TRIGGER(_offset, _bit) \ + { \ + .offset = (_offset), \ + .bit = (_bit), \ + .flags = FLAG(TRIG, EXISTS), \ + } + +struct peri_clk_data { + struct bcm_clk_gate gate; + struct bcm_clk_trig pre_trig; + struct bcm_clk_div pre_div; + struct bcm_clk_trig trig; + struct bcm_clk_div div; + struct bcm_clk_sel sel; + const char *clocks[]; /* must be last; use CLOCKS() to declare */ +}; +#define CLOCKS(...) 
{ __VA_ARGS__, NULL, }
+#define NO_CLOCKS	{ NULL, }	/* Must be used when there are no parent clocks */
+
+struct kona_clk {
+	struct clk_hw hw;
+	struct clk_init_data init_data;
+	const char *name;	/* name of this clock */
+	struct ccu_data *ccu;	/* ccu this clock is associated with */
+	enum bcm_clk_type type;
+	union {
+		void *data;
+		struct peri_clk_data *peri;
+	};
+};
+#define to_kona_clk(_hw) \
+	container_of(_hw, struct kona_clk, hw)
+
+/* Exported globals */
+
+extern struct clk_ops kona_peri_clk_ops;
+
+/* Helper functions */
+
+#define PERI_CLK_SETUP(clks, ccu, id, name) \
+	clks[id] = kona_clk_setup(ccu, #name, bcm_clk_peri, &name ## _data)
+
+/* Externally visible functions */
+
+extern u64 do_div_round_closest(u64 dividend, unsigned long divisor);
+extern u64 scaled_div_max(struct bcm_clk_div *div);
+extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value,
+			u32 billionths);
+
+extern struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
+			enum bcm_clk_type type, void *data);
+extern void __init kona_dt_ccu_setup(struct device_node *node,
+			int (*ccu_clks_setup)(struct ccu_data *));
+extern bool __init kona_ccu_init(struct ccu_data *ccu);
+
+#endif /* _CLK_KONA_H */
diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h
new file mode 100644
index 000000000000..e0096940886d
--- /dev/null
+++ b/include/dt-bindings/clock/bcm281xx.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ * Copyright 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CLOCK_BCM281XX_H
+#define _CLOCK_BCM281XX_H
+
+/*
+ * This file defines the values used to specify clocks provided by
+ * the clock control units (CCUs) on Broadcom BCM281XX family SoCs.
+ */ + +/* root CCU clock ids */ + +#define BCM281XX_ROOT_CCU_FRAC_1M 0 +#define BCM281XX_ROOT_CCU_CLOCK_COUNT 1 + +/* aon CCU clock ids */ + +#define BCM281XX_AON_CCU_HUB_TIMER 0 +#define BCM281XX_AON_CCU_PMU_BSC 1 +#define BCM281XX_AON_CCU_PMU_BSC_VAR 2 +#define BCM281XX_AON_CCU_CLOCK_COUNT 3 + +/* hub CCU clock ids */ + +#define BCM281XX_HUB_CCU_TMON_1M 0 +#define BCM281XX_HUB_CCU_CLOCK_COUNT 1 + +/* master CCU clock ids */ + +#define BCM281XX_MASTER_CCU_SDIO1 0 +#define BCM281XX_MASTER_CCU_SDIO2 1 +#define BCM281XX_MASTER_CCU_SDIO3 2 +#define BCM281XX_MASTER_CCU_SDIO4 3 +#define BCM281XX_MASTER_CCU_USB_IC 4 +#define BCM281XX_MASTER_CCU_HSIC2_48M 5 +#define BCM281XX_MASTER_CCU_HSIC2_12M 6 +#define BCM281XX_MASTER_CCU_CLOCK_COUNT 7 + +/* slave CCU clock ids */ + +#define BCM281XX_SLAVE_CCU_UARTB 0 +#define BCM281XX_SLAVE_CCU_UARTB2 1 +#define BCM281XX_SLAVE_CCU_UARTB3 2 +#define BCM281XX_SLAVE_CCU_UARTB4 3 +#define BCM281XX_SLAVE_CCU_SSP0 4 +#define BCM281XX_SLAVE_CCU_SSP2 5 +#define BCM281XX_SLAVE_CCU_BSC1 6 +#define BCM281XX_SLAVE_CCU_BSC2 7 +#define BCM281XX_SLAVE_CCU_BSC3 8 +#define BCM281XX_SLAVE_CCU_PWM 9 +#define BCM281XX_SLAVE_CCU_CLOCK_COUNT 10 + +#endif /* _CLOCK_BCM281XX_H */ -- cgit v1.2.3 From b424080a9e086e683ad5fdc624a7cf3c024e0c0f Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 7 Mar 2014 15:18:47 +0100 Subject: reset: Add optional resets and stubs This patch adds device_reset_optional and (devm_)reset_control_get_optional variants that drivers can use to indicate they can function without control over the reset line. For those functions, stubs are added so the drivers can be compiled with CONFIG_RESET_CONTROLLER disabled. Also, device_reset is annotated with __must_check. Drivers ignoring the return value should use device_reset_optional instead. 
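As a rough illustration (not part of this patch), a driver whose reset
line is optional could use the new devm_ variant as in the sketch below;
the foo_probe() function is hypothetical, only the reset calls come from
this API:

  #include <linux/err.h>
  #include <linux/platform_device.h>
  #include <linux/reset.h>

  static int foo_probe(struct platform_device *pdev)
  {
  	struct reset_control *rstc;

  	/*
  	 * The reset line is optional: if no reset controller is
  	 * available (including CONFIG_RESET_CONTROLLER=n, where the
  	 * stub returns ERR_PTR(-ENOSYS)), the driver carries on
  	 * without it.
  	 */
  	rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
  	if (!IS_ERR(rstc))
  		reset_control_deassert(rstc);

  	/* ... remaining device setup ... */
  	return 0;
  }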
Signed-off-by: Philipp Zabel Reviewed-by: Maxime Ripard Reviewed-by: Marek Vasut Acked-by: Wolfram Sang --- include/linux/reset.h | 69 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/reset.h b/include/linux/reset.h index a398025d1138..c0eda5023d74 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -1,21 +1,80 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -#include - struct device; struct reset_control; +#ifdef CONFIG_RESET_CONTROLLER + int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); -struct reset_control *of_reset_control_get(struct device_node *node, - const char *id); struct reset_control *reset_control_get(struct device *dev, const char *id); void reset_control_put(struct reset_control *rstc); struct reset_control *devm_reset_control_get(struct device *dev, const char *id); -int device_reset(struct device *dev); +int __must_check device_reset(struct device *dev); + +static inline int device_reset_optional(struct device *dev) +{ + return device_reset(dev); +} + +static inline struct reset_control *reset_control_get_optional( + struct device *dev, const char *id) +{ + return reset_control_get(dev, id); +} + +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return devm_reset_control_get(dev, id); +} + +#else + +static inline int reset_control_reset(struct reset_control *rstc) +{ + WARN_ON(1); + return 0; +} + +static inline int reset_control_assert(struct reset_control *rstc) +{ + WARN_ON(1); + return 0; +} + +static inline int reset_control_deassert(struct reset_control *rstc) +{ + WARN_ON(1); + return 0; +} + +static inline void reset_control_put(struct reset_control *rstc) +{ + WARN_ON(1); +} + +static inline int device_reset_optional(struct device *dev) +{ + return -ENOSYS; +} + +static inline struct reset_control *reset_control_get_optional( + struct device *dev, const char *id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct reset_control *devm_reset_control_get_optional( + struct device *dev, const char *id) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_RESET_CONTROLLER */ #endif -- cgit v1.2.3