 arch/arm/Kconfig                          |  12
 arch/arm/include/asm/localtimer.h         |  34
 arch/arm/kernel/smp.c                     |  87
 arch/arm/kernel/smp_twd.c                 |  64
 arch/arm/mach-highbank/Kconfig            |   2
 arch/arm/mach-imx/Kconfig                 |   3
 arch/arm/mach-msm/timer.c                 | 124
 arch/arm/mach-omap2/Kconfig               |   3
 arch/arm/mach-omap2/timer.c               |   7
 arch/arm/mach-realview/Kconfig            |   8
 arch/arm/mach-spear/Kconfig               |   2
 arch/arm/mach-tegra/Kconfig               |   2
 arch/arm/mach-ux500/Kconfig               |   2
 arch/arm/mach-vexpress/Kconfig            |   2
 arch/arm/mach-zynq/Kconfig                |   2
 drivers/clocksource/exynos_mct.c          |  58
 drivers/clocksource/time-armada-370-xp.c  |  92
 drivers/clocksource/timer-marco.c         |  98
 include/linux/time-armada-370-xp.h        |   4
 19 files changed, 265 insertions(+), 341 deletions(-)
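
Taken together, the hunks below remove the ARM CONFIG_LOCAL_TIMERS option and the local_timer_ops registration hook: each per-CPU timer driver now allocates its clock_event_device with alloc_percpu(), registers a CPU notifier that sets the timer up at CPU_STARTING and tears it down at CPU_DYING, and configures the boot CPU's timer directly at init time. A minimal sketch of that pattern follows; it is not part of the patch, the driver name "foo", its rate and register details are invented, and only the kernel APIs already used in the diff appear.

#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct clock_event_device __percpu *foo_evt;

static void foo_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
{
	/* program or mask the (hypothetical) per-CPU timer here */
}

static int foo_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	/* load the (hypothetical) per-CPU comparator with 'cycles' */
	return 0;
}

static int foo_local_timer_setup(struct clock_event_device *evt)
{
	evt->name		= "foo_local_timer";
	evt->features		= CLOCK_EVT_FEAT_ONESHOT;
	evt->rating		= 200;
	evt->set_mode		= foo_set_mode;
	evt->set_next_event	= foo_set_next_event;
	evt->cpumask		= cpumask_of(smp_processor_id());

	/* 32768 Hz is a placeholder rate for this sketch */
	clockevents_config_and_register(evt, 32768, 2, 0xffffffff);
	return 0;
}

static int foo_cpu_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		foo_local_timer_setup(this_cpu_ptr(foo_evt));
		break;
	case CPU_DYING:
		/* mirror of setup: mask the timer, disable its IRQ */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call = foo_cpu_notify,
};

static void __init foo_timer_init(void)
{
	foo_evt = alloc_percpu(struct clock_event_device);
	if (!foo_evt)
		return;

	register_cpu_notifier(&foo_cpu_nb);

	/*
	 * The notifier only fires for CPUs that come online later,
	 * so the boot CPU is configured immediately.
	 */
	foo_local_timer_setup(this_cpu_ptr(foo_evt));
}
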
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..baf43de26a54 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -645,7 +645,7 @@ config ARCH_SHMOBILE
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_CLK
select HAVE_MACH_CLKDEV
select HAVE_SMP
@@ -1584,16 +1584,6 @@ config ARM_PSCI
0022A ("Power State Coordination Interface System Software on
ARM processors").
-config LOCAL_TIMERS
- bool "Use local timer interrupts"
- depends on SMP
- default y
- help
- Enable support for local timers on SMP platforms, rather then the
- legacy IPI broadcast method. Local timers allows the system
- accounting to be spread across the timer interval, preventing a
- "thundering herd" at every timer tick.
-
# The GPIO number here must be sorted by descending number. In case of
# a multiplatform kernel, we just want the highest value required by the
# selected platforms.
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
deleted file mode 100644
index f77ffc1eb0c2..000000000000
--- a/arch/arm/include/asm/localtimer.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * arch/arm/include/asm/localtimer.h
- *
- * Copyright (C) 2004-2005 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_ARM_LOCALTIMER_H
-#define __ASM_ARM_LOCALTIMER_H
-
-#include <linux/errno.h>
-
-struct clock_event_device;
-
-struct local_timer_ops {
- int (*setup)(struct clock_event_device *);
- void (*stop)(struct clock_event_device *);
-};
-
-#ifdef CONFIG_LOCAL_TIMERS
-/*
- * Register a local timer driver
- */
-int local_timer_register(struct local_timer_ops *);
-#else
-static inline int local_timer_register(struct local_timer_ops *ops)
-{
- return -ENXIO;
-}
-#endif
-
-#endif
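
For context, a sketch (not taken from the patch) of how a driver hooked into the interface deleted above; the foo_* names are illustrative, while the error returns of local_timer_register() follow the implementation removed from arch/arm/kernel/smp.c below.

#include <linux/clockchips.h>
#include <linux/init.h>
#include <asm/localtimer.h>

static int foo_timer_setup(struct clock_event_device *evt)
{
	/* per-CPU programming happened here, called by smp.c */
	return 0;
}

static void foo_timer_stop(struct clock_event_device *evt)
{
	/* mirror of setup, called from __cpu_disable() on CPU removal */
}

static struct local_timer_ops foo_lt_ops = {
	.setup	= foo_timer_setup,
	.stop	= foo_timer_stop,
};

static int __init foo_timer_register(void)
{
	/* -ENXIO on UP/nosmp boots, -EBUSY if another driver registered first */
	return local_timer_register(&foo_lt_ops);
}
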
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..3a98192a3118 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
@@ -146,8 +145,6 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
static int platform_cpu_kill(unsigned int cpu)
{
if (smp_ops.cpu_kill)
@@ -191,11 +188,6 @@ int __cpu_disable(void)
migrate_irqs();
/*
- * Stop the local timer for this CPU.
- */
- percpu_timer_stop();
-
- /*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*
@@ -316,8 +308,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
-static void percpu_timer_setup(void);
-
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -372,11 +362,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
-
local_irq_enable();
local_fiq_enable();
@@ -423,12 +408,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = ncores;
if (ncores > 1 && max_cpus) {
/*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- /*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time. A platform should
* re-initialize the map in the platforms smp_prepare_cpus()
@@ -504,11 +483,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -516,67 +490,6 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void broadcast_timer_setup(struct clock_event_device *evt)
-{
- evt->name = "dummy_timer";
- evt->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 100;
- evt->mult = 1;
- evt->set_mode = broadcast_timer_set_mode;
-
- clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
- if (!is_smp() || !setup_max_cpus)
- return -ENXIO;
-
- if (lt_ops)
- return -EBUSY;
-
- lt_ops = ops;
- return 0;
-}
-#endif
-
-static void percpu_timer_setup(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- evt->cpumask = cpumask_of(cpu);
-
- if (!lt_ops || lt_ops->setup(evt))
- broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- if (lt_ops)
- lt_ops->stop(evt);
-}
-#endif
-
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
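
The percpu_timer_setup()/percpu_timer_stop() pair and the broadcast fallback removed above are no longer smp.c's business. For reference, a sketch of what the removed fallback registered when no local timer driver was present: a do-nothing clockevent flagged CLOCK_EVT_FEAT_DUMMY, so the tick-broadcast machinery drives the CPU from a global timer via IPI. In the original code the cpumask was assigned by the caller; here it is set inline for self-containment.

#include <linux/clockchips.h>
#include <linux/smp.h>

static void dummy_broadcast_set_mode(enum clock_event_mode mode,
				     struct clock_event_device *evt)
{
	/* nothing to program: the tick-broadcast machinery does the work */
}

static void dummy_broadcast_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 100;
	evt->mult	= 1;
	evt->set_mode	= dummy_broadcast_set_mode;
	evt->cpumask	= cpumask_of(smp_processor_id());

	clockevents_register_device(evt);
}
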
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 25956204ef23..2985c9f0905d 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,7 +25,6 @@
#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
-#include <asm/localtimer.h>
/* set up by the platform code */
static void __iomem *twd_base;
@@ -33,7 +33,7 @@ static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);
-static struct clock_event_device __percpu **twd_evt;
+static struct clock_event_device __percpu *twd_evt;
static int twd_ppi;
static void twd_set_mode(enum clock_event_mode mode,
@@ -90,8 +90,10 @@ static int twd_timer_ack(void)
return 0;
}
-static void twd_timer_stop(struct clock_event_device *clk)
+static void twd_timer_stop(void)
{
+ struct clock_event_device *clk = __this_cpu_ptr(twd_evt);
+
twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
disable_percpu_irq(clk->irq);
}
@@ -106,7 +108,7 @@ static void twd_update_frequency(void *new_rate)
{
twd_timer_rate = *((unsigned long *) new_rate);
- clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+ clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
}
static int twd_rate_change(struct notifier_block *nb,
@@ -132,7 +134,7 @@ static struct notifier_block twd_clk_nb = {
static int twd_clk_init(void)
{
- if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
return clk_notifier_register(twd_clk, &twd_clk_nb);
return 0;
@@ -151,7 +153,7 @@ static void twd_update_frequency(void *data)
{
twd_timer_rate = clk_get_rate(twd_clk);
- clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+ clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
}
static int twd_cpufreq_transition(struct notifier_block *nb,
@@ -177,7 +179,7 @@ static struct notifier_block twd_cpufreq_nb = {
static int twd_cpufreq_init(void)
{
- if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
return cpufreq_register_notifier(&twd_cpufreq_nb,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -228,7 +230,7 @@ static void twd_calibrate_rate(void)
static irqreturn_t twd_handler(int irq, void *dev_id)
{
- struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ struct clock_event_device *evt = dev_id;
if (twd_timer_ack()) {
evt->event_handler(evt);
@@ -265,9 +267,9 @@ static void twd_get_clock(struct device_node *np)
/*
* Setup the local clock events for a CPU.
*/
-static int twd_timer_setup(struct clock_event_device *clk)
+static void twd_timer_setup(void)
{
- struct clock_event_device **this_cpu_clk;
+ struct clock_event_device *clk = __this_cpu_ptr(twd_evt);
int cpu = smp_processor_id();
/*
@@ -276,9 +278,9 @@ static int twd_timer_setup(struct clock_event_device *clk)
*/
if (per_cpu(percpu_setup_called, cpu)) {
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
- clockevents_register_device(*__this_cpu_ptr(twd_evt));
+ clockevents_register_device(clk);
enable_percpu_irq(clk->irq, 0);
- return 0;
+ return;
}
per_cpu(percpu_setup_called, cpu) = true;
@@ -297,27 +299,37 @@ static int twd_timer_setup(struct clock_event_device *clk)
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
clk->irq = twd_ppi;
-
- this_cpu_clk = __this_cpu_ptr(twd_evt);
- *this_cpu_clk = clk;
+ clk->cpumask = cpumask_of(cpu);
clockevents_config_and_register(clk, twd_timer_rate,
0xf, 0xffffffff);
enable_percpu_irq(clk->irq, 0);
+}
- return 0;
+static int twd_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ twd_timer_setup();
+ break;
+ case CPU_DYING:
+ twd_timer_stop();
+ break;
+ }
+
+ return NOTIFY_OK;
}
-static struct local_timer_ops twd_lt_ops = {
- .setup = twd_timer_setup,
- .stop = twd_timer_stop,
+static struct notifier_block twd_timer_cpu_nb = {
+ .notifier_call = twd_timer_cpu_notify,
};
static int __init twd_local_timer_common_register(struct device_node *np)
{
int err;
- twd_evt = alloc_percpu(struct clock_event_device *);
+ twd_evt = alloc_percpu(struct clock_event_device);
if (!twd_evt) {
err = -ENOMEM;
goto out_free;
@@ -329,12 +341,22 @@ static int __init twd_local_timer_common_register(struct device_node *np)
goto out_free;
}
- err = local_timer_register(&twd_lt_ops);
+ err = register_cpu_notifier(&twd_timer_cpu_nb);
if (err)
goto out_irq;
twd_get_clock(np);
+ /*
+ * Immediately configure the timer on the boot CPU, unless we need
+ * jiffies to be incrementing to calibrate the rate in which case
+ * setup the timer in late_time_init.
+ */
+ if (twd_timer_rate)
+ twd_timer_setup();
+ else
+ late_time_init = twd_timer_setup;
+
return 0;
out_irq:
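
Besides dropping local_timer_ops, the smp_twd.c hunks change the per-CPU storage from a pointer handed in by smp.c to the clock_event_device itself. A sketch of the difference; the helpers twd_alloc() and twd_this_cpu_clk() are invented for illustration, the accessors match the ones used in the diff.

#include <linux/clockchips.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>

/*
 * old layout: static struct clock_event_device __percpu **twd_evt;
 * smp.c owned the devices and stored a pointer in each CPU's slot.
 *
 * new layout: the clockevent itself lives in the per-CPU area.
 */
static struct clock_event_device __percpu *twd_evt;

static int __init twd_alloc(void)
{
	twd_evt = alloc_percpu(struct clock_event_device);
	return twd_evt ? 0 : -ENOMEM;
}

/* called on the owning CPU, e.g. from the CPU_STARTING notifier */
static struct clock_event_device *twd_this_cpu_clk(void)
{
	/* one dereference fewer than the old *__this_cpu_ptr(twd_evt) */
	return __this_cpu_ptr(twd_evt);
}
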
diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig
index cd9fcb1cd7ab..6acbdabf6222 100644
--- a/arch/arm/mach-highbank/Kconfig
+++ b/arch/arm/mach-highbank/Kconfig
@@ -12,7 +12,7 @@ config ARCH_HIGHBANK
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MAILBOX
select PL320_MBOX
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index f54656091a9d..21fa9fa3d54e 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -793,7 +793,8 @@ config SOC_IMX6Q
select COMMON_CLK
select CPU_V7
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
+ select HAVE_CAN_FLEXCAN if CAN
select HAVE_IMX_ANATOP
select HAVE_IMX_GPC
select HAVE_IMX_MMDC
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 8697cfc0d0b6..a7afbacae61a 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -26,7 +27,6 @@
#include <linux/sched_clock.h>
#include <asm/mach/time.h>
-#include <asm/localtimer.h>
#include "common.h"
@@ -49,7 +49,7 @@ static void __iomem *sts_base;
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ struct clock_event_device *evt = dev_id;
/* Stop the timer tick */
if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
@@ -101,18 +101,7 @@ static void msm_timer_set_mode(enum clock_event_mode mode,
writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}
-static struct clock_event_device msm_clockevent = {
- .name = "gp_timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_next_event = msm_timer_set_next_event,
- .set_mode = msm_timer_set_mode,
-};
-
-static union {
- struct clock_event_device *evt;
- struct clock_event_device * __percpu *percpu_evt;
-} msm_evt;
+static struct clock_event_device __percpu *msm_evt;
static void __iomem *source_base;
@@ -138,23 +127,34 @@ static struct clocksource msm_clocksource = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-#ifdef CONFIG_LOCAL_TIMERS
+static int msm_timer_irq;
+static int msm_timer_has_ppi;
+
static int msm_local_timer_setup(struct clock_event_device *evt)
{
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
-
- evt->irq = msm_clockevent.irq;
- evt->name = "local_timer";
- evt->features = msm_clockevent.features;
- evt->rating = msm_clockevent.rating;
+ int cpu = smp_processor_id();
+ int err;
+
+ evt->irq = msm_timer_irq;
+ evt->name = "msm_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = 200;
evt->set_mode = msm_timer_set_mode;
evt->set_next_event = msm_timer_set_next_event;
+ evt->cpumask = cpumask_of(cpu);
+
+ clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);
+
+ if (msm_timer_has_ppi) {
+ enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
+ } else {
+ err = request_irq(evt->irq, msm_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING |
+ IRQF_TRIGGER_RISING, "gp_timer", evt);
+ if (err)
+ pr_err("request_irq failed\n");
+ }
- *__this_cpu_ptr(msm_evt.percpu_evt) = evt;
- clockevents_config_and_register(evt, GPT_HZ, 4, 0xf0000000);
- enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
return 0;
}
@@ -164,11 +164,28 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
disable_percpu_irq(evt->irq);
}
-static struct local_timer_ops msm_local_timer_ops = {
- .setup = msm_local_timer_setup,
- .stop = msm_local_timer_stop,
+static int msm_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ msm_local_timer_setup(this_cpu_ptr(msm_evt));
+ break;
+ case CPU_DYING:
+ msm_local_timer_stop(this_cpu_ptr(msm_evt));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_timer_cpu_nb = {
+ .notifier_call = msm_timer_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static notrace u32 msm_sched_clock_read(void)
{
@@ -178,38 +195,35 @@ static notrace u32 msm_sched_clock_read(void)
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
- struct clock_event_device *ce = &msm_clockevent;
struct clocksource *cs = &msm_clocksource;
- int res;
+ int res = 0;
+
+ msm_timer_irq = irq;
+ msm_timer_has_ppi = percpu;
+
+ msm_evt = alloc_percpu(struct clock_event_device);
+ if (!msm_evt) {
+ pr_err("memory allocation failed for clockevents\n");
+ goto err;
+ }
- ce->cpumask = cpumask_of(0);
- ce->irq = irq;
+ if (percpu)
+ res = request_percpu_irq(irq, msm_timer_interrupt,
+ "gp_timer", msm_evt);
- clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
- if (percpu) {
- msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
- if (!msm_evt.percpu_evt) {
- pr_err("memory allocation failed for %s\n", ce->name);
+ if (res) {
+ pr_err("request_percpu_irq failed\n");
+ } else {
+ res = register_cpu_notifier(&msm_timer_cpu_nb);
+ if (res) {
+ free_percpu_irq(irq, msm_evt);
goto err;
}
- *__this_cpu_ptr(msm_evt.percpu_evt) = ce;
- res = request_percpu_irq(ce->irq, msm_timer_interrupt,
- ce->name, msm_evt.percpu_evt);
- if (!res) {
- enable_percpu_irq(ce->irq, IRQ_TYPE_EDGE_RISING);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&msm_local_timer_ops);
-#endif
- }
- } else {
- msm_evt.evt = ce;
- res = request_irq(ce->irq, msm_timer_interrupt,
- IRQF_TIMER | IRQF_NOBALANCING |
- IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
+
+ /* Immediately configure the timer on the boot CPU */
+ msm_local_timer_setup(__this_cpu_ptr(msm_evt));
}
- if (res)
- pr_err("request_irq failed for %s\n", ce->name);
err:
writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
res = clocksource_register_hz(cs, dgt_hz);
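
The MSM conversion also has to cope with both interrupt flavours: a per-CPU PPI is requested once against the whole per-CPU area, while a plain SPI is requested from the local setup path. A condensed sketch of that split follows; the wrapper msm_request_timer_irq() is invented, while the flags, names and dev_id handling mirror the diff, and the driver's oneshot ack/stop sequence is elided.

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *msm_evt;

static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
	/* per-CPU IRQs hand the handler this CPU's slot directly */
	struct clock_event_device *evt = dev_id;

	if (evt->event_handler)
		evt->event_handler(evt);
	return IRQ_HANDLED;
}

static int msm_request_timer_irq(int irq, bool percpu)
{
	if (percpu)
		/* one registration for all CPUs; dev_id is the percpu base */
		return request_percpu_irq(irq, msm_timer_interrupt,
					  "gp_timer", msm_evt);

	/* shared interrupt: tie it to the boot CPU's clockevent */
	return request_irq(irq, msm_timer_interrupt,
			   IRQF_TIMER | IRQF_NOBALANCING |
			   IRQF_TRIGGER_RISING, "gp_timer",
			   this_cpu_ptr(msm_evt));
}
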
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3d6ee149d3d7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -37,9 +37,8 @@ config ARCH_OMAP4
select CACHE_L2X0
select CPU_V7
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
- select LOCAL_TIMERS if SMP
select OMAP_INTERCONNECT
select PL310_ERRATA_588369
select PL310_ERRATA_727915
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b37e1fcbad56..8e99ca368e07 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -600,7 +600,6 @@ static OMAP_SYS_32K_TIMER_INIT(4, 1, "timer_32k_ck", "ti,timer-alwon",
#endif
#ifdef CONFIG_ARCH_OMAP4
-#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29);
void __init omap4_local_timer_init(void)
{
@@ -619,12 +618,6 @@ void __init omap4_local_timer_init(void)
pr_err("twd_local_timer_register failed %d\n", err);
}
}
-#else /* CONFIG_LOCAL_TIMERS */
-void __init omap4_local_timer_init(void)
-{
- omap4_sync32k_timer_init();
-}
-#endif /* CONFIG_LOCAL_TIMERS */
#endif /* CONFIG_ARCH_OMAP4 */
#ifdef CONFIG_SOC_OMAP5
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index d210c0f9c2c4..9db2029aa632 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -13,7 +13,7 @@ config REALVIEW_EB_A9MP
depends on MACH_REALVIEW_EB
select CPU_V7
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
@@ -26,7 +26,7 @@ config REALVIEW_EB_ARM11MP
select ARCH_HAS_BARRIERS if SMP
select CPU_V6K
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
@@ -48,7 +48,7 @@ config MACH_REALVIEW_PB11MP
select ARM_GIC
select CPU_V6K
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_PATA_PLATFORM
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
@@ -92,7 +92,7 @@ config MACH_REALVIEW_PBX
select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
select ARM_GIC
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_PATA_PLATFORM
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-spear/Kconfig b/arch/arm/mach-spear/Kconfig
index 442917eedff3..df0d59afeb40 100644
--- a/arch/arm/mach-spear/Kconfig
+++ b/arch/arm/mach-spear/Kconfig
@@ -23,7 +23,7 @@ config ARCH_SPEAR13XX
select CPU_V7
select GPIO_SPEAR_SPICS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
select PINCTRL
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index ef3a8da49b2d..59925cc896fb 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -8,7 +8,7 @@ config ARCH_TEGRA
select COMMON_CLK
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_CLK
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index b19b07204aaf..99a28d628297 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -8,7 +8,7 @@ config ARCH_U8500
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
help
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index b8bbabec6310..83c8677bb181 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -10,7 +10,7 @@ config ARCH_VEXPRESS
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_CLK
select HAVE_PATA_PLATFORM
select HAVE_SMP
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index c1d61f281e68..04f8a4a6e755 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -6,7 +6,7 @@ config ARCH_ZYNQ
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select ICST
select MIGHT_HAVE_CACHE_L2X0
select USE_OF
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index b2bbc415f120..5b34768f4d7c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
@@ -24,7 +25,6 @@
#include <linux/of_address.h>
#include <linux/clocksource.h>
-#include <asm/localtimer.h>
#include <asm/mach/time.h>
#define EXYNOS4_MCTREG(x) (x)
@@ -80,7 +80,7 @@ static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];
struct mct_clock_event_device {
- struct clock_event_device *evt;
+ struct clock_event_device evt;
unsigned long base;
char name[10];
};
@@ -295,8 +295,6 @@ static void exynos4_clockevent_init(void)
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}
-#ifdef CONFIG_LOCAL_TIMERS
-
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
/* Clock event handling */
@@ -369,7 +367,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
- struct clock_event_device *evt = mevt->evt;
+ struct clock_event_device *evt = &mevt->evt;
/*
* This is for supporting oneshot mode.
@@ -391,7 +389,7 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
struct mct_clock_event_device *mevt = dev_id;
- struct clock_event_device *evt = mevt->evt;
+ struct clock_event_device *evt = &mevt->evt;
exynos4_mct_tick_clear(mevt);
@@ -405,8 +403,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
struct mct_clock_event_device *mevt;
unsigned int cpu = smp_processor_id();
- mevt = this_cpu_ptr(&percpu_mct_tick);
- mevt->evt = evt;
+ mevt = container_of(evt, struct mct_clock_event_device, evt);
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
sprintf(mevt->name, "mct_tick%d", cpu);
@@ -448,14 +445,37 @@ static void exynos4_local_timer_stop(struct clock_event_device *evt)
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
-static struct local_timer_ops exynos4_mct_tick_ops = {
- .setup = exynos4_local_timer_setup,
- .stop = exynos4_local_timer_stop,
+static int exynos4_mct_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct mct_clock_event_device *mevt;
+
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_setup(&mevt->evt);
+ break;
+ case CPU_DYING:
+ mevt = this_cpu_ptr(&percpu_mct_tick);
+ exynos4_local_timer_stop(&mevt->evt);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_mct_cpu_nb = {
+ .notifier_call = exynos4_mct_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
+ int err;
+ struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
struct clk *mct_clk, *tick_clk;
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -473,9 +493,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
if (!reg_base)
panic("%s: unable to ioremap mct address space\n", __func__);
-#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
- int err;
err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
exynos4_mct_tick_isr, "MCT",
@@ -484,8 +502,16 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
mct_irqs[MCT_L0_IRQ], err);
}
- local_timer_register(&exynos4_mct_tick_ops);
-#endif /* CONFIG_LOCAL_TIMERS */
+ err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+ if (err)
+ goto out_irq;
+
+ /* Immediately configure the timer on the boot CPU */
+ exynos4_local_timer_setup(&mevt->evt);
+ return;
+
+out_irq:
+ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
}
void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
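
Because exynos_mct.c now embeds the clock_event_device in its per-CPU wrapper instead of storing a pointer to it, the setup path recovers the wrapper with container_of(). A sketch of just that step; the real function goes on to program the per-CPU register base, name and IRQ.

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

struct mct_clock_event_device {
	struct clock_event_device evt;	/* embedded, not a pointer */
	unsigned long base;
	char name[10];
};

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

static int exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt =
		container_of(evt, struct mct_clock_event_device, evt);

	/* mevt->base and mevt->name are filled in from here in the driver */
	(void)mevt;
	return 0;
}
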
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 1b04b7e1d39b..847cab6f6e31 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
@@ -28,9 +29,9 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
-
-#include <asm/localtimer.h>
#include <linux/percpu.h>
+#include <linux/time-armada-370-xp.h>
+
/*
* Timer block registers.
*/
@@ -69,7 +70,7 @@ static bool timer25Mhz = true;
*/
static u32 ticks_per_jiffy;
-static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
+static struct clock_event_device __percpu *armada_370_xp_evt;
static u32 notrace armada_370_xp_read_sched_clock(void)
{
@@ -142,21 +143,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
}
}
-static struct clock_event_device armada_370_xp_clkevt = {
- .name = "armada_370_xp_per_cpu_tick",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .shift = 32,
- .rating = 300,
- .set_next_event = armada_370_xp_clkevt_next_event,
- .set_mode = armada_370_xp_clkevt_mode,
-};
+static int armada_370_xp_clkevt_irq;
static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
/*
* ACK timer interrupt and call event handler.
*/
- struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+ struct clock_event_device *evt = dev_id;
writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
evt->event_handler(evt);
@@ -172,42 +166,55 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
u32 u;
int cpu = smp_processor_id();
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
-
u = readl(local_base + TIMER_CTRL_OFF);
if (timer25Mhz)
writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
else
writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
- evt->name = armada_370_xp_clkevt.name;
- evt->irq = armada_370_xp_clkevt.irq;
- evt->features = armada_370_xp_clkevt.features;
- evt->shift = armada_370_xp_clkevt.shift;
- evt->rating = armada_370_xp_clkevt.rating,
+ evt->name = "armada_370_xp_per_cpu_tick",
+ evt->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC;
+ evt->shift = 32,
+ evt->rating = 300,
evt->set_next_event = armada_370_xp_clkevt_next_event,
evt->set_mode = armada_370_xp_clkevt_mode,
+ evt->irq = armada_370_xp_clkevt_irq;
evt->cpumask = cpumask_of(cpu);
- *__this_cpu_ptr(percpu_armada_370_xp_evt) = evt;
-
clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
enable_percpu_irq(evt->irq, 0);
return 0;
}
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static void armada_370_xp_timer_stop(struct clock_event_device *evt)
{
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
disable_percpu_irq(evt->irq);
}
-static struct local_timer_ops armada_370_xp_local_timer_ops = {
- .setup = armada_370_xp_timer_setup,
- .stop = armada_370_xp_timer_stop,
+static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+ break;
+ case CPU_DYING:
+ armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block armada_370_xp_timer_cpu_nb = {
+ .notifier_call = armada_370_xp_timer_cpu_notify,
};
void __init armada_370_xp_timer_init(void)
@@ -223,9 +230,6 @@ void __init armada_370_xp_timer_init(void)
if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
/* The fixed 25MHz timer is available so let's use it */
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u | TIMER0_25MHZ,
- local_base + TIMER_CTRL_OFF);
u = readl(timer_base + TIMER_CTRL_OFF);
writel(u | TIMER0_25MHZ,
timer_base + TIMER_CTRL_OFF);
@@ -235,9 +239,6 @@ void __init armada_370_xp_timer_init(void)
struct clk *clk = of_clk_get(np, 0);
WARN_ON(IS_ERR(clk));
rate = clk_get_rate(clk);
- u = readl(local_base + TIMER_CTRL_OFF);
- writel(u & ~(TIMER0_25MHZ),
- local_base + TIMER_CTRL_OFF);
u = readl(timer_base + TIMER_CTRL_OFF);
writel(u & ~(TIMER0_25MHZ),
@@ -251,7 +252,7 @@ void __init armada_370_xp_timer_init(void)
* We use timer 0 as clocksource, and private(local) timer 0
* for clockevents
*/
- armada_370_xp_clkevt.irq = irq_of_parse_and_map(np, 4);
+ armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
@@ -276,26 +277,19 @@ void __init armada_370_xp_timer_init(void)
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
- /* Register the clockevent on the private timer of CPU 0 */
- armada_370_xp_clkevt.cpumask = cpumask_of(0);
- clockevents_config_and_register(&armada_370_xp_clkevt,
- timer_clk, 1, 0xfffffffe);
+ register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
- percpu_armada_370_xp_evt = alloc_percpu(struct clock_event_device *);
+ armada_370_xp_evt = alloc_percpu(struct clock_event_device);
/*
* Setup clockevent timer (interrupt-driven).
*/
- *__this_cpu_ptr(percpu_armada_370_xp_evt) = &armada_370_xp_clkevt;
- res = request_percpu_irq(armada_370_xp_clkevt.irq,
+ res = request_percpu_irq(armada_370_xp_clkevt_irq,
armada_370_xp_timer_interrupt,
- armada_370_xp_clkevt.name,
- percpu_armada_370_xp_evt);
- if (!res) {
- enable_percpu_irq(armada_370_xp_clkevt.irq, 0);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&armada_370_xp_local_timer_ops);
-#endif
- }
+ "armada_370_xp_per_cpu_tick",
+ armada_370_xp_evt);
+ /* Immediately configure the timer on the boot CPU */
+ if (!res)
+ armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
}
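
A note on the ordering the Armada 370/XP hunks end up with: the notifier is registered and the per-CPU devices allocated first, the per-CPU IRQ is requested once with the percpu base as dev_id, and only the boot CPU's device is configured on the spot, since CPU_STARTING fires only for CPUs that come online afterwards. Condensed sketch; the wrapper armada_370_xp_clkevt_init() is invented, and bodies shown elsewhere in the diff are only declared, not repeated.

#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *armada_370_xp_evt;
static int armada_370_xp_clkevt_irq;

irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id);
int armada_370_xp_timer_setup(struct clock_event_device *evt);
extern struct notifier_block armada_370_xp_timer_cpu_nb;

static void __init armada_370_xp_clkevt_init(void)
{
	int res;

	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);

	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);

	/* secondaries go through the notifier; the boot CPU is done now */
	if (!res)
		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
}
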
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index 62876baa3ab9..09a17d9a6594 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
@@ -18,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/localtimer.h>
#include <asm/mach/time.h>
#define SIRFSOC_TIMER_32COUNTER_0_CTRL 0x0000
@@ -151,13 +151,7 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
}
-static struct clock_event_device sirfsoc_clockevent = {
- .name = "sirfsoc_clockevent",
- .rating = 200,
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = sirfsoc_timer_set_mode,
- .set_next_event = sirfsoc_timer_set_next_event,
-};
+static struct clock_event_device __percpu *sirfsoc_clockevent;
static struct clocksource sirfsoc_clocksource = {
.name = "sirfsoc_clocksource",
@@ -173,11 +167,8 @@ static struct irqaction sirfsoc_timer_irq = {
.name = "sirfsoc_timer0",
.flags = IRQF_TIMER | IRQF_NOBALANCING,
.handler = sirfsoc_timer_interrupt,
- .dev_id = &sirfsoc_clockevent,
};
-#ifdef CONFIG_LOCAL_TIMERS
-
static struct irqaction sirfsoc_timer1_irq = {
.name = "sirfsoc_timer1",
.flags = IRQF_TIMER | IRQF_NOBALANCING,
@@ -186,24 +177,28 @@ static struct irqaction sirfsoc_timer1_irq = {
static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
{
- /* Use existing clock_event for cpu 0 */
- if (!smp_processor_id())
- return 0;
+ int cpu = smp_processor_id();
+ struct irqaction *action;
+
+ if (cpu == 0)
+ action = &sirfsoc_timer_irq;
+ else
+ action = &sirfsoc_timer1_irq;
- ce->irq = sirfsoc_timer1_irq.irq;
+ ce->irq = action->irq;
ce->name = "local_timer";
- ce->features = sirfsoc_clockevent.features;
- ce->rating = sirfsoc_clockevent.rating;
+ ce->features = CLOCK_EVT_FEAT_ONESHOT;
+ ce->rating = 200;
ce->set_mode = sirfsoc_timer_set_mode;
ce->set_next_event = sirfsoc_timer_set_next_event;
- ce->shift = sirfsoc_clockevent.shift;
- ce->mult = sirfsoc_clockevent.mult;
- ce->max_delta_ns = sirfsoc_clockevent.max_delta_ns;
- ce->min_delta_ns = sirfsoc_clockevent.min_delta_ns;
+ clockevents_calc_mult_shift(ce, CLOCK_TICK_RATE, 60);
+ ce->max_delta_ns = clockevent_delta2ns(-2, ce);
+ ce->min_delta_ns = clockevent_delta2ns(2, ce);
+ ce->cpumask = cpumask_of(cpu);
- sirfsoc_timer1_irq.dev_id = ce;
- BUG_ON(setup_irq(ce->irq, &sirfsoc_timer1_irq));
- irq_set_affinity(sirfsoc_timer1_irq.irq, cpumask_of(1));
+ action->dev_id = ce;
+ BUG_ON(setup_irq(ce->irq, action));
+ irq_set_affinity(action->irq, cpumask_of(cpu));
clockevents_register_device(ce);
return 0;
@@ -211,31 +206,48 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
{
+ int cpu = smp_processor_id();
+
sirfsoc_timer_count_disable(1);
- remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+ if (cpu == 0)
+ remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ else
+ remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
}
-static struct local_timer_ops sirfsoc_local_timer_ops = {
- .setup = sirfsoc_local_timer_setup,
- .stop = sirfsoc_local_timer_stop,
+static int sirfsoc_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Grab cpu pointer in each case to avoid spurious
+ * preemptible warnings
+ */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ break;
+ case CPU_DYING:
+ sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sirfsoc_cpu_nb = {
+ .notifier_call = sirfsoc_cpu_notify,
};
-#endif /* CONFIG_LOCAL_TIMERS */
static void __init sirfsoc_clockevent_init(void)
{
- clockevents_calc_mult_shift(&sirfsoc_clockevent, CLOCK_TICK_RATE, 60);
-
- sirfsoc_clockevent.max_delta_ns =
- clockevent_delta2ns(-2, &sirfsoc_clockevent);
- sirfsoc_clockevent.min_delta_ns =
- clockevent_delta2ns(2, &sirfsoc_clockevent);
-
- sirfsoc_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&sirfsoc_clockevent);
-#ifdef CONFIG_LOCAL_TIMERS
- local_timer_register(&sirfsoc_local_timer_ops);
-#endif
+ sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
+ BUG_ON(!sirfsoc_clockevent);
+
+ BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
+
+ /* Immediately configure the timer on the boot CPU */
+ sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}
/* initialize the kernel jiffy timer source */
@@ -273,8 +285,6 @@ static void __init sirfsoc_marco_timer_init(void)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
- BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
-
sirfsoc_clockevent_init();
}
@@ -288,11 +298,9 @@ static void __init sirfsoc_of_timer_init(struct device_node *np)
if (!sirfsoc_timer_irq.irq)
panic("No irq passed for timer0 via DT\n");
-#ifdef CONFIG_LOCAL_TIMERS
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
if (!sirfsoc_timer1_irq.irq)
panic("No irq passed for timer1 via DT\n");
-#endif
sirfsoc_marco_timer_init();
}
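
In timer-marco.c the mult/shift and delta limits that used to be computed once for the global sirfsoc_clockevent are now derived per device inside sirfsoc_local_timer_setup(). A sketch of just that arithmetic; the helper name and the rate parameter are invented (the driver itself passes CLOCK_TICK_RATE).

#include <linux/clockchips.h>
#include <linux/types.h>

static void sirfsoc_config_clockevent(struct clock_event_device *ce, u32 rate)
{
	ce->features	= CLOCK_EVT_FEAT_ONESHOT;
	ce->rating	= 200;

	/* pick mult/shift good for deferments of up to ~60 s at 'rate' Hz */
	clockevents_calc_mult_shift(ce, rate, 60);

	/* -2 wraps to the largest programmable latch, 2 is the smallest */
	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
	ce->min_delta_ns = clockevent_delta2ns(2, ce);
}
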
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
index dfdfdc03115b..6fb0856b9405 100644
--- a/include/linux/time-armada-370-xp.h
+++ b/include/linux/time-armada-370-xp.h
@@ -11,8 +11,6 @@
#ifndef __TIME_ARMADA_370_XPPRCMU_H
#define __TIME_ARMADA_370_XPPRCMU_H
-#include <linux/init.h>
-
-void __init armada_370_xp_timer_init(void);
+void armada_370_xp_timer_init(void);
#endif