author | Thomas Gleixner <tglx@linutronix.de> | 2015-04-03 02:05:15 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2015-04-03 08:44:33 +0200
commit | 1fe5d5c3c9ba0c4ade18e3325cba0ffe35127941 (patch)
tree | b07d74d6468aa0bf1e2b107bb82d62b9fe6638d4 /kernel/time
parent | 89feddbfe7023ccfb4a6d7f5e3f5161d91b28b18 (diff)
download | linux-1fe5d5c3c9ba0c4ade18e3325cba0ffe35127941.tar.bz2
clockevents: Provide explicit broadcast oneshot control functions
clockevents_notify() is a leftover from the early design of the
clockevents facility. It's really not a notification mechanism,
it's a multiplex call. We are far better off with explicit
calls instead of this monstrosity.
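For illustration, here is roughly how a call site changes once it is
converted. The wrapper functions below are hypothetical and only sketch
the before/after shape; real call sites (e.g. in the idle/PM paths) are
converted in follow-up patches and are not part of this diff:

```c
#include <linux/clockchips.h>
#include <linux/tick.h>

/* Hypothetical caller before the conversion: one multiplexed entry
 * point, dispatching internally on the "reason" argument. */
static void broadcast_enter_old(int cpu)
{
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
}

/* The same caller after the conversion: an explicit, self-documenting
 * call into the broadcast code. */
static void broadcast_enter_new(void)
{
	tick_broadcast_enter();
}
```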
Split out the broadcast oneshot control into a separate function
and provide inline helpers. Switch clockevents_notify() over.
clockevents_notify() itself will go away once all callers are converted.
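The inline helpers themselves live in include/linux/tick.h, which is
outside the kernel/time-limited diff shown below. As a sketch, based on
the new tick_broadcast_oneshot_control() signature introduced in this
patch, the header-side split looks roughly like this:

```c
/* Sketch of the header-side split; the exact layout of
 * include/linux/tick.h is assumed, not shown in this diff. */
enum tick_broadcast_state {
	TICK_BROADCAST_EXIT,
	TICK_BROADCAST_ENTER,
};

#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return 0;
}
#endif

/* Explicit, readable entry points replacing the BROADCAST_ENTER/EXIT
 * cases of clockevents_notify(). */
static inline int tick_broadcast_enter(void)
{
	return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
}

static inline void tick_broadcast_exit(void)
{
	tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
}
```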
This also gets rid of the nested locking of clockevents_lock and
broadcast_lock. The broadcast oneshot control functions do not
require clockevents_lock. Only the managing functions
(setup/shutdown/suspend/resume of the broadcast device) require
clockevents_lock.
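To make the locking point concrete, a minimal sketch of a deep-idle
entry path using the new helpers is shown below; the function names
enter_deep_idle_state(), arch_shallow_idle() and arch_enter_deep_idle()
are illustrative only and not part of this patch:

```c
#include <linux/tick.h>

/* Hypothetical arch hooks, declared only to keep the sketch
 * self-contained. */
extern void arch_shallow_idle(void);
extern void arch_enter_deep_idle(void);

/* Illustrative cpuidle-style caller. tick_broadcast_enter() is called
 * with interrupts disabled and takes only tick_broadcast_lock;
 * clockevents_lock is never acquired on this path. */
static void enter_deep_idle_state(void)
{
	/*
	 * A non-zero return (-EBUSY) means this CPU is used to broadcast
	 * wakeups and must not stop its local clock event device.
	 */
	if (tick_broadcast_enter()) {
		arch_shallow_idle();	/* shallow fallback state */
		return;
	}

	arch_enter_deep_idle();		/* local tick device may stop here */

	tick_broadcast_exit();		/* re-arm the local device */
}
```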
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Alexandre Courbot <gnurou@gmail.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Warren <swarren@wwwdotorg.org>
Cc: Thierry Reding <thierry.reding@gmail.com>
Cc: Tony Lindgren <tony@atomide.com>
Link: http://lkml.kernel.org/r/13000649.8qZuEDV0OA@vostro.rjw.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
-rw-r--r-- | kernel/time/clockevents.c | 4
-rw-r--r-- | kernel/time/tick-broadcast.c | 28
-rw-r--r-- | kernel/time/tick-internal.h | 2
3 files changed, 20 insertions, 14 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index dba0b83708b3..7791b1c94ef2 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -656,8 +656,10 @@ int clockevents_notify(unsigned long reason, void *arg)

 	switch (reason) {
 	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
+		tick_broadcast_enter();
+		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-		ret = tick_broadcast_oneshot_control(reason);
+		tick_broadcast_exit();
 		break;

 	case CLOCK_EVT_NOTIFY_CPU_DYING:
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 1a0bee04ef8c..55e43f20987a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -687,18 +687,23 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu)
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }

-/*
- * Powerstate information: The system enters/leaves a state, where
- * affected devices might stop
+/**
+ * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
+ * @state: The target state (enter/exit)
+ *
+ * The system enters/leaves a state, where affected devices might stop
  * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
+ *
+ * Called with interrupts disabled, so clockevents_lock is not
+ * required here because the local clock event device cannot go away
+ * under us.
  */
-int tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
-	unsigned long flags;
-	ktime_t now;
 	int cpu, ret = 0;
+	ktime_t now;

 	/*
 	 * Periodic mode does not care about the enter/exit of power
@@ -711,17 +716,17 @@ int tick_broadcast_oneshot_control(unsigned long reason)
 	 * We are called with preemtion disabled from the depth of the
 	 * idle code, so we can't be moved away.
 	 */
-	cpu = smp_processor_id();
-	td = &per_cpu(tick_cpu_device, cpu);
+	td = this_cpu_ptr(&tick_cpu_device);
 	dev = td->evtdev;

 	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
 		return 0;

+	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
+	cpu = smp_processor_id();

-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
+	if (state == TICK_BROADCAST_ENTER) {
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
 			broadcast_shutdown_local(bc, dev);
@@ -813,9 +818,10 @@ int tick_broadcast_oneshot_control(unsigned long reason)
 		}
 	}
 out:
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+	raw_spin_unlock(&tick_broadcast_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

 /*
  * Reset the one shot broadcast for a cpu
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 62e331d1bc76..0266f9dbd114 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -117,7 +117,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 /* Functions related to oneshot broadcasting */
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
-extern int tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_broadcast_oneshot_active(void);
@@ -126,7 +125,6 @@ bool tick_broadcast_oneshot_available(void);
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
-static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }