Diffstat (limited to 'arch/sh')
-rw-r--r--   arch/sh/Kconfig                    |   4
-rw-r--r--   arch/sh/include/asm/thread_info.h  |   2
-rw-r--r--   arch/sh/kernel/idle.c              | 101
-rw-r--r--   arch/sh/kernel/smp.c               |   2
4 files changed, 13 insertions, 96 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5e859633ce69..1ea597c6497a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -33,6 +33,7 @@ config SUPERH
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_SHOW
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_POLL_SETUP
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
 	select GENERIC_STRNCPY_FROM_USER
@@ -148,9 +149,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
 	def_bool n
 
-config ARCH_HAS_DEFAULT_IDLE
-	def_bool y
-
 config NO_IOPORT
 	def_bool !PCI
 	depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 7d5ac4e48485..45a93669289d 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -207,8 +207,6 @@ static inline bool test_and_clear_restore_sigmask(void)
 	return true;
 }
 
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3d5a1b387cc0..2ea4483fd722 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -24,98 +24,24 @@
 
 static void (*sh_idle)(void);
 
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
-	return !hlt_counter;
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
 {
+	set_bl_bit();
 	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
+	/* Isn't this racy ? */
+	cpu_sleep();
+	clear_bl_bit();
 }
 
-void default_idle(void)
+void arch_cpu_idle_dead(void)
 {
-	if (hlt_works()) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-
-		set_bl_bit();
-		if (!need_resched()) {
-			local_irq_enable();
-			cpu_sleep();
-		} else
-			local_irq_enable();
-
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		clear_bl_bit();
-	} else
-		poll_idle();
+	play_dead();
 }
 
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				sh_idle();
-			/*
-			 * Sanity check to ensure that sh_idle() returns
-			 * with IRQs enabled
-			 */
-			WARN_ON(irqs_disabled());
-			start_critical_timings();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	if (cpuidle_idle_call())
+		sh_idle();
 }
 
 void __init select_idle_routine(void)
@@ -123,13 +49,8 @@ void __init select_idle_routine(void)
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (sh_idle)
-		return;
-
-	if (hlt_works())
+	if (!sh_idle)
 		sh_idle = default_idle;
-	else
-		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 2062aa88af41..45696451f0ea 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -203,7 +203,7 @@ asmlinkage void __cpuinit start_secondary(void)
 	set_cpu_online(cpu, true);
 	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }
 
 extern struct {
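
For reference, the hooks this diff wires up (arch_cpu_idle(), arch_cpu_idle_dead(), default_idle() and the "hlt"/"nohlt" handling selected via GENERIC_IDLE_POLL_SETUP) are driven by the generic idle loop that cpu_startup_entry() enters. The following is a simplified sketch of that loop, not the exact upstream kernel/cpu/idle.c code; polling-flag management, broadcast-timer checks and tracing hooks are omitted:

	/*
	 * Simplified sketch of the generic idle loop entered via
	 * cpu_startup_entry(); many details are left out.
	 */
	void cpu_startup_entry(enum cpuhp_state state)
	{
		arch_cpu_idle_prepare();
		while (1) {
			tick_nohz_idle_enter();

			while (!need_resched()) {
				/* An offlined CPU is not expected to return from this call. */
				if (cpu_is_offline(smp_processor_id()))
					arch_cpu_idle_dead();	/* sh: play_dead() */

				local_irq_disable();
				if (cpu_idle_force_poll)	/* "nohlt" sets this via GENERIC_IDLE_POLL_SETUP */
					cpu_idle_poll();	/* spin on need_resched() with IRQs enabled */
				else
					arch_cpu_idle();	/* sh: cpuidle_idle_call() or sh_idle() */
			}

			tick_nohz_idle_exit();
			schedule_preempt_disabled();
		}
	}

This is why the sh-specific hlt_counter/poll_idle machinery and the open-coded cpu_idle() loop above can be deleted: the generic loop already handles need_resched() polling, tick handling and CPU offlining, and the architecture only supplies the small callbacks.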