author      Linus Torvalds <torvalds@linux-foundation.org>    2017-07-07 13:55:45 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2017-07-07 13:55:45 -0700
commit      d691b7e7d1b5186eae62fd32adee65d3316bfdf6 (patch)
tree        3808f7deab74f68267b9fdd6a35dcda9e50142aa /drivers/cpuidle
parent      b59eea554f57befa2aa3172fcb63e521bdd850dd (diff)
parent      1e0fc9d1eb2b0241a03e0a02bcdb9b5b641b9d35 (diff)
download    linux-d691b7e7d1b5186eae62fd32adee65d3316bfdf6.tar.bz2
Merge tag 'powerpc-4.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
"Highlights include:
- Support for STRICT_KERNEL_RWX on 64-bit server CPUs.
- Platform support for FSP2 (476fpe) board
- Enable ZONE_DEVICE on 64-bit server CPUs.
- Generic & powerpc spin loop primitives to optimise busy waiting (a sketch of the idiom follows the quoted message)
- Convert VDSO update function to use new update_vsyscall() interface
- Optimisations to hypercall/syscall/context-switch paths
- Improvements to the CPU idle code on Power8 and Power9.
As well as many other fixes and improvements.
Thanks to: Akshay Adiga, Andrew Donnellan, Andrew Jeffery, Anshuman
Khandual, Anton Blanchard, Balbir Singh, Benjamin Herrenschmidt,
Christophe Leroy, Christophe Lombard, Colin Ian King, Dan Carpenter,
Gautham R. Shenoy, Hari Bathini, Ian Munsie, Ivan Mikhaylov, Javier
Martinez Canillas, Madhavan Srinivasan, Masahiro Yamada, Matt Brown,
Michael Neuling, Michal Suchanek, Murilo Opsfelder Araujo, Naveen N.
Rao, Nicholas Piggin, Oliver O'Halloran, Paul Mackerras, Pavel Machek,
Russell Currey, Santosh Sivaraj, Stephen Rothwell, Thiago Jung
Bauermann, Yang Li"
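
One of the highlights above, the generic & powerpc spin loop primitives, gives busy-wait loops a way to lower the SMT hardware thread priority while spinning. Assuming these are the spin_begin()/spin_cpu_relax()/spin_end() helpers (which fall back to plain cpu_relax() on architectures that do not override them), a caller-side busy-wait might look like the sketch below; wait_for_flag() and its flag parameter are made up for illustration and are not part of this merge.

#include <linux/compiler.h>
#include <linux/processor.h>

/*
 * Hypothetical helper: spin until another CPU sets *flag, keeping the
 * hardware thread at low priority for the duration of the busy-wait.
 * This is a sketch of the spin loop primitives named in the pull
 * request, not code from this merge.
 */
static void wait_for_flag(int *flag)
{
	if (READ_ONCE(*flag))
		return;

	spin_begin();			/* enter the low-priority spin section */
	while (!READ_ONCE(*flag))
		spin_cpu_relax();	/* low-priority relax; cpu_relax() elsewhere */
	spin_end();			/* restore normal thread priority */
}

The same loop can also be written as spin_until_cond(READ_ONCE(*flag)), which expands to the begin/relax/end pattern above.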
* tag 'powerpc-4.13-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (158 commits)
powerpc/Kconfig: Enable STRICT_KERNEL_RWX for some configs
powerpc/mm/radix: Implement STRICT_RWX/mark_rodata_ro() for Radix
powerpc/mm/hash: Implement mark_rodata_ro() for hash
powerpc/vmlinux.lds: Align __init_begin to 16M
powerpc/lib/code-patching: Use alternate map for patch_instruction()
powerpc/xmon: Add patch_instruction() support for xmon
powerpc/kprobes/optprobes: Use patch_instruction()
powerpc/kprobes: Move kprobes over to patch_instruction()
powerpc/mm/radix: Fix execute permissions for interrupt_vectors
powerpc/pseries: Fix passing of pp0 in updatepp() and updateboltedpp()
powerpc/64s: Blacklist rtas entry/exit from kprobes
powerpc/64s: Blacklist functions invoked on a trap
powerpc/64s: Un-blacklist system_call() from kprobes
powerpc/64s: Move system_call() symbol to just after setting MSR_EE
powerpc/64s: Blacklist system_call() and system_call_common() from kprobes
powerpc/64s: Convert .L__replay_interrupt_return to a local label
powerpc64/elfv1: Only dereference function descriptor for non-text symbols
cxl: Export library to support IBM XSL
powerpc/dts: Use #include "..." to include local DT
powerpc/perf/hv-24x7: Aggregate result elements on POWER9 SMT8
...
Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 53
-rw-r--r--  drivers/cpuidle/cpuidle-pseries.c | 22
2 files changed, 47 insertions(+), 28 deletions(-)
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 12409a519cc5..37b0698b7193 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -32,18 +32,18 @@ static struct cpuidle_driver powernv_idle_driver = {
 	.owner = THIS_MODULE,
 };
 
-static int max_idle_state;
-static struct cpuidle_state *cpuidle_state_table;
+static int max_idle_state __read_mostly;
+static struct cpuidle_state *cpuidle_state_table __read_mostly;
 
 struct stop_psscr_table {
 	u64 val;
 	u64 mask;
 };
 
-static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX];
+static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
 
-static u64 snooze_timeout;
-static bool snooze_timeout_en;
+static u64 snooze_timeout __read_mostly;
+static bool snooze_timeout_en __read_mostly;
 
 static int snooze_loop(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
@@ -51,21 +51,30 @@ static int snooze_loop(struct cpuidle_device *dev,
 {
 	u64 snooze_exit_time;
 
-	local_irq_enable();
 	set_thread_flag(TIF_POLLING_NRFLAG);
+	local_irq_enable();
+
 	snooze_exit_time = get_tb() + snooze_timeout;
 	ppc64_runlatch_off();
 	HMT_very_low();
 	while (!need_resched()) {
-		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time)
+		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
+			/*
+			 * Task has not woken up but we are exiting the polling
+			 * loop anyway. Require a barrier after polling is
+			 * cleared to order subsequent test of need_resched().
+			 */
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+			smp_mb();
 			break;
+		}
 	}
 
 	HMT_medium();
 	ppc64_runlatch_on();
 	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb();
+
 	return index;
 }
 
@@ -73,9 +82,8 @@ static int nap_loop(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
-	ppc64_runlatch_off();
-	power7_idle();
-	ppc64_runlatch_on();
+	power7_idle_type(PNV_THREAD_NAP);
+
 	return index;
 }
 
@@ -98,7 +106,8 @@ static int fastsleep_loop(struct cpuidle_device *dev,
 	new_lpcr &= ~LPCR_PECE1;
 	mtspr(SPRN_LPCR, new_lpcr);
 
-	power7_sleep();
+
+	power7_idle_type(PNV_THREAD_SLEEP);
 
 	mtspr(SPRN_LPCR, old_lpcr);
 
@@ -110,10 +119,8 @@ static int stop_loop(struct cpuidle_device *dev,
 		     struct cpuidle_driver *drv,
 		     int index)
 {
-	ppc64_runlatch_off();
-	power9_idle_stop(stop_psscr_table[index].val,
+	power9_idle_type(stop_psscr_table[index].val,
 			 stop_psscr_table[index].mask);
-	ppc64_runlatch_on();
 	return index;
 }
 
@@ -354,6 +361,7 @@ static int powernv_add_idle_states(void)
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
+		bool stops_timebase = false;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
@@ -381,6 +389,9 @@ static int powernv_add_idle_states(void)
 			}
 		}
 
+		if (flags[i] & OPAL_PM_TIMEBASE_STOP)
+			stops_timebase = true;
+
 		/*
 		 * For nap and fastsleep, use default target_residency
 		 * values if f/w does not expose it.
@@ -392,8 +403,7 @@ static int powernv_add_idle_states(void)
 			add_powernv_state(nr_idle_states, "Nap",
 					  CPUIDLE_FLAG_NONE, nap_loop,
 					  target_residency, exit_latency, 0, 0);
-		} else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
-				!(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+		} else if (has_stop_states && !stops_timebase) {
 			add_powernv_state(nr_idle_states, names[i],
 					  CPUIDLE_FLAG_NONE, stop_loop,
 					  target_residency, exit_latency,
@@ -405,8 +415,8 @@ static int powernv_add_idle_states(void)
 		 * within this config dependency check.
 		 */
 #ifdef CONFIG_TICK_ONESHOT
-		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
-			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
+		else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
 			if (!rc)
 				target_residency = 300000;
 			/* Add FASTSLEEP state */
@@ -414,14 +424,15 @@ static int powernv_add_idle_states(void)
 					  CPUIDLE_FLAG_TIMER_STOP,
 					  fastsleep_loop,
 					  target_residency, exit_latency, 0, 0);
-		} else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
-				(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+		} else if (has_stop_states && stops_timebase) {
 			add_powernv_state(nr_idle_states, names[i],
 					  CPUIDLE_FLAG_TIMER_STOP, stop_loop,
 					  target_residency, exit_latency,
 					  psscr_val[i], psscr_mask[i]);
 		}
 #endif
+		else
+			continue;
 		nr_idle_states++;
 	}
 out:
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 166ccd711ec9..e9b3853d93ea 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -25,10 +25,10 @@ struct cpuidle_driver pseries_idle_driver = {
 	.owner = THIS_MODULE,
 };
 
-static int max_idle_state;
-static struct cpuidle_state *cpuidle_state_table;
-static u64 snooze_timeout;
-static bool snooze_timeout_en;
+static int max_idle_state __read_mostly;
+static struct cpuidle_state *cpuidle_state_table __read_mostly;
+static u64 snooze_timeout __read_mostly;
+static bool snooze_timeout_en __read_mostly;
 
 static inline void idle_loop_prolog(unsigned long *in_purr)
 {
@@ -62,21 +62,29 @@ static int snooze_loop(struct cpuidle_device *dev,
 	unsigned long in_purr;
 	u64 snooze_exit_time;
 
+	set_thread_flag(TIF_POLLING_NRFLAG);
+
 	idle_loop_prolog(&in_purr);
 	local_irq_enable();
-	set_thread_flag(TIF_POLLING_NRFLAG);
 	snooze_exit_time = get_tb() + snooze_timeout;
 
 	while (!need_resched()) {
 		HMT_low();
 		HMT_very_low();
-		if (snooze_timeout_en && get_tb() > snooze_exit_time)
+		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
+			/*
+			 * Task has not woken up but we are exiting the polling
+			 * loop anyway. Require a barrier after polling is
+			 * cleared to order subsequent test of need_resched().
+			 */
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+			smp_mb();
 			break;
+		}
 	}
 
 	HMT_medium();
 	clear_thread_flag(TIF_POLLING_NRFLAG);
-	smp_mb();
 	idle_loop_epilog(in_purr);
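
Put back together, the powernv snooze_loop() reads as follows once the hunks above are applied (reconstructed from the diff, so line breaks and indentation are approximate rather than authoritative). The point of the reordering is that TIF_POLLING_NRFLAG is now set before interrupts are enabled, and when the snooze timeout expires the flag is cleared with an smp_mb() before the loop's final need_resched() test, matching the comment added in the patch.

static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	/* Advertise polling before enabling interrupts. */
	set_thread_flag(TIF_POLLING_NRFLAG);
	local_irq_enable();

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	HMT_very_low();
	while (!need_resched()) {
		if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
			/*
			 * Task has not woken up but we are exiting the polling
			 * loop anyway. Require a barrier after polling is
			 * cleared to order subsequent test of need_resched().
			 */
			clear_thread_flag(TIF_POLLING_NRFLAG);
			smp_mb();
			break;
		}
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);

	return index;
}

The pseries snooze_loop() receives the equivalent treatment in the second hunk, differing only in the idle_loop_prolog()/idle_loop_epilog() bookkeeping around the polling loop.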