author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-10 11:49:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-10 11:49:27 -0700
commit     1bc27dec7ea5ac01f126734b723acc5f3cbe5713
tree       92907b34cb4553dc3ac5999bc991858ce7b876fb
parent     d56fd98612aef73f85ec0c44e86fe04a9d3325ee
parent     67e59f8d019fb097f35c82533cc9b27bb392e5b1
Merge tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki:
"These fix an intel_idle issue introduced during the 5.16 development
cycle and two recent regressions in the system reboot/poweroff code.
Specifics:
- Fix CPUIDLE_FLAG_IRQ_ENABLE handling in intel_idle (Peter Zijlstra)
- Allow all platforms to use the global poweroff handler and make
non-syscall poweroff code paths work again (Dmitry Osipenko)"
* tag 'pm-5.19-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
cpuidle,intel_idle: Fix CPUIDLE_FLAG_IRQ_ENABLE
kernel/reboot: Fix powering off using a non-syscall code paths
kernel/reboot: Use static handler for register_platform_power_off()
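
The intel_idle change gives IRQ-enabled C-states a dedicated ->enter() callback so that the MWAIT entry runs inside an explicit interrupt window and IRQs are disabled again before control returns to the cpuidle core. A minimal sketch of that pattern, assuming a __my_idle() helper that stands in for the driver's MWAIT-based entry (the my_* names are illustrative, not part of intel_idle):

#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/* Sketch only: mirrors the shape of intel_idle_irq() in the diff below. */
static int __my_idle(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv, int index)
{
	/* architecture-specific idle entry would go here */
	return index;
}

static int my_idle_irq(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv, int index)
{
	int ret;

	raw_local_irq_enable();		/* raw_ variant keeps irq tracing out of the idle path */
	ret = __my_idle(dev, drv, index);
	raw_local_irq_disable();	/* the cpuidle core expects IRQs off on return */

	return ret;
}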
-rw-r--r--  drivers/idle/intel_idle.c | 32
-rw-r--r--  kernel/reboot.c           | 87
2 files changed, 87 insertions(+), 32 deletions(-)
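
The kernel/reboot.c hunks below all revolve around the sys-off handler API. As a rough usage sketch, not code from this merge, a platform driver registering a power-off handler could look like the following; the registration call, the SYS_OFF_MODE_POWER_OFF/SYS_OFF_PRIO_DEFAULT constants and the ERR_PTR()-style error handling are taken from the diff, while the my_* names and the callback body are illustrative:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static struct sys_off_handler *my_handler;

/* Illustrative callback: a real driver would poke its PMIC or firmware here. */
static int my_power_off(struct sys_off_data *data)
{
	/* ... trigger the platform power-off ... */
	return NOTIFY_DONE;
}

static int my_driver_init(void)
{
	my_handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					      SYS_OFF_PRIO_DEFAULT,
					      my_power_off, NULL);
	if (IS_ERR(my_handler))
		return PTR_ERR(my_handler);

	return 0;
}

static void my_driver_exit(void)
{
	/* After this series, passing an ERR_PTR() or NULL here is tolerated. */
	unregister_sys_off_handler(my_handler);
}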
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b9bb94bd0f67..424ef470223d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata;
 #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
 #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
 
+static __always_inline int __intel_idle(struct cpuidle_device *dev,
+					struct cpuidle_driver *drv, int index)
+{
+	struct cpuidle_state *state = &drv->states[index];
+	unsigned long eax = flg2MWAIT(state->flags);
+	unsigned long ecx = 1; /* break on interrupt flag */
+
+	mwait_idle_with_hints(eax, ecx);
+
+	return index;
+}
+
 /**
  * intel_idle - Ask the processor to enter the given idle state.
  * @dev: cpuidle device of the target CPU.
@@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata;
 static __cpuidle int intel_idle(struct cpuidle_device *dev,
 				struct cpuidle_driver *drv, int index)
 {
-	struct cpuidle_state *state = &drv->states[index];
-	unsigned long eax = flg2MWAIT(state->flags);
-	unsigned long ecx = 1; /* break on interrupt flag */
+	return __intel_idle(dev, drv, index);
+}
 
-	if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
-		local_irq_enable();
+static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+				    struct cpuidle_driver *drv, int index)
+{
+	int ret;
 
-	mwait_idle_with_hints(eax, ecx);
+	raw_local_irq_enable();
+	ret = __intel_idle(dev, drv, index);
+	raw_local_irq_disable();
 
-	return index;
+	return ret;
 }
 
 /**
@@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 		/* Structure copy. */
 		drv->states[drv->state_count] = cpuidle_state_table[cstate];
 
+		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
+			drv->states[drv->state_count].enter = intel_idle_irq;
+
 		if ((disabled_states_mask & BIT(drv->state_count)) ||
 		    ((icpu->use_acpi || force_use_acpi) &&
 		     intel_idle_off_by_default(mwait_hint) &&
diff --git a/kernel/reboot.c b/kernel/reboot.c
index a091145ee710..b5a71d1ff603 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -315,6 +315,43 @@ static int sys_off_notify(struct notifier_block *nb,
 	return handler->sys_off_cb(&data);
 }
 
+static struct sys_off_handler platform_sys_off_handler;
+
+static struct sys_off_handler *alloc_sys_off_handler(int priority)
+{
+	struct sys_off_handler *handler;
+	gfp_t flags;
+
+	/*
+	 * Platforms like m68k can't allocate sys_off handler dynamically
+	 * at the early boot time because memory allocator isn't available yet.
+	 */
+	if (priority == SYS_OFF_PRIO_PLATFORM) {
+		handler = &platform_sys_off_handler;
+		if (handler->cb_data)
+			return ERR_PTR(-EBUSY);
+	} else {
+		if (system_state > SYSTEM_RUNNING)
+			flags = GFP_ATOMIC;
+		else
+			flags = GFP_KERNEL;
+
+		handler = kzalloc(sizeof(*handler), flags);
+		if (!handler)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	return handler;
+}
+
+static void free_sys_off_handler(struct sys_off_handler *handler)
+{
+	if (handler == &platform_sys_off_handler)
+		memset(handler, 0, sizeof(*handler));
+	else
+		kfree(handler);
+}
+
 /**
  * register_sys_off_handler - Register sys-off handler
  * @mode: Sys-off mode
@@ -345,9 +382,9 @@ register_sys_off_handler(enum sys_off_mode mode,
 	struct sys_off_handler *handler;
 	int err;
 
-	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-	if (!handler)
-		return ERR_PTR(-ENOMEM);
+	handler = alloc_sys_off_handler(priority);
+	if (IS_ERR(handler))
+		return handler;
 
 	switch (mode) {
 	case SYS_OFF_MODE_POWER_OFF_PREPARE:
@@ -364,7 +401,7 @@ register_sys_off_handler(enum sys_off_mode mode,
 		break;
 
 	default:
-		kfree(handler);
+		free_sys_off_handler(handler);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -391,7 +428,7 @@ register_sys_off_handler(enum sys_off_mode mode,
 	}
 
 	if (err) {
-		kfree(handler);
+		free_sys_off_handler(handler);
 		return ERR_PTR(err);
 	}
 
@@ -409,7 +446,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
 {
 	int err;
 
-	if (!handler)
+	if (IS_ERR_OR_NULL(handler))
 		return;
 
 	if (handler->blocking)
@@ -422,7 +459,7 @@ void unregister_sys_off_handler(struct sys_off_handler *handler)
 	/* sanity check, shall never happen */
 	WARN_ON(err);
 
-	kfree(handler);
+	free_sys_off_handler(handler);
 }
 EXPORT_SYMBOL_GPL(unregister_sys_off_handler);
 
@@ -584,7 +621,23 @@ static void do_kernel_power_off_prepare(void)
  */
 void do_kernel_power_off(void)
 {
+	struct sys_off_handler *sys_off = NULL;
+
+	/*
+	 * Register sys-off handlers for legacy PM callback. This allows
+	 * legacy PM callbacks temporary co-exist with the new sys-off API.
+	 *
+	 * TODO: Remove legacy handlers once all legacy PM users will be
+	 * switched to the sys-off based APIs.
+	 */
+	if (pm_power_off)
+		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
+						   SYS_OFF_PRIO_DEFAULT,
+						   legacy_pm_power_off, NULL);
+
 	atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);
+
+	unregister_sys_off_handler(sys_off);
 }
 
 /**
@@ -595,7 +648,8 @@ void do_kernel_power_off(void)
  */
 bool kernel_can_power_off(void)
 {
-	return !atomic_notifier_call_chain_is_empty(&power_off_handler_list);
+	return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
+		pm_power_off;
 }
 EXPORT_SYMBOL_GPL(kernel_can_power_off);
 
@@ -630,7 +684,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		void __user *, arg)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(current);
-	struct sys_off_handler *sys_off = NULL;
 	char buffer[256];
 	int ret = 0;
 
@@ -655,21 +708,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 	if (ret)
 		return ret;
 
-	/*
-	 * Register sys-off handlers for legacy PM callback. This allows
-	 * legacy PM callbacks temporary co-exist with the new sys-off API.
-	 *
-	 * TODO: Remove legacy handlers once all legacy PM users will be
-	 * switched to the sys-off based APIs.
-	 */
-	if (pm_power_off) {
-		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
-						   SYS_OFF_PRIO_DEFAULT,
-						   legacy_pm_power_off, NULL);
-		if (IS_ERR(sys_off))
-			return PTR_ERR(sys_off);
-	}
-
 	/* Instead of trying to make the power_off code look like
 	 * halt when pm_power_off is not set do it the easy way.
 	 */
@@ -727,7 +765,6 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		break;
 	}
 	mutex_unlock(&system_transition_mutex);
-	unregister_sys_off_handler(sys_off);
 
 	return ret;
 }
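
The second reboot fix matters for power-off requests that never pass through the reboot() syscall (sysrq, orderly power-off and similar paths). Because do_kernel_power_off() now registers the legacy pm_power_off callback itself and kernel_can_power_off() also reports true when only pm_power_off is set, such callers can stay generic. A hedged sketch, assuming an architecture whose machine_power_off() ends up in do_kernel_power_off() (my_shutdown() is an illustrative name):

#include <linux/reboot.h>

/*
 * Non-syscall power-off path under the fixed logic: the caller needs no
 * knowledge of whether the platform uses pm_power_off or a sys-off handler.
 */
static void my_shutdown(void)
{
	if (kernel_can_power_off())
		kernel_power_off();	/* reaches do_kernel_power_off() via machine_power_off() */
	else
		kernel_halt();
}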