From 74a1dd86d1739eae2015a3832f62c1d6546893a7 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Mon, 25 Mar 2019 10:24:56 -0700
Subject: PM / wakeup: Use pm_pr_dbg() instead of pr_debug()

These prints are useful if we're doing PM suspend debugging. Having them
at pr_debug() level means that we need to either enable DEBUG in this
file, or compile the kernel with dynamic debug capabilities. Both of
these options have drawbacks: custom compilation, or opting into having
all debug statements included in the kernel image. Given that we already
have infrastructure to collect PM debugging information with
CONFIG_PM_DEBUG and friends, change the pr_debug() usage here to
pm_pr_dbg() so that the wakeup information can be collected in the
kernel logs.

Signed-off-by: Stephen Boyd
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/wakeup.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bb1ae175fae1..23c243a4c675 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -804,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
 	srcuidx = srcu_read_lock(&wakeup_srcu);
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		if (ws->active) {
-			pr_debug("active wakeup source: %s\n", ws->name);
+			pm_pr_dbg("active wakeup source: %s\n", ws->name);
 			active = 1;
 		} else if (!active &&
 			   (!last_activity_ws ||
@@ -815,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
 	}

 	if (!active && last_activity_ws)
-		pr_debug("last active wakeup source: %s\n",
+		pm_pr_dbg("last active wakeup source: %s\n",
 			 last_activity_ws->name);
 	srcu_read_unlock(&wakeup_srcu, srcuidx);
 }
@@ -845,7 +845,7 @@ bool pm_wakeup_pending(void)
 	raw_spin_unlock_irqrestore(&events_lock, flags);

 	if (ret) {
-		pr_debug("Wakeup pending, aborting suspend\n");
+		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 		pm_print_active_wakeup_sources();
 	}

--
cgit v1.2.3

From c4a586fdd440931c2773a9da42c1fc564068f128 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Fri, 15 Mar 2019 23:37:51 -0400
Subject: PM / core: fix kerneldoc comment for dpm_watchdog_handler()

This brings the kernel doc in line with the function signature.

Signed-off-by: Yangtao Li
Acked-by: Pavel Machek
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f80d298de3fa..497238704abd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -478,7 +478,7 @@ struct dpm_watchdog {

 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
  *
  * Called when a driver has timed out suspending or resuming.
  * There's not much we can do here to recover so panic() to
--
cgit v1.2.3

From 0b237cb2fc7b10b2c4e92477760fa7442847c804 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Fri, 15 Mar 2019 23:48:41 -0400
Subject: PM / core: fix kerneldoc comment for device_pm_wait_for_dev()

Rearrange the kerneldoc so that the parameters are described in the same
order as they appear in the function signature, keeping the comment style
consistent.

Signed-off-by: Yangtao Li
Acked-by: Pavel Machek
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 497238704abd..41eba82ee7b9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -2069,8 +2069,8 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);

 /**
  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
  */
 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
 {
--
cgit v1.2.3
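The pm_pr_dbg() messages above go through the PM core's own debug
infrastructure rather than dynamic debug, so on kernels built with
CONFIG_PM_DEBUG/CONFIG_PM_SLEEP_DEBUG they can typically be enabled at
run time via /sys/power/pm_debug_messages, with no rebuild needed. As a
rough sketch of the same pattern in other built-in suspend/resume code
(the function name is made up, and availability of the helper to modules
depends on the kernel version):

	#include <linux/suspend.h>
	#include <linux/device.h>

	static int foo_suspend(struct device *dev)
	{
		/* Lands in the kernel log when PM debug messages are enabled. */
		pm_pr_dbg("suspending %s\n", dev_name(dev));
		return 0;
	}
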
From 49a27e279052cf71ba931a26b00194ff46510480 Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Wed, 27 Mar 2019 15:35:45 +0100
Subject: PM / Domains: Add generic data pointer to struct genpd_power_state

Add a data pointer to the genpd_power_state struct, to allow a genpd
backend driver to store per-state specific data.

To introduce the pointer, change the way genpd deals with freeing of the
corresponding allocated data. More precisely, clarify who is responsible
for freeing the data by adding a ->free_states() callback to the
generic_pm_domain structure. Whoever allocates the data is expected to
set the callback, so that genpd can invoke it from genpd_remove().

Co-developed-by: Lina Iyer
Acked-by: Daniel Lezcano
Signed-off-by: Ulf Hansson
[ rjw: Subject & changelog ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/domain.c | 12 ++++++++++--
 include/linux/pm_domain.h  |  4 +++-
 2 files changed, 13 insertions(+), 3 deletions(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96a6dc9d305c..ff6f992f7a1d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1686,6 +1686,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

+static void genpd_free_default_power_state(struct genpd_power_state *states,
+					   unsigned int state_count)
+{
+	kfree(states);
+}
+
 static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
 {
 	struct genpd_power_state *state;
@@ -1696,7 +1702,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)

 	genpd->states = state;
 	genpd->state_count = 1;
-	genpd->free = state;
+	genpd->free_states = genpd_free_default_power_state;

 	return 0;
 }
@@ -1812,7 +1818,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	list_del(&genpd->gpd_list_node);
 	genpd_unlock(genpd);
 	cancel_work_sync(&genpd->power_off_work);
-	kfree(genpd->free);
+	if (genpd->free_states)
+		genpd->free_states(genpd->states, genpd->state_count);
+
 	pr_debug("%s: removed %s\n", __func__, genpd->name);

 	return 0;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1ed5874bcee0..8e1399231753 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -69,6 +69,7 @@ struct genpd_power_state {
 	s64 residency_ns;
 	struct fwnode_handle *fwnode;
 	ktime_t idle_time;
+	void *data;
 };

 struct genpd_lock_ops;
@@ -110,9 +111,10 @@ struct generic_pm_domain {
 			 struct device *dev);
 	unsigned int flags;		/* Bit field of configs for genpd */
 	struct genpd_power_state *states;
+	void (*free_states)(struct genpd_power_state *states,
+			    unsigned int state_count);
 	unsigned int state_count; /* number of states */
 	unsigned int state_idx; /* state that genpd will go to when off */
-	void *free; /* Free the state that was allocated for default */
 	ktime_t on_time;
 	ktime_t accounting_time;
 	const struct genpd_lock_ops *lock_ops;
--
cgit v1.2.3
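For illustration, a minimal sketch (not part of the patch, all names made
up, error handling elided) of how a genpd backend could use the new
per-state data pointer together with a matching ->free_states() callback:

	#include <linux/pm_domain.h>
	#include <linux/slab.h>

	/* Free the backend-specific per-state data and then the array. */
	static void foo_free_states(struct genpd_power_state *states,
				    unsigned int state_count)
	{
		unsigned int i;

		for (i = 0; i < state_count; i++)
			kfree(states[i].data);
		kfree(states);
	}

	static int foo_setup_states(struct generic_pm_domain *genpd)
	{
		struct genpd_power_state *states;

		states = kcalloc(2, sizeof(*states), GFP_KERNEL);
		if (!states)
			return -ENOMEM;

		states[0].residency_ns = 20000;		/* shallow state */
		states[1].residency_ns = 500000;	/* deep state */
		states[1].data = kzalloc(8, GFP_KERNEL); /* backend data */

		genpd->states = states;
		genpd->state_count = 2;
		/* genpd_remove() will invoke this to free the states. */
		genpd->free_states = foo_free_states;
		return 0;
	}

The default, single "off" state that genpd allocates itself is now freed
the same way, via genpd_free_default_power_state() set as the callback.
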
From eb594b7325f61835555140922a4cb715264a325c Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Wed, 27 Mar 2019 15:35:46 +0100
Subject: PM / Domains: Add support for CPU devices to genpd

To enable a CPU device to be attached to a PM domain managed by genpd,
make a few changes to genpd for convenience.

To be able to quickly find out what CPUs are attached to a genpd, which
typically becomes useful from a genpd governor as subsequent changes will
show, add a cpumask to struct generic_pm_domain, to be updated when a CPU
device gets attached to the genpd containing that cpumask. Also,
propagate the cpumask changes upwards in the domain hierarchy to the
master PM domains. This way, the cpumask for a genpd hierarchically
reflects all CPUs attached to the topology below it.

Finally, make this an opt-in feature, to avoid having to manage CPUs and
the cpumask for genpds that don't need it. To that end, add a new genpd
configuration bit, GENPD_FLAG_CPU_DOMAIN.

Co-developed-by: Lina Iyer
Acked-by: Daniel Lezcano
Signed-off-by: Ulf Hansson
[ rjw: Changelog ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/domain.c | 65 ++++++++++++++++++++++++++++++++++++++++++++-
 include/linux/pm_domain.h  | 13 +++++++++
 2 files changed, 77 insertions(+), 1 deletion(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ff6f992f7a1d..ecac03dcc9b2 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 

 #include "power.h"

@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
 #define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)

 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 		const struct generic_pm_domain *genpd)
@@ -1454,6 +1456,56 @@ static void genpd_free_dev_data(struct device *dev,
 	dev_pm_put_subsys_data(dev);
 }

+static void __genpd_update_cpumask(struct generic_pm_domain *genpd,
+				   int cpu, bool set, unsigned int depth)
+{
+	struct gpd_link *link;
+
+	if (!genpd_is_cpu_domain(genpd))
+		return;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		struct generic_pm_domain *master = link->master;
+
+		genpd_lock_nested(master, depth + 1);
+		__genpd_update_cpumask(master, cpu, set, depth + 1);
+		genpd_unlock(master);
+	}
+
+	if (set)
+		cpumask_set_cpu(cpu, genpd->cpus);
+	else
+		cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+				 struct device *dev, bool set)
+{
+	int cpu;
+
+	if (!genpd_is_cpu_domain(genpd))
+		return;
+
+	for_each_possible_cpu(cpu) {
+		if (get_cpu_device(cpu) == dev) {
+			__genpd_update_cpumask(genpd, cpu, set, 0);
+			return;
+		}
+	}
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd,
+			      struct device *dev)
+{
+	genpd_update_cpumask(genpd, dev, true);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd,
+				struct device *dev)
+{
+	genpd_update_cpumask(genpd, dev, false);
+}
+
 static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 			    struct gpd_timing_data *td)
 {
@@ -1475,6 +1527,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,

 	genpd_lock(genpd);

+	genpd_set_cpumask(genpd, dev);
 	dev_pm_domain_set(dev, &genpd->domain);

 	genpd->device_count++;
@@ -1532,6 +1585,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->device_count--;
 	genpd->max_off_time_changed = true;

+	genpd_clear_cpumask(genpd, dev);
 	dev_pm_domain_set(dev, NULL);

 	list_del_init(&pdd->list_node);
@@ -1768,11 +1822,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
 		return -EINVAL;

+	if (genpd_is_cpu_domain(genpd) &&
+	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/* Use only one "off" state if there were no states declared */
 	if (genpd->state_count == 0) {
 		ret = genpd_set_default_power_state(genpd);
-		if (ret)
+		if (ret) {
+			if (genpd_is_cpu_domain(genpd))
+				free_cpumask_var(genpd->cpus);
 			return ret;
+		}
 	} else if (!gov && genpd->state_count > 1) {
 		pr_warn("%s: no governor for states\n", genpd->name);
 	}
@@ -1818,6 +1879,8 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	list_del(&genpd->gpd_list_node);
 	genpd_unlock(genpd);
 	cancel_work_sync(&genpd->power_off_work);
+	if (genpd_is_cpu_domain(genpd))
+		free_cpumask_var(genpd->cpus);
 	if (genpd->free_states)
 		genpd->free_states(genpd->states, genpd->state_count);

diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 8e1399231753..a6e251fe9deb 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 

 /*
  * Flags to control the behaviour of a genpd.
@@ -42,11 +43,22 @@
  * GENPD_FLAG_ACTIVE_WAKEUP:	Instructs genpd to keep the PM domain powered
  *				on, in case any of its attached devices is used
  *				in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN:	Instructs genpd that it should expect to get
+ *				devices attached, which may belong to CPUs or
+ *				possibly have subdomains with CPUs attached.
+ *				This flag enables the genpd backend driver to
+ *				deploy idle power management support for CPUs
+ *				and groups of CPUs. Note that, the backend
+ *				driver must then comply with the so called,
+ *				last-man-standing algorithm, for the CPUs in the
+ *				PM domain.
  */
 #define GENPD_FLAG_PM_CLK	 (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE	 (1U << 1)
 #define GENPD_FLAG_ALWAYS_ON	 (1U << 2)
 #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN	 (1U << 4)

 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -94,6 +106,7 @@ struct generic_pm_domain {
 	unsigned int suspended_count;	/* System suspend device counter */
 	unsigned int prepared_count;	/* Suspend counter of prepared devices */
 	unsigned int performance_state;	/* Aggregated max performance state */
+	cpumask_var_t cpus;		/* A cpumask of the attached CPUs */
 	int (*power_off)(struct generic_pm_domain *domain);
 	int (*power_on)(struct generic_pm_domain *domain);
 	struct opp_table *opp_table;	/* OPP table of the genpd */
--
cgit v1.2.3
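As an illustration, a hedged sketch (names invented, error handling and
the real power controller programming elided) of a genpd provider opting
in to CPU tracking with the new flag; CPU devices attached afterwards in
the usual way (for example through the DT-based genpd attach path) then
show up in genpd->cpus:

	#include <linux/pm_domain.h>

	static int foo_cluster_power_on(struct generic_pm_domain *domain)
	{
		/* Program the hypothetical cluster power controller here. */
		return 0;
	}

	static int foo_cluster_power_off(struct generic_pm_domain *domain)
	{
		return 0;
	}

	static struct generic_pm_domain foo_cluster_pd = {
		.name = "foo-cpu-cluster",
		/* Opt in: pm_genpd_init() then allocates foo_cluster_pd.cpus. */
		.flags = GENPD_FLAG_CPU_DOMAIN,
		.power_on = foo_cluster_power_on,
		.power_off = foo_cluster_power_off,
	};

	static int foo_pd_setup(void)
	{
		/* No governor yet; register with the domain initially on. */
		return pm_genpd_init(&foo_cluster_pd, NULL, false);
	}
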
From f2a424f6c613a98560dc49fd9984589401d51648 Mon Sep 17 00:00:00 2001
From: Yangtao Li
Date: Sat, 16 Mar 2019 00:59:25 -0400
Subject: PM / core: Introduce dpm_async_fn() helper

When a device PM function is to be executed asynchronously, the following
steps are repeated for the device:

1) reinit_completion(&dev->power.completion);
2) Check whether the device has asynchronous suspend enabled.
3) If so, execute the corresponding function asynchronously.

This pattern is duplicated in several places, so introduce the
dpm_async_fn() helper for better code readability and reuse, and use it
to clean up those call sites.

Signed-off-by: Yangtao Li
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/main.c | 62 ++++++++++++++++++-----------------------------
 1 file changed, 23 insertions(+), 39 deletions(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 41eba82ee7b9..9b8c829798f1 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -706,6 +706,19 @@ static bool is_async(struct device *dev)
 		&& !pm_trace_is_enabled();
 }

+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+	reinit_completion(&dev->power.completion);
+
+	if (is_async(dev)) {
+		get_device(dev);
+		async_schedule(func, dev);
+		return true;
+	}
+
+	return false;
+}
+
 static void async_resume_noirq(void *data, async_cookie_t cookie)
 {
 	struct device *dev = (struct device *)data;
@@ -732,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_noirq, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);

 	while (!list_empty(&dpm_noirq_list)) {
 		dev = to_device(dpm_noirq_list.next);
@@ -889,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
 	 * in case the starting of async threads is
 	 * delayed by non-async resuming devices.
 	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_early, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);

 	while (!list_empty(&dpm_late_early_list)) {
 		dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1056,8 @@ void dpm_resume(pm_message_t state)
 	pm_transition = state;
 	async_error = 0;

-	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);

 	while (!list_empty(&dpm_suspended_list)) {
 		dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)

 static int device_suspend_noirq(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_noirq, dev);
+	if (dpm_async_fn(dev, async_suspend_noirq))
 		return 0;
-	}
+
 	return __device_suspend_noirq(dev, pm_transition, false);
 }

@@ -1576,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)

 static int device_suspend_late(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_late, dev);
+	if (dpm_async_fn(dev, async_suspend_late))
 		return 0;
-	}

 	return __device_suspend_late(dev, pm_transition, false);
 }
@@ -1842,13 +1831,8 @@ static void async_suspend(void *data, async_cookie_t cookie)

 static int device_suspend(struct device *dev)
 {
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend, dev);
+	if (dpm_async_fn(dev, async_suspend))
 		return 0;
-	}

 	return __device_suspend(dev, pm_transition, false);
 }
--
cgit v1.2.3
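Whether dpm_async_fn() actually schedules the callback is still governed
by is_async(), i.e. by the device having async suspend enabled and by the
global pm_async toggle. A hedged sketch (driver and function names are
made up) of a driver opting its device in, so the PM core may run its
suspend/resume callbacks asynchronously:

	#include <linux/pm.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		/* Allow asynchronous suspend/resume for this device. */
		device_enable_async_suspend(&pdev->dev);
		return 0;
	}
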
From e94999688e3aa3c0a8ad5a60352cdc3ca3030434 Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Thu, 11 Apr 2019 20:17:33 +0200
Subject: PM / Domains: Add genpd governor for CPUs

After some preceding changes, PM domains managed by genpd may contain
CPU devices, so idle state residency values should be taken into account
during the state selection process. [The residency value is the minimum
amount of time to be spent by a CPU (or a group of CPUs) in an idle state
in order to save more energy than could be saved by picking a shallower
idle state.]

For this purpose, add a new genpd governor, pm_domain_cpu_gov, to be used
for selecting idle states of PM domains with CPU devices attached either
directly or through subdomains.

The new governor computes the minimum expected idle duration for all
online CPUs attached to a PM domain and its subdomains. Next, it finds
the deepest idle state whose target residency is within the expected idle
duration and selects it as the target idle state of the domain.

It should be noted that the minimum expected idle duration computation is
based on the closest timer event information stored in the per-CPU
variables cpuidle_devices for all of the CPUs in the domain. That needs
to be revisited in the future, as obviously there are other reasons why a
CPU may be woken up from idle.

Co-developed-by: Lina Iyer
Acked-by: Daniel Lezcano
Signed-off-by: Ulf Hansson
[ rjw: Changelog ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/domain_governor.c | 67 +++++++++++++++++++++++++++++++++++-
 include/linux/pm_domain.h           |  4 +++
 2 files changed, 70 insertions(+), 1 deletion(-)

(limited to 'drivers/base')

diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 4d07e38a8247..7912bc957244 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -10,6 +10,9 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 

 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 	struct generic_pm_domain *genpd = pd_to_genpd(pd);
 	struct gpd_link *link;

-	if (!genpd->max_off_time_changed)
+	if (!genpd->max_off_time_changed) {
+		genpd->state_idx = genpd->cached_power_down_state_idx;
 		return genpd->cached_power_down_ok;
+	}

 	/*
 	 * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
 		genpd->state_idx--;
 	}

+	genpd->cached_power_down_state_idx = genpd->state_idx;
 	return genpd->cached_power_down_ok;
 }

@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
 	return false;
 }

+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+	struct generic_pm_domain *genpd = pd_to_genpd(pd);
+	struct cpuidle_device *dev;
+	ktime_t domain_wakeup, next_hrtimer;
+	s64 idle_duration_ns;
+	int cpu, i;
+
+	/* Validate dev PM QoS constraints. */
+	if (!default_power_down_ok(pd))
+		return false;
+
+	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+		return true;
+
+	/*
+	 * Find the next wakeup for any of the online CPUs within the PM domain
+	 * and its subdomains. Note, we only need the genpd->cpus, as it already
+	 * contains a mask of all CPUs from subdomains.
+	 */
+	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+		dev = per_cpu(cpuidle_devices, cpu);
+		if (dev) {
+			next_hrtimer = READ_ONCE(dev->next_hrtimer);
+			if (ktime_before(next_hrtimer, domain_wakeup))
+				domain_wakeup = next_hrtimer;
+		}
+	}
+
+	/* The minimum idle duration is from now - until the next wakeup. */
+	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+	if (idle_duration_ns <= 0)
+		return false;
+
+	/*
+	 * Find the deepest idle state that has its residency value satisfied
+	 * and by also taking into account the power off latency for the state.
+	 * Start at the state picked by the dev PM QoS constraint validation.
+	 */
+	i = genpd->state_idx;
+	do {
+		if (idle_duration_ns >= (genpd->states[i].residency_ns +
+		    genpd->states[i].power_off_latency_ns)) {
+			genpd->state_idx = i;
+			return true;
+		}
+	} while (--i >= 0);
+
+	return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+	.suspend_ok = default_suspend_ok,
+	.power_down_ok = cpu_power_down_ok,
+};
+#endif
+
 struct dev_power_governor simple_qos_governor = {
 	.suspend_ok = default_suspend_ok,
 	.power_down_ok = default_power_down_ok,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a6e251fe9deb..bc82e74560ee 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -118,6 +118,7 @@ struct generic_pm_domain {
 	s64 max_off_time_ns;	/* Maximum allowed "suspended" time. */
 	bool max_off_time_changed;
 	bool cached_power_down_ok;
+	bool cached_power_down_state_idx;
 	int (*attach_dev)(struct generic_pm_domain *domain,
 			  struct device *dev);
 	void (*detach_dev)(struct generic_pm_domain *domain,
@@ -202,6 +203,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);

 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
 #else

 static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
--
cgit v1.2.3
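For a feel of the selection logic: if the earliest next_hrtimer among the
online CPUs in the domain is 1.5 ms away, and the domain declares states
whose residency_ns plus power_off_latency_ns amount to 500 us and 2 ms
respectively, the governor picks the 500 us state; with nothing due for
5 ms it would pick the 2 ms state. A hedged sketch (reusing the
hypothetical domain from the earlier sketch) of selecting the new
governor when registering a CPU PM domain:

	#ifdef CONFIG_CPU_IDLE
		/* CPU-aware governor: honours the residency of the idle states. */
		ret = pm_genpd_init(&foo_cluster_pd, &pm_domain_cpu_gov, false);
	#else
		ret = pm_genpd_init(&foo_cluster_pd, &simple_qos_governor, false);
	#endif
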
From dc351d4c5f4fe4d0f274d6d660227be0c3a03317 Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Wed, 10 Apr 2019 11:55:16 +0200
Subject: PM / core: Propagate dev->power.wakeup_path when no callbacks

The dev->power.direct_complete flag may become set in device_prepare()
in case the device doesn't have any PM callbacks
(dev->power.no_pm_callbacks is set). This leads to broken behaviour when
a child with wakeup enabled relies on its parent being used in the wakeup
path. More precisely, when the direct complete path gets selected for the
child in __device_suspend(), the propagation of dev->power.wakeup_path is
skipped as well.

Address this problem by checking whether the device is part of the wakeup
path or has wakeup enabled, and in that case prevent the direct complete
path from being used.

Reported-by: Loic Pallardy
Signed-off-by: Ulf Hansson
[ rjw: Comment cleanup ]
Signed-off-by: Rafael J. Wysocki
---
 drivers/base/power/main.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/base')

diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9b8c829798f1..43e863cc0c1b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1736,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;

+	/* Avoid direct_complete to let wakeup_path propagate. */
+	if (device_may_wakeup(dev) || dev->power.wakeup_path)
+		dev->power.direct_complete = false;
+
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
--
cgit v1.2.3
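To connect this to driver code: the new check fires for devices where
device_may_wakeup() is true, and for any device whose wakeup_path flag was
set because such a device sits below it. A hedged sketch (driver name
invented) of a child device driver declaring itself wakeup-capable, which
keeps both the child and, through the now-propagated wakeup_path, its
parent out of the direct_complete shortcut:

	#include <linux/pm_wakeup.h>
	#include <linux/platform_device.h>

	static int foo_child_probe(struct platform_device *pdev)
	{
		/* Mark the device wakeup-capable and enable wakeup for it. */
		device_init_wakeup(&pdev->dev, true);
		return 0;
	}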