author		Liu, Chuansheng <chuansheng.liu@intel.com>	2014-02-18 10:28:46 +0800
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-02-20 01:30:09 +0100
commit		9e5e7910df824ba02aedd2b5d2ca556426ea6d0b
tree		b9db6a7e5e1921a8a9da0c8b1b588fda45f3f80f /drivers/base
parent		76569faa62c46382e080c3e190c66e19515aae1c
PM / sleep: Asynchronous threads for resume_early
In analogy with commits 5af84b82701a and 97df8c12995, using asynchronous
threads can improve the overall resume_early time significantly. This
patch covers the resume_early phase.

Signed-off-by: Chuansheng Liu <chuansheng.liu@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
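The ordering idea behind the patch is easiest to see outside the kernel: schedule asynchronous handlers for every async-capable device up front, walk the list synchronously for the rest, let each handler block on its parent's completion so parents still resume before children, and synchronize everything at the end. Below is a minimal userspace sketch of that scheme, assuming POSIX threads in place of async_schedule() and a mutex/condvar latch in place of struct completion; all toy_* names are invented for illustration, not kernel API:

/*
 * Toy userspace model of the async resume scheme (illustrative only,
 * not kernel code).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	const char *name;
	struct toy_dev *parent;
	bool async;			/* like dev->power.async_suspend */
	pthread_mutex_t lock;
	pthread_cond_t done_cv;
	bool done;			/* like dev->power.completion */
};

/* dpm_wait() analogue: block until the parent has resumed. */
static void toy_wait(struct toy_dev *dev)
{
	if (!dev)
		return;
	pthread_mutex_lock(&dev->lock);
	while (!dev->done)
		pthread_cond_wait(&dev->done_cv, &dev->lock);
	pthread_mutex_unlock(&dev->lock);
}

/* device_resume_early() analogue: wait for parent, resume, signal. */
static void toy_resume(struct toy_dev *dev)
{
	toy_wait(dev->parent);
	printf("resumed %s\n", dev->name);	/* driver callback here */
	pthread_mutex_lock(&dev->lock);
	dev->done = true;			/* complete_all() analogue */
	pthread_cond_broadcast(&dev->done_cv);
	pthread_mutex_unlock(&dev->lock);
}

/* async_resume_early() analogue. */
static void *toy_async_resume(void *data)
{
	toy_resume(data);
	return NULL;
}

static struct toy_dev parent_dev = {
	.name = "parent",
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done_cv = PTHREAD_COND_INITIALIZER,
};

static struct toy_dev child_dev = {
	.name = "child",
	.parent = &parent_dev,
	.async = true,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done_cv = PTHREAD_COND_INITIALIZER,
};

int main(void)
{
	struct toy_dev *list[] = { &parent_dev, &child_dev };
	pthread_t tids[2];
	int nthreads = 0;

	/* Phase 1: start async handlers upfront (async_schedule()
	 * analogue), so they are not delayed behind the synchronous
	 * walk below. */
	for (int i = 0; i < 2; i++)
		if (list[i]->async)
			pthread_create(&tids[nthreads++], NULL,
				       toy_async_resume, list[i]);

	/* Phase 2: resume the non-async devices in list order. */
	for (int i = 0; i < 2; i++)
		if (!list[i]->async)
			toy_resume(list[i]);

	/* Phase 3: async_synchronize_full() analogue. */
	for (int i = 0; i < nthreads; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

Built with cc -pthread, this always prints "resumed parent" before "resumed child": the child's worker starts first but blocks in toy_wait() until the synchronous pass completes the parent, which is the same ordering dpm_wait(dev->parent, async) enforces in the patch below.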
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/main.c | 55 ++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 44 insertions(+), 11 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ea3f1d2c28cf..6d41165701c4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -595,7 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
 	pm_callback_t callback = NULL;
 	char *info = NULL;
@@ -610,6 +610,8 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	if (!dev->power.is_late_suspended)
 		goto Out;
 
+	dpm_wait(dev->parent, async);
+
 	if (dev->pm_domain) {
 		info = "early power domain ";
 		callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -636,38 +638,69 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 	TRACE_RESUME(error);
 
 	pm_runtime_enable(dev);
+	complete_all(&dev->power.completion);
 	return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+	struct device *dev = (struct device *)data;
+	int error;
+
+	error = device_resume_early(dev, pm_transition, true);
+	if (error)
+		pm_dev_err(dev, pm_transition, " async", error);
+
+	put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+	struct device *dev;
 	ktime_t starttime = ktime_get();
 
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_late_early_list)) {
-		struct device *dev = to_device(dpm_late_early_list.next);
-		int error;
+	pm_transition = state;
+
+	/*
+	 * Advanced the async threads upfront,
+	 * in case the starting of async threads is
+	 * delayed by non-async resuming devices.
+	 */
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+		reinit_completion(&dev->power.completion);
+		if (is_async(dev)) {
+			get_device(dev);
+			async_schedule(async_resume_early, dev);
+		}
+	}
 
+	while (!list_empty(&dpm_late_early_list)) {
+		dev = to_device(dpm_late_early_list.next);
 		get_device(dev);
 		list_move_tail(&dev->power.entry, &dpm_suspended_list);
 		mutex_unlock(&dpm_list_mtx);
 
-		error = device_resume_early(dev, state);
-		if (error) {
-			suspend_stats.failed_resume_early++;
-			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-			dpm_save_failed_dev(dev_name(dev));
-			pm_dev_err(dev, state, " early", error);
-		}
+		if (!is_async(dev)) {
+			int error;
+			error = device_resume_early(dev, state, false);
+			if (error) {
+				suspend_stats.failed_resume_early++;
+				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+				dpm_save_failed_dev(dev_name(dev));
+				pm_dev_err(dev, state, " early", error);
+			}
+		}
 
 		mutex_lock(&dpm_list_mtx);
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	async_synchronize_full();
 	dpm_show_time(starttime, state, "early");
 }
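Note that the async path above is taken only when is_async(dev) is true, which broadly requires the global pm_async_enabled switch to be on and the device's power.async_suspend flag to be set. Drivers opt a device in with device_enable_async_suspend(); the sketch below shows how a probe routine might do so (the example_probe() name and platform-driver framing are illustrative, not part of this patch):

#include <linux/device.h>
#include <linux/platform_device.h>

/* Hypothetical driver probe: opt this device in to asynchronous
 * suspend/resume, so dpm_resume_early() schedules it via
 * async_schedule() instead of handling it in the synchronous pass. */
static int example_probe(struct platform_device *pdev)
{
	device_enable_async_suspend(&pdev->dev);
	return 0;
}

User space can also toggle the same flag per device through the power/async sysfs attribute.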