From fe2e39d8782d885755139304d8dba0b3e5bfa878 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Mar 2012 23:29:45 +0200 Subject: firmware_class: Rework usermodehelper check Instead of two functions, read_lock_usermodehelper() and usermodehelper_is_disabled(), used in combination, introduce usermodehelper_read_trylock() that will only return with umhelper_sem held if usermodehelper_disabled is unset (and will return -EAGAIN otherwise) and make _request_firmware() use it. Rename read_unlock_usermodehelper() to usermodehelper_read_unlock() to follow the naming convention of the new function. Signed-off-by: Rafael J. Wysocki Acked-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org --- kernel/kmod.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index 957a7aab8ebc..4079ac1d5e79 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -339,17 +339,24 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq); */ #define RUNNING_HELPERS_TIMEOUT (5 * HZ) -void read_lock_usermodehelper(void) +int usermodehelper_read_trylock(void) { + int ret = 0; + down_read(&umhelper_sem); + if (usermodehelper_disabled) { + up_read(&umhelper_sem); + ret = -EAGAIN; + } + return ret; } -EXPORT_SYMBOL_GPL(read_lock_usermodehelper); +EXPORT_SYMBOL_GPL(usermodehelper_read_trylock); -void read_unlock_usermodehelper(void) +void usermodehelper_read_unlock(void) { up_read(&umhelper_sem); } -EXPORT_SYMBOL_GPL(read_unlock_usermodehelper); +EXPORT_SYMBOL_GPL(usermodehelper_read_unlock); /** * usermodehelper_disable - prevent new helpers from being started @@ -390,15 +397,6 @@ void usermodehelper_enable(void) up_write(&umhelper_sem); } -/** - * usermodehelper_is_disabled - check if new helpers are allowed to be started - */ -bool usermodehelper_is_disabled(void) -{ - return usermodehelper_disabled; -} -EXPORT_SYMBOL_GPL(usermodehelper_is_disabled); - static void helper_lock(void) { atomic_inc(&running_helpers); -- cgit v1.2.3 From 9b78c1da60b3c62ccdd1509f0902ad19ceaf776b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Mar 2012 23:30:02 +0200 Subject: firmware_class: Do not warn that system is not ready from async loads If firmware is requested asynchronously, by calling request_firmware_nowait(), there is no reason to fail the request (and warn the user) when the system is (presumably temporarily) unready to handle it (because user space is not available yet or frozen). For this reason, introduce an alternative routine for read-locking umhelper_sem, usermodehelper_read_lock_wait(), that will wait for usermodehelper_disabled to be unset (possibly with a timeout) and make request_firmware_work_func() use it instead of usermodehelper_read_trylock(). Accordingly, modify request_firmware() so that it uses usermodehelper_read_trylock() to acquire umhelper_sem and remove the code related to that lock from _request_firmware(). Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org --- drivers/base/firmware_class.c | 51 +++++++++++++++++++++---------------- include/linux/kmod.h | 1 + kernel/kmod.c | 58 +++++++++++++++++++++++++++++++++---------- 3 files changed, 76 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 60290671f04a..72c644b191a4 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -81,6 +81,11 @@ enum { static int loading_timeout = 60; /* In seconds */ +static inline long firmware_loading_timeout(void) +{ + return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT; +} + /* fw_lock could be moved to 'struct firmware_priv' but since it is just * guarding for corner cases a global lock should be OK */ static DEFINE_MUTEX(fw_lock); @@ -541,31 +546,22 @@ static void _request_firmware_cleanup(const struct firmware **firmware_p) static int _request_firmware(const struct firmware *firmware, const char *name, struct device *device, - bool uevent, bool nowait) + bool uevent, bool nowait, long timeout) { struct firmware_priv *fw_priv; - int retval; - - retval = usermodehelper_read_trylock(); - if (WARN_ON(retval)) { - dev_err(device, "firmware: %s will not be loaded\n", name); - return retval; - } + int retval = 0; if (uevent) dev_dbg(device, "firmware: requesting %s\n", name); fw_priv = fw_create_instance(firmware, name, device, uevent, nowait); - if (IS_ERR(fw_priv)) { - retval = PTR_ERR(fw_priv); - goto out; - } + if (IS_ERR(fw_priv)) + return PTR_ERR(fw_priv); if (uevent) { - if (loading_timeout > 0) + if (timeout != MAX_SCHEDULE_TIMEOUT) mod_timer(&fw_priv->timeout, - round_jiffies_up(jiffies + - loading_timeout * HZ)); + round_jiffies_up(jiffies + timeout)); kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD); } @@ -582,9 +578,6 @@ static int _request_firmware(const struct firmware *firmware, mutex_unlock(&fw_lock); fw_destroy_instance(fw_priv); - -out: - usermodehelper_read_unlock(); return retval; } @@ -613,7 +606,14 @@ request_firmware(const struct firmware **firmware_p, const char *name, if (ret <= 0) return ret; - ret = _request_firmware(*firmware_p, name, device, true, false); + ret = usermodehelper_read_trylock(); + if (WARN_ON(ret)) { + dev_err(device, "firmware: %s will not be loaded\n", name); + } else { + ret = _request_firmware(*firmware_p, name, device, true, false, + firmware_loading_timeout()); + usermodehelper_read_unlock(); + } if (ret) _request_firmware_cleanup(firmware_p); @@ -648,6 +648,7 @@ static int request_firmware_work_func(void *arg) { struct firmware_work *fw_work = arg; const struct firmware *fw; + long timeout; int ret; if (!arg) { @@ -659,8 +660,16 @@ static int request_firmware_work_func(void *arg) if (ret <= 0) goto out; - ret = _request_firmware(fw, fw_work->name, fw_work->device, - fw_work->uevent, true); + timeout = usermodehelper_read_lock_wait(firmware_loading_timeout()); + if (timeout) { + ret = _request_firmware(fw, fw_work->name, fw_work->device, + fw_work->uevent, true, timeout); + usermodehelper_read_unlock(); + } else { + dev_dbg(fw_work->device, "firmware: %s loading timed out\n", + fw_work->name); + ret = -EAGAIN; + } if (ret) _request_firmware_cleanup(&fw); diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 97d22c3e08b1..b087377ae2c4 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -115,6 +115,7 @@ extern void usermodehelper_init(void); extern int usermodehelper_disable(void); extern void 
usermodehelper_enable(void); extern int usermodehelper_read_trylock(void); +extern long usermodehelper_read_lock_wait(long timeout); extern void usermodehelper_read_unlock(void); #endif /* __LINUX_KMOD_H__ */ diff --git a/kernel/kmod.c b/kernel/kmod.c index 4079ac1d5e79..da7fcca279f9 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -333,6 +333,12 @@ static atomic_t running_helpers = ATOMIC_INIT(0); */ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq); +/* + * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled + * to become 'false'. + */ +static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq); + /* * Time to wait for running_helpers to become zero before the setting of * usermodehelper_disabled in usermodehelper_disable() fails @@ -352,12 +358,50 @@ int usermodehelper_read_trylock(void) } EXPORT_SYMBOL_GPL(usermodehelper_read_trylock); +long usermodehelper_read_lock_wait(long timeout) +{ + DEFINE_WAIT(wait); + + if (timeout < 0) + return -EINVAL; + + down_read(&umhelper_sem); + for (;;) { + prepare_to_wait(&usermodehelper_disabled_waitq, &wait, + TASK_UNINTERRUPTIBLE); + if (!usermodehelper_disabled) + break; + + up_read(&umhelper_sem); + + timeout = schedule_timeout(timeout); + if (!timeout) + break; + + down_read(&umhelper_sem); + } + finish_wait(&usermodehelper_disabled_waitq, &wait); + return timeout; +} +EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait); + void usermodehelper_read_unlock(void) { up_read(&umhelper_sem); } EXPORT_SYMBOL_GPL(usermodehelper_read_unlock); +/** + * usermodehelper_enable - allow new helpers to be started again + */ +void usermodehelper_enable(void) +{ + down_write(&umhelper_sem); + usermodehelper_disabled = 0; + wake_up(&usermodehelper_disabled_waitq); + up_write(&umhelper_sem); +} + /** * usermodehelper_disable - prevent new helpers from being started */ @@ -381,22 +425,10 @@ int usermodehelper_disable(void) if (retval) return 0; - down_write(&umhelper_sem); - usermodehelper_disabled = 0; - up_write(&umhelper_sem); + usermodehelper_enable(); return -EAGAIN; } -/** - * usermodehelper_enable - allow new helpers to be started again - */ -void usermodehelper_enable(void) -{ - down_write(&umhelper_sem); - usermodehelper_disabled = 0; - up_write(&umhelper_sem); -} - static void helper_lock(void) { atomic_inc(&running_helpers); -- cgit v1.2.3 From 7b5179ac14dbad945647ac9e76bbbf14ed9e0dbe Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Mar 2012 23:30:14 +0200 Subject: PM / Hibernate: Disable usermode helpers right before freezing tasks There is no reason to call usermodehelper_disable() before creating memory bitmaps in hibernate() and software_resume(), so call it right before freeze_processes(), in accordance with the other suspend and hibernation code. Consequently, call usermodehelper_enable() right after the thawing of tasks rather than after freeing the memory bitmaps. Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org --- kernel/power/hibernate.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 0a186cfde788..639ff6e4ae9e 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -611,19 +611,19 @@ int hibernate(void) if (error) goto Exit; - error = usermodehelper_disable(); - if (error) - goto Exit; - /* Allocate memory management structures */ error = create_basic_memory_bitmaps(); if (error) - goto Enable_umh; + goto Exit; printk(KERN_INFO "PM: Syncing filesystems ... "); sys_sync(); printk("done.\n"); + error = usermodehelper_disable(); + if (error) + goto Exit; + error = freeze_processes(); if (error) goto Free_bitmaps; @@ -660,9 +660,8 @@ int hibernate(void) freezer_test_done = false; Free_bitmaps: - free_basic_memory_bitmaps(); - Enable_umh: usermodehelper_enable(); + free_basic_memory_bitmaps(); Exit: pm_notifier_call_chain(PM_POST_HIBERNATION); pm_restore_console(); @@ -777,15 +776,13 @@ static int software_resume(void) if (error) goto close_finish; - error = usermodehelper_disable(); + error = create_basic_memory_bitmaps(); if (error) goto close_finish; - error = create_basic_memory_bitmaps(); - if (error) { - usermodehelper_enable(); + error = usermodehelper_disable(); + if (error) goto close_finish; - } pr_debug("PM: Preparing processes for restore.\n"); error = freeze_processes(); @@ -805,8 +802,8 @@ static int software_resume(void) swsusp_free(); thaw_processes(); Done: - free_basic_memory_bitmaps(); usermodehelper_enable(); + free_basic_memory_bitmaps(); Finish: pm_notifier_call_chain(PM_POST_RESTORE); pm_restore_console(); -- cgit v1.2.3 From 1e73203cd1157a03facc41ffb54050f5b28e55bd Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Mar 2012 23:30:21 +0200 Subject: PM / Sleep: Move disabling of usermode helpers to the freezer The core suspend/hibernation code calls usermodehelper_disable() to avoid race conditions between the freezer and the starting of usermode helpers and each code path has to do that on its own. However, it is always called right before freeze_processes() and usermodehelper_enable() is always called right after thaw_processes(). For this reason, to avoid code duplication and to make the connection between usermodehelper_disable() and the freezer more visible, make freeze_processes() call it and remove the direct usermodehelper_disable() and usermodehelper_enable() calls from all suspend/hibernation code paths. Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org --- kernel/power/hibernate.c | 11 ----------- kernel/power/process.c | 7 +++++++ kernel/power/suspend.c | 7 ------- kernel/power/user.c | 10 +--------- 4 files changed, 8 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 639ff6e4ae9e..e09dfbfeecee 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -620,10 +619,6 @@ int hibernate(void) sys_sync(); printk("done.\n"); - error = usermodehelper_disable(); - if (error) - goto Exit; - error = freeze_processes(); if (error) goto Free_bitmaps; @@ -660,7 +655,6 @@ int hibernate(void) freezer_test_done = false; Free_bitmaps: - usermodehelper_enable(); free_basic_memory_bitmaps(); Exit: pm_notifier_call_chain(PM_POST_HIBERNATION); @@ -780,10 +774,6 @@ static int software_resume(void) if (error) goto close_finish; - error = usermodehelper_disable(); - if (error) - goto close_finish; - pr_debug("PM: Preparing processes for restore.\n"); error = freeze_processes(); if (error) { @@ -802,7 +792,6 @@ static int software_resume(void) swsusp_free(); thaw_processes(); Done: - usermodehelper_enable(); free_basic_memory_bitmaps(); Finish: pm_notifier_call_chain(PM_POST_RESTORE); diff --git a/kernel/power/process.c b/kernel/power/process.c index 0d2aeb226108..56eaac7e88ab 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -16,6 +16,7 @@ #include #include #include +#include /* * Timeout for stopping processes @@ -122,6 +123,10 @@ int freeze_processes(void) { int error; + error = usermodehelper_disable(); + if (error) + return error; + if (!pm_freezing) atomic_inc(&system_freezing_cnt); @@ -187,6 +192,8 @@ void thaw_processes(void) } while_each_thread(g, p); read_unlock(&tasklist_lock); + usermodehelper_enable(); + schedule(); printk("done.\n"); } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 88e5c967370d..396d262b8fd0 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -102,17 +101,12 @@ static int suspend_prepare(void) if (error) goto Finish; - error = usermodehelper_disable(); - if (error) - goto Finish; - error = suspend_freeze_processes(); if (!error) return 0; suspend_stats.failed_freeze++; dpm_save_failed_step(SUSPEND_FREEZE); - usermodehelper_enable(); Finish: pm_notifier_call_chain(PM_POST_SUSPEND); pm_restore_console(); @@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state) static void suspend_finish(void) { suspend_thaw_processes(); - usermodehelper_enable(); pm_notifier_call_chain(PM_POST_SUSPEND); pm_restore_console(); } diff --git a/kernel/power/user.c b/kernel/power/user.c index 33c4329205af..91b0fd021a95 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, sys_sync(); printk("done.\n"); - error = usermodehelper_disable(); - if (error) - break; - error = freeze_processes(); - if (error) - usermodehelper_enable(); - else + if (!error) data->frozen = 1; break; @@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, break; pm_restore_gfp_mask(); thaw_processes(); - usermodehelper_enable(); data->frozen = 0; break; -- cgit v1.2.3 From 
247bc03742545fec2f79939a3b9f738392a0f7b4 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Mar 2012 23:30:28 +0200 Subject: PM / Sleep: Mitigate race between the freezer and request_firmware() There is a race condition between the freezer and request_firmware() such that if request_firmware() is run on one CPU and freeze_processes() is run on another CPU and usermodehelper_disable() called by it succeeds to grab umhelper_sem for writing before usermodehelper_read_trylock() called from request_firmware() acquires it for reading, the request_firmware() will fail and trigger a WARN_ON() complaining that it was called at a wrong time. However, in fact, it wasn't called at a wrong time and freeze_processes() simply happened to be executed simultaneously. To avoid this race, at least in some cases, modify usermodehelper_read_trylock() so that it doesn't fail if the freezing of tasks has just started and hasn't been completed yet. Instead, during the freezing of tasks, it will try to freeze the task that has called it so that it can wait until user space is thawed without triggering the scary warning. For this purpose, change usermodehelper_disabled so that it can take three different values, UMH_ENABLED (0), UMH_FREEZING and UMH_DISABLED. The first one means that usermode helpers are enabled, the last one means "hard disable" (i.e. the system is not ready for usermode helpers to be used) and the second one is reserved for the freezer. Namely, when freeze_processes() is started, it sets usermodehelper_disabled to UMH_FREEZING which tells usermodehelper_read_trylock() that it shouldn't fail just yet and should call try_to_freeze() if woken up and cannot return immediately. This way all freezable tasks that happen to call request_firmware() right before freeze_processes() is started and lose the race for umhelper_sem with it will be frozen and will sleep until thaw_processes() unsets usermodehelper_disabled. [For the non-freezable callers of request_firmware() the race for umhelper_sem against freeze_processes() is unfortunately unavoidable.] Reported-by: Stephen Boyd Signed-off-by: Rafael J. 
Wysocki Acked-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org --- include/linux/kmod.h | 21 +++++++++++++++++++-- kernel/kmod.c | 47 +++++++++++++++++++++++++++++++++++++---------- kernel/power/process.c | 3 ++- 3 files changed, 58 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/include/linux/kmod.h b/include/linux/kmod.h index b087377ae2c4..dd99c329e161 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -110,10 +110,27 @@ call_usermodehelper(char *path, char **argv, char **envp, int wait) extern struct ctl_table usermodehelper_table[]; +enum umh_disable_depth { + UMH_ENABLED = 0, + UMH_FREEZING, + UMH_DISABLED, +}; + extern void usermodehelper_init(void); -extern int usermodehelper_disable(void); -extern void usermodehelper_enable(void); +extern int __usermodehelper_disable(enum umh_disable_depth depth); +extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth); + +static inline int usermodehelper_disable(void) +{ + return __usermodehelper_disable(UMH_DISABLED); +} + +static inline void usermodehelper_enable(void) +{ + __usermodehelper_set_disable_depth(UMH_ENABLED); +} + extern int usermodehelper_read_trylock(void); extern long usermodehelper_read_lock_wait(long timeout); extern void usermodehelper_read_unlock(void); diff --git a/kernel/kmod.c b/kernel/kmod.c index da7fcca279f9..05698a7415fe 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -322,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work) * land has been frozen during a system-wide hibernation or suspend operation). * Should always be manipulated under umhelper_sem acquired for write. */ -static int usermodehelper_disabled = 1; +static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED; /* Number of helpers running */ static atomic_t running_helpers = ATOMIC_INIT(0); @@ -347,13 +347,30 @@ static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq); int usermodehelper_read_trylock(void) { + DEFINE_WAIT(wait); int ret = 0; down_read(&umhelper_sem); - if (usermodehelper_disabled) { + for (;;) { + prepare_to_wait(&usermodehelper_disabled_waitq, &wait, + TASK_INTERRUPTIBLE); + if (!usermodehelper_disabled) + break; + + if (usermodehelper_disabled == UMH_DISABLED) + ret = -EAGAIN; + up_read(&umhelper_sem); - ret = -EAGAIN; + + if (ret) + break; + + schedule(); + try_to_freeze(); + + down_read(&umhelper_sem); } + finish_wait(&usermodehelper_disabled_waitq, &wait); return ret; } EXPORT_SYMBOL_GPL(usermodehelper_read_trylock); @@ -392,25 +409,35 @@ void usermodehelper_read_unlock(void) EXPORT_SYMBOL_GPL(usermodehelper_read_unlock); /** - * usermodehelper_enable - allow new helpers to be started again + * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled. + * depth: New value to assign to usermodehelper_disabled. + * + * Change the value of usermodehelper_disabled (under umhelper_sem locked for + * writing) and wakeup tasks waiting for it to change. */ -void usermodehelper_enable(void) +void __usermodehelper_set_disable_depth(enum umh_disable_depth depth) { down_write(&umhelper_sem); - usermodehelper_disabled = 0; + usermodehelper_disabled = depth; wake_up(&usermodehelper_disabled_waitq); up_write(&umhelper_sem); } /** - * usermodehelper_disable - prevent new helpers from being started + * __usermodehelper_disable - Prevent new helpers from being started. + * @depth: New value to assign to usermodehelper_disabled. + * + * Set usermodehelper_disabled to @depth and wait for running helpers to exit. 
*/ -int usermodehelper_disable(void) +int __usermodehelper_disable(enum umh_disable_depth depth) { long retval; + if (!depth) + return -EINVAL; + down_write(&umhelper_sem); - usermodehelper_disabled = 1; + usermodehelper_disabled = depth; up_write(&umhelper_sem); /* @@ -425,7 +452,7 @@ int usermodehelper_disable(void) if (retval) return 0; - usermodehelper_enable(); + __usermodehelper_set_disable_depth(UMH_ENABLED); return -EAGAIN; } diff --git a/kernel/power/process.c b/kernel/power/process.c index 56eaac7e88ab..19db29f67558 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -123,7 +123,7 @@ int freeze_processes(void) { int error; - error = usermodehelper_disable(); + error = __usermodehelper_disable(UMH_FREEZING); if (error) return error; @@ -135,6 +135,7 @@ int freeze_processes(void) error = try_to_freeze_tasks(true); if (!error) { printk("done."); + __usermodehelper_set_disable_depth(UMH_DISABLED); oom_killer_disable(); } printk("\n"); -- cgit v1.2.3 From c4772d192c70b61d52262b0db76f7abd8aeb51c6 Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Wed, 28 Mar 2012 23:31:24 +0200 Subject: PM / QoS: add pm_qos_update_request_timeout() API The new API, pm_qos_update_request_timeout() is to provide a timeout with pm_qos_update_request. For example, pm_qos_update_request_timeout(req, 100, 1000), means that QoS request on req with value 100 will be active for 1000 microseconds. After 1000 microseconds, the QoS request thru req is reset. If there were another pm_qos_update_request(req, x) during the 1000 us, this new request with value x will override as this is another request on the same req handle. A new request on the same req handle will always override the previous request whether it is the conventional request or it is the new timeout request. Signed-off-by: MyungJoo Ham Signed-off-by: Kyungmin Park Acked-by: Mark Gross Signed-off-by: Rafael J. Wysocki --- include/linux/pm_qos.h | 4 ++++ kernel/power/qos.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'kernel') diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 2e9191a712f3..233149cb19f4 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -8,6 +8,7 @@ #include #include #include +#include enum { PM_QOS_RESERVED = 0, @@ -29,6 +30,7 @@ enum { struct pm_qos_request { struct plist_node node; int pm_qos_class; + struct delayed_work work; /* for pm_qos_update_request_timeout */ }; struct dev_pm_qos_request { @@ -73,6 +75,8 @@ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, s32 value); void pm_qos_update_request(struct pm_qos_request *req, s32 new_value); +void pm_qos_update_request_timeout(struct pm_qos_request *req, + s32 new_value, unsigned long timeout_us); void pm_qos_remove_request(struct pm_qos_request *req); int pm_qos_request(int pm_qos_class); diff --git a/kernel/power/qos.c b/kernel/power/qos.c index d6d6dbd1ecc0..6a031e684026 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -229,6 +229,21 @@ int pm_qos_request_active(struct pm_qos_request *req) } EXPORT_SYMBOL_GPL(pm_qos_request_active); +/** + * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout + * @work: work struct for the delayed work (timeout) + * + * This cancels the timeout request by falling back to the default at timeout. 
+ */
+static void pm_qos_work_fn(struct work_struct *work)
+{
+	struct pm_qos_request *req = container_of(to_delayed_work(work),
+						  struct pm_qos_request,
+						  work);
+
+	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+}
+
 /**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		return;
 	}
 	req->pm_qos_class = pm_qos_class;
+	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
 			     &req->node, PM_QOS_ADD_REQ, value);
 }
@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
@@ -286,6 +305,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
+/**
+ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
+ * @req : handle to list element holding a pm_qos request to use
+ * @new_value: defines the temporal qos request
+ * @timeout_us: the effective duration of this qos request in usecs.
+ *
+ * After timeout_us, this qos request is cancelled automatically.
+ */
+void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
+				   unsigned long timeout_us)
+{
+	if (!req)
+		return;
+	if (WARN(!pm_qos_request_active(req),
+		 "%s called for unknown object.", __func__))
+		return;
+
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+
+	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+}
+
 /**
  * pm_qos_remove_request - modifies an existing qos request
  * @req: handle to request list element
@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
-- cgit v1.2.3
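A minimal usage sketch of the umhelper_sem read-side API introduced and reworked by the patches above, mirroring what drivers/base/firmware_class.c does after this series. It is not part of the patches: my_sync_load(), my_async_load() and the 5 * HZ wait are illustrative assumptions; only usermodehelper_read_trylock(), usermodehelper_read_lock_wait() and usermodehelper_read_unlock() come from the code above.

#include <linux/kmod.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Synchronous caller: give up immediately if usermode helpers are disabled. */
static int my_sync_load(void)
{
	int ret;

	ret = usermodehelper_read_trylock();
	if (ret)
		return ret;	/* -EAGAIN: user space cannot be used right now */

	/* ... fire the firmware uevent / start the usermode helper here ... */

	usermodehelper_read_unlock();
	return 0;
}

/* Asynchronous caller: wait (bounded) for user space to become usable. */
static int my_async_load(void)
{
	long timeout;

	timeout = usermodehelper_read_lock_wait(5 * HZ);
	if (!timeout)
		return -EAGAIN;	/* timed out; the read lock is NOT held here */

	/* Lock held and helpers enabled; 'timeout' jiffies of budget remain. */
	/* ... load the firmware within the remaining time ... */

	usermodehelper_read_unlock();
	return 0;
}

Note that once the UMH_FREEZING patch later in the series is applied, a freezable caller on the trylock path that loses the race with freeze_processes() sleeps (and is frozen) inside usermodehelper_read_trylock() rather than receiving -EAGAIN; -EAGAIN is only returned when helpers are fully disabled (UMH_DISABLED).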
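The practical effect of the async-load change is easiest to see from a driver using request_firmware_nowait(). The sketch below is hypothetical: the device, firmware file name and callback are made up, and request_firmware_nowait()/release_firmware() are the pre-existing firmware API of that kernel era, not something these patches add.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/module.h>

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "example.fw could not be loaded\n");
		return;
	}

	/* ... program fw->data (fw->size bytes) into the hardware ... */

	release_firmware(fw);
}

static int my_start_fw_load(struct device *dev)
{
	/*
	 * With the patches above, if this request races with freezing of
	 * tasks it no longer fails with a WARN_ON(); the work item behind it
	 * blocks in usermodehelper_read_lock_wait() until user space is
	 * available again or the firmware loading timeout expires.
	 */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example.fw", dev, GFP_KERNEL,
				       dev, my_fw_cont);
}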
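Finally, a usage sketch for the new pm_qos_update_request_timeout() call, following the changelog's example of a request with value 100 that expires after 1000 us. The module wrapper, request object and event handler are hypothetical; the pm_qos_* calls and constants are the ones shown in the patch.

#include <linux/pm_qos.h>
#include <linux/module.h>

static struct pm_qos_request my_qos_req;

static int __init my_qos_init(void)
{
	/* Register the handle with no constraint to begin with. */
	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	return 0;
}

/* Called from the driver's event path (not shown). */
static void my_handle_burst(void)
{
	/*
	 * Request a CPU/DMA latency limit of 100 for the next 1000 us.
	 * After the timeout the request falls back to PM_QOS_DEFAULT_VALUE
	 * automatically; any later pm_qos_update_request() on the same
	 * handle overrides it immediately, as described in the changelog.
	 */
	pm_qos_update_request_timeout(&my_qos_req, 100, 1000);
}

static void __exit my_qos_exit(void)
{
	pm_qos_remove_request(&my_qos_req);
}

module_init(my_qos_init);
module_exit(my_qos_exit);
MODULE_LICENSE("GPL");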