author | Rafael J. Wysocki <rjw@sisk.pl> | 2011-01-31 11:06:39 +0100 |
---|---|---|
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2011-03-15 00:43:13 +0100 |
commit | 023d3779145ec6b7a0f38f19672a347b92feb74e (patch) | |
tree | 1ec78dc731dad2b8155faf6625617284f1b3b76d /drivers/base | |
parent | dc1b83ab08f1954335692cdcd499f78c94f4c42a (diff) | |
download | linux-023d3779145ec6b7a0f38f19672a347b92feb74e.tar.bz2 |
PM / Wakeup: Combine atomic counters to avoid reordering issues
The memory barrier in wakeup_source_deactivate() is supposed to
prevent the callers of pm_wakeup_pending() and pm_get_wakeup_count()
from seeing the new value of events_in_progress (0, in particular)
and the old value of event_count at the same time. However, if
wakeup_source_deactivate() is executed by CPU0 and, for instance,
pm_wakeup_pending() is executed by CPU1, where both processors can
reorder operations, the memory barrier in wakeup_source_deactivate()
doesn't affect CPU1 which can reorder reads. In that case CPU1 may
very well decide to fetch event_count before it's modified and
events_in_progress after it's been updated, so pm_wakeup_pending()
may fail to detect a wakeup event. This issue can be addressed by
using a single atomic variable to store both events_in_progress
and event_count, so that they can be updated together in a single
atomic operation.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
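To illustrate the idea, here is a minimal, self-contained userspace sketch of the counter-packing scheme the patch introduces: the upper half of one atomic word counts completed wakeup events, the lower half counts events still in progress, so a single atomic read always returns a consistent pair and no cross-CPU read ordering is needed. The sketch uses C11 atomics and mirrors the patch's split_counters()/MAX_IN_PROGRESS arithmetic, but it is an illustration only, not the kernel code itself.

```c
#include <stdatomic.h>
#include <stdio.h>

/* Lower half of the word holds "in progress", upper half holds "completed". */
#define IN_PROGRESS_BITS	(sizeof(unsigned int) * 4)
#define MAX_IN_PROGRESS		((1U << IN_PROGRESS_BITS) - 1)

static atomic_uint combined_event_count;

/* Read both counters with one atomic load, so the pair is always consistent. */
static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_load(&combined_event_count);

	*cnt = comb >> IN_PROGRESS_BITS;
	*inpr = comb & MAX_IN_PROGRESS;
}

int main(void)
{
	unsigned int cnt, inpr;

	/* A wakeup event starts: bump the in-progress (low) half. */
	atomic_fetch_add(&combined_event_count, 1);

	/*
	 * The event completes: adding MAX_IN_PROGRESS increments the upper
	 * half and decrements the lower half in one indivisible operation,
	 * which is what removes the ordering problem described above.
	 */
	atomic_fetch_add(&combined_event_count, MAX_IN_PROGRESS);

	split_counters(&cnt, &inpr);
	printf("completed = %u, in progress = %u\n", cnt, inpr); /* 1, 0 */
	return 0;
}
```

Because both halves live in the same word, a reader that loads the word once can never observe the "new" in-progress value together with the "old" event count, which is exactly the failure mode the old smp_mb() could not prevent on the reading CPU.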
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/power/wakeup.c | 61 |
1 file changed, 39 insertions, 22 deletions
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8ec406d8f548..e5e73b5efc80 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,12 +24,26 @@
  */
 bool events_check_enabled;
 
-/* The counter of registered wakeup events. */
-static atomic_t event_count = ATOMIC_INIT(0);
-/* A preserved old value of event_count. */
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS	(sizeof(int) * 4)
+#define MAX_IN_PROGRESS	((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+	unsigned int comb = atomic_read(&combined_event_count);
+
+	*cnt = (comb >> IN_PROGRESS_BITS);
+	*inpr = comb & MAX_IN_PROGRESS;
+}
+
+/* A preserved old value of the events counter. */
 static unsigned int saved_count;
-/* The counter of wakeup events being processed. */
-static atomic_t events_in_progress = ATOMIC_INIT(0);
 
 static DEFINE_SPINLOCK(events_lock);
 
@@ -307,7 +321,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 	ws->timer_expires = jiffies;
 	ws->last_time = ktime_get();
 
-	atomic_inc(&events_in_progress);
+	/* Increment the counter of events in progress. */
+	atomic_inc(&combined_event_count);
 }
 
 /**
@@ -394,14 +409,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 	del_timer(&ws->timer);
 
 	/*
-	 * event_count has to be incremented before events_in_progress is
-	 * modified, so that the callers of pm_check_wakeup_events() and
-	 * pm_save_wakeup_count() don't see the old value of event_count and
-	 * events_in_progress equal to zero at the same time.
+	 * Increment the counter of registered wakeup events and decrement the
+	 * couter of wakeup events in progress simultaneously.
 	 */
-	atomic_inc(&event_count);
-	smp_mb__before_atomic_dec();
-	atomic_dec(&events_in_progress);
+	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
 }
 
 /**
@@ -556,8 +567,10 @@ bool pm_wakeup_pending(void)
 
 	spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
-		ret = ((unsigned int)atomic_read(&event_count) != saved_count)
-			|| atomic_read(&events_in_progress);
+		unsigned int cnt, inpr;
+
+		split_counters(&cnt, &inpr);
+		ret = (cnt != saved_count || inpr > 0);
 		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
@@ -579,19 +592,22 @@ bool pm_wakeup_pending(void)
  */
 bool pm_get_wakeup_count(unsigned int *count)
 {
-	bool ret;
+	unsigned int cnt, inpr;
 
 	if (capable(CAP_SYS_ADMIN))
 		events_check_enabled = false;
 
-	while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+	for (;;) {
+		split_counters(&cnt, &inpr);
+		if (inpr == 0 || signal_pending(current))
+			break;
 		pm_wakeup_update_hit_counts();
 		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
 	}
 
-	ret = !atomic_read(&events_in_progress);
-	*count = atomic_read(&event_count);
-	return ret;
+	split_counters(&cnt, &inpr);
+	*count = cnt;
+	return !inpr;
 }
 
 /**
@@ -605,11 +621,12 @@ bool pm_get_wakeup_count(unsigned int *count)
  */
 bool pm_save_wakeup_count(unsigned int count)
 {
+	unsigned int cnt, inpr;
 	bool ret = false;
 
 	spin_lock_irq(&events_lock);
-	if (count == (unsigned int)atomic_read(&event_count)
-	    && !atomic_read(&events_in_progress)) {
+	split_counters(&cnt, &inpr);
+	if (cnt == count && inpr == 0) {
 		saved_count = count;
 		events_check_enabled = true;
 		ret = true;