authorMichal Nazarewicz <m.nazarewicz@samsung.com>2010-05-05 12:53:11 +0200
committerGreg Kroah-Hartman <gregkh@suse.de>2010-05-20 13:21:42 -0700
commit22c43c81a51e05f61e90445ceb59d486c12fd921 (patch)
tree88582f01bf413bdc4d9d95a512116c9fe44afa33
parent24337c133ff92ba8d7c42819db17f7f2b0de3129 (diff)
wait_event_interruptible_locked() interface
New wait_event_interruptible{,_exclusive}_locked{,_irq} macros added.  They
work just like the versions without the _locked* suffix but require the wait
queue's lock to be held.  Also, __wake_up_locked() is now exported so that it
can be paired with the above macros.

The use case for this new facility is when one uses the wait queue's lock to
protect a data structure.  This may be advantageous if the structure needs to
be protected by a spinlock anyway.  In particular, with an additional
spinlock the following code has to be used to wait for a condition:

spin_lock(&data.lock);
...
for (ret = 0; !ret && !(condition); ) {
	spin_unlock(&data.lock);
	ret = wait_event_interruptible(data.wqh, (condition));
	spin_lock(&data.lock);
}
...
spin_unlock(&data.lock);

This looks bizarre, plus wait_event_interruptible() locks the wait queue's
lock anyway, so there is an unlock+lock sequence where it could be avoided.

To avoid those problems and benefit from the wait queue's lock, code similar
to the following should be used:

/* Waiting */
spin_lock(&data.wqh.lock);
...
ret = wait_event_interruptible_locked(data.wqh, (condition));
...
spin_unlock(&data.wqh.lock);

/* Waiting exclusively */
spin_lock(&data.wqh.lock);
...
ret = wait_event_interruptible_exclusive_locked(data.wqh, (condition));
...
spin_unlock(&data.wqh.lock);

/* Waking up */
spin_lock(&data.wqh.lock);
...
wake_up_locked(&data.wqh);
...
spin_unlock(&data.wqh.lock);

When spin_lock_irq() is used, the matching versions of the macros
(*_locked_irq()) need to be used.

Signed-off-by: Michal Nazarewicz <m.nazarewicz@samsung.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
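For illustration only (not part of the patch), here is a minimal sketch of how
a driver might use the new macros: a hypothetical FIFO whose counter is
protected solely by its wait queue's lock.  The struct my_fifo type and the
function names are made up for this example.

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

struct my_fifo {
	wait_queue_head_t wqh;	/* its ->lock also protects @count;
				 * initialised with init_waitqueue_head() */
	unsigned int count;
};

/* Consumer: sleeps until an item is available or a signal arrives. */
static int my_fifo_get(struct my_fifo *f)
{
	int ret;

	spin_lock(&f->wqh.lock);
	/* Drops the lock while sleeping, returns with it re-acquired. */
	ret = wait_event_interruptible_locked(f->wqh, f->count > 0);
	if (!ret)
		--f->count;
	spin_unlock(&f->wqh.lock);
	return ret;		/* 0 on success, -ERESTARTSYS on signal */
}

/* Producer: adds an item and wakes waiters while still holding the lock. */
static void my_fifo_put(struct my_fifo *f)
{
	spin_lock(&f->wqh.lock);
	++f->count;
	wake_up_locked(&f->wqh);
	spin_unlock(&f->wqh.lock);
}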
-rw-r--r--  include/linux/wait.h  149
-rw-r--r--  kernel/sched.c  1
2 files changed, 150 insertions, 0 deletions
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a48e16b77d5e..fc3c040e5e3a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -362,6 +362,155 @@ do { \
__ret; \
})
+
+#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+({ \
+ int __ret = 0; \
+ DEFINE_WAIT(__wait); \
+ if (exclusive) \
+ __wait.flags |= WQ_FLAG_EXCLUSIVE; \
+ do { \
+ if (likely(list_empty(&__wait.task_list))) \
+ __add_wait_queue_tail(&(wq), &__wait); \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ if (irq) \
+ spin_unlock_irq(&(wq).lock); \
+ else \
+ spin_unlock(&(wq).lock); \
+ schedule(); \
+ if (irq) \
+ spin_lock_irq(&(wq).lock); \
+ else \
+ spin_lock(&(wq).lock); \
+ } while (!(condition)); \
+ __remove_wait_queue(&(wq), &__wait); \
+ __set_current_state(TASK_RUNNING); \
+ __ret; \
+})
+
+
+/**
+ * wait_event_interruptible_locked - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock held.  The spinlock is unlocked
+ * while sleeping, but @condition is tested with the lock held, and
+ * the lock is held again when this macro exits.
+ *
+ * The lock is locked/unlocked using spin_lock()/spin_unlock()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_locked(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
+
+/**
+ * wait_event_interruptible_locked_irq - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock held.  The spinlock is unlocked
+ * while sleeping, but @condition is tested with the lock held, and
+ * the lock is held again when this macro exits.
+ *
+ * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_locked_irq(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+
+/**
+ * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock held.  The spinlock is unlocked
+ * while sleeping, but @condition is tested with the lock held, and
+ * the lock is held again when this macro exits.
+ *
+ * The lock is locked/unlocked using spin_lock()/spin_unlock()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, thus when it is woken up, further processes waiting on the list
+ * are not considered.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_exclusive_locked(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+
+/**
+ * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock held.  The spinlock is unlocked
+ * while sleeping, but @condition is tested with the lock held, and
+ * the lock is held again when this macro exits.
+ *
+ * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, thus when it is woken up, further processes waiting on the list
+ * are not considered.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
+
#define __wait_event_killable(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c2a54f70ffe..9584b66c249a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3950,6 +3950,7 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
__wake_up_common(q, mode, 1, 0, NULL);
}
+EXPORT_SYMBOL_GPL(__wake_up_locked);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
{
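Likewise, a hedged sketch of the *_locked_irq() variants (not part of the
patch), assuming the producer runs in interrupt context.  It reuses the
hypothetical struct my_fifo from the sketch after the commit message above.

/* Consumer: the wait queue's lock doubles as the data lock and must
 * exclude the interrupt-context producer, hence spin_lock_irq(). */
static int my_fifo_get_irq(struct my_fifo *f)
{
	int ret;

	spin_lock_irq(&f->wqh.lock);
	ret = wait_event_interruptible_locked_irq(f->wqh, f->count > 0);
	if (!ret)
		--f->count;
	spin_unlock_irq(&f->wqh.lock);
	return ret;
}

/* Producer called from an interrupt handler: interrupts are already
 * disabled there, so a plain spin_lock() on the wait queue's lock suffices. */
static void my_fifo_put_irq(struct my_fifo *f)
{
	spin_lock(&f->wqh.lock);
	++f->count;
	wake_up_locked(&f->wqh);
	spin_unlock(&f->wqh.lock);
}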