Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/rtmutex.c	21	++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
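
The change below converts the rt_mutex unlock slowpath to the wake_q mechanism: instead of calling wake_up_process() on the top waiter while lock->wait_lock is still held, the waiter is queued on an on-stack wake queue (WAKE_Q(), wake_q_add()) and the actual wakeup is performed by wake_up_q() only after wait_lock has been released.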
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b025295f4966..44ee8f85a78b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -955,14 +955,13 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 }
 
 /*
- * Wake up the next waiter on the lock.
- *
  * Remove the top waiter from the current tasks pi waiter list and
- * wake it up.
+ * queue it up.
  *
  * Called with lock->wait_lock held.
  */
-static void wakeup_next_waiter(struct rt_mutex *lock)
+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
 	unsigned long flags;
@@ -991,12 +990,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 
 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-	/*
-	 * It's safe to dereference waiter as it cannot go away as
-	 * long as we hold lock->wait_lock. The waiter task needs to
-	 * acquire it in order to dequeue the waiter.
-	 */
-	wake_up_process(waiter->task);
+	wake_q_add(wake_q, waiter->task);
 }
 
 /*
@@ -1258,6 +1252,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static void __sched
 rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+	WAKE_Q(wake_q);
+
 	raw_spin_lock(&lock->wait_lock);
 
 	debug_rt_mutex_unlock(lock);
@@ -1306,10 +1302,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 	/*
 	 * The wakeup next waiter path does not suffer from the above
 	 * race. See the comments there.
+	 *
+	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	wakeup_next_waiter(lock);
+	mark_wakeup_next_waiter(&wake_q, lock);
 
 	raw_spin_unlock(&lock->wait_lock);
 
+	wake_up_q(&wake_q);
 	/* Undo pi boosting if necessary: */
 	rt_mutex_adjust_prio(current);
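
The pattern the patch introduces -- mark the top waiter for wakeup while wait_lock is held, perform the wakeup only after wait_lock is dropped -- keeps the woken task from immediately blocking on the very lock its waker still holds. As a rough, self-contained userspace analogue of that pattern (not kernel code: struct my_lock, mark_wakeup_next(), and the pthread plumbing are invented for illustration, with a condition variable standing in for wake_up_process()):

/* deferred_wakeup.c - userspace analogue of the wake_q pattern above.
 * Build: gcc -pthread deferred_wakeup.c -o deferred_wakeup
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* A parked thread, analogous to an rt_mutex_waiter on the wait list. */
struct waiter {
	pthread_cond_t cond;
	int wake;		/* predicate, only written under wait_lock */
	struct waiter *next;
};

/* Hypothetical lock: wait_lock plays the role of lock->wait_lock. */
struct my_lock {
	pthread_mutex_t wait_lock;
	struct waiter *waiters;	/* head of the wait list */
};

/* Analogue of mark_wakeup_next_waiter(): dequeue the top waiter and mark
 * it for wakeup while wait_lock is held, but defer the signal itself. */
static struct waiter *mark_wakeup_next(struct my_lock *lock)
{
	struct waiter *w = lock->waiters;

	if (w) {
		lock->waiters = w->next;
		w->wake = 1;	/* safe: wait_lock is held */
	}
	return w;
}

/* Analogue of rt_mutex_slowunlock(): the signal (wake_up_q() in the patch)
 * is issued only after wait_lock is dropped, so the woken thread cannot
 * immediately block on a lock its waker still holds. */
static void slowunlock(struct my_lock *lock)
{
	struct waiter *w;

	pthread_mutex_lock(&lock->wait_lock);
	w = mark_wakeup_next(lock);
	pthread_mutex_unlock(&lock->wait_lock);

	if (w)
		pthread_cond_signal(&w->cond);
}

static void *waiter_thread(void *arg)
{
	struct my_lock *lock = arg;
	struct waiter self = { .wake = 0, .next = NULL };

	pthread_cond_init(&self.cond, NULL);
	pthread_mutex_lock(&lock->wait_lock);
	self.next = lock->waiters;	/* enqueue ourselves */
	lock->waiters = &self;
	while (!self.wake)		/* tolerate spurious wakeups */
		pthread_cond_wait(&self.cond, &lock->wait_lock);
	pthread_mutex_unlock(&lock->wait_lock);

	puts("waiter: woken after wait_lock was released");
	return NULL;
}

int main(void)
{
	struct my_lock lock = { PTHREAD_MUTEX_INITIALIZER, NULL };
	pthread_t t;

	pthread_create(&t, NULL, waiter_thread, &lock);
	sleep(1);	/* crude ordering: let the waiter park first */
	slowunlock(&lock);
	pthread_join(&t, NULL);
	return 0;
}

As in the patch, the list manipulation and the predicate update happen inside the critical section, while the wake primitive runs outside it, so wait_lock is held only for the dequeue and the woken thread finds it free when it needs it.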