author	Peter Zijlstra <peterz@infradead.org>	2021-08-15 23:27:58 +0200
committer	Ingo Molnar <mingo@kernel.org>	2021-08-17 17:04:01 +0200
commit	830e6acc8a1cafe153a0d88f9b2455965b396131
tree	c45e285d78476b3b1dcb6b9a70f42a38d820f7fb /kernel/locking/rtmutex_api.c
parent	531ae4b06a737ed5539cd75dc6f6b9a28f900bba
locking/rtmutex: Split out the inner parts of 'struct rtmutex'
On RT builds, rwsem, mutex, spinlock and rwlock are substituted with wrappers around rtmutexes. Split the inner workings out so each lock substitution can use them with the appropriate lockdep annotations. This avoids having an extra unused lockdep map in the wrapped rtmutex.

No functional change.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.784739994@linutronix.de
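For context, the split this patch introduces in include/linux/rtmutex.h roughly looks as follows. This is a sketch of the resulting layout, not the verbatim header:

	/*
	 * Inner part, shared by all lock substitutions: the wait_lock,
	 * the waiter rbtree and the owner pointer live here.
	 */
	struct rt_mutex_base {
		raw_spinlock_t		wait_lock;
		struct rb_root_cached	waiters;
		struct task_struct	*owner;
	};

	/*
	 * The plain rt_mutex wraps the base and adds its own lockdep map;
	 * the rwsem/mutex/spinlock/rwlock substitutions can likewise carry
	 * their own dep_map instead of inheriting an unused one from a
	 * wrapped rt_mutex.
	 */
	struct rt_mutex {
		struct rt_mutex_base	rtmutex;
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map	dep_map;
	#endif
	};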
Diffstat (limited to 'kernel/locking/rtmutex_api.c')
-rw-r--r--	kernel/locking/rtmutex_api.c	41
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index fc1322f5b219..38de4b137b9e 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -26,12 +26,18 @@ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
might_sleep();
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- ret = __rt_mutex_lock(lock, state);
+ ret = __rt_mutex_lock(&lock->rtmutex, state);
if (ret)
mutex_release(&lock->dep_map, _RET_IP_);
return ret;
}
+void rt_mutex_base_init(struct rt_mutex_base *rtb)
+{
+ __rt_mutex_base_init(rtb);
+}
+EXPORT_SYMBOL(rt_mutex_base_init);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* rt_mutex_lock_nested - lock a rt_mutex
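The rt_mutex_base_init() helper added in the hunk above is exported so that lock substitutions built around struct rt_mutex_base, including ones instantiated from modules, can initialize the inner lock without going through the lockdep-carrying struct rt_mutex. A minimal sketch, assuming a hypothetical wrapper type (the names here are illustrative, not from this patch):

	/* Hypothetical RT lock substitution embedding the inner part. */
	struct my_rt_lock {
		struct rt_mutex_base	lock;
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		struct lockdep_map	dep_map;	/* the wrapper's own map */
	#endif
	};

	static void my_rt_lock_init(struct my_rt_lock *l, const char *name,
				    struct lock_class_key *key)
	{
		rt_mutex_base_init(&l->lock);
		lockdep_init_map_wait(&l->dep_map, name, key, 0, LD_WAIT_SLEEP);
	}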
@@ -93,7 +99,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
return 0;
- ret = __rt_mutex_trylock(lock);
+ ret = __rt_mutex_trylock(&lock->rtmutex);
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
@@ -109,19 +115,19 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
- __rt_mutex_unlock(lock);
+ __rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/*
* Futex variants, must not use fastpath.
*/
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
return rt_mutex_slowtrylock(lock);
}
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
return __rt_mutex_slowtrylock(lock);
}
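The futex variants now take struct rt_mutex_base directly: the rtmutex embedded in the futex pi_state never needs a lockdep map, so after this split it can be a bare base. Roughly, and assuming the shape of the corresponding pi_state change elsewhere in this patch (field order approximate):

	/* kernel/futex.c (sketch): the PI state embeds the inner lock. */
	struct futex_pi_state {
		struct list_head	list;
		struct rt_mutex_base	pi_mutex;	/* was: struct rt_mutex */
		struct task_struct	*owner;
		refcount_t		refcount;
		union futex_key		key;
	};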
@@ -133,7 +139,7 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
* @lock: The rt_mutex to be unlocked
* @wake_q: The wake queue head from which to get the next lock waiter
*/
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
struct wake_q_head *wake_q)
{
lockdep_assert_held(&lock->wait_lock);
@@ -156,7 +162,7 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
return true; /* call postunlock() */
}
-void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
+void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
DEFINE_WAKE_Q(wake_q);
unsigned long flags;
@@ -182,12 +188,11 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
* Initializing a locked rt_mutex is not allowed
*/
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ __rt_mutex_base_init(&lock->rtmutex);
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
-
- __rt_mutex_basic_init(lock);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
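Note the new ordering in __rt_mutex_init(): the base is initialized before the lockdep map is set up, replacing the old trailing __rt_mutex_basic_init() call. A hedged usage sketch follows; in-tree code normally reaches this function through the rt_mutex_init() wrapper macro, which supplies a static lock_class_key, but the direct call is shown here for clarity (my_mutex and my_mutex_key are hypothetical):

	static struct rt_mutex my_mutex;		/* hypothetical lock */
	static struct lock_class_key my_mutex_key;	/* hypothetical key */

	static void my_subsys_init(void)
	{
		__rt_mutex_init(&my_mutex, "my_mutex", &my_mutex_key);
	}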
@@ -205,10 +210,10 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
* possible at this point because the pi_state which contains the rtmutex
* is not yet visible to other tasks.
*/
-void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
struct task_struct *proxy_owner)
{
- __rt_mutex_basic_init(lock);
+ __rt_mutex_base_init(lock);
rt_mutex_set_owner(lock, proxy_owner);
}
@@ -224,7 +229,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock,
* possible because it belongs to the pi_state which is about to be freed
and it is no longer visible to other tasks.
*/
-void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock)
+void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
@@ -249,7 +254,7 @@ void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock)
*
* Special API call for PI-futex support.
*/
-int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
@@ -296,7 +301,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*
* Special API call for PI-futex support.
*/
-int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
@@ -328,7 +333,7 @@ int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*
* Special API call for PI-futex support
*/
-int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -368,7 +373,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
*
* Special API call for PI-futex support
*/
-bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
{
bool cleanup = false;
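The proxy-lock functions above form a small state machine used by the PI-futex code: start (enqueue a waiter on behalf of another task), wait (block until the lock is acquired), cleanup (detach the waiter if acquisition failed). A simplified sketch of the calling sequence, modeled loosely on kernel/futex.c with error handling abbreviated (the function name is illustrative):

	static int proxy_lock_sketch(struct rt_mutex_base *pi_mutex,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task,
				     struct hrtimer_sleeper *to)
	{
		int ret;

		/* Enqueue @task as a waiter; may acquire the lock outright. */
		ret = rt_mutex_start_proxy_lock(pi_mutex, waiter, task);
		if (ret)
			return ret == 1 ? 0 : ret;	/* 1: acquired for @task */

		/* Block until the lock is taken, a timeout or a signal. */
		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, waiter);

		/*
		 * On failure, detach the waiter; cleanup returning false
		 * means the lock was acquired after all.
		 */
		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, waiter))
			ret = 0;

		return ret;
	}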
@@ -413,7 +418,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
struct rt_mutex_waiter *waiter;
- struct rt_mutex *next_lock;
+ struct rt_mutex_base *next_lock;
unsigned long flags;
raw_spin_lock_irqsave(&task->pi_lock, flags);