author		Thomas Gleixner <tglx@linutronix.de>	2021-09-23 18:54:46 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2021-10-01 13:57:51 +0200
commit		ef1f4804b27a54da34de6984d16f1fe8f2cc7011 (patch)
tree		e71b16b881af4a56507699b96f3f810428fb72b1
parent		3e9cc688e56cc2abb9b6067f57c8397f6c96d42c (diff)
download	linux-ef1f4804b27a54da34de6984d16f1fe8f2cc7011.tar.bz2
locking/rt: Take RCU nesting into account for __might_resched()
The general rule that rcu_read_lock() held sections cannot voluntarily sleep applies even on RT kernels. However, the substitution of spin/rw locks on RT-enabled kernels has to be exempt from that rule. On !RT a spin_lock() can obviously nest inside an RCU read side critical section as the lock acquisition is not going to block, but on RT this is no longer the case due to the 'sleeping' spinlock substitution.

The RT patches contained a cheap hack to ignore the RCU nesting depth in might_sleep() checks, which was a pragmatic but incorrect workaround.

Instead of generally ignoring the RCU nesting depth in __might_sleep() and __might_resched() checks, pass the rcu_preempt_depth() via the offsets argument to __might_resched() from spin/read/write_lock(), which makes the checks work correctly even in RCU read side critical sections.

The actual blocking on such a substituted lock within an RCU read side critical section is already handled correctly in __schedule() by treating it as a "preemption" of the RCU read side critical section.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210923165358.368305497@linutronix.de
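For context, the offsets word passed to __might_resched() packs the expected preempt count in its low bits and the expected RCU nesting depth in the bits above MIGHT_RESCHED_RCU_SHIFT. Below is a minimal sketch of the decode on the checking side, assuming the MIGHT_RESCHED_RCU_SHIFT / MIGHT_RESCHED_PREEMPT_MASK split introduced elsewhere in this series in kernel/sched/core.c; the real function carries additional conditions (idle task, boot state, rate limiting), so treat the details as illustrative:

#include <linux/preempt.h>	/* preempt_count() */
#include <linux/rcupdate.h>	/* rcu_preempt_depth() */
#include <linux/printk.h>	/* pr_err() */

/*
 * Sketch only: decode the combined offsets word. The low bits carry the
 * expected preempt count, the bits above MIGHT_RESCHED_RCU_SHIFT the
 * expected RCU read side nesting depth.
 */
#define MIGHT_RESCHED_RCU_SHIFT		8
#define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

void __might_resched(const char *file, int line, unsigned int offsets)
{
	unsigned int expected_preempt = offsets & MIGHT_RESCHED_PREEMPT_MASK;
	int expected_rcu = offsets >> MIGHT_RESCHED_RCU_SHIFT;

	/* The caller announced exactly this nesting: nothing to report. */
	if (preempt_count() == expected_preempt &&
	    rcu_preempt_depth() == expected_rcu)
		return;

	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
	       file, line);
	pr_err("preempt_count: %x, expected: %x\n",
	       preempt_count(), expected_preempt);
	pr_err("RCU nest depth: %d, expected: %d\n",
	       rcu_preempt_depth(), expected_rcu);
}

With RTLOCK_RESCHED_OFFSETS from the hunk below, a spin_lock() taken under one rcu_read_lock() passes an expected RCU depth of 1, so the check matches instead of emitting a bogus splat.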
-rw-r--r--	kernel/locking/spinlock_rt.c	17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index c5289240cfb4..b2e553f9255b 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -24,6 +24,17 @@
#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"
+/*
+ * __might_resched() skips the state check as rtlocks are state
+ * preserving. Take RCU nesting into account as spin/read/write_lock() can
+ * legitimately nest into an RCU read side critical section.
+ */
+#define RTLOCK_RESCHED_OFFSETS \
+        (rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)
+
+#define rtlock_might_resched() \
+        __might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
+
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
        if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
@@ -32,7 +43,7 @@ static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
-        __might_resched(__FILE__, __LINE__, 0);
+        rtlock_might_resched();
        rtlock_lock(&lock->lock);
        rcu_read_lock();
        migrate_disable();
@@ -210,7 +221,7 @@ EXPORT_SYMBOL(rt_write_trylock);
void __sched rt_read_lock(rwlock_t *rwlock)
{
-        __might_resched(__FILE__, __LINE__, 0);
+        rtlock_might_resched();
        rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
        rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
        rcu_read_lock();
@@ -220,7 +231,7 @@ EXPORT_SYMBOL(rt_read_lock);
void __sched rt_write_lock(rwlock_t *rwlock)
{
-        __might_resched(__FILE__, __LINE__, 0);
+        rtlock_might_resched();
        rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
        rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
        rcu_read_lock();
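As a usage illustration, this is the pattern on PREEMPT_RT that previously produced a false "BUG: sleeping function called from invalid context" splat and that the checks above now accept. The lock and variable names are hypothetical:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical example data; the names are illustrative only. */
static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static void example_update(void)
{
        rcu_read_lock();
        /*
         * On PREEMPT_RT spin_lock() is substituted by a 'sleeping'
         * rtmutex based lock. rtlock_might_resched() passes the current
         * rcu_preempt_depth() as the expected RCU nesting, so this
         * legitimate nesting no longer trips the debug check. If the
         * lock has to block, __schedule() treats it as a preemption of
         * the RCU read side critical section.
         */
        spin_lock(&example_lock);
        example_counter++;
        spin_unlock(&example_lock);
        rcu_read_unlock();
}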