author	Paul E. McKenney <paulmck@kernel.org>	2020-03-28 18:53:25 -0700
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 11:03:50 -0700
commit	52b1fc3f798d02a3a9d1cf7a84e98a795223410a (patch)
tree	1b8f8f588009bf7d08e9d7eaef4512138cd3d84b /kernel/rcu
parent	5f5fa7ea89dc82d34ed458f4d7a8634e8e9eefce (diff)
rcutorture: Add test of holding scheduler locks across rcu_read_unlock()
Now that it should be safe to hold scheduler locks across rcu_read_unlock(), even in cases where the corresponding RCU read-side critical section might have been preempted and boosted, this commit adds a test of that capability to rcutorture. This has been tested on current mainline (which can deadlock in this situation), and lockdep duly reported the expected deadlock. On -rcu, lockdep is silent, thus far, anyway.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
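For readers new to the scenario, a minimal sketch of the pattern under test (illustrative only, not part of the commit; it mirrors the pi_lock usage in the patch below):

	unsigned long flags;

	rcu_read_lock();
	/* Read-side critical section; may be preempted and priority-boosted. */
	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rcu_read_unlock();	/* Any deferred unlock work now runs with pi_lock held. */
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

If unlocking a boosted reader must itself take scheduler locks to deboost, the sequence above can self-deadlock, which is the failure lockdep reports on mainline.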
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/rcutorture.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 5453bd557f43..b348cf816d89 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1147,6 +1147,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 				  struct torture_random_state *trsp,
 				  struct rt_read_seg *rtrsp)
 {
+	unsigned long flags;
 	int idxnew = -1;
 	int idxold = *readstate;
 	int statesnew = ~*readstate & newstate;
@@ -1181,8 +1182,15 @@ static void rcutorture_one_extend(int *readstate, int newstate,
 		rcu_read_unlock_bh();
 	if (statesold & RCUTORTURE_RDR_SCHED)
 		rcu_read_unlock_sched();
-	if (statesold & RCUTORTURE_RDR_RCU)
+	if (statesold & RCUTORTURE_RDR_RCU) {
+		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
+
+		if (lockit)
+			raw_spin_lock_irqsave(&current->pi_lock, flags);
 		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+		if (lockit)
+			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	}
 
 	/* Delay if neither beginning nor end and there was a change. */
 	if ((statesnew || statesold) && *readstate && newstate)
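A note on the new gate: lockit is true only when this invocation clears state without setting any new bits (!statesnew, i.e. the final unlock step of a reader) and only when all sixteen low-order bits of torture_random() come up zero, roughly once per 65536 eligible calls. Holding current->pi_lock across cur_ops->readunlock() then reproduces exactly the "scheduler lock across rcu_read_unlock()" pattern described above, but rarely enough not to perturb the rest of the test.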