author    Lai Jiangshan <laijs@cn.fujitsu.com>          2014-11-18 16:30:01 +0800
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2015-01-06 11:02:52 -0800
commit    abaf3f9d275b8d856ae5e47531e40c0bfeac012b (patch)
tree      017561fdbffe2f3e840d6af7c5a7bbcf015c70c9 /kernel/rcu
parent    3ba4d0e09bf965297e97adf195e0ea246cfe5c74 (diff)
rcu: Revert "Allow post-unlock reference for rt_mutex" to avoid priority-inversion
The patch dfeb9765ce3c ("Allow post-unlock reference for rt_mutex") ensured
that RCU priority boosting remained safe even when the rt_mutex has a
post-unlock reference. But an rt_mutex allowing post-unlock references is
itself a bug, and it was fixed by commit 27e35715df54 ("rtmutex: Plug slow
unlock race"). That fix made the earlier patch (dfeb9765ce3c) useless.

Even worse, the priority inversion introduced by the earlier patch still
exists:

rcu_read_unlock_special() {
	rt_mutex_unlock(&rnp->boost_mtx);
	/* Priority inversion:
	 * The current task has just been deboosted, so it can immediately
	 * be preempted as a low-priority task and may wait a long time
	 * before being rescheduled.  Meanwhile, the rcu-boost kthread
	 * sleeps waiting on this low-priority task, so the booster
	 * cannot work as expected.
	 */
	complete(&rnp->boost_completion);
}

Just revert the patch to avoid this.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
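[Editor's note: for context, here is a condensed sketch of the booster side
that this inversion stalls, as it stood before the revert. It is
reconstructed from the second hunk below; it is not standalone, runnable
code, and the comments are explanatory rather than from the source.]

	/* rcu_boost(), pre-revert (condensed sketch, not the full function): */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);	/* t "holds" the lock */
	init_completion(&rnp->boost_completion);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&rnp->boost_mtx);		/* blocks behind t, boosting it */
	rt_mutex_unlock(&rnp->boost_mtx);
	/* Sleeps behind the just-deboosted, possibly-preempted reader,
	 * which is exactly the priority inversion described above: */
	wait_for_completion(&rnp->boost_completion);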
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.h	5
-rw-r--r--	kernel/rcu/tree_plugin.h	8
2 files changed, 1 insertion(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 883ebc8e2b6e..95356477d560 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -172,11 +172,6 @@ struct rcu_node {
/* queued on this rcu_node structure that */
/* are blocking the current grace period, */
/* there can be no such task. */
- struct completion boost_completion;
- /* Used to ensure that the rt_mutex used */
- /* to carry out the boosting is fully */
- /* released with no future boostee accesses */
- /* before that rt_mutex is re-initialized. */
struct rt_mutex boost_mtx;
/* Used only for the priority-boosting */
/* side effect, not as a lock. */
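
[Editor's note: a condensed before/after view of the boost-related fields in
struct rcu_node, derived from this hunk and the commit message; the comments
are explanatory, not from the source.]

	struct completion boost_completion;	/* removed: the handshake is no
						 * longer needed once
						 * rt_mutex_unlock() guarantees
						 * no post-unlock reference
						 * (27e35715df54). */
	struct rt_mutex boost_mtx;		/* kept: used only for the
						 * priority-boosting side
						 * effect, not as a lock. */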
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1fac68220999..625e26040e6b 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -429,10 +429,8 @@ void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
- if (drop_boost_mutex) {
+ if (drop_boost_mutex)
rt_mutex_unlock(&rnp->boost_mtx);
- complete(&rnp->boost_completion);
- }
#endif /* #ifdef CONFIG_RCU_BOOST */
/*
@@ -1081,15 +1079,11 @@ static int rcu_boost(struct rcu_node *rnp)
*/
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
- init_completion(&rnp->boost_completion);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
/* Lock only for side effect: boosts task t's priority. */
rt_mutex_lock(&rnp->boost_mtx);
rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
- /* Wait for boostee to be done w/boost_mtx before reinitializing. */
- wait_for_completion(&rnp->boost_completion);
-
return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
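
[Editor's note: taken together, the boost/unboost handshake after this revert
reduces to the rt_mutex alone. A condensed sketch of the resulting paths,
obtained by applying the hunks above; the comments are explanatory, not from
the source.]

	/* Booster side, rcu_boost(): */
	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&rnp->boost_mtx);		/* Lock only for side effect:
						 * boosts task t's priority. */
	rt_mutex_unlock(&rnp->boost_mtx);	/* Then keep lockdep happy. */

	/* Boostee side, rcu_read_unlock_special(): */
	if (drop_boost_mutex)
		rt_mutex_unlock(&rnp->boost_mtx);	/* Deboosts safely: with
							 * the slow-unlock race
							 * plugged, no post-unlock
							 * reference remains. */

Note that the booster no longer sleeps after its unlock, so a deboosted and
immediately preempted reader can no longer stall it.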