author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-26 13:44:00 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-08-30 16:03:45 -0700
commit	fced9c8cfe6bc8a26dbbf785927aa673c83a7a35 (patch)
tree	04b80df17ce5b236c0aa4fef28a5d1ab6bf889b0 /kernel/rcu
parent	d3052109c0bc9e536d17d627ae628ed8ceb6928c (diff)
download	linux-fced9c8cfe6bc8a26dbbf785927aa673c83a7a35.tar.bz2
rcu: Avoid resched_cpu() when rescheduling the current CPU
The resched_cpu() interface is quite handy, but it does acquire the
specified CPU's runqueue lock, which does not come for free.  This
commit therefore substitutes the following when directing resched_cpu()
at the current CPU:

	set_tsk_need_resched(current);
	set_preempt_need_resched();

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
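For illustration only, a minimal sketch of the pattern this commit applies, assuming a hypothetical helper resched_maybe_current_cpu() that the commit itself does not introduce (each call site open-codes the two calls instead):

	/*
	 * Hypothetical helper, not part of this commit: reschedule @cpu,
	 * avoiding resched_cpu()'s runqueue-lock acquisition when @cpu is
	 * the CPU this code is running on.
	 */
	static void resched_maybe_current_cpu(int cpu)
	{
		if (cpu == smp_processor_id()) {
			/* Local CPU: set the need-resched flags directly. */
			set_tsk_need_resched(current);
			set_preempt_need_resched();
		} else {
			/* Remote CPU: still requires resched_cpu() and its rq lock. */
			resched_cpu(cpu);
		}
	}

The local-CPU branch trades the runqueue lock for two flag updates, which is why the commit applies it only at sites where the CPU being rescheduled is known to be the current one.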
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	11
-rw-r--r--	kernel/rcu/tree_exp.h	17
-rw-r--r--	kernel/rcu/tree_plugin.h	6
3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96731f62594a..92346ab8077d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1354,7 +1354,8 @@ static void print_cpu_stall(void)
* progress and it could be we're stuck in kernel space without context
* switches for an entirely unreasonable amount of time.
*/
- resched_cpu(smp_processor_id());
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
}
static void check_cpu_stall(struct rcu_data *rdp)
@@ -2675,10 +2676,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
WARN_ON_ONCE(!rdp->beenonline);
/* Report any deferred quiescent states if preemption enabled. */
- if (!(preempt_count() & PREEMPT_MASK))
+ if (!(preempt_count() & PREEMPT_MASK)) {
rcu_preempt_deferred_qs(current);
- else if (rcu_preempt_need_deferred_qs(current))
- resched_cpu(rdp->cpu); /* Provoke future context switch. */
+ } else if (rcu_preempt_need_deferred_qs(current)) {
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
/* Update RCU state based on any recent quiescent states. */
rcu_check_quiescent_state(rdp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 78553a8fa3c6..030df96e0d3c 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused)
rcu_report_exp_rdp(rdp);
} else {
rdp->deferred_qs = true;
- resched_cpu(rdp->cpu);
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
}
return;
}
@@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused)
* because we are in an interrupt handler, which will cause that
* function to take an early exit without doing anything.
*
- * Otherwise, use resched_cpu() to force a context switch after
- * the CPU enables everything.
+ * Otherwise, force a context switch after the CPU enables everything.
*/
rdp->deferred_qs = true;
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
- WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()))
+ WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
rcu_preempt_deferred_qs(t);
- else
- resched_cpu(rdp->cpu);
+ } else {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
}
/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
@@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused)
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
/* Store .exp before .rcu_urgent_qs. */
smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
- resched_cpu(smp_processor_id());
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
}
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1e80a0da7924..978ce3539809 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user)
if (t->rcu_read_lock_nesting > 0 ||
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
/* No QS, force context switch if deferred. */
- if (rcu_preempt_need_deferred_qs(t))
- resched_cpu(smp_processor_id());
+ if (rcu_preempt_need_deferred_qs(t)) {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
} else if (rcu_preempt_need_deferred_qs(t)) {
rcu_preempt_deferred_qs(t); /* Report deferred QS. */
return;