author      Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2018-08-03 21:00:38 -0700
committer   Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2018-08-30 16:03:50 -0700
commit      2dba13f0b6c2b26ff371b8927ac58d20a7d94713 (patch)
tree        fa3fcb58680ea2b5afc82f91b4d148234ea78114 /kernel/rcu
parent      c458a89e964dbf3c56b23eca2018bd0e2380969d (diff)
download    linux-2dba13f0b6c2b26ff371b8927ac58d20a7d94713.tar.bz2
rcu: Switch urgent quiescent-state requests to rcu_data structure
This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the
rcu_dynticks structure and updates the code to access them from the
rcu_data structure.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
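
For context, the sketch below is illustrative only (the real structures carry many more members, elided here): the two urgency hints leave the per-CPU rcu_dynticks structure and are carried by the per-CPU rcu_data structure instead, so callers change only which structure name they hand to the per-CPU accessors, not the access idiom itself.

/* Illustrative sketch only -- members unrelated to this patch are elided. */
#include <stdbool.h>

/* Before: the urgency hints lived alongside the dyntick-idle state. */
struct rcu_dynticks_sketch {
        /* ... dynticks nesting counters ... */
        bool rcu_need_heavy_qs;         /* GP old, need heavy quiescent state. */
        bool rcu_urgent_qs;             /* GP old, need light quiescent state. */
};

/* After: the same hints are carried in the per-CPU rcu_data structure. */
struct rcu_data_sketch {
        /* ... other per-CPU RCU state ... */
        bool rcu_need_heavy_qs;
        bool rcu_urgent_qs;
};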
Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/tree.c         | 12
-rw-r--r--   kernel/rcu/tree.h         |  2
-rw-r--r--   kernel/rcu/tree_exp.h     |  2
-rw-r--r--   kernel/rcu/tree_plugin.h  | 14
4 files changed, 14 insertions, 16 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index e778fd5546d1..7ec0ba885273 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -362,7 +362,7 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	int special;
 
-	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
+	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
 	special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 	/* It is illegal to call this from idle state. */
 	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
@@ -928,7 +928,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 	cpu = task_cpu(t);
 	if (!task_curr(t))
 		return; /* This task is not running on that CPU. */
-	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
+	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
 }
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
@@ -1081,8 +1081,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * is set way high.
 	 */
 	jtsq = READ_ONCE(jiffies_to_sched_qs);
-	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
-	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
+	ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
+	rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
 	if (!READ_ONCE(*rnhqp) &&
 	    (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
 	     time_after(jiffies, rcu_state.jiffies_resched))) {
@@ -2499,13 +2499,13 @@ void rcu_check_callbacks(int user)
 	trace_rcu_utilization(TPS("Start scheduler-tick"));
 	raw_cpu_inc(rcu_data.ticks_this_gp);
 	/* The load-acquire pairs with the store-release setting to true. */
-	if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+	if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		/* Idle and userspace execution already are quiescent states. */
 		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
 			set_tsk_need_resched(current);
 			set_preempt_need_resched();
 		}
-		__this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+		__this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	}
 	rcu_flavor_check_callbacks(user);
 	if (rcu_pending())
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 36a47c7bd882..4c31066ddb94 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -41,8 +41,6 @@ struct rcu_dynticks {
 	long dynticks_nesting;     /* Track process nesting level. */
 	long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
 	atomic_t dynticks;         /* Even value for idle, else odd. */
-	bool rcu_need_heavy_qs;    /* GP old, need heavy quiescent state. */
-	bool rcu_urgent_qs;        /* GP old need light quiescent state. */
 };
 
 /* Communicate arguments to a workqueue handler. */
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 030df96e0d3c..11387fcd4d85 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -780,7 +780,7 @@ static void sync_sched_exp_handler(void *unused)
 	}
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
-	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
+	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
 	set_tsk_need_resched(current);
 	set_preempt_need_resched();
 }
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b5aeb2fe4cfe..161760957a07 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -967,17 +967,17 @@ void rcu_all_qs(void)
 {
 	unsigned long flags;
 
-	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
 		return;
 	preempt_disable();
 	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
 		preempt_enable();
 		return;
 	}
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
 		local_irq_save(flags);
 		rcu_momentary_dyntick_idle();
 		local_irq_restore(flags);
@@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt)
 	trace_rcu_utilization(TPS("Start context switch"));
 	rcu_qs();
 	/* Load rcu_urgent_qs before other flags. */
-	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
 		goto out;
-	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
+	this_cpu_write(rcu_data.rcu_urgent_qs, false);
+	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
 	if (!preempt)
 		rcu_tasks_qs(current);
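
The comment kept in rcu_check_callbacks() ("The load-acquire pairs with the store-release setting to true") names the ordering contract that this relocation leaves untouched. The stand-alone sketch below uses user-space C11 atomics with hypothetical names; it is not kernel code, only a model of that pairing: the writer publishes its per-CPU state before releasing the rcu_urgent_qs hint, so a reader that observes the hint via an acquire load is guaranteed to see that state.

/*
 * Minimal user-space analogy (not kernel code) of the store-release /
 * load-acquire pairing on rcu_urgent_qs.  All names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rcu_data {
        atomic_bool rcu_urgent_qs;      /* hint: please report a QS soon */
        int cpu_no_qs_exp;              /* state that must be published first */
};

static struct fake_rcu_data rdp;

/* Analogous to sync_sched_exp_handler(): set state, then release the hint. */
static void request_urgent_qs(void)
{
        rdp.cpu_no_qs_exp = 1;          /* ordered before the release below */
        atomic_store_explicit(&rdp.rcu_urgent_qs, true, memory_order_release);
}

/* Analogous to the scheduler-tick path: acquire the hint, then act on state. */
static void scheduler_tick(void)
{
        if (atomic_load_explicit(&rdp.rcu_urgent_qs, memory_order_acquire)) {
                /* The acquire guarantees cpu_no_qs_exp == 1 is visible here. */
                printf("urgent QS requested, exp=%d\n", rdp.cpu_no_qs_exp);
                atomic_store_explicit(&rdp.rcu_urgent_qs, false,
                                      memory_order_relaxed);
        }
}

int main(void)
{
        request_urgent_qs();
        scheduler_tick();
        return 0;
}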