author     Paul E. McKenney <paulmck@linux.ibm.com>   2019-06-02 13:41:08 -0700
committer  Paul E. McKenney <paulmck@linux.ibm.com>   2019-08-13 14:35:49 -0700
commit     4fd8c5f153bc41ae847b9ddb1539b34f70c18278
tree       9ee82606a6959988cb925ec1404e37b66f6145a4 /kernel
parent     523bddd553c09a2cf051eb724bffba680424f5ec
rcu/nocb: Reduce ->nocb_lock contention with separate ->nocb_gp_lock
The sleep/wakeup of the no-CBs grace-period kthreads is synchronized
using the ->nocb_lock of the first CPU corresponding to that kthread.
This commit provides a separate ->nocb_gp_lock for this purpose, thus
reducing contention on ->nocb_lock.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
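For readers who want the locking pattern in isolation, below is a minimal
userspace sketch (not kernel code) of the idea this commit applies: the flag
that tells the grace-period kthread whether to sleep is moved under its own
dedicated lock, so a waker never has to take the heavily contended callback
lock. All names here (struct nocb_group, cb_lock, gp_lock, gp_sleep, wake_gp,
gp_wait) are illustrative stand-ins for the kernel's rcu_data fields, and a
pthread condition variable stands in for the kernel's swait queue.

#include <pthread.h>
#include <stdbool.h>

/* Userspace analogue of the relevant rcu_data fields (illustrative names). */
struct nocb_group {
	pthread_mutex_t cb_lock;  /* Hot lock: protects callback lists (cf. ->nocb_lock). */
	pthread_mutex_t gp_lock;  /* Dedicated lock: protects only gp_sleep (cf. ->nocb_gp_lock). */
	bool gp_sleep;            /* Is the grace-period thread asleep? (cf. ->nocb_gp_sleep) */
	pthread_cond_t gp_wq;     /* Stand-in for the kernel's ->nocb_gp_wq swait queue. */
};

static struct nocb_group grp = {
	.cb_lock  = PTHREAD_MUTEX_INITIALIZER,
	.gp_lock  = PTHREAD_MUTEX_INITIALIZER,
	.gp_sleep = false,
	.gp_wq    = PTHREAD_COND_INITIALIZER,
};

/* Waker path: clear the sleep flag and wake the GP thread without touching cb_lock. */
static void wake_gp(struct nocb_group *g)
{
	pthread_mutex_lock(&g->gp_lock);
	g->gp_sleep = false;
	pthread_cond_signal(&g->gp_wq);
	pthread_mutex_unlock(&g->gp_lock);
}

/* GP-thread path: mark itself asleep, then wait until a waker clears the flag. */
static void gp_wait(struct nocb_group *g)
{
	pthread_mutex_lock(&g->gp_lock);
	g->gp_sleep = true;
	while (g->gp_sleep)
		pthread_cond_wait(&g->gp_wq, &g->gp_lock);
	pthread_mutex_unlock(&g->gp_lock);
}

Before this commit, both of these paths had to acquire the equivalent of
cb_lock (the kernel's ->nocb_lock of the leader CPU), serializing the
sleep/wakeup handshake against every callback enqueue on that group's CPUs;
the dedicated gp_lock keeps that handshake off the hot path.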
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcu/tree.h        | 3 ++-
-rw-r--r--  kernel/rcu/tree_plugin.h | 9 +++++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 7062f9d9c053..2c3e9068671c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -202,7 +202,8 @@ struct rcu_data {
 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
 
 	/* The following fields are used by GP kthread, hence own cacheline. */
-	bool nocb_gp_sleep ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
+	bool nocb_gp_sleep;
 					/* Is the nocb GP thread asleep? */
 	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
 	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index c4cbfb5dc48d..e92bc39c4008 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1604,9 +1604,9 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force,
 		del_timer(&rdp->nocb_timer);
 		rcu_nocb_unlock_irqrestore(rdp, flags);
 		smp_mb(); /* enqueue before ->nocb_gp_sleep. */
-		rcu_nocb_lock_irqsave(rdp_gp, flags);
+		raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
 		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
-		rcu_nocb_unlock_irqrestore(rdp_gp, flags);
+		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
 		wake_up_process(rdp_gp->nocb_gp_kthread);
 	} else {
 		rcu_nocb_unlock_irqrestore(rdp, flags);
@@ -1761,9 +1761,9 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
 	}
 	if (!rcu_nocb_poll) {
-		rcu_nocb_lock_irqsave(my_rdp, flags);
+		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
 		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
-		rcu_nocb_unlock_irqrestore(my_rdp, flags);
+		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
 	}
 	WARN_ON(signal_pending(current));
 }
@@ -1943,6 +1943,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 	init_swait_queue_head(&rdp->nocb_cb_wq);
 	init_swait_queue_head(&rdp->nocb_gp_wq);
 	raw_spin_lock_init(&rdp->nocb_lock);
+	raw_spin_lock_init(&rdp->nocb_gp_lock);
 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
 }