author     Paul E. McKenney <paulmck@kernel.org>  2021-11-09 15:34:56 -0800
committer  Paul E. McKenney <paulmck@kernel.org>  2021-12-09 10:51:44 -0800
commit     57881863ad15fbccbfa637b5e4b67cd3a4520643 (patch)
tree       a1cce01b702f39806c1f98255feb2d7ddba1898e /kernel/rcu/tasks.h
parent     4d1114c05467b5f421d99121bff22a9633390722 (diff)
download   linux-57881863ad15fbccbfa637b5e4b67cd3a4520643.tar.bz2
rcu-tasks: Abstract invocations of callbacks
This commit adds a rcu_tasks_invoke_cbs() function that invokes all
ready callbacks on all of the per-CPU lists that are currently in use.
Reported-by: Martin Lau <kafai@fb.com>
Cc: Neeraj Upadhyay <neeraj.iitr10@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
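For context, the callbacks drained by the new rcu_tasks_invoke_cbs() are queued by call_rcu_tasks(). Below is a minimal sketch of such a caller; call_rcu_tasks() is the real queueing API, while struct my_obj and its functions are hypothetical, for illustration only:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object whose freeing must wait for an RCU Tasks grace period. */
struct my_obj {
	int data;
	struct rcu_head rh;	/* embedded callback descriptor */
};

static void my_obj_free(struct rcu_head *rhp)
{
	struct my_obj *p = container_of(rhp, struct my_obj, rh);

	kfree(p);	/* safe: every task has passed a voluntary context switch */
}

static void my_obj_retire(struct my_obj *p)
{
	/* Queue p->rh on the per-CPU lists later drained by rcu_tasks_invoke_cbs(). */
	call_rcu_tasks(&p->rh, my_obj_free);
}
```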
Diffstat (limited to 'kernel/rcu/tasks.h')
-rw-r--r-- | kernel/rcu/tasks.h | 56
1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index e63987a5af56..6b3406a47720 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -255,14 +255,42 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 	return needgpcb;
 }
 
-/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
-static int __noreturn rcu_tasks_kthread(void *arg)
+// Advance callbacks and invoke any that are ready.
+static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp)
 {
+	int cpu;
 	unsigned long flags;
 	int len;
-	int needgpcb;
 	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
 	struct rcu_head *rhp;
+
+	for (cpu = 0; cpu < rtp->percpu_enqueue_lim; cpu++) {
+		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+		if (rcu_segcblist_empty(&rtpcp->cblist))
+			continue;
+		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
+		rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
+		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+		len = rcl.len;
+		for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+			local_bh_disable();
+			rhp->func(rhp);
+			local_bh_enable();
+			cond_resched();
+		}
+		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+		rcu_segcblist_add_len(&rtpcp->cblist, -len);
+		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
+		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+	}
+}
+
+/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
+static int __noreturn rcu_tasks_kthread(void *arg)
+{
+	int needgpcb;
 	struct rcu_tasks *rtp = arg;
 
 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
@@ -276,8 +304,6 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	 * This loop is terminated by the system going down.  ;-)
 	 */
 	for (;;) {
-		struct rcu_tasks_percpu *rtpcp;
-
 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
 
 		/* If there were none, wait a bit and start over. */
@@ -292,24 +318,10 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 			rcu_seq_end(&rtp->tasks_gp_seq);
 		}
 
-		/* Invoke the callbacks. */
+		/* Invoke callbacks. */
 		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
-		rtpcp = per_cpu_ptr(rtp->rtpcpu, 0);
-		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
-		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
-		rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
-		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
-		len = rcl.len;
-		for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
-			local_bh_disable();
-			rhp->func(rhp);
-			local_bh_enable();
-			cond_resched();
-		}
-		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
-		rcu_segcblist_add_len(&rtpcp->cblist, -len);
-		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
-		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+		rcu_tasks_invoke_cbs(rtp);
+
 		/* Paranoid sleep to keep this from entering a tight loop */
 		schedule_timeout_idle(rtp->gp_sleep);
 	}
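The per-CPU loop in rcu_tasks_invoke_cbs() follows a common pattern: extract the ready callbacks while holding the lock, then invoke them with the lock dropped so that concurrent enqueues are never blocked by slow callbacks. Note also that the list's length is decremented only after invocation completes, so the count conservatively includes in-flight callbacks. A minimal userspace sketch of the same extract-then-invoke pattern, using a plain singly-linked list and a pthread mutex in place of the kernel's rcu_segcblist and raw spinlock (all names here are illustrative):

```c
#include <pthread.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *cb);
};

static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cb *cb_list;	/* pending callbacks */

static void invoke_cbs(void)
{
	struct cb *list, *next;

	/* Detach the entire pending list while holding the lock. */
	pthread_mutex_lock(&cb_lock);
	list = cb_list;
	cb_list = NULL;
	pthread_mutex_unlock(&cb_lock);

	/*
	 * Invoke callbacks with the lock dropped, so that enqueues
	 * onto cb_list can proceed concurrently.
	 */
	for (; list; list = next) {
		next = list->next;
		list->func(list);
	}
}
```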