From 7d0ae8086b828311250c6afdf800b568ac9bd693 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Mar 2015 14:57:58 -0800 Subject: rcu: Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE() This commit moves from the old ACCESS_ONCE() API to the new READ_ONCE() and WRITE_ONCE() APIs. Signed-off-by: Paul E. McKenney [ paulmck: Updated to include kernel/torture.c as suggested by Jason Low. ] --- include/linux/rculist.h | 6 +- include/linux/rcupdate.h | 16 ++--- kernel/rcu/rcutorture.c | 2 +- kernel/rcu/srcu.c | 10 +-- kernel/rcu/tiny_plugin.h | 12 ++-- kernel/rcu/tree.c | 184 +++++++++++++++++++++++------------------------ kernel/rcu/tree_plugin.h | 93 ++++++++++++------------ kernel/rcu/tree_trace.c | 6 +- kernel/rcu/update.c | 30 ++++---- kernel/torture.c | 26 +++---- 10 files changed, 193 insertions(+), 192 deletions(-) diff --git a/include/linux/rculist.h b/include/linux/rculist.h index a18b16f1dc0e..665397247e82 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -29,8 +29,8 @@ */ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) { - ACCESS_ONCE(list->next) = list; - ACCESS_ONCE(list->prev) = list; + WRITE_ONCE(list->next, list); + WRITE_ONCE(list->prev, list); } /* @@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list, #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ - struct list_head *__next = ACCESS_ONCE(__ptr->next); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 573a5afd5ed8..87bb0eee665b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -364,8 +364,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu; #define rcu_note_voluntary_context_switch(t) \ do { \ rcu_all_qs(); \ - if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ - ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ + if (READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) #else /* #ifdef CONFIG_TASKS_RCU */ #define TASKS_RCU(x) do { } while (0) @@ -609,7 +609,7 @@ static inline void rcu_preempt_sleep_check(void) #define __rcu_access_pointer(p, space) \ ({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ + typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) @@ -630,7 +630,7 @@ static inline void rcu_preempt_sleep_check(void) #define __rcu_access_index(p, space) \ ({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ + typeof(p) _________p1 = READ_ONCE(p); \ rcu_dereference_sparse(p, space); \ (_________p1); \ }) @@ -659,7 +659,7 @@ static inline void rcu_preempt_sleep_check(void) */ #define lockless_dereference(p) \ ({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ + typeof(p) _________p1 = READ_ONCE(p); \ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ (_________p1); \ }) @@ -702,7 +702,7 @@ static inline void rcu_preempt_sleep_check(void) * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the - * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful * when the value of this pointer is accessed, but the pointer is not * dereferenced, for example, when testing an RCU-protected pointer against * NULL. 
Although rcu_access_pointer() may also be used in cases where @@ -791,7 +791,7 @@ static inline void rcu_preempt_sleep_check(void) * @p: The index to read * * Return the value of the specified RCU-protected index, but omit the - * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful * when the value of this index is accessed, but the index is not * dereferenced, for example, when testing an RCU-protected index against * -1. Although rcu_access_index() may also be used in cases where @@ -827,7 +827,7 @@ static inline void rcu_preempt_sleep_check(void) * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This + * both the smp_read_barrier_depends() and the READ_ONCE(). This * is useful in cases where update-side locks prevent the value of the * pointer from changing. Please note that this primitive does -not- * prevent the compiler from repeating this reference or combining it diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 8dbe27611ec3..a67ef6ff86b0 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1413,7 +1413,7 @@ static int rcu_torture_barrier_cbs(void *arg) do { wait_event(barrier_cbs_wq[myid], (newphase = - ACCESS_ONCE(barrier_phase)) != lastphase || + READ_ONCE(barrier_phase)) != lastphase || torture_must_stop()); lastphase = newphase; smp_mb(); /* ensure barrier_phase load before ->call(). */ diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c index cad76e76b4e7..fb33d35ee0b7 100644 --- a/kernel/rcu/srcu.c +++ b/kernel/rcu/srcu.c @@ -151,7 +151,7 @@ static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx) unsigned long t; for_each_possible_cpu(cpu) { - t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); + t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); sum += t; } return sum; @@ -168,7 +168,7 @@ static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx) unsigned long t; for_each_possible_cpu(cpu) { - t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); + t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); sum += t; } return sum; @@ -265,8 +265,8 @@ static int srcu_readers_active(struct srcu_struct *sp) unsigned long sum = 0; for_each_possible_cpu(cpu) { - sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); - sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); + sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); + sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); } return sum; } @@ -296,7 +296,7 @@ int __srcu_read_lock(struct srcu_struct *sp) { int idx; - idx = ACCESS_ONCE(sp->completed) & 0x1; + idx = READ_ONCE(sp->completed) & 0x1; preempt_disable(); __this_cpu_inc(sp->per_cpu_ref->c[idx]); smp_mb(); /* B */ /* Avoid leaking the critical section. 
*/ diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h index f94e209a10d6..e492a5253e0f 100644 --- a/kernel/rcu/tiny_plugin.h +++ b/kernel/rcu/tiny_plugin.h @@ -144,16 +144,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp) return; rcp->ticks_this_gp++; j = jiffies; - js = ACCESS_ONCE(rcp->jiffies_stall); + js = READ_ONCE(rcp->jiffies_stall); if (rcp->rcucblist && ULONG_CMP_GE(j, js)) { pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n", rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE, jiffies - rcp->gp_start, rcp->qlen); dump_stack(); - ACCESS_ONCE(rcp->jiffies_stall) = jiffies + - 3 * rcu_jiffies_till_stall_check() + 3; + WRITE_ONCE(rcp->jiffies_stall, + jiffies + 3 * rcu_jiffies_till_stall_check() + 3); } else if (ULONG_CMP_GE(j, js)) { - ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); + WRITE_ONCE(rcp->jiffies_stall, + jiffies + rcu_jiffies_till_stall_check()); } } @@ -161,7 +162,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp) { rcp->ticks_this_gp = 0; rcp->gp_start = jiffies; - ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check(); + WRITE_ONCE(rcp->jiffies_stall, + jiffies + rcu_jiffies_till_stall_check()); } static void check_cpu_stalls(void) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8cf7304b2867..0628df155970 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -191,17 +191,17 @@ unsigned long rcutorture_vernum; */ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) { - return ACCESS_ONCE(rnp->qsmaskinitnext); + return READ_ONCE(rnp->qsmaskinitnext); } /* - * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s + * Return true if an RCU grace period is in progress. The READ_ONCE()s * permit this function to be invoked without holding the root rcu_node * structure's ->lock, but of course results can be subject to change. */ static int rcu_gp_in_progress(struct rcu_state *rsp) { - return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); + return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum); } /* @@ -278,8 +278,8 @@ static void rcu_momentary_dyntick_idle(void) if (!(resched_mask & rsp->flavor_mask)) continue; smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */ - if (ACCESS_ONCE(rdp->mynode->completed) != - ACCESS_ONCE(rdp->cond_resched_completed)) + if (READ_ONCE(rdp->mynode->completed) != + READ_ONCE(rdp->cond_resched_completed)) continue; /* @@ -491,9 +491,9 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, break; } if (rsp != NULL) { - *flags = ACCESS_ONCE(rsp->gp_flags); - *gpnum = ACCESS_ONCE(rsp->gpnum); - *completed = ACCESS_ONCE(rsp->completed); + *flags = READ_ONCE(rsp->gp_flags); + *gpnum = READ_ONCE(rsp->gpnum); + *completed = READ_ONCE(rsp->completed); return; } *flags = 0; @@ -539,10 +539,10 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) static int rcu_future_needs_gp(struct rcu_state *rsp) { struct rcu_node *rnp = rcu_get_root(rsp); - int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1; + int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; int *fp = &rnp->need_future_gp[idx]; - return ACCESS_ONCE(*fp); + return READ_ONCE(*fp); } /* @@ -565,7 +565,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) return 1; /* Yes, this CPU has newly registered callbacks. 
*/ for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) if (rdp->nxttail[i - 1] != rdp->nxttail[i] && - ULONG_CMP_LT(ACCESS_ONCE(rsp->completed), + ULONG_CMP_LT(READ_ONCE(rsp->completed), rdp->nxtcompleted[i])) return 1; /* Yes, CBs for future grace period. */ return 0; /* No grace period needed. */ @@ -1011,9 +1011,9 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp, trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti")); return 1; } else { - if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4, + if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rdp->mynode->gpnum)) - ACCESS_ONCE(rdp->gpwrap) = true; + WRITE_ONCE(rdp->gpwrap, true); return 0; } } @@ -1093,12 +1093,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, if (ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs) || ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { - if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) { - ACCESS_ONCE(rdp->cond_resched_completed) = - ACCESS_ONCE(rdp->mynode->completed); + if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) { + WRITE_ONCE(rdp->cond_resched_completed, + READ_ONCE(rdp->mynode->completed)); smp_mb(); /* ->cond_resched_completed before *rcrmp. */ - ACCESS_ONCE(*rcrmp) = - ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask; + WRITE_ONCE(*rcrmp, + READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask); resched_cpu(rdp->cpu); /* Force CPU into scheduler. */ rdp->rsp->jiffies_resched += 5; /* Enable beating. */ } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { @@ -1119,9 +1119,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp) rsp->gp_start = j; smp_wmb(); /* Record start time before stall time. */ j1 = rcu_jiffies_till_stall_check(); - ACCESS_ONCE(rsp->jiffies_stall) = j + j1; + WRITE_ONCE(rsp->jiffies_stall, j + j1); rsp->jiffies_resched = j + j1 / 2; - rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs); + rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs); } /* @@ -1133,7 +1133,7 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) unsigned long j; j = jiffies; - gpa = ACCESS_ONCE(rsp->gp_activity); + gpa = READ_ONCE(rsp->gp_activity); if (j - gpa > 2 * HZ) pr_err("%s kthread starved for %ld jiffies!\n", rsp->name, j - gpa); @@ -1173,12 +1173,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum) /* Only let one CPU complain about others per time interval. 
*/ raw_spin_lock_irqsave(&rnp->lock, flags); - delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall); + delta = jiffies - READ_ONCE(rsp->jiffies_stall); if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { raw_spin_unlock_irqrestore(&rnp->lock, flags); return; } - ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; + WRITE_ONCE(rsp->jiffies_stall, + jiffies + 3 * rcu_jiffies_till_stall_check() + 3); raw_spin_unlock_irqrestore(&rnp->lock, flags); /* @@ -1212,12 +1213,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum) if (ndetected) { rcu_dump_cpu_stacks(rsp); } else { - if (ACCESS_ONCE(rsp->gpnum) != gpnum || - ACCESS_ONCE(rsp->completed) == gpnum) { + if (READ_ONCE(rsp->gpnum) != gpnum || + READ_ONCE(rsp->completed) == gpnum) { pr_err("INFO: Stall ended before state dump start\n"); } else { j = jiffies; - gpa = ACCESS_ONCE(rsp->gp_activity); + gpa = READ_ONCE(rsp->gp_activity); pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", rsp->name, j - gpa, j, gpa, jiffies_till_next_fqs, @@ -1262,9 +1263,9 @@ static void print_cpu_stall(struct rcu_state *rsp) rcu_dump_cpu_stacks(rsp); raw_spin_lock_irqsave(&rnp->lock, flags); - if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall))) - ACCESS_ONCE(rsp->jiffies_stall) = jiffies + - 3 * rcu_jiffies_till_stall_check() + 3; + if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall))) + WRITE_ONCE(rsp->jiffies_stall, + jiffies + 3 * rcu_jiffies_till_stall_check() + 3); raw_spin_unlock_irqrestore(&rnp->lock, flags); /* @@ -1307,20 +1308,20 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) * Given this check, comparisons of jiffies, rsp->jiffies_stall, * and rsp->gp_start suffice to forestall false positives. */ - gpnum = ACCESS_ONCE(rsp->gpnum); + gpnum = READ_ONCE(rsp->gpnum); smp_rmb(); /* Pick up ->gpnum first... */ - js = ACCESS_ONCE(rsp->jiffies_stall); + js = READ_ONCE(rsp->jiffies_stall); smp_rmb(); /* ...then ->jiffies_stall before the rest... */ - gps = ACCESS_ONCE(rsp->gp_start); + gps = READ_ONCE(rsp->gp_start); smp_rmb(); /* ...and finally ->gp_start before ->completed. */ - completed = ACCESS_ONCE(rsp->completed); + completed = READ_ONCE(rsp->completed); if (ULONG_CMP_GE(completed, gpnum) || ULONG_CMP_LT(j, js) || ULONG_CMP_GE(gps, js)) return; /* No stall or GP completed since entering function. */ rnp = rdp->mynode; if (rcu_gp_in_progress(rsp) && - (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) { + (READ_ONCE(rnp->qsmask) & rdp->grpmask)) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); @@ -1347,7 +1348,7 @@ void rcu_cpu_stall_reset(void) struct rcu_state *rsp; for_each_rcu_flavor(rsp) - ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2; + WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2); } /* @@ -1457,7 +1458,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, * doing some extra useless work. 
*/ if (rnp->gpnum != rnp->completed || - ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) { + READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) { rnp->need_future_gp[c & 0x1]++; trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf")); goto out; @@ -1542,7 +1543,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) static void rcu_gp_kthread_wake(struct rcu_state *rsp) { if (current == rsp->gp_kthread || - !ACCESS_ONCE(rsp->gp_flags) || + !READ_ONCE(rsp->gp_flags) || !rsp->gp_kthread) return; wake_up(&rsp->gp_wq); @@ -1677,7 +1678,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, /* Handle the ends of any preceding grace periods first. */ if (rdp->completed == rnp->completed && - !unlikely(ACCESS_ONCE(rdp->gpwrap))) { + !unlikely(READ_ONCE(rdp->gpwrap))) { /* No grace period end, so just accelerate recent callbacks. */ ret = rcu_accelerate_cbs(rsp, rnp, rdp); @@ -1692,7 +1693,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend")); } - if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) { + if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) { /* * If the current grace period is waiting for this CPU, * set up to detect a quiescent state, otherwise don't @@ -1704,7 +1705,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); zero_cpu_stall_ticks(rdp); - ACCESS_ONCE(rdp->gpwrap) = false; + WRITE_ONCE(rdp->gpwrap, false); } return ret; } @@ -1717,9 +1718,9 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) local_irq_save(flags); rnp = rdp->mynode; - if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && - rdp->completed == ACCESS_ONCE(rnp->completed) && - !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */ + if ((rdp->gpnum == READ_ONCE(rnp->gpnum) && + rdp->completed == READ_ONCE(rnp->completed) && + !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ local_irq_restore(flags); return; @@ -1740,15 +1741,15 @@ static int rcu_gp_init(struct rcu_state *rsp) struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(rsp); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); - if (!ACCESS_ONCE(rsp->gp_flags)) { + if (!READ_ONCE(rsp->gp_flags)) { /* Spurious wakeup, tell caller to go back to sleep. */ raw_spin_unlock_irq(&rnp->lock); return 0; } - ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */ + WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. 
*/ if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) { /* @@ -1834,9 +1835,9 @@ static int rcu_gp_init(struct rcu_state *rsp) rdp = this_cpu_ptr(rsp->rda); rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; - ACCESS_ONCE(rnp->gpnum) = rsp->gpnum; + WRITE_ONCE(rnp->gpnum, rsp->gpnum); if (WARN_ON_ONCE(rnp->completed != rsp->completed)) - ACCESS_ONCE(rnp->completed) = rsp->completed; + WRITE_ONCE(rnp->completed, rsp->completed); if (rnp == rdp->mynode) (void)__note_gp_changes(rsp, rnp, rdp); rcu_preempt_boost_start_gp(rnp); @@ -1845,7 +1846,7 @@ static int rcu_gp_init(struct rcu_state *rsp) rnp->grphi, rnp->qsmask); raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); if (gp_init_delay > 0 && !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD))) schedule_timeout_uninterruptible(gp_init_delay); @@ -1864,7 +1865,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) unsigned long maxj; struct rcu_node *rnp = rcu_get_root(rsp); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); rsp->n_force_qs++; if (fqs_state == RCU_SAVE_DYNTICK) { /* Collect dyntick-idle snapshots. */ @@ -1882,11 +1883,11 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); } /* Clear flag to prevent immediate re-entry. */ - if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); - ACCESS_ONCE(rsp->gp_flags) = - ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS; + WRITE_ONCE(rsp->gp_flags, + READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS); raw_spin_unlock_irq(&rnp->lock); } return fqs_state; @@ -1903,7 +1904,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(rsp); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); gp_duration = jiffies - rsp->gp_start; @@ -1934,7 +1935,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) smp_mb__after_unlock_lock(); WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); WARN_ON_ONCE(rnp->qsmask); - ACCESS_ONCE(rnp->completed) = rsp->gpnum; + WRITE_ONCE(rnp->completed, rsp->gpnum); rdp = this_cpu_ptr(rsp->rda); if (rnp == rdp->mynode) needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; @@ -1942,7 +1943,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) nocb += rcu_future_gp_cleanup(rsp, rnp); raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); } rnp = rcu_get_root(rsp); raw_spin_lock_irq(&rnp->lock); @@ -1950,16 +1951,16 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) rcu_nocb_gp_set(rnp, nocb); /* Declare grace period done. */ - ACCESS_ONCE(rsp->completed) = rsp->gpnum; + WRITE_ONCE(rsp->completed, rsp->gpnum); trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end")); rsp->fqs_state = RCU_GP_IDLE; rdp = this_cpu_ptr(rsp->rda); /* Advance CBs to reduce false positives below. 
*/ needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; if (needgp || cpu_needs_another_gp(rsp, rdp)) { - ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT; + WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("newreq")); } raw_spin_unlock_irq(&rnp->lock); @@ -1983,20 +1984,20 @@ static int __noreturn rcu_gp_kthread(void *arg) /* Handle grace-period start. */ for (;;) { trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("reqwait")); rsp->gp_state = RCU_GP_WAIT_GPS; wait_event_interruptible(rsp->gp_wq, - ACCESS_ONCE(rsp->gp_flags) & + READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT); /* Locking provides needed memory barrier. */ if (rcu_gp_init(rsp)) break; cond_resched_rcu_qs(); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); WARN_ON(signal_pending(current)); trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("reqwaitsig")); } @@ -2012,39 +2013,39 @@ static int __noreturn rcu_gp_kthread(void *arg) if (!ret) rsp->jiffies_force_qs = jiffies + j; trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("fqswait")); rsp->gp_state = RCU_GP_WAIT_FQS; ret = wait_event_interruptible_timeout(rsp->gp_wq, - ((gf = ACCESS_ONCE(rsp->gp_flags)) & + ((gf = READ_ONCE(rsp->gp_flags)) & RCU_GP_FLAG_FQS) || - (!ACCESS_ONCE(rnp->qsmask) && + (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)), j); /* Locking provides needed memory barriers. */ /* If grace period done, leave loop. */ - if (!ACCESS_ONCE(rnp->qsmask) && + if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp)) break; /* If time for quiescent-state forcing, do it. */ if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) || (gf & RCU_GP_FLAG_FQS)) { trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("fqsstart")); fqs_state = rcu_gp_fqs(rsp, fqs_state); trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("fqsend")); cond_resched_rcu_qs(); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); } else { /* Deal with stray signal. */ cond_resched_rcu_qs(); - ACCESS_ONCE(rsp->gp_activity) = jiffies; + WRITE_ONCE(rsp->gp_activity, jiffies); WARN_ON(signal_pending(current)); trace_rcu_grace_period(rsp->name, - ACCESS_ONCE(rsp->gpnum), + READ_ONCE(rsp->gpnum), TPS("fqswaitsig")); } j = jiffies_till_next_fqs; @@ -2086,8 +2087,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, */ return false; } - ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT; - trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum), + WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT); + trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq")); /* @@ -2359,7 +2360,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp, rsp->qlen += rdp->qlen; rdp->n_cbs_orphaned += rdp->qlen; rdp->qlen_lazy = 0; - ACCESS_ONCE(rdp->qlen) = 0; + WRITE_ONCE(rdp->qlen, 0); } /* @@ -2580,7 +2581,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) /* If no callbacks are ready, just return. 
*/ if (!cpu_has_callbacks_ready_to_invoke(rdp)) { trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0); - trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), + trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist), need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); return; @@ -2636,7 +2637,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) } smp_mb(); /* List handling before counting for rcu_barrier(). */ rdp->qlen_lazy -= count_lazy; - ACCESS_ONCE(rdp->qlen) = rdp->qlen - count; + WRITE_ONCE(rdp->qlen, rdp->qlen - count); rdp->n_cbs_invoked += count; /* Reinstate batch limit if we have worked down the excess. */ @@ -2793,7 +2794,7 @@ static void force_quiescent_state(struct rcu_state *rsp) /* Funnel through hierarchy to reduce memory contention. */ rnp = __this_cpu_read(rsp->rda->mynode); for (; rnp != NULL; rnp = rnp->parent) { - ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || + ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || !raw_spin_trylock(&rnp->fqslock); if (rnp_old != NULL) raw_spin_unlock(&rnp_old->fqslock); @@ -2809,13 +2810,12 @@ static void force_quiescent_state(struct rcu_state *rsp) raw_spin_lock_irqsave(&rnp_old->lock, flags); smp_mb__after_unlock_lock(); raw_spin_unlock(&rnp_old->fqslock); - if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { rsp->n_force_qs_lh++; raw_spin_unlock_irqrestore(&rnp_old->lock, flags); return; /* Someone beat us to it. */ } - ACCESS_ONCE(rsp->gp_flags) = - ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS; + WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore(&rnp_old->lock, flags); rcu_gp_kthread_wake(rsp); } @@ -2881,7 +2881,7 @@ static void rcu_process_callbacks(struct softirq_action *unused) */ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { - if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active))) + if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; if (likely(!rsp->boost)) { rcu_do_batch(rsp, rdp); @@ -2972,7 +2972,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */ if (debug_rcu_head_queue(head)) { /* Probable double call_rcu(), so leak the callback. */ - ACCESS_ONCE(head->func) = rcu_leak_callback; + WRITE_ONCE(head->func, rcu_leak_callback); WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n"); return; } @@ -3011,7 +3011,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), if (!likely(rdp->nxtlist)) init_default_callback_list(rdp); } - ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1; + WRITE_ONCE(rdp->qlen, rdp->qlen + 1); if (lazy) rdp->qlen_lazy++; else @@ -3450,14 +3450,14 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) } /* Has another RCU grace period completed? */ - if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ + if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ rdp->n_rp_gp_completed++; return 1; } /* Has a new RCU grace period started? 
*/ - if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum || - unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */ + if (READ_ONCE(rnp->gpnum) != rdp->gpnum || + unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */ rdp->n_rp_gp_started++; return 1; } @@ -3564,7 +3564,7 @@ static void _rcu_barrier(struct rcu_state *rsp) { int cpu; struct rcu_data *rdp; - unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); + unsigned long snap = READ_ONCE(rsp->n_barrier_done); unsigned long snap_done; _rcu_barrier_trace(rsp, "Begin", -1, snap); @@ -3606,10 +3606,10 @@ static void _rcu_barrier(struct rcu_state *rsp) /* * Increment ->n_barrier_done to avoid duplicate work. Use - * ACCESS_ONCE() to prevent the compiler from speculating + * WRITE_ONCE() to prevent the compiler from speculating * the increment to precede the early-exit check. */ - ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1; + WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1); WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1); _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done); smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */ @@ -3645,7 +3645,7 @@ static void _rcu_barrier(struct rcu_state *rsp) __call_rcu(&rdp->barrier_head, rcu_barrier_callback, rsp, cpu, 0); } - } else if (ACCESS_ONCE(rdp->qlen)) { + } else if (READ_ONCE(rdp->qlen)) { _rcu_barrier_trace(rsp, "OnlineQ", cpu, rsp->n_barrier_done); smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); @@ -3665,7 +3665,7 @@ static void _rcu_barrier(struct rcu_state *rsp) /* Increment ->n_barrier_done to prevent duplicate work. */ smp_mb(); /* Keep increment after above mechanism. */ - ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1; + WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1); WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0); _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done); smp_mb(); /* Keep increment before caller's subsequent code. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 8c0ec0f5a027..58b1ebdc4387 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -570,7 +570,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp) static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) { return !rcu_preempted_readers_exp(rnp) && - ACCESS_ONCE(rnp->expmask) == 0; + READ_ONCE(rnp->expmask) == 0; } /* @@ -716,7 +716,7 @@ void synchronize_rcu_expedited(void) int trycount = 0; smp_mb(); /* Caller's modifications seen first by other CPUs. */ - snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; + snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1; smp_mb(); /* Above access cannot bleed into critical section. */ /* @@ -740,7 +740,7 @@ void synchronize_rcu_expedited(void) */ while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { if (ULONG_CMP_LT(snap, - ACCESS_ONCE(sync_rcu_preempt_exp_count))) { + READ_ONCE(sync_rcu_preempt_exp_count))) { put_online_cpus(); goto mb_ret; /* Others did our work for us. */ } @@ -752,7 +752,7 @@ void synchronize_rcu_expedited(void) return; } } - if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) { + if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) { put_online_cpus(); goto unlock_mb_ret; /* Others did our work for us. */ } @@ -780,8 +780,7 @@ void synchronize_rcu_expedited(void) /* Clean up and exit. */ smp_mb(); /* ensure expedited GP seen before counter increment. 
*/ - ACCESS_ONCE(sync_rcu_preempt_exp_count) = - sync_rcu_preempt_exp_count + 1; + WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1); unlock_mb_ret: mutex_unlock(&sync_rcu_preempt_exp_mutex); mb_ret: @@ -994,8 +993,8 @@ static int rcu_boost(struct rcu_node *rnp) struct task_struct *t; struct list_head *tb; - if (ACCESS_ONCE(rnp->exp_tasks) == NULL && - ACCESS_ONCE(rnp->boost_tasks) == NULL) + if (READ_ONCE(rnp->exp_tasks) == NULL && + READ_ONCE(rnp->boost_tasks) == NULL) return 0; /* Nothing left to boost. */ raw_spin_lock_irqsave(&rnp->lock, flags); @@ -1048,8 +1047,8 @@ static int rcu_boost(struct rcu_node *rnp) rt_mutex_lock(&rnp->boost_mtx); rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ - return ACCESS_ONCE(rnp->exp_tasks) != NULL || - ACCESS_ONCE(rnp->boost_tasks) != NULL; + return READ_ONCE(rnp->exp_tasks) != NULL || + READ_ONCE(rnp->boost_tasks) != NULL; } /* @@ -1462,7 +1461,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) * callbacks not yet ready to invoke. */ if ((rdp->completed != rnp->completed || - unlikely(ACCESS_ONCE(rdp->gpwrap))) && + unlikely(READ_ONCE(rdp->gpwrap))) && rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL]) note_gp_changes(rsp, rdp); @@ -1534,7 +1533,7 @@ static void rcu_prepare_for_idle(void) int tne; /* Handle nohz enablement switches conservatively. */ - tne = ACCESS_ONCE(tick_nohz_active); + tne = READ_ONCE(tick_nohz_active); if (tne != rdtp->tick_nohz_enabled_snap) { if (rcu_cpu_has_callbacks(NULL)) invoke_rcu_core(); /* force nohz to see update. */ @@ -1760,7 +1759,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu) atomic_read(&rdtp->dynticks) & 0xfff, rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting, rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), - ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, + READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, fast_no_hz); } @@ -1898,11 +1897,11 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) { struct rcu_data *rdp_leader = rdp->nocb_leader; - if (!ACCESS_ONCE(rdp_leader->nocb_kthread)) + if (!READ_ONCE(rdp_leader->nocb_kthread)) return; - if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) { + if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { /* Prior smp_mb__after_atomic() orders against prior enqueue. */ - ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false; + WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); wake_up(&rdp_leader->nocb_wq); } } @@ -1934,14 +1933,14 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu) ret = atomic_long_read(&rdp->nocb_q_count); #ifdef CONFIG_PROVE_RCU - rhp = ACCESS_ONCE(rdp->nocb_head); + rhp = READ_ONCE(rdp->nocb_head); if (!rhp) - rhp = ACCESS_ONCE(rdp->nocb_gp_head); + rhp = READ_ONCE(rdp->nocb_gp_head); if (!rhp) - rhp = ACCESS_ONCE(rdp->nocb_follower_head); + rhp = READ_ONCE(rdp->nocb_follower_head); /* Having no rcuo kthread but CBs after scheduler starts is bad! */ - if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp && + if (!READ_ONCE(rdp->nocb_kthread) && rhp && rcu_scheduler_fully_active) { /* RCU callback enqueued before CPU first came online??? */ pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n", @@ -1975,12 +1974,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp, atomic_long_add(rhcount, &rdp->nocb_q_count); /* rcu_barrier() relies on ->nocb_q_count add before xchg. 
*/ old_rhpp = xchg(&rdp->nocb_tail, rhtp); - ACCESS_ONCE(*old_rhpp) = rhp; + WRITE_ONCE(*old_rhpp, rhp); atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy); smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */ /* If we are not being polled and there is a kthread, awaken it ... */ - t = ACCESS_ONCE(rdp->nocb_kthread); + t = READ_ONCE(rdp->nocb_kthread); if (rcu_nocb_poll || !t) { trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNotPoll")); @@ -2118,7 +2117,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) for (;;) { wait_event_interruptible( rnp->nocb_gp_wq[c & 0x1], - (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); + (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); if (likely(d)) break; WARN_ON(signal_pending(current)); @@ -2145,7 +2144,7 @@ wait_again: if (!rcu_nocb_poll) { trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); wait_event_interruptible(my_rdp->nocb_wq, - !ACCESS_ONCE(my_rdp->nocb_leader_sleep)); + !READ_ONCE(my_rdp->nocb_leader_sleep)); /* Memory barrier handled by smp_mb() calls below and repoll. */ } else if (firsttime) { firsttime = false; /* Don't drown trace log with "Poll"! */ @@ -2159,12 +2158,12 @@ wait_again: */ gotcbs = false; for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { - rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head); + rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head); if (!rdp->nocb_gp_head) continue; /* No CBs here, try next follower. */ /* Move callbacks to wait-for-GP list, which is empty. */ - ACCESS_ONCE(rdp->nocb_head) = NULL; + WRITE_ONCE(rdp->nocb_head, NULL); rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head); gotcbs = true; } @@ -2184,7 +2183,7 @@ wait_again: my_rdp->nocb_leader_sleep = true; smp_mb(); /* Ensure _sleep true before scan. */ for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) - if (ACCESS_ONCE(rdp->nocb_head)) { + if (READ_ONCE(rdp->nocb_head)) { /* Found CB, so short-circuit next wait. */ my_rdp->nocb_leader_sleep = false; break; @@ -2205,7 +2204,7 @@ wait_again: /* Each pass through the following loop wakes a follower, if needed. */ for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { - if (ACCESS_ONCE(rdp->nocb_head)) + if (READ_ONCE(rdp->nocb_head)) my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ if (!rdp->nocb_gp_head) continue; /* No CBs, so no need to wake follower. */ @@ -2241,7 +2240,7 @@ static void nocb_follower_wait(struct rcu_data *rdp) trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "FollowerSleep"); wait_event_interruptible(rdp->nocb_wq, - ACCESS_ONCE(rdp->nocb_follower_head)); + READ_ONCE(rdp->nocb_follower_head)); } else if (firsttime) { /* Don't drown trace log with "Poll"! */ firsttime = false; @@ -2282,10 +2281,10 @@ static int rcu_nocb_kthread(void *arg) nocb_follower_wait(rdp); /* Pull the ready-to-invoke callbacks onto local list. */ - list = ACCESS_ONCE(rdp->nocb_follower_head); + list = READ_ONCE(rdp->nocb_follower_head); BUG_ON(!list); trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty"); - ACCESS_ONCE(rdp->nocb_follower_head) = NULL; + WRITE_ONCE(rdp->nocb_follower_head, NULL); tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head); /* Each pass through the following loop invokes a callback. */ @@ -2324,7 +2323,7 @@ static int rcu_nocb_kthread(void *arg) /* Is a deferred wakeup of rcu_nocb_kthread() required? */ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) { - return ACCESS_ONCE(rdp->nocb_defer_wakeup); + return READ_ONCE(rdp->nocb_defer_wakeup); } /* Do a deferred wakeup of rcu_nocb_kthread(). 
*/ @@ -2334,8 +2333,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) if (!rcu_nocb_need_deferred_wakeup(rdp)) return; - ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup); - ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT; + ndw = READ_ONCE(rdp->nocb_defer_wakeup); + WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT); wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE); trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake")); } @@ -2448,7 +2447,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu) t = kthread_run(rcu_nocb_kthread, rdp_spawn, "rcuo%c/%d", rsp->abbr, cpu); BUG_ON(IS_ERR(t)); - ACCESS_ONCE(rdp_spawn->nocb_kthread) = t; + WRITE_ONCE(rdp_spawn->nocb_kthread, t); } /* @@ -2663,7 +2662,7 @@ static void rcu_sysidle_enter(int irq) /* Record start of fully idle period. */ j = jiffies; - ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; + WRITE_ONCE(rdtp->dynticks_idle_jiffies, j); smp_mb__before_atomic(); atomic_inc(&rdtp->dynticks_idle); smp_mb__after_atomic(); @@ -2681,7 +2680,7 @@ static void rcu_sysidle_enter(int irq) */ void rcu_sysidle_force_exit(void) { - int oldstate = ACCESS_ONCE(full_sysidle_state); + int oldstate = READ_ONCE(full_sysidle_state); int newoldstate; /* @@ -2794,7 +2793,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle, smp_mb(); /* Read counters before timestamps. */ /* Pick up timestamps. */ - j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies); + j = READ_ONCE(rdtp->dynticks_idle_jiffies); /* If this CPU entered idle more recently, update maxj timestamp. */ if (ULONG_CMP_LT(*maxj, j)) *maxj = j; @@ -2831,11 +2830,11 @@ static unsigned long rcu_sysidle_delay(void) static void rcu_sysidle(unsigned long j) { /* Check the current state. */ - switch (ACCESS_ONCE(full_sysidle_state)) { + switch (READ_ONCE(full_sysidle_state)) { case RCU_SYSIDLE_NOT: /* First time all are idle, so note a short idle period. */ - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT; + WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT); break; case RCU_SYSIDLE_SHORT: @@ -2873,7 +2872,7 @@ static void rcu_sysidle_cancel(void) { smp_mb(); if (full_sysidle_state > RCU_SYSIDLE_SHORT) - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT; + WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT); } /* @@ -2925,7 +2924,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp) smp_mb(); /* grace period precedes setting inuse. 
*/ rshp = container_of(rhp, struct rcu_sysidle_head, rh); - ACCESS_ONCE(rshp->inuse) = 0; + WRITE_ONCE(rshp->inuse, 0); } /* @@ -2936,7 +2935,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp) bool rcu_sys_is_idle(void) { static struct rcu_sysidle_head rsh; - int rss = ACCESS_ONCE(full_sysidle_state); + int rss = READ_ONCE(full_sysidle_state); if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu)) return false; @@ -2964,7 +2963,7 @@ bool rcu_sys_is_idle(void) } rcu_sysidle_report(rcu_state_p, isidle, maxj, false); oldrss = rss; - rss = ACCESS_ONCE(full_sysidle_state); + rss = READ_ONCE(full_sysidle_state); } } @@ -3048,7 +3047,7 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp) #ifdef CONFIG_NO_HZ_FULL if (tick_nohz_full_cpu(smp_processor_id()) && (!rcu_gp_in_progress(rsp) || - ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ))) + ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ))) return 1; #endif /* #ifdef CONFIG_NO_HZ_FULL */ return 0; @@ -3077,7 +3076,7 @@ static void rcu_bind_gp_kthread(void) static void rcu_dynticks_task_enter(void) { #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) - ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id(); + WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ } @@ -3085,6 +3084,6 @@ static void rcu_dynticks_task_enter(void) static void rcu_dynticks_task_exit(void) { #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) - ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1; + WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */ } diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c index f92361efd0f5..3ea7ffc7d5c4 100644 --- a/kernel/rcu/tree_trace.c +++ b/kernel/rcu/tree_trace.c @@ -277,7 +277,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n", rsp->n_force_qs, rsp->n_force_qs_ngp, rsp->n_force_qs - rsp->n_force_qs_ngp, - ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); + READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) { if (rnp->level != level) { seq_puts(m, "\n"); @@ -323,8 +323,8 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp) struct rcu_node *rnp = &rsp->node[0]; raw_spin_lock_irqsave(&rnp->lock, flags); - completed = ACCESS_ONCE(rsp->completed); - gpnum = ACCESS_ONCE(rsp->gpnum); + completed = READ_ONCE(rsp->completed); + gpnum = READ_ONCE(rsp->gpnum); if (completed == gpnum) gpage = 0; else diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 1f133350da01..afaecb7a799a 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -150,14 +150,14 @@ void __rcu_read_unlock(void) barrier(); /* critical section before exit code. 
*/ t->rcu_read_lock_nesting = INT_MIN; barrier(); /* assign before ->rcu_read_unlock_special load */ - if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s))) + if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) rcu_read_unlock_special(t); barrier(); /* ->rcu_read_unlock_special load before assign */ t->rcu_read_lock_nesting = 0; } #ifdef CONFIG_PROVE_LOCKING { - int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting); + int rrln = READ_ONCE(t->rcu_read_lock_nesting); WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2); } @@ -389,17 +389,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644); int rcu_jiffies_till_stall_check(void) { - int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout); + int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); /* * Limit check must be consistent with the Kconfig limits * for CONFIG_RCU_CPU_STALL_TIMEOUT. */ if (till_stall_check < 3) { - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3; + WRITE_ONCE(rcu_cpu_stall_timeout, 3); till_stall_check = 3; } else if (till_stall_check > 300) { - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300; + WRITE_ONCE(rcu_cpu_stall_timeout, 300); till_stall_check = 300; } return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; @@ -550,12 +550,12 @@ static void check_holdout_task(struct task_struct *t, { int cpu; - if (!ACCESS_ONCE(t->rcu_tasks_holdout) || - t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) || - !ACCESS_ONCE(t->on_rq) || + if (!READ_ONCE(t->rcu_tasks_holdout) || + t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || + !READ_ONCE(t->on_rq) || (IS_ENABLED(CONFIG_NO_HZ_FULL) && !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { - ACCESS_ONCE(t->rcu_tasks_holdout) = false; + WRITE_ONCE(t->rcu_tasks_holdout, false); list_del_init(&t->rcu_tasks_holdout_list); put_task_struct(t); return; @@ -639,11 +639,11 @@ static int __noreturn rcu_tasks_kthread(void *arg) */ rcu_read_lock(); for_each_process_thread(g, t) { - if (t != current && ACCESS_ONCE(t->on_rq) && + if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { get_task_struct(t); - t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw); - ACCESS_ONCE(t->rcu_tasks_holdout) = true; + t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); + WRITE_ONCE(t->rcu_tasks_holdout, true); list_add(&t->rcu_tasks_holdout_list, &rcu_tasks_holdouts); } @@ -672,7 +672,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) struct task_struct *t1; schedule_timeout_interruptible(HZ); - rtst = ACCESS_ONCE(rcu_task_stall_timeout); + rtst = READ_ONCE(rcu_task_stall_timeout); needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); if (needreport) @@ -728,7 +728,7 @@ static void rcu_spawn_tasks_kthread(void) static struct task_struct *rcu_tasks_kthread_ptr; struct task_struct *t; - if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) { + if (READ_ONCE(rcu_tasks_kthread_ptr)) { smp_mb(); /* Ensure caller sees full kthread. */ return; } @@ -740,7 +740,7 @@ static void rcu_spawn_tasks_kthread(void) t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread"); BUG_ON(IS_ERR(t)); smp_mb(); /* Ensure others see full kthread. 
*/ - ACCESS_ONCE(rcu_tasks_kthread_ptr) = t; + WRITE_ONCE(rcu_tasks_kthread_ptr, t); mutex_unlock(&rcu_tasks_kthread_mutex); } diff --git a/kernel/torture.c b/kernel/torture.c index dd70993c266c..3e4840633d3e 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -409,7 +409,7 @@ static void (*torture_shutdown_hook)(void); */ void torture_shutdown_absorb(const char *title) { - while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { + while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { pr_notice("torture thread %s parking due to system shutdown\n", title); schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); @@ -480,9 +480,9 @@ static int torture_shutdown_notify(struct notifier_block *unused1, unsigned long unused2, void *unused3) { mutex_lock(&fullstop_mutex); - if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) { + if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) { VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected"); - ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN; + WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN); } else { pr_warn("Concurrent rmmod and shutdown illegal!\n"); } @@ -523,13 +523,13 @@ static int stutter; */ void stutter_wait(const char *title) { - while (ACCESS_ONCE(stutter_pause_test) || - (torture_runnable && !ACCESS_ONCE(*torture_runnable))) { + while (READ_ONCE(stutter_pause_test) || + (torture_runnable && !READ_ONCE(*torture_runnable))) { if (stutter_pause_test) - if (ACCESS_ONCE(stutter_pause_test) == 1) + if (READ_ONCE(stutter_pause_test) == 1) schedule_timeout_interruptible(1); else - while (ACCESS_ONCE(stutter_pause_test)) + while (READ_ONCE(stutter_pause_test)) cond_resched(); else schedule_timeout_interruptible(round_jiffies_relative(HZ)); @@ -549,14 +549,14 @@ static int torture_stutter(void *arg) if (!torture_must_stop()) { if (stutter > 1) { schedule_timeout_interruptible(stutter - 1); - ACCESS_ONCE(stutter_pause_test) = 2; + WRITE_ONCE(stutter_pause_test, 2); } schedule_timeout_interruptible(1); - ACCESS_ONCE(stutter_pause_test) = 1; + WRITE_ONCE(stutter_pause_test, 1); } if (!torture_must_stop()) schedule_timeout_interruptible(stutter); - ACCESS_ONCE(stutter_pause_test) = 0; + WRITE_ONCE(stutter_pause_test, 0); torture_shutdown_absorb("torture_stutter"); } while (!torture_must_stop()); torture_kthread_stopping("torture_stutter"); @@ -642,13 +642,13 @@ EXPORT_SYMBOL_GPL(torture_init_end); bool torture_cleanup_begin(void) { mutex_lock(&fullstop_mutex); - if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { + if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { pr_warn("Concurrent rmmod and shutdown illegal!\n"); mutex_unlock(&fullstop_mutex); schedule_timeout_uninterruptible(10); return true; } - ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD; + WRITE_ONCE(fullstop, FULLSTOP_RMMOD); mutex_unlock(&fullstop_mutex); torture_shutdown_cleanup(); torture_shuffle_cleanup(); @@ -681,7 +681,7 @@ EXPORT_SYMBOL_GPL(torture_must_stop); */ bool torture_must_stop_irq(void) { - return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP; + return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP; } EXPORT_SYMBOL_GPL(torture_must_stop_irq); -- cgit v1.2.3 From e90328b87eeff91574c18815bd6991b03bd6ecc0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 19 Apr 2015 18:16:02 -0700 Subject: mce: Stop using array-index-based RCU primitives Because mce is arch-specific x86 code, there is little or no performance benefit of using rcu_dereference_index_check() over using smp_load_acquire(). 
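For illustration, the conversion amounts to roughly the following (a
sketch rather than the literal patch: the lockdep condition is
simplified here, and the actual change wraps it all in the
rcu_dereference_check_mce() macro):

	/* Before: array-index-based RCU primitive. */
	next = rcu_dereference_index_check(mcelog.next,
					   rcu_read_lock_sched_held());

	/* After: an acquire load provides the needed ordering on all
	 * architectures, and the lockdep diagnostic is retained as a
	 * separate assertion. */
	rcu_lockdep_assert(rcu_read_lock_sched_held(),
			   "suspicious mcelog.next usage");
	next = smp_load_acquire(&mcelog.next);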
It also turns out that mce is the only place that array-index-based RCU is used, and it would be convenient to drop this portion of the RCU API. This patch therefore changes rcu_dereference_index_check() uses to smp_load_acquire(), but keeping the lockdep diagnostics, and also changes rcu_access_index() uses to READ_ONCE(). Signed-off-by: Paul E. McKenney Cc: linux-edac@vger.kernel.org Cc: Tony Luck Acked-by: Borislav Petkov --- arch/x86/kernel/cpu/mcheck/mce.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e535533d5ab8..1fd04a02d90a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -53,9 +53,12 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); #define rcu_dereference_check_mce(p) \ - rcu_dereference_index_check((p), \ - rcu_read_lock_sched_held() || \ - lockdep_is_held(&mce_chrdev_read_mutex)) +({ \ + rcu_lockdep_assert(rcu_read_lock_sched_held() || \ + lockdep_is_held(&mce_chrdev_read_mutex), \ + "suspicious rcu_dereference_check_mce() usage"); \ + smp_load_acquire(&(p)); \ +}) #define CREATE_TRACE_POINTS #include @@ -1884,7 +1887,7 @@ out: static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait) { poll_wait(file, &mce_chrdev_wait, wait); - if (rcu_access_index(mcelog.next)) + if (READ_ONCE(mcelog.next)) return POLLIN | POLLRDNORM; if (!mce_apei_read_done && apei_check_mce()) return POLLIN | POLLRDNORM; -- cgit v1.2.3 From 1ebee8017d84ec8a0ba893cf7b8be3f70ead088b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 19 Apr 2015 18:21:47 -0700 Subject: rcu: Eliminate array-index-based RCU primitives Now that rcu_access_index() and rcu_dereference_index_check() are no longer used, the commit removes them from the RCU API. This means that RCU's data dependencies now involve only pointers, give or take the occasional cast to and then back from an integer type to do pointer arithmetic. This in turn eliminates the need for a number of operations on values carrying RCU data dependencies. Signed-off-by: Paul E. McKenney Cc: linux-edac@vger.kernel.org Cc: Tony Luck Acked-by: Borislav Petkov --- include/linux/rcupdate.h | 50 ------------------------------------------------ 1 file changed, 50 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 87bb0eee665b..b97842ff71d2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -628,21 +628,6 @@ static inline void rcu_preempt_sleep_check(void) ((typeof(*p) __force __kernel *)(p)); \ }) -#define __rcu_access_index(p, space) \ -({ \ - typeof(p) _________p1 = READ_ONCE(p); \ - rcu_dereference_sparse(p, space); \ - (_________p1); \ -}) -#define __rcu_dereference_index_check(p, c) \ -({ \ - /* Dependency order vs. p above. */ \ - typeof(p) _________p1 = lockless_dereference(p); \ - rcu_lockdep_assert(c, \ - "suspicious rcu_dereference_index_check() usage"); \ - (_________p1); \ -}) - /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. @@ -786,41 +771,6 @@ static inline void rcu_preempt_sleep_check(void) */ #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) -/** - * rcu_access_index() - fetch RCU index with no dereferencing - * @p: The index to read - * - * Return the value of the specified RCU-protected index, but omit the - * smp_read_barrier_depends() and keep the READ_ONCE(). 
This is useful - * when the value of this index is accessed, but the index is not - * dereferenced, for example, when testing an RCU-protected index against - * -1. Although rcu_access_index() may also be used in cases where - * update-side locks prevent the value of the index from changing, you - * should instead use rcu_dereference_index_protected() for this use case. - */ -#define rcu_access_index(p) __rcu_access_index((p), __rcu) - -/** - * rcu_dereference_index_check() - rcu_dereference for indices with debug checking - * @p: The pointer to read, prior to dereferencing - * @c: The conditions under which the dereference will take place - * - * Similar to rcu_dereference_check(), but omits the sparse checking. - * This allows rcu_dereference_index_check() to be used on integers, - * which can then be used as array indices. Attempting to use - * rcu_dereference_check() on an integer will give compiler warnings - * because the sparse address-space mechanism relies on dereferencing - * the RCU-protected pointer. Dereferencing integers is not something - * that even gcc will put up with. - * - * Note that this function does not implicitly check for RCU read-side - * critical sections. If this function gains lots of uses, it might - * make sense to provide versions for each flavor of RCU, but it does - * not make sense as of early 2010. - */ -#define rcu_dereference_index_check(p, c) \ - __rcu_dereference_index_check((p), (c)) - /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing -- cgit v1.2.3 From cf9fbf8017e2ab5cb33b6602b626f7f005718124 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 06:09:27 -0700 Subject: documentation: RCU-protected array indexes no longer supported Signed-off-by: Paul E. McKenney --- Documentation/RCU/arrayRCU.txt | 20 ++++++++++++++++---- Documentation/RCU/lockdep.txt | 10 ---------- Documentation/RCU/rcu_dereference.txt | 33 ++++++++++++--------------------- Documentation/RCU/whatisRCU.txt | 2 -- 4 files changed, 28 insertions(+), 37 deletions(-) diff --git a/Documentation/RCU/arrayRCU.txt b/Documentation/RCU/arrayRCU.txt index 453ebe6953ee..f05a9afb2c39 100644 --- a/Documentation/RCU/arrayRCU.txt +++ b/Documentation/RCU/arrayRCU.txt @@ -10,7 +10,19 @@ also be used to protect arrays. Three situations are as follows: 3. Resizeable Arrays -Each of these situations are discussed below. +Each of these three situations involves an RCU-protected pointer to an +array that is separately indexed. It might be tempting to consider use +of RCU to instead protect the index into an array, however, this use +case is -not- supported. The problem with RCU-protected indexes into +arrays is that compilers can play way too many optimization games with +integers, which means that the rules governing handling of these indexes +are far more trouble than they are worth. If RCU-protected indexes into +arrays prove to be particularly valuable (which they have not thus far), +explicit cooperation from the compiler will be required to permit them +to be safely used. + +That aside, each of the three RCU-protected pointer situations are +described in the following sections. Situation 1: Hash Tables @@ -36,9 +48,9 @@ Quick Quiz: Why is it so important that updates be rare when Situation 3: Resizeable Arrays Use of RCU for resizeable arrays is demonstrated by the grow_ary() -function used by the System V IPC code. 
The array is used to map from -semaphore, message-queue, and shared-memory IDs to the data structure -that represents the corresponding IPC construct. The grow_ary() +function formerly used by the System V IPC code. The array is used +to map from semaphore, message-queue, and shared-memory IDs to the data +structure that represents the corresponding IPC construct. The grow_ary() function does not acquire any locks; instead its caller must hold the ids->sem semaphore. diff --git a/Documentation/RCU/lockdep.txt b/Documentation/RCU/lockdep.txt index cd83d2348fef..da51d3068850 100644 --- a/Documentation/RCU/lockdep.txt +++ b/Documentation/RCU/lockdep.txt @@ -47,11 +47,6 @@ checking of rcu_dereference() primitives: Use explicit check expression "c" along with srcu_read_lock_held()(). This is useful in code that is invoked by both SRCU readers and updaters. - rcu_dereference_index_check(p, c): - Use explicit check expression "c", but the caller - must supply one of the rcu_read_lock_held() functions. - This is useful in code that uses RCU-protected arrays - that is invoked by both RCU readers and updaters. rcu_dereference_raw(p): Don't check. (Use sparingly, if at all.) rcu_dereference_protected(p, c): @@ -64,11 +59,6 @@ checking of rcu_dereference() primitives: but retain the compiler constraints that prevent duplicating or coalescsing. This is useful when when testing the value of the pointer itself, for example, against NULL. - rcu_access_index(idx): - Return the value of the index and omit all barriers, but - retain the compiler constraints that prevent duplicating - or coalescsing. This is useful when when testing the - value of the index itself, for example, against -1. The rcu_dereference_check() check expression can be any boolean expression, but would normally include a lockdep expression. However, diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt index ceb05da5a5ac..66864d2a7f60 100644 --- a/Documentation/RCU/rcu_dereference.txt +++ b/Documentation/RCU/rcu_dereference.txt @@ -25,17 +25,6 @@ o You must use one of the rcu_dereference() family of primitives for an example where the compiler can in fact deduce the exact value of the pointer, and thus cause misordering. -o Do not use single-element RCU-protected arrays. The compiler - is within its right to assume that the value of an index into - such an array must necessarily evaluate to zero. The compiler - could then substitute the constant zero for the computation, so - that the array index no longer depended on the value returned - by rcu_dereference(). If the array index no longer depends - on rcu_dereference(), then both the compiler and the CPU - are within their rights to order the array access before the - rcu_dereference(), which can cause the array access to return - garbage. - o Avoid cancellation when using the "+" and "-" infix arithmetic operators. For example, for a given variable "x", avoid "(x-x)". There are similar arithmetic pitfalls from other @@ -76,14 +65,15 @@ o Do not use the results from the boolean "&&" and "||" when dereferencing. For example, the following (rather improbable) code is buggy: - int a[2]; - int index; - int force_zero_index = 1; + int *p; + int *q; ... - r1 = rcu_dereference(i1) - r2 = a[r1 && force_zero_index]; /* BUGGY!!! */ + p = rcu_dereference(gp) + q = &global_q; + q += p != &oom_p1 && p != &oom_p2; + r1 = *q; /* BUGGY!!! */ The reason this is buggy is that "&&" and "||" are often compiled using branches. 
While weak-memory machines such as ARM or PowerPC @@ -94,14 +84,15 @@ o Do not use the results from relational operators ("==", "!=", ">", ">=", "<", or "<=") when dereferencing. For example, the following (quite strange) code is buggy: - int a[2]; - int index; - int flip_index = 0; + int *p; + int *q; ... - r1 = rcu_dereference(i1) - r2 = a[r1 != flip_index]; /* BUGGY!!! */ + p = rcu_dereference(gp) + q = &global_q; + q += p > &oom_p; + r1 = *q; /* BUGGY!!! */ As before, the reason this is buggy is that relational operators are often compiled using branches. And as before, although diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 88dfce182f66..b201d4cd77f9 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -879,9 +879,7 @@ SRCU: Initialization/cleanup All: lockdep-checked RCU-protected pointer access - rcu_access_index rcu_access_pointer - rcu_dereference_index_check rcu_dereference_raw rcu_lockdep_assert rcu_sleep_check -- cgit v1.2.3 From 29c6820f5164b1432b1ba60b38175cb24367dd98 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Apr 2015 14:05:25 -0700 Subject: mce: mce_chrdev_write() can be static Signed-off-by: Fengguang Wu Signed-off-by: Paul E. McKenney --- arch/x86/kernel/cpu/mcheck/mce.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 1fd04a02d90a..d4298feaa6fb 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1932,8 +1932,8 @@ void register_mce_write_callback(ssize_t (*fn)(struct file *filp, } EXPORT_SYMBOL_GPL(register_mce_write_callback); -ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, - size_t usize, loff_t *off) +static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf, + size_t usize, loff_t *off) { if (mce_write) return mce_write(filp, ubuf, usize, off); -- cgit v1.2.3 From d956028e99b30726b0bce0ca684b40b1ad67b514 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 31 Mar 2015 09:39:41 +0100 Subject: documentation: memory-barriers: Fix smp_mb__before_spinlock() semantics Our current documentation claims that, when followed by an ACQUIRE, smp_mb__before_spinlock() orders prior loads against subsequent loads and stores, which isn't the intent. This commit therefore fixes the documentation to state that this sequence orders only prior stores against subsequent loads and stores. In addition, the original intent of smp_mb__before_spinlock() was to only order prior loads against subsequent stores, however, people have started using it as if it ordered prior loads against subsequent loads and stores. This commit therefore also updates smp_mb__before_spinlock()'s header comment to reflect this new reality. Cc: Oleg Nesterov Cc: "Paul E. McKenney" Cc: Peter Zijlstra Signed-off-by: Will Deacon Signed-off-by: Paul E. McKenney --- Documentation/memory-barriers.txt | 7 +++---- include/linux/spinlock.h | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index f95746189b5d..1f362fd2ecb4 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1784,10 +1784,9 @@ for each construct. These operations all imply certain barriers: Memory operations issued before the ACQUIRE may be completed after the ACQUIRE operation has completed. 
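     For example, a minimal sketch of this semi-permeability (x, y, and
     mylock are assumed shared; not part of the patch below):

	WRITE_ONCE(x, 1);	/* may be reordered into the critical section */
	spin_lock(&mylock);	/* ACQUIRE */
	r1 = READ_ONCE(y);	/* cannot be reordered before the ACQUIRE */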
An smp_mb__before_spinlock(), - combined with a following ACQUIRE, orders prior loads against - subsequent loads and stores and also orders prior stores against - subsequent stores. Note that this is weaker than smp_mb()! The - smp_mb__before_spinlock() primitive is free on many architectures. + combined with a following ACQUIRE, orders prior stores against + subsequent loads and stores. Note that this is weaker than smp_mb()! + The smp_mb__before_spinlock() primitive is free on many architectures. (2) RELEASE operation implication: diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3e18379dfa6f..0063b24b4f36 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -120,7 +120,7 @@ do { \ /* * Despite its name it doesn't necessarily has to be a full barrier. * It should only guarantee that a STORE before the critical section - * can not be reordered with a LOAD inside this section. + * can not be reordered with LOADs and STOREs inside this section. * spin_lock() is the one-way barrier, this LOAD can not escape out * of the region. So the default implementation simply ensures that * a STORE can not move into the critical section, smp_wmb() should -- cgit v1.2.3 From ee7c29be3695996536395f647e8a2ed6b1ab3a0d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 7 Apr 2015 12:45:41 -0700 Subject: documentation: Update rcu_dereference.txt based on WG21 discussions This commit provides another caveat for the care and feeding of pointers returned by rcu_dereference() that was pointed out in discussions within the C++ standards committee. Signed-off-by: Paul E. McKenney Reviewed-by: Mathieu Desnoyers --- Documentation/RCU/rcu_dereference.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt index ceb05da5a5ac..2d05c9241a33 100644 --- a/Documentation/RCU/rcu_dereference.txt +++ b/Documentation/RCU/rcu_dereference.txt @@ -193,6 +193,11 @@ o Be very careful about comparing pointers obtained from pointer. Note that the volatile cast in rcu_dereference() will normally prevent the compiler from knowing too much. + However, please note that if the compiler knows that the + pointer takes on only one of two values, a not-equal + comparison will provide exactly the information that the + compiler needs to deduce the value of the pointer. + o Disable any value-speculation optimizations that your compiler might provide, especially if you are making use of feedback-based optimizations that take data collected from prior runs. Such -- cgit v1.2.3 From ed38446424dd531f1b7a167677232a6d400d69d5 Mon Sep 17 00:00:00 2001 From: Milos Vyletel Date: Fri, 17 Apr 2015 16:38:04 +0200 Subject: documentation: State that rcu_dereference() reloads pointer Make a note stating that repeated calls of rcu_dereference() may not return the same pointer if update happens while in critical section. Reported-by: Jeff Haran Signed-off-by: Milos Vyletel Reviewed-by: Steven Rostedt Signed-off-by: Paul E. McKenney --- Documentation/RCU/whatisRCU.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 88dfce182f66..16622c9e86b5 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -256,7 +256,9 @@ rcu_dereference() If you are going to be fetching multiple fields from the RCU-protected structure, using the local variable is of course preferred. 
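	For example, the preferred pattern fetches the pointer once
	and then dereferences the local variable (a sketch, assuming
	gp is an RCU-protected pointer and the caller is within an
	RCU read-side critical section):

		p = rcu_dereference(gp);
		do_something(p->a, p->b);	/* one fetch, several fields */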
Repeated rcu_dereference() calls look - ugly and incur unnecessary overhead on Alpha CPUs. + ugly, do not guarantee that the same pointer will be returned + if an update happened while in the critical section, and incur + unnecessary overhead on Alpha CPUs. Note that the value returned by rcu_dereference() is valid only within the enclosing RCU read-side critical section. -- cgit v1.2.3 From e63c887cfed2077b2db29f27024d0a9f88151c40 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Mar 2015 12:56:43 -0800 Subject: rcu: Convert from rcu_preempt_state to *rcu_state_p It would be good to move more code from #ifdef to IS_ENABLED(), but that does not work if the body of the IS_ENABLED() "if" statement references a variable (such as rcu_preempt_state) that does not exist if the IS_ENABLED() Kconfig variable is not set. This commit therefore substitutes *rcu_state_p for all uses of rcu_preempt_state in kernel/rcu/tree_preempt.h, which should enable elimination of a few #ifdefs. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 58b1ebdc4387..9a04764dd239 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -150,7 +150,7 @@ static void rcu_preempt_note_context_switch(void) !t->rcu_read_unlock_special.b.blocked) { /* Possibly blocking in an RCU read-side critical section. */ - rdp = this_cpu_ptr(rcu_preempt_state.rda); + rdp = this_cpu_ptr(rcu_state_p->rda); rnp = rdp->mynode; raw_spin_lock_irqsave(&rnp->lock, flags); smp_mb__after_unlock_lock(); @@ -353,8 +353,7 @@ void rcu_read_unlock_special(struct task_struct *t) rnp->grplo, rnp->grphi, !!rnp->gp_tasks); - rcu_report_unblock_qs_rnp(&rcu_preempt_state, - rnp, flags); + rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags); } else { raw_spin_unlock_irqrestore(&rnp->lock, flags); } @@ -370,7 +369,7 @@ void rcu_read_unlock_special(struct task_struct *t) * then we need to report up the rcu_node hierarchy. 
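 * rcu_report_exp_rnp() then propagates the expedited quiescent state
 * up the rcu_node tree toward the root, awakening the task waiting
 * in synchronize_rcu_expedited() once the root is reached.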
*/ if (!empty_exp && empty_exp_now) - rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); + rcu_report_exp_rnp(rcu_state_p, rnp, true); } else { local_irq_restore(flags); } @@ -500,7 +499,7 @@ static void rcu_preempt_check_callbacks(void) static void rcu_preempt_do_callbacks(void) { - rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); + rcu_do_batch(rcu_state_p, this_cpu_ptr(&rcu_preempt_data)); } #endif /* #ifdef CONFIG_RCU_BOOST */ @@ -510,7 +509,7 @@ static void rcu_preempt_do_callbacks(void) */ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { - __call_rcu(head, func, &rcu_preempt_state, -1, 0); + __call_rcu(head, func, rcu_state_p, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu); @@ -711,7 +710,7 @@ sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp) void synchronize_rcu_expedited(void) { struct rcu_node *rnp; - struct rcu_state *rsp = &rcu_preempt_state; + struct rcu_state *rsp = rcu_state_p; unsigned long snap; int trycount = 0; @@ -798,7 +797,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); */ void rcu_barrier(void) { - _rcu_barrier(&rcu_preempt_state); + _rcu_barrier(rcu_state_p); } EXPORT_SYMBOL_GPL(rcu_barrier); @@ -807,7 +806,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier); */ static void __init __rcu_init_preempt(void) { - rcu_init_one(&rcu_preempt_state, &rcu_preempt_data); + rcu_init_one(rcu_state_p, &rcu_preempt_data); } /* @@ -1172,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct sched_param sp; struct task_struct *t; - if (&rcu_preempt_state != rsp) + if (rcu_state_p != rsp) return 0; if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) -- cgit v1.2.3 From 727b705baf7d091a9bc5494d7f1d9699b6932531 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 3 Mar 2015 14:49:26 -0800 Subject: rcu: Eliminate a few RCU_BOOST #ifdefs in favor of IS_ENABLED() This commit removes a few RCU_BOOST #ifdefs, replacing them with IS_ENABLED()-protected return statements. This relies on the optimizer to remove any resulting dead code. There are several other RCU_BOOST #ifdefs, however these rely on some per-CPU variables that are available only under RCU_BOOST. These might be converted later, if the simplification proves to outweigh the increase in memory footprint. One hoped-for advantage is more easily locating compiler errors in obscure combinations of Kconfig parameters. Signed-off-by: Paul E. McKenney Cc: --- kernel/rcu/tree.h | 2 -- kernel/rcu/tree_plugin.h | 35 ++++++++++++++++++++--------------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index a69d3dab2ec4..dd5ce4034875 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -170,7 +170,6 @@ struct rcu_node { /* if there is no such task. If there */ /* is no current expedited grace period, */ /* then there can cannot be any such task. */ -#ifdef CONFIG_RCU_BOOST struct list_head *boost_tasks; /* Pointer to first task that needs to be */ /* priority boosted, or NULL if no priority */ @@ -208,7 +207,6 @@ struct rcu_node { unsigned long n_balk_nos; /* Refused to boost: not sure why, though. */ /* This can happen due to race conditions. */ -#endif /* #ifdef CONFIG_RCU_BOOST */ #ifdef CONFIG_RCU_NOCB_CPU wait_queue_head_t nocb_gp_wq[2]; /* Place for rcu_nocb_kthread() to wait GP. 
*/ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 9a04764dd239..8f8142778684 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -43,7 +43,17 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); -#endif /* #ifdef CONFIG_RCU_BOOST */ +#else /* #ifdef CONFIG_RCU_BOOST */ + +/* + * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST, + * all uses are in dead code. Provide a definition to keep the compiler + * happy, but add WARN_ON_ONCE() to complain if used in the wrong place. + * This probably needs to be excluded from -rt builds. + */ +#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; }) + +#endif /* #else #ifdef CONFIG_RCU_BOOST */ #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ @@ -180,10 +190,9 @@ static void rcu_preempt_note_context_switch(void) if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); rnp->gp_tasks = &t->rcu_node_entry; -#ifdef CONFIG_RCU_BOOST - if (rnp->boost_tasks != NULL) + if (IS_ENABLED(CONFIG_RCU_BOOST) && + rnp->boost_tasks != NULL) rnp->boost_tasks = rnp->gp_tasks; -#endif /* #ifdef CONFIG_RCU_BOOST */ } else { list_add(&t->rcu_node_entry, &rnp->blkd_tasks); if (rnp->qsmask & rdp->grpmask) @@ -263,9 +272,7 @@ void rcu_read_unlock_special(struct task_struct *t) bool empty_exp_now; unsigned long flags; struct list_head *np; -#ifdef CONFIG_RCU_BOOST bool drop_boost_mutex = false; -#endif /* #ifdef CONFIG_RCU_BOOST */ struct rcu_node *rnp; union rcu_special special; @@ -331,12 +338,12 @@ void rcu_read_unlock_special(struct task_struct *t) rnp->gp_tasks = np; if (&t->rcu_node_entry == rnp->exp_tasks) rnp->exp_tasks = np; -#ifdef CONFIG_RCU_BOOST - if (&t->rcu_node_entry == rnp->boost_tasks) - rnp->boost_tasks = np; - /* Snapshot ->boost_mtx ownership with rcu_node lock held. */ - drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; -#endif /* #ifdef CONFIG_RCU_BOOST */ + if (IS_ENABLED(CONFIG_RCU_BOOST)) { + if (&t->rcu_node_entry == rnp->boost_tasks) + rnp->boost_tasks = np; + /* Snapshot ->boost_mtx ownership w/rnp->lock held. */ + drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; + } /* * If this was the last task on the current list, and if @@ -358,11 +365,9 @@ void rcu_read_unlock_special(struct task_struct *t) raw_spin_unlock_irqrestore(&rnp->lock, flags); } -#ifdef CONFIG_RCU_BOOST /* Unboost if we were boosted. */ - if (drop_boost_mutex) + if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) rt_mutex_unlock(&rnp->boost_mtx); -#endif /* #ifdef CONFIG_RCU_BOOST */ /* * If this was the last task on the expedited lists, -- cgit v1.2.3 From b28a7c016618e5e32e0703e3dd111dbba02715ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Mar 2015 07:39:27 -0800 Subject: rcu: Tell the compiler that rcu_state_p is immutable This commit adds a "const" tag to the declarations of rcu_state_p, which should allow the compiler to generate better code and also to catch erroneous assignments to this variable. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree_plugin.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0628df155970..f031700514dd 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -110,7 +110,7 @@ struct rcu_state sname##_state = { \ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); -static struct rcu_state *rcu_state_p; +static struct rcu_state *const rcu_state_p; LIST_HEAD(rcu_struct_flavors); /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 8f8142778684..18b057adc21a 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -100,7 +100,7 @@ static void __init rcu_bootup_announce_oddness(void) #ifdef CONFIG_PREEMPT_RCU RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); -static struct rcu_state *rcu_state_p = &rcu_preempt_state; +static struct rcu_state *const rcu_state_p = &rcu_preempt_state; static int rcu_preempted_readers_exp(struct rcu_node *rnp); static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, @@ -834,7 +834,7 @@ void exit_rcu(void) #else /* #ifdef CONFIG_PREEMPT_RCU */ -static struct rcu_state *rcu_state_p = &rcu_sched_state; +static struct rcu_state *const rcu_state_p = &rcu_sched_state; /* * Tell them what RCU they are running. -- cgit v1.2.3 From 2927a689e8ad5c12d6300b41e873d2b7957bc0e1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Mar 2015 07:53:04 -0800 Subject: rcu: Create an immutable rcu_data_p pointer to default rcu_data structure This commit creates an immutable rcu_data_p pointer that references rcu_preempt_data for TREE_PREEMPT_RCU builds and that references rcu_sched_data for TREE_RCU builds. This rcu_data_p pointer will enable more code to move from #ifdef to IS_ENABLED(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 + kernel/rcu/tree_plugin.h | 16 +++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f031700514dd..213f644d6fb1 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -111,6 +111,7 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); static struct rcu_state *const rcu_state_p; +static struct rcu_data __percpu *const rcu_data_p; LIST_HEAD(rcu_struct_flavors); /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. 
*/ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 18b057adc21a..5c0122f09ed0 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -101,6 +101,7 @@ static void __init rcu_bootup_announce_oddness(void) RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu); static struct rcu_state *const rcu_state_p = &rcu_preempt_state; +static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data; static int rcu_preempted_readers_exp(struct rcu_node *rnp); static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, @@ -126,11 +127,11 @@ static void __init rcu_bootup_announce(void) */ static void rcu_preempt_qs(void) { - if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) { + if (!__this_cpu_read(rcu_data_p->passed_quiesce)) { trace_rcu_grace_period(TPS("rcu_preempt"), - __this_cpu_read(rcu_preempt_data.gpnum), + __this_cpu_read(rcu_data_p->gpnum), TPS("cpuqs")); - __this_cpu_write(rcu_preempt_data.passed_quiesce, 1); + __this_cpu_write(rcu_data_p->passed_quiesce, 1); barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */ current->rcu_read_unlock_special.b.need_qs = false; } @@ -495,8 +496,8 @@ static void rcu_preempt_check_callbacks(void) return; } if (t->rcu_read_lock_nesting > 0 && - __this_cpu_read(rcu_preempt_data.qs_pending) && - !__this_cpu_read(rcu_preempt_data.passed_quiesce)) + __this_cpu_read(rcu_data_p->qs_pending) && + !__this_cpu_read(rcu_data_p->passed_quiesce)) t->rcu_read_unlock_special.b.need_qs = true; } @@ -504,7 +505,7 @@ static void rcu_preempt_check_callbacks(void) static void rcu_preempt_do_callbacks(void) { - rcu_do_batch(rcu_state_p, this_cpu_ptr(&rcu_preempt_data)); + rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); } #endif /* #ifdef CONFIG_RCU_BOOST */ @@ -811,7 +812,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier); */ static void __init __rcu_init_preempt(void) { - rcu_init_one(rcu_state_p, &rcu_preempt_data); + rcu_init_one(rcu_state_p, rcu_data_p); } /* @@ -835,6 +836,7 @@ void exit_rcu(void) #else /* #ifdef CONFIG_PREEMPT_RCU */ static struct rcu_state *const rcu_state_p = &rcu_sched_state; +static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data; /* * Tell them what RCU they are running. -- cgit v1.2.3 From 3382adbc1bb8c80ea512243acf6059564287620b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 4 Mar 2015 15:41:24 -0800 Subject: rcu: Eliminate a few CONFIG_RCU_NOCB_CPU_ALL #ifdefs This commit converts several CONFIG_RCU_NOCB_CPU_ALL #ifdefs to instead use IS_ENABLED(). This change should help avoid hiding code from compiler diagnostics. Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 4 ++-- include/linux/rcutree.h | 2 -- kernel/rcu/tree_plugin.h | 22 ++++++++++++---------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 87bb0eee665b..5ec20bc4af76 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1153,13 +1153,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define kfree_rcu(ptr, rcu_head) \ __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) -#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) +#ifdef CONFIG_TINY_RCU static inline int rcu_needs_cpu(unsigned long *delta_jiffies) { *delta_jiffies = ULONG_MAX; return 0; } -#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ +#endif /* #ifdef CONFIG_TINY_RCU */ #if defined(CONFIG_RCU_NOCB_CPU_ALL) static inline bool rcu_is_nocb_cpu(int cpu) { return true; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d2e583a6aaca..0bd400b02430 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -31,9 +31,7 @@ #define __LINUX_RCUTREE_H void rcu_note_context_switch(void); -#ifndef CONFIG_RCU_NOCB_CPU_ALL int rcu_needs_cpu(unsigned long *delta_jiffies); -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ void rcu_cpu_stall_reset(void); /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5c0122f09ed0..0730bfcf65db 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1372,13 +1372,12 @@ static void rcu_prepare_kthreads(int cpu) * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs * any flavor of RCU. */ -#ifndef CONFIG_RCU_NOCB_CPU_ALL int rcu_needs_cpu(unsigned long *delta_jiffies) { *delta_jiffies = ULONG_MAX; - return rcu_cpu_has_callbacks(NULL); + return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) + ? 0 : rcu_cpu_has_callbacks(NULL); } -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up @@ -1485,11 +1484,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) * * The caller must have disabled interrupts. */ -#ifndef CONFIG_RCU_NOCB_CPU_ALL int rcu_needs_cpu(unsigned long *dj) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) { + *dj = ULONG_MAX; + return 0; + } + /* Snapshot to detect later posting of non-lazy callback. */ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; @@ -1516,7 +1519,6 @@ int rcu_needs_cpu(unsigned long *dj) } return 0; } -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ /* * Prepare a CPU for idle from an RCU perspective. The first major task @@ -1530,7 +1532,6 @@ int rcu_needs_cpu(unsigned long *dj) */ static void rcu_prepare_for_idle(void) { -#ifndef CONFIG_RCU_NOCB_CPU_ALL bool needwake; struct rcu_data *rdp; struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); @@ -1538,6 +1539,9 @@ static void rcu_prepare_for_idle(void) struct rcu_state *rsp; int tne; + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) + return; + /* Handle nohz enablement switches conservatively. 
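 * That is, snapshot tick_nohz_active and, if it has changed since the
 * last pass, update ->tick_nohz_enabled_snap and defer further
 * processing rather than acting on a half-observed transition.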
*/ tne = READ_ONCE(tick_nohz_active); if (tne != rdtp->tick_nohz_enabled_snap) { @@ -1585,7 +1589,6 @@ static void rcu_prepare_for_idle(void) if (needwake) rcu_gp_kthread_wake(rsp); } -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ } /* @@ -1595,12 +1598,11 @@ static void rcu_prepare_for_idle(void) */ static void rcu_cleanup_after_idle(void) { -#ifndef CONFIG_RCU_NOCB_CPU_ALL - if (rcu_is_nocb_cpu(smp_processor_id())) + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) || + rcu_is_nocb_cpu(smp_processor_id())) return; if (rcu_try_advance_all_cbs()) invoke_rcu_core(); -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ } /* -- cgit v1.2.3 From cd73ca21cd2bb3711b8d80ba74c90d37ef15fe4d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 16 Mar 2015 11:53:52 -0700 Subject: rcu: Force wakeup of rcu_gp_kthread at grace-period end The rcu_gp_kthread_wake() refuses to do a wakeup unless at least one of the ->gp_flags bits are set, which normally will not be the case when the last quiescent state is reported. This results in up to a 3-jiffy delay given default Kconfig settings. This commit therefore has rcu_report_qs_rsp() set RCU_GP_FLAG_FQS before invoking rcu_gp_kthread_wake() in order to force a more immediate wakeup at grace-period end, thus reducing grace-period latencies. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 213f644d6fb1..7d369b829598 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2139,6 +2139,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) __releases(rcu_get_root(rsp)->lock) { WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); rcu_gp_kthread_wake(rsp); } -- cgit v1.2.3 From 5ce035fb7df413a72b6b956d4aa212a866f3b565 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 30 Mar 2015 16:46:16 -0700 Subject: rcu: tree_plugin: Use bool function return values of true/false not 1/0 Use the normal return values for bool functions Signed-off-by: Joe Perches Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 0730bfcf65db..f8af20273868 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -3056,9 +3056,9 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp) if (tick_nohz_full_cpu(smp_processor_id()) && (!rcu_gp_in_progress(rsp) || ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ))) - return 1; + return true; #endif /* #ifdef CONFIG_NO_HZ_FULL */ - return 0; + return false; } /* -- cgit v1.2.3 From a76ff6884bfedfafcbb0d9c84c7a6b6a546cba6d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 1 Apr 2015 08:19:59 -0700 Subject: powerpc: Fix smp_mb__before_spinlock() Currently, smp_mb__before_spinlock() is defined to be smp_wmb() in core code, but this is not sufficient on PowerPC. This patch therefore supplies an override for the generic definition to strengthen smp_mb__before_spinlock() to smp_mb(), as is needed on PowerPC. Signed-off-by: Paul E. 
McKenney Cc: --- arch/powerpc/include/asm/barrier.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index a3bf5be111ff..1124f59b8df4 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -89,5 +89,6 @@ do { \ #define smp_mb__before_atomic() smp_mb() #define smp_mb__after_atomic() smp_mb() +#define smp_mb__before_spinlock() smp_mb() #endif /* _ASM_POWERPC_BARRIER_H */ -- cgit v1.2.3 From 82efed06d5e370f1526ec93ff4c2c2496542f615 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Tue, 7 Apr 2015 15:12:07 -0700 Subject: rcu: Fix missing task information during rcu-preempt stall The first item list_for_each_entry_continue(alist) iterates over is alist->next, rather than alist itself. Consequently, rcu_print_detail_task_stall_rnp() skips the task referenced by gp_tasks. Use gp_tasks->prev as the argument to list_for_each_entry_continue() instead. Signed-off-by: Patrick Daly Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index f8af20273868..853c7b8ea833 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -395,7 +395,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) raw_spin_unlock_irqrestore(&rnp->lock, flags); return; } - t = list_entry(rnp->gp_tasks, + t = list_entry(rnp->gp_tasks->prev, struct task_struct, rcu_node_entry); list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) sched_show_task(t); @@ -452,7 +452,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp) if (!rcu_preempt_blocked_readers_cgp(rnp)) return 0; rcu_print_task_stall_begin(rnp); - t = list_entry(rnp->gp_tasks, + t = list_entry(rnp->gp_tasks->prev, struct task_struct, rcu_node_entry); list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { pr_cont(" P%d", t->pid); -- cgit v1.2.3 From 81e701e4376232b2779f52f15e3b7413131bd8e4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 16 Apr 2015 11:02:25 -0700 Subject: rcu: Add more debug info on "kthread starved" RCU CPU stall warnings This commit adds grace number and command-flags information to the "kthread starved" message that is sometimes printed out as part of RCU CPU stall warnings. This message is caused by the corresponding RCU grace-period kthread not having run for at least two seconds, and this added information can be helpful when debugging. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7d369b829598..52f064ac7b49 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1136,8 +1136,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp) j = jiffies; gpa = READ_ONCE(rsp->gp_activity); if (j - gpa > 2 * HZ) - pr_err("%s kthread starved for %ld jiffies!\n", - rsp->name, j - gpa); + pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x\n", + rsp->name, j - gpa, + rsp->gpnum, rsp->completed, rsp->gp_flags); } /* -- cgit v1.2.3 From 5af4692a75daf08dddc93dbb4cd2a1b3d3b617af Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 25 Apr 2015 12:48:29 -0700 Subject: smp: Make control dependencies work on Alpha, improve documentation The current formulation of control dependencies fails on DEC Alpha, which does not respect dependencies of any kind unless an explicit memory barrier is provided. 
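In sketch form, the pattern at issue looks like this (a and b are
assumed to be shared variables):

	q = ACCESS_ONCE(a);
	if (q)
		ACCESS_ONCE(b) = 1;	/* Store depends on the branch. */

On most architectures the conditional branch itself orders the load
from 'a' before the dependent store to 'b', but on Alpha that ordering
requires an explicit smp_read_barrier_depends() after the load.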
This commit therefore creates a
READ_ONCE_CTRL() that has the same overhead on non-Alpha systems, but
causes Alpha to produce the needed ordering.  This commit also applies
READ_ONCE_CTRL() to the one known use of control dependencies.

Use of READ_ONCE_CTRL() also has the beneficial effect of adding a bit
of self-documentation to control dependencies.

Signed-off-by: Paul E. McKenney
Acked-by: Peter Zijlstra (Intel)
---
 Documentation/memory-barriers.txt | 55 +++++++++++++++++++++++----------------
 include/linux/compiler.h          | 16 ++++++++++++
 kernel/events/ring_buffer.c       |  2 +-
 3 files changed, 50 insertions(+), 23 deletions(-)

diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f95746189b5d..a3014bcc5b08 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -617,16 +617,16 @@ case what's actually required is:
 However, stores are not speculated.  This means that ordering -is- provided
 for load-store control dependencies, as in the following example:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	if (q) {
 		ACCESS_ONCE(b) = p;
 	}
 
-Control dependencies pair normally with other types of barriers.
-That said, please note that ACCESS_ONCE() is not optional!  Without the
-ACCESS_ONCE(), might combine the load from 'a' with other loads from
-'a', and the store to 'b' with other stores to 'b', with possible highly
-counterintuitive effects on ordering.
+Control dependencies pair normally with other types of barriers.  That
+said, please note that READ_ONCE_CTRL() is not optional!  Without the
+READ_ONCE_CTRL(), the compiler might combine the load from 'a' with
+other loads from 'a', and the store to 'b' with other stores to 'b',
+with possible highly counterintuitive effects on ordering.
 
 Worse yet, if the compiler is able to prove (say) that the value of
 variable 'a' is always non-zero, it would be well within its rights
@@ -636,12 +636,15 @@ as follows:
 	q = a;
 	b = p;  /* BUG: Compiler and CPU can both reorder!!! */
 
-So don't leave out the ACCESS_ONCE().
+Finally, the READ_ONCE_CTRL() includes an smp_read_barrier_depends()
+that DEC Alpha needs in order to respect control dependencies.
+
+So don't leave out the READ_ONCE_CTRL().
 
 It is tempting to try to enforce ordering on identical stores on both
 branches of the "if" statement as follows:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	if (q) {
 		barrier();
 		ACCESS_ONCE(b) = p;
@@ -655,7 +658,7 @@ branches of the "if" statement as follows:
 Unfortunately, current compilers will transform this as follows at high
 optimization levels:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	barrier();
 	ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
 	if (q) {
@@ -685,7 +688,7 @@ memory barriers, for example, smp_store_release():
 In contrast, without explicit memory barriers, two-legged-if control
 ordering is guaranteed only when the stores differ, for example:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	if (q) {
 		ACCESS_ONCE(b) = p;
 		do_something();
@@ -694,14 +697,14 @@ ordering is guaranteed only when the stores differ, for example:
 		do_something_else();
 	}
 
-The initial ACCESS_ONCE() is still required to prevent the compiler from
-proving the value of 'a'.
+The initial READ_ONCE_CTRL() is still required to prevent the compiler
+from proving the value of 'a'.
 In addition, you need to be careful what you do with the local variable 'q',
 otherwise the compiler might be able to guess the value and again remove
 the needed conditional.  For example:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	if (q % MAX) {
 		ACCESS_ONCE(b) = p;
 		do_something();
@@ -714,7 +717,7 @@ If MAX is defined to be 1, then the compiler knows that (q % MAX) is
 equal to zero, in which case the compiler is within its rights to
 transform the above code into the following:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	ACCESS_ONCE(b) = p;
 	do_something_else();
 
@@ -725,7 +728,7 @@ is gone, and the barrier won't bring it back.  Therefore, if you are
 relying on this ordering, you should make sure that MAX is greater
 than one, perhaps as follows:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
 	if (q % MAX) {
 		ACCESS_ONCE(b) = p;
@@ -742,14 +745,15 @@ of the 'if' statement.
 
 You must also be careful not to rely too much on boolean short-circuit
 evaluation.  Consider this example:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	if (a || 1 > 0)
 		ACCESS_ONCE(b) = 1;
 
-Because the second condition is always true, the compiler can transform
-this example as following, defeating control dependency:
+Because the first condition cannot fault and the second condition is
+always true, the compiler can transform this example as follows,
+defeating the control dependency:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE_CTRL(a);
 	ACCESS_ONCE(b) = 1;
 
 This example underscores the need to ensure that the compiler cannot
@@ -762,8 +766,8 @@ demonstrated by two related examples, with the initial values of x and y
 both being zero:
 
 	CPU 0                     CPU 1
-	=====================     =====================
-	r1 = ACCESS_ONCE(x);      r2 = ACCESS_ONCE(y);
+	=======================   =======================
+	r1 = READ_ONCE_CTRL(x);   r2 = READ_ONCE_CTRL(y);
 	if (r1 > 0)               if (r2 > 0)
 	  ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;
 
@@ -783,7 +787,8 @@ But because control dependencies do -not- provide transitivity, the above
 assertion can fail after the combined three-CPU example completes.  If you
 need the three-CPU example to provide ordering, you will need smp_mb()
 between the loads and stores in the CPU 0 and CPU 1 code fragments,
-that is, just before or just after the "if" statements.
+that is, just before or just after the "if" statements.  Furthermore,
+the original two-CPU example is very fragile and should be avoided.
 
 These two examples are the LB and WWC litmus tests from this paper:
 http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
@@ -791,6 +796,12 @@ site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
 
 In summary:
 
+  (*) Control dependencies must be headed by READ_ONCE_CTRL().
+      Or, as a much less preferable alternative, they may be headed
+      by a READ_ONCE() or an ACCESS_ONCE() read with an explicit
+      smp_read_barrier_depends() interposed between that read and
+      the control-dependent write.
+
However, they do -not- guarantee any other sort of ordering: Not prior loads against later loads, nor prior stores against diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 867722591be2..5d66777914db 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -252,6 +252,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define WRITE_ONCE(x, val) \ ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; }) +/** + * READ_ONCE_CTRL - Read a value heading a control dependency + * @x: The value to be read, heading the control dependency + * + * Control dependencies are tricky. See Documentation/memory-barriers.txt + * for important information on how to use them. Note that in many cases, + * use of smp_load_acquire() will be much simpler. Control dependencies + * should be avoided except on the hottest of hotpaths. + */ +#define READ_ONCE_CTRL(x) \ +({ \ + typeof(x) __val = READ_ONCE(x); \ + smp_read_barrier_depends(); /* Enforce control dependency. */ \ + __val; \ +}) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 232f00f273cb..17fcb73c4a50 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -141,7 +141,7 @@ int perf_output_begin(struct perf_output_handle *handle, perf_output_get_handle(handle); do { - tail = ACCESS_ONCE(rb->user_page->data_tail); + tail = READ_ONCE_CTRL(rb->user_page->data_tail); offset = head = local_read(&rb->head); if (!rb->overwrite && unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size)) -- cgit v1.2.3 From 30ff1533b8f75255bdf02bc3361f1c558138f471 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 1 May 2015 13:01:38 -0700 Subject: rcu: Make synchronize_sched_expedited() call wait_rcu_gp() Currently, synchronize_sched_expedited() will call synchronize_sched() if there is danger of counter wrap. But if configuration says to always do expedited grace periods, synchronize_sched() will just call synchronize_sched_expedited() right back again. In theory, the old expedited operations will complete, the counters will get back in synch, and the recursion will end. But we could easily run out of stack long before that time. This commit therefore makes synchronize_sched_expedited() invoke the underlying wait_rcu_gp(call_rcu_sched) instead of synchronize_sched(), the same as all the other calls out from synchronize_sched_expedited(). This bug was introduced by commit 1924bcb02597 (Avoid counter wrap in synchronize_sched_expedited()). Reported-by: Rik van Riel Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 52f064ac7b49..f02830c85ec2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3290,7 +3290,7 @@ void synchronize_sched_expedited(void) if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start), (ulong)atomic_long_read(&rsp->expedited_done) + ULONG_MAX / 8)) { - synchronize_sched(); + wait_rcu_gp(call_rcu_sched); atomic_long_inc(&rsp->expedited_wrap); return; } -- cgit v1.2.3 From c92fb05795f57463cb763a82f9053d294f77ea87 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Tue, 5 May 2015 21:57:06 +0800 Subject: rcu: Make rcu_*_data variables static rcu_bh_data, rcu_sched_data and rcu_preempt_data are never used outside kernel/rcu/tree.c and thus can be made static. 
Doing so fixes a section mismatch warning reported by clang when building LLVMLinux with -Wsection, because these variables were declared in .data..percpu and defined in .data..percpu..shared_aligned since commit 11bbb235c26f ("rcu: Use DEFINE_PER_CPU_SHARED_ALIGNED for rcu_data"). Signed-off-by: Nicolas Iooss Reviewed-by: Josh Triplett Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- kernel/rcu/tree.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f02830c85ec2..6efb0b66a30d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -91,7 +91,7 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \ DEFINE_RCU_TPS(sname) \ -DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \ +static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \ struct rcu_state sname##_state = { \ .level = { &sname##_state.node[0] }, \ .rda = &sname##_data, \ diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index dd5ce4034875..8079c5b22a8f 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -517,14 +517,11 @@ extern struct list_head rcu_struct_flavors; * RCU implementation internal declarations: */ extern struct rcu_state rcu_sched_state; -DECLARE_PER_CPU(struct rcu_data, rcu_sched_data); extern struct rcu_state rcu_bh_state; -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); #ifdef CONFIG_PREEMPT_RCU extern struct rcu_state rcu_preempt_state; -DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_RCU_BOOST -- cgit v1.2.3 From 82072c4fcf095ce03a05860365c157c8bb58945b Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Mon, 11 May 2015 18:12:27 +0200 Subject: rcu: Change function declaration to bool rcu_cpu_has_callbacks() is declared int. The current declaration was introduced in commit c0f4dfd4f90f (rcu: Make RCU_FAST_NO_HZ take advantage of numbered callbacks). But it is actually returning bool and as the function description states " * Return true if the specified CPU has any callback....", this probably should be a bool as all (3) call-sites currently treat it as bool. Type-checking coccinelle spatches are being used to locate type mismatches between function signatures and return values in this case this produced: ./kernel/rcu/tree.c:3538 WARNING: return of wrong type int != bool, Patch was compile tested with x86_64_defconfig (implies CONFIG_TREE_RCU=y) Patch is against 4.1-rc3 (localversion-next is -next-20150511) and fixes Signed-off-by: Nicholas Mc Guire Reviewed-by: Josh Triplett Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 6efb0b66a30d..7b9dd4f62569 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3496,7 +3496,7 @@ static int rcu_pending(void) * non-NULL, store an indication of whether all callbacks are lazy. * (If there are no callbacks, all of them are deemed to be lazy.) 
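 * A typical call looks like this sketch, where "al" is a hypothetical
 * local variable:
 *
 *	bool al;
 *
 *	if (rcu_cpu_has_callbacks(&al) && !al)
 *		... at least one queued callback is non-lazy ...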
 */
-static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
 {
 	bool al = true;
 	bool hc = false;
-- cgit v1.2.3


From f517700cce37ffcb36e7afae0294fd11c72ed134 Mon Sep 17 00:00:00 2001
From: Ying Xue
Date: Thu, 26 Mar 2015 13:27:08 +0800
Subject: rculist: Fix another sparse warning

This fixes the following sparse warnings:

make C=1 CF=-D__CHECK_ENDIAN__ net/tipc/name_table.o
net/tipc/name_table.c:977:17: error: incompatible types in comparison expression (different address spaces)
net/tipc/name_table.c:977:17: error: incompatible types in comparison expression (different address spaces)

To silence these sparse complaints, an RCU annotation should be added
to the "next" pointer of the hlist_node structure through the
hlist_next_rcu() macro when iterating over a hlist with
hlist_for_each_entry_from_rcu().

Signed-off-by: Ying Xue
Signed-off-by: Paul E. McKenney
---
 include/linux/rculist.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 665397247e82..17c6b1f84a77 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  */
 #define hlist_for_each_entry_from_rcu(pos, member)			\
 	for (; pos;							\
-	     pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-			typeof(*(pos)), member))
+	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
+			&(pos)->member)), typeof(*(pos)), member))
 
 #endif	/* __KERNEL__ */
 #endif
-- cgit v1.2.3


From ea46351cea79f54729d8546e5bd7f091a2e6484b Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Tue, 3 Mar 2015 14:05:26 -0800
Subject: rcu: Eliminate HOTPLUG_CPU #ifdef in favor of IS_ENABLED()

This commit removes a HOTPLUG_CPU #ifdef, replacing it with
IS_ENABLED()-protected return statements.  This relies on the
optimizer to remove any resulting dead code.

Signed-off-by: Paul E. McKenney
---
 kernel/rcu/tree.c | 39 ++++++++++++++-------------------------
 1 file changed, 14 insertions(+), 25 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0628df155970..f2e888c8ec5a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2335,8 +2335,6 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 /*
  * Send the specified CPU's RCU callbacks to the orphanage.  The
  * specified CPU must be offline, and the caller must hold the
@@ -2347,7 +2345,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 			  struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	/* No-CBs CPUs do not have orphanable callbacks. */
-	if (rcu_is_nocb_cpu(rdp->cpu))
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
 		return;
 
 	/*
@@ -2406,7 +2404,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
 	/* No-CBs CPUs are handled specially. */
-	if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
+	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
+	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
 		return;
 
 	/* Do the accounting first.
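 * The orphanage's callback counts are transferred to this CPU's
 * rcu_data structure here; the orphaned callbacks themselves are
 * spliced onto this CPU's lists just afterward.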
*/ @@ -2453,6 +2452,9 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda)); RCU_TRACE(struct rcu_node *rnp = rdp->mynode); + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) + return; + RCU_TRACE(mask = rdp->grpmask); trace_rcu_grace_period(rsp->name, rnp->gpnum + 1 - !!(rnp->qsmask & mask), @@ -2481,7 +2483,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) long mask; struct rcu_node *rnp = rnp_leaf; - if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || + rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) return; for (;;) { mask = rnp->grpmask; @@ -2512,6 +2515,9 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) + return; + /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ mask = rdp->grpmask; raw_spin_lock_irqsave(&rnp->lock, flags); @@ -2533,6 +2539,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) + return; + /* Adjust any no-longer-needed kthreads. */ rcu_boost_kthread_setaffinity(rnp, -1); @@ -2547,26 +2556,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) cpu, rdp->qlen, rdp->nxtlist); } -#else /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) -{ -} - -static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf) -{ -} - -static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) -{ -} - -static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) -{ -} - -#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ - /* * Invoke any RCU callbacks that have made it to the end of their grace * period. Thottle as specified by rdp->blimit. -- cgit v1.2.3 From 0a0ba1c93f8a0ff28bacec0d1d018081e762e2f0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 8 Mar 2015 14:20:30 -0700 Subject: rcu: Adjust ->lock acquisition for tasks no longer migrating Tasks are no longer migrated away from a given rcu_node structure when all CPUs corresponding to that rcu_node structure have gone offline. This means that rcu_read_unlock_special() no longer needs to loop retrying rcu_node ->lock acquisition because the current task is guaranteed to stay put. This commit takes a small and paranoid step towards relying on this guarantee by placing a WARN_ON_ONCE() just after the early exit from the lock-acquisition loop. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 58b1ebdc4387..c8340e929eb4 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -307,9 +307,11 @@ void rcu_read_unlock_special(struct task_struct *t) t->rcu_read_unlock_special.b.blocked = false; /* - * Remove this task from the list it blocked on. The - * task can migrate while we acquire the lock, but at - * most one time. So at most two passes through loop. + * Remove this task from the list it blocked on. The task + * now remains queued on the rcu_node corresponding to + * the CPU it first blocked on, so the first attempt to + * acquire the task's rcu_node's ->lock will succeed. 
+ * Keep the loop and add a WARN_ON() out of sheer paranoia. */ for (;;) { rnp = t->rcu_blocked_node; @@ -317,6 +319,7 @@ void rcu_read_unlock_special(struct task_struct *t) smp_mb__after_unlock_lock(); if (rnp == t->rcu_blocked_node) break; + WARN_ON_ONCE(1); raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ } empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); -- cgit v1.2.3 From c5b5539506f86469dca08310657ca93bbb6c00a5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Mar 2015 16:58:41 -0700 Subject: rcu: Remove dead code from force_qs_rnp() Because force_qs_rnp() is invoked only from the force-quiescent-state code which runs only in the context of the grace-period kthread, a grace period must always be in progress throughout force_qs_rnp()'s execution. This commit therefore removes the rcu_gp_in_progress() check and the associated dead code. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f2e888c8ec5a..e338a12c3a1b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2720,10 +2720,6 @@ static void force_qs_rnp(struct rcu_state *rsp, mask = 0; raw_spin_lock_irqsave(&rnp->lock, flags); smp_mb__after_unlock_lock(); - if (!rcu_gp_in_progress(rsp)) { - raw_spin_unlock_irqrestore(&rnp->lock, flags); - return; - } if (rnp->qsmask == 0) { if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || -- cgit v1.2.3 From cce7f1fc015a98ca9263bd5730c00258bc214e53 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Mar 2015 17:00:56 -0700 Subject: rcu: Remove redundant offline check Because offline CPUs are propagated up the rcu_node tree's ->qsmaskinit bits just before each grace period starts, the ->qsmaskinit bit cannot be clear when the corresponding ->qsmask bit is set. Furthermore, this condition used to correspond to a CPU that was on its way offline, and making RCU's notion of an offline CPU more precise has eliminated this situation. This commit therefore removes the now-redundant offline check from force_qs_rnp(). Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index e338a12c3a1b..a1df68fce545 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2749,8 +2749,6 @@ static void force_qs_rnp(struct rcu_state *rsp, bit = 1; for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { if ((rnp->qsmask & bit) != 0) { - if ((rnp->qsmaskinit & bit) == 0) - *isidle = false; /* Pending hotplug. */ if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) mask |= bit; } -- cgit v1.2.3 From a738eec6c6082f48cbcf0157fd9f550e286ea04b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Mar 2015 14:53:29 -0700 Subject: rcu: Correctly initialize ->rcu_qs_ctr_snap at online time The rcu_data structure's ->rcu_qs_ctr_snap field is initialized at CPU-online time from the current CPU's element of the per-CPU rcu_qs_ctr variable. Unfortunately, this is at CPU_UP_PREPARE time, so has nothing to do with the CPU being onlined. This commit therefore initializes this variable from the incoming CPU's element of rcu_qs_ctr. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index a1df68fce545..d198a33d54bd 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3763,7 +3763,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */ rdp->completed = rnp->completed; rdp->passed_quiesce = false; - rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); + rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu); rdp->qs_pending = false; trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); raw_spin_unlock_irqrestore(&rnp->lock, flags); -- cgit v1.2.3 From 64eaf974218d576812919c8b1a8d87ded4e695d9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 15 Apr 2015 12:45:41 -0700 Subject: cpu: Handle smpboot_unpark_threads() uniformly Commit 00df35f99191 (cpu: Defer smpboot kthread unparking until CPU known to scheduler) put the online path's call to smpboot_unpark_threads() into a CPU-hotplug notifier. This commit places the offline-failure paths call into the same notifier for the sake of uniformity. Note that it is not currently possible to place the offline path's call to smpboot_park_threads() into an existing notifier because the CPU_DYING notifiers run in a restricted environment, and the CPU_UP_PREPARE notifiers run too soon. Signed-off-by: Paul E. McKenney --- kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index 94bbe4695232..260eb7db281e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -398,7 +398,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. */ - smpboot_unpark_threads(cpu); cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); goto out_release; } @@ -463,6 +462,7 @@ static int smpboot_thread_call(struct notifier_block *nfb, switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DOWN_FAILED: case CPU_ONLINE: smpboot_unpark_threads(cpu); break; -- cgit v1.2.3 From 927da9dfd13aec358496de9488384f1a663c679a Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 27 Apr 2015 18:47:58 -0400 Subject: cpu: Remove new instance of __cpuinit that crept back in We removed __cpuinit support (leaving no-op stubs) quite some time ago. However a new instance was added in commit 00df35f991914db6b8bde8cf0980 ("cpu: Defer smpboot kthread unparking until CPU known to scheduler") Since we want to clobber the stubs soon, get this removed now. Cc: Paul E. McKenney Signed-off-by: Paul Gortmaker Signed-off-by: Paul E. McKenney --- kernel/cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/cpu.c b/kernel/cpu.c index 260eb7db281e..9c9c9fab16cc 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -479,7 +479,7 @@ static struct notifier_block smpboot_thread_notifier = { .priority = CPU_PRI_SMPBOOT, }; -void __cpuinit smpboot_thread_init(void) +void smpboot_thread_init(void) { register_cpu_notifier(&smpboot_thread_notifier); } -- cgit v1.2.3 From eab128e8305f2bc4c91406031aab26d86fecced6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 15 Apr 2015 12:08:22 -0700 Subject: rcu: Modulate grace-period slow init to normalize delay Currently, the larger the gp_init_delay boot parameter, the slower rcutorture will sequence through grace periods. 
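As a back-of-the-envelope check of the normalization introduced below:
a delay of gp_init_delay jiffies is imposed only once per
rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay grace periods, so
the expected added delay per grace period is roughly

	gp_init_delay / (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay)
		= 1 / (rcu_num_nodes * PER_RCU_NODE_PERIOD)

jiffies, independent of the configured gp_init_delay.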
This commit avoids this issue by decreasing the probability of slowing initialization of a given grace period as the degree of slowness increases. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0628df155970..c34422d92aa9 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -169,7 +169,17 @@ module_param(gp_init_delay, int, 0644); #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ static const int gp_init_delay; #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ -#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */ + +/* + * Number of grace periods between delays, normalized by the duration of + * the delay. The longer the the delay, the more the grace periods between + * each delay. The reason for this normalization is that it means that, + * for non-zero delays, the overall slowdown of grace periods is constant + * regardless of the duration of the delay. This arrangement balances + * the need for long delays to increase some race probabilities with the + * need for fast grace periods to increase other race probabilities. + */ +#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ /* * Track the rcutorture test sequence number and the update version @@ -1848,7 +1858,8 @@ static int rcu_gp_init(struct rcu_state *rsp) cond_resched_rcu_qs(); WRITE_ONCE(rsp->gp_activity, jiffies); if (gp_init_delay > 0 && - !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD))) + !(rsp->gpnum % + (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay))) schedule_timeout_uninterruptible(gp_init_delay); } -- cgit v1.2.3 From 3eaaaf6cd6d7fbaf552cc543ccb93d7da81f43ec Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Mar 2015 16:51:17 -0700 Subject: rcu: Shut up spurious gcc uninitialized-variable warning Because gcc doesn't realize that rcu_num_lvls must be strictly greater than zero, some versions give a spurious warning about levelcnt[0] being uninitialized in rcu_init_one(). This commit updates the condition on the pre-existing panic() in order to educate gcc on this point. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c34422d92aa9..9b076b284695 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3982,9 +3982,9 @@ static void __init rcu_init_one(struct rcu_state *rsp, BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ - /* Silence gcc 4.8 warning about array index out of range. */ - if (rcu_num_lvls > RCU_NUM_LVLS) - panic("rcu_init_one: rcu_num_lvls overflow"); + /* Silence gcc 4.8 false positive about array index out of range. */ + if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS) + panic("rcu_init_one: rcu_num_lvls out of range"); /* Initialize the level-tracking arrays. */ -- cgit v1.2.3 From 0f41c0ddadfb3d5baffe62351c380e2881aacd58 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Mar 2015 18:33:20 -0700 Subject: rcu: Provide diagnostic option to slow down grace-period scans Grace-period scans of the rcu_node combining tree normally proceed quite quickly, so that it is very difficult to reproduce races against them. This commit therefore allows grace-period pre-initialization and cleanup to be artificially slowed down, increasing race-reproduction probability. 
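For example, a torture run might enable both delays at boot with
something like the following (the values here are illustrative only):

	rcutree.gp_preinit_delay=3 rcutree.gp_cleanup_delay=3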
Two pairs of new Kconfig parameters are provided: RCU_TORTURE_TEST_SLOW_PREINIT enables slowing down of the propagation of CPU-hotplug changes up the combining tree, with RCU_TORTURE_TEST_SLOW_PREINIT_DELAY specifying the delay in jiffies, and RCU_TORTURE_TEST_SLOW_CLEANUP enables slowing down of the end-of-grace-period cleanup scan, with RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY specifying the delay in jiffies. Boot-time parameters named rcutree.gp_preinit_delay and rcutree.gp_cleanup_delay allow these delays to be specified at boot time. Signed-off-by: Paul E. McKenney --- Documentation/kernel-parameters.txt | 16 ++++++- kernel/rcu/tree.c | 29 ++++++++++-- lib/Kconfig.debug | 54 +++++++++++++++++++++- .../selftests/rcutorture/configs/rcu/CFcommon | 2 + 4 files changed, 93 insertions(+), 8 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 61ab1628a057..10a4fb80c033 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2992,11 +2992,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Set maximum number of finished RCU callbacks to process in one batch. + rcutree.gp_cleanup_delay= [KNL] + Set the number of jiffies to delay each step of + RCU grace-period cleanup. This only has effect + when CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP is set. + rcutree.gp_init_delay= [KNL] Set the number of jiffies to delay each step of RCU grace-period initialization. This only has - effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT is - set. + effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT + is set. + + rcutree.gp_preinit_delay= [KNL] + Set the number of jiffies to delay each step of + RCU grace-period pre-initialization, that is, + the propagation of recent CPU-hotplug changes up + the rcu_node combining tree. This only has effect + when CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT is set. rcutree.rcu_fanout_leaf= [KNL] Increase the number of CPUs assigned to each diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9b076b284695..2f3cb5513ca3 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -163,6 +163,14 @@ static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; module_param(kthread_prio, int, 0644); /* Delay in jiffies for grace-period initialization delays, debug only. */ + +#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT +static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY; +module_param(gp_preinit_delay, int, 0644); +#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */ +static const int gp_preinit_delay; +#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */ + #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY; module_param(gp_init_delay, int, 0644); @@ -170,6 +178,13 @@ module_param(gp_init_delay, int, 0644); static const int gp_init_delay; #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ +#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP +static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY; +module_param(gp_cleanup_delay, int, 0644); +#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */ +static const int gp_cleanup_delay; +#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */ + /* * Number of grace periods between delays, normalized by the duration of * the delay.
The longer the delay, the more grace periods between @@ -1742,6 +1757,13 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp) rcu_gp_kthread_wake(rsp); } +static void rcu_gp_slow(struct rcu_state *rsp, int delay) +{ + if (delay > 0 && + !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay))) + schedule_timeout_uninterruptible(delay); +} + /* * Initialize a new grace period. Return 0 if no grace period required. */ @@ -1784,6 +1806,7 @@ static int rcu_gp_init(struct rcu_state *rsp) * will handle subsequent offline CPUs. */ rcu_for_each_leaf_node(rsp, rnp) { + rcu_gp_slow(rsp, gp_preinit_delay); raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); if (rnp->qsmaskinit == rnp->qsmaskinitnext && @@ -1840,6 +1863,7 @@ static int rcu_gp_init(struct rcu_state *rsp) * process finishes, because this kthread handles both. */ rcu_for_each_node_breadth_first(rsp, rnp) { + rcu_gp_slow(rsp, gp_init_delay); raw_spin_lock_irq(&rnp->lock); smp_mb__after_unlock_lock(); rdp = this_cpu_ptr(rsp->rda); @@ -1857,10 +1881,6 @@ static int rcu_gp_init(struct rcu_state *rsp) raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); WRITE_ONCE(rsp->gp_activity, jiffies); - if (gp_init_delay > 0 && - !(rsp->gpnum % - (rcu_num_nodes * PER_RCU_NODE_PERIOD * gp_init_delay))) - schedule_timeout_uninterruptible(gp_init_delay); } return 1; @@ -1955,6 +1975,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp) raw_spin_unlock_irq(&rnp->lock); cond_resched_rcu_qs(); WRITE_ONCE(rsp->gp_activity, jiffies); + rcu_gp_slow(rsp, gp_cleanup_delay); } rnp = rcu_get_root(rsp); raw_spin_lock_irq(&rnp->lock); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ba2b0c87e65b..e1af93ae246b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1261,12 +1261,38 @@ config RCU_TORTURE_TEST_RUNNABLE Say N here if you want the RCU torture tests to start only after being manually enabled via /proc. +config RCU_TORTURE_TEST_SLOW_PREINIT + bool "Slow down RCU grace-period pre-initialization to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period pre-initialization (the + propagation of CPU-hotplug changes up the rcu_node combining + tree) for a few jiffies between initializing each pair of + consecutive rcu_node structures. This helps to expose races + involving grace-period pre-initialization, in other words, it + makes your kernel less stable. It can also greatly increase + grace-period latency, especially on systems with large numbers + of CPUs. This is useful when torture-testing RCU, but in + almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. + +config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY + int "How much to slow down RCU grace-period pre-initialization" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_PREINIT + help + This option specifies the number of jiffies to wait between + each rcu_node structure pre-initialization step. + config RCU_TORTURE_TEST_SLOW_INIT bool "Slow down RCU grace-period initialization to expose races" depends on RCU_TORTURE_TEST help - This option makes grace-period initialization block for a - few jiffies between initializing each pair of consecutive + This option delays grace-period initialization for a few + jiffies between initializing each pair of consecutive rcu_node structures. This helps to expose races involving grace-period initialization, in other words, it makes your kernel less stable.
It can also greatly increase grace-period @@ -1286,6 +1312,30 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY This option specifies the number of jiffies to wait between each rcu_node structure initialization. +config RCU_TORTURE_TEST_SLOW_CLEANUP + bool "Slow down RCU grace-period cleanup to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period cleanup for a few jiffies + between cleaning up each pair of consecutive rcu_node + structures. This helps to expose races involving grace-period + cleanup, in other words, it makes your kernel less stable. + It can also greatly increase grace-period latency, especially + on systems with large numbers of CPUs. This is useful when + torture-testing RCU, but in almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. + +config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY + int "How much to slow down RCU grace-period cleanup" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_CLEANUP + help + This option specifies the number of jiffies to wait between + each rcu_node structure cleanup operation. + config RCU_CPU_STALL_TIMEOUT int "RCU CPU stall timeout in seconds" depends on RCU_STALL_COMMON diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon index 49701218dc62..f824b4c9d9d9 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon +++ b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon @@ -1,3 +1,5 @@ CONFIG_RCU_TORTURE_TEST=y CONFIG_PRINTK_TIME=y +CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y +CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y -- cgit v1.2.3 From 82d0f4c089991e6b302d961f0320282bf91652d7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 05:42:50 -0700 Subject: rcu: Directly drive TASKS_RCU from Kconfig Currently, Kconfig will ask the user whether TASKS_RCU should be set. This is silly because Kconfig already has all the information that it needs to set this parameter. This commit therefore directly drives the value of TASKS_RCU via "select" statements, which means that, as subsystems come to require TASKS_RCU, those subsystems will need to add "select" statements of their own. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Cc: Steven Rostedt Reviewed-by: Pranith Kumar --- init/Kconfig | 4 +--- lib/Kconfig.debug | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index dc24dec60232..73db30a76afa 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -509,7 +509,7 @@ config SRCU sections. config TASKS_RCU - bool "Task_based RCU implementation using voluntary context switch" + bool default n select SRCU help @@ -517,8 +517,6 @@ config TASKS_RCU only voluntary context switch (not preemption!), idle, and user-mode execution as quiescent states. - If unsure, say N. - config RCU_STALL_COMMON def_bool ( TREE_RCU || PREEMPT_RCU || RCU_TRACE ) help diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e1af93ae246b..c4e1cf04cf57 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1233,6 +1233,7 @@ config RCU_TORTURE_TEST depends on DEBUG_KERNEL select TORTURE_TEST select SRCU + select TASKS_RCU default n help This option provides a kernel module that runs torture tests -- cgit v1.2.3 From 7db21edfecb079b2257ced7a2cf2e32ae5e5d966 Mon Sep 17 00:00:00 2001 From: "Paul E.
McKenney" Date: Mon, 20 Apr 2015 06:17:15 -0700 Subject: rcu: Directly drive RCU_USER_QS from Kconfig Currently, Kconfig will ask the user whether RCU_USER_QS should be set. This is silly because Kconfig already has all the information that it needs to set this parameter. This commit therefore directly drives the value of RCU_USER_QS via NO_HZ_FULL's "select" statement. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar Acked-by: Frederic Weisbecker --- init/Kconfig | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 73db30a76afa..927210810189 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -529,9 +529,7 @@ config CONTEXT_TRACKING bool config RCU_USER_QS - bool "Consider userspace as in RCU extended quiescent state" - depends on HAVE_CONTEXT_TRACKING && SMP - select CONTEXT_TRACKING + bool help This option sets hooks on kernel / userspace boundaries and puts RCU in extended quiescent state when the CPU runs in @@ -539,12 +537,6 @@ config RCU_USER_QS excluded from the global RCU state machine and thus doesn't try to keep the timer tick on for RCU. - Unless you want to hack and help the development of the full - dynticks mode, you shouldn't enable this option. It also - adds unnecessary overhead. - - If unsure say N - config CONTEXT_TRACKING_FORCE bool "Force context tracking" depends on CONTEXT_TRACKING -- cgit v1.2.3 From 7fa270010e0ddd3693381431f373b3e3135b0695 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 10:27:15 -0700 Subject: rcu: Convert CONFIG_RCU_FANOUT_EXACT to boot parameter The CONFIG_RCU_FANOUT_EXACT Kconfig parameter is used primarily (and perhaps only) by rcutorture to verify that RCU works correctly in specific rcu_node combining-tree configurations. It therefore does not make much sense have this as a question to people attempting to configure their kernels. So this commit creates an rcutree.rcu_fanout_exact= boot parameter that rcutorture can use, and eliminates the original CONFIG_RCU_FANOUT_EXACT Kconfig parameter. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar --- Documentation/kernel-parameters.txt | 6 ++++++ init/Kconfig | 14 -------------- kernel/rcu/tree.c | 7 +++++-- kernel/rcu/tree_plugin.h | 2 +- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 10a4fb80c033..f5582dcdf80d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3010,6 +3010,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. the rcu_node combining tree. This only has effect when CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT is set. + rcutree.rcu_fanout_exact= [KNL] + Disable autobalancing of the rcu_node combining + tree. This is used by rcutorture, and might + possibly be useful for architectures having high + cache-to-cache transfer latencies. + rcutree.rcu_fanout_leaf= [KNL] Increase the number of CPUs assigned to each leaf rcu_node structure. Useful for very large diff --git a/init/Kconfig b/init/Kconfig index 927210810189..0ec82362cfc0 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -611,20 +611,6 @@ config RCU_FANOUT_LEAF Take the default if unsure. 
-config RCU_FANOUT_EXACT - bool "Disable tree-based hierarchical RCU auto-balancing" - depends on TREE_RCU || PREEMPT_RCU - default n - help - This option forces use of the exact RCU_FANOUT value specified, - regardless of imbalances in the hierarchy. This is useful for - testing RCU itself, and might one day be useful on systems with - strong NUMA behavior. - - Without RCU_FANOUT_EXACT, the code will balance the hierarchy. - - Say N if unsure. - config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" depends on NO_HZ_COMMON && SMP diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2f3cb5513ca3..b49c474e1fff 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -113,6 +113,9 @@ RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); static struct rcu_state *rcu_state_p; LIST_HEAD(rcu_struct_flavors); +/* Control rcu_node-tree auto-balancing at boot time. */ +static bool rcu_fanout_exact; +module_param(rcu_fanout_exact, bool, 0444); /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; module_param(rcu_fanout_leaf, int, 0444); @@ -3956,13 +3959,13 @@ void rcu_scheduler_starting(void) /* * Compute the per-level fanout, either using the exact fanout specified - * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. + * or balancing the tree, depending on the rcu_fanout_exact boot parameter. */ static void __init rcu_init_levelspread(struct rcu_state *rsp) { int i; - if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) { + if (rcu_fanout_exact) { rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; for (i = rcu_num_lvls - 2; i >= 0; i--) rsp->levelspread[i] = CONFIG_RCU_FANOUT; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 58b1ebdc4387..eb460ec747ef 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -64,7 +64,7 @@ static void __init rcu_bootup_announce_oddness(void) (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)) pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n", CONFIG_RCU_FANOUT); - if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) + if (rcu_fanout_exact) pr_info("\tHierarchical RCU autobalancing is disabled.\n"); if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n"); -- cgit v1.2.3 From a3dc2948cec80f20a89e9b646d0c01b121e48e02 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 11:40:50 -0700 Subject: rcu: Enable diagnostic dump of rcu_node combining tree The purpose of this commit is to make it easier to verify that RCU's combining tree is set up correctly, which is useful to have when making changes in how that tree is initialized. Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar [ paulmck: Fold fix found by Fengguang's 0-day test robot. ] --- Documentation/kernel-parameters.txt | 5 +++++ kernel/rcu/tree.c | 27 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f5582dcdf80d..a1cb88d9864e 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2992,6 +2992,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Set maximum number of finished RCU callbacks to process in one batch. + rcutree.dump_tree= [KNL] + Dump the structure of the rcu_node combining tree + out at early boot. This is used for diagnostic + purposes, to verify correct tree setup. 
+ rcutree.gp_cleanup_delay= [KNL] Set the number of jiffies to delay each step of RCU grace-period cleanup. This only has effect diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b49c474e1fff..1bc14c670641 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -113,6 +113,9 @@ RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh); static struct rcu_state *rcu_state_p; LIST_HEAD(rcu_struct_flavors); +/* Dump rcu_node combining tree at boot to verify correct setup. */ +static bool dump_tree; +module_param(dump_tree, bool, 0444); /* Control rcu_node-tree auto-balancing at boot time. */ static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); @@ -4144,6 +4147,28 @@ static void __init rcu_init_geometry(void) rcu_num_nodes -= n; } +/* + * Dump out the structure of the rcu_node combining tree associated + * with the rcu_state structure referenced by rsp. + */ +static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp) +{ + int level = 0; + struct rcu_node *rnp; + + pr_info("rcu_node tree layout dump\n"); + pr_info(" "); + rcu_for_each_node_breadth_first(rsp, rnp) { + if (rnp->level != level) { + pr_cont("\n"); + pr_info(" "); + level = rnp->level; + } + pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum); + } + pr_cont("\n"); +} + void __init rcu_init(void) { int cpu; @@ -4154,6 +4179,8 @@ void __init rcu_init(void) rcu_init_geometry(); rcu_init_one(&rcu_bh_state, &rcu_bh_data); rcu_init_one(&rcu_sched_state, &rcu_sched_data); + if (dump_tree) + rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); -- cgit v1.2.3 From 78cae10b3a90438689f8cedb66241ce9cff9c214 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 12:19:45 -0700 Subject: rcu: Create RCU_EXPERT Kconfig and hide booleans behind it This commit creates an RCU_EXPERT Kconfig and hides the independent boolean RCU-related user-visible Kconfig parameters behind it, namely RCU_FAST_NO_HZ and RCU_BOOST. This prevents Kconfig from asking about these parameters unless the user really wants to be asked. Reported-by: Linus Torvalds Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar --- init/Kconfig | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 0ec82362cfc0..7eb4c7b3543c 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -501,6 +501,21 @@ config TINY_RCU endchoice +config RCU_EXPERT + bool "Make expert-level adjustments to RCU configuration" + default n + help + This option needs to be enabled if you wish to make + expert-level adjustments to RCU configuration. By default, + no such adjustments can be made, which has the often-beneficial + side-effect of preventing "make oldconfig" from asking you all + sorts of detailed questions about how you would like numerous + obscure RCU options to be set up. + + Say Y if you need to make expert-level adjustments to RCU. + + Say N if you are unsure. 
+ config SRCU bool help @@ -613,7 +628,7 @@ config RCU_FANOUT_LEAF config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ_COMMON && SMP + depends on NO_HZ_COMMON && SMP && RCU_EXPERT default n help This option permits CPUs to enter dynticks-idle state even if @@ -639,7 +654,7 @@ config TREE_RCU_TRACE config RCU_BOOST bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU + depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT default n help This option boosts the priority of preempted RCU readers that -- cgit v1.2.3 From 8739c5cb0fb145aeed8c56ddb5ba79381c74cb97 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 18:27:54 -0700 Subject: rcu: Break dependency of RCU_FANOUT_LEAF on RCU_FANOUT RCU_FANOUT_LEAF's range and default values depend on the value of RCU_FANOUT, which at the time seemed like a cute way to save two lines of Kconfig code. However, adding a dependency from both of these Kconfig parameters on RCU_EXPERT requires that RCU_FANOUT_LEAF operate correctly even if RCU_FANOUT is undefined. This commit therefore allows RCU_FANOUT_LEAF to take on the full range of permitted values, even in cases where RCU_FANOUT is undefined. Signed-off-by: Paul E. McKenney [ paulmck: Eliminate redundant "default" as suggested by Pranith Kumar. ] Reviewed-by: Pranith Kumar --- init/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 7eb4c7b3543c..ac5386937d37 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -601,8 +601,8 @@ config RCU_FANOUT config RCU_FANOUT_LEAF int "Tree-based hierarchical RCU leaf-level fanout value" - range 2 RCU_FANOUT if 64BIT - range 2 RCU_FANOUT if !64BIT + range 2 64 if 64BIT + range 2 32 if !64BIT depends on TREE_RCU || PREEMPT_RCU default 16 help -- cgit v1.2.3 From 05c5df31afd1092ca6322094d22aff6351fa67fe Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 14:27:43 -0700 Subject: rcu: Make RCU able to tolerate undefined CONFIG_RCU_FANOUT This commit introduces an RCU_FANOUT C-preprocessor macro so that RCU will build even when CONFIG_RCU_FANOUT is undefined. The RCU_FANOUT macro is set to the value of CONFIG_RCU_FANOUT when defined, otherwise it is set to 32 for 32-bit systems and 64 for 64-bit systems. This commit then makes CONFIG_RCU_FANOUT depend on CONFIG_RCU_EXPERT, so that Kconfig users won't be asked about CONFIG_RCU_FANOUT unless they want to be. Reported-by: Ingo Molnar Signed-off-by: Paul E. 
McKenney Reviewed-by: Pranith Kumar --- init/Kconfig | 2 +- kernel/rcu/tree.c | 4 ++-- kernel/rcu/tree.h | 18 +++++++++++++++--- kernel/rcu/tree_plugin.h | 6 +++--- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index ac5386937d37..fd2d4fb517ca 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -583,7 +583,7 @@ config RCU_FANOUT int "Tree-based hierarchical RCU fanout value" range 2 64 if 64BIT range 2 32 if !64BIT - depends on TREE_RCU || PREEMPT_RCU + depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT default 64 if 64BIT default 32 if !64BIT help diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1bc14c670641..ba3f8d59d948 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3971,7 +3971,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) if (rcu_fanout_exact) { rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf; for (i = rcu_num_lvls - 2; i >= 0; i--) - rsp->levelspread[i] = CONFIG_RCU_FANOUT; + rsp->levelspread[i] = RCU_FANOUT; } else { int ccur; int cprv; @@ -4111,7 +4111,7 @@ static void __init rcu_init_geometry(void) rcu_capacity[0] = 1; rcu_capacity[1] = rcu_fanout_leaf; for (i = 2; i <= MAX_RCU_LVLS; i++) - rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT; + rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT; /* * The boot-time rcu_fanout_leaf parameter is only permitted diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index a69d3dab2ec4..ac3020fff028 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -35,11 +35,23 @@ * In practice, this did work well going from three levels to four. * Of course, your mileage may vary. */ + #define MAX_RCU_LVLS 4 + +#ifdef CONFIG_RCU_FANOUT +#define RCU_FANOUT CONFIG_RCU_FANOUT +#else /* #ifdef CONFIG_RCU_FANOUT */ +# ifdef CONFIG_64BIT +# define RCU_FANOUT 64 +# else +# define RCU_FANOUT 32 +# endif +#endif /* #else #ifdef CONFIG_RCU_FANOUT */ + #define RCU_FANOUT_1 (CONFIG_RCU_FANOUT_LEAF) -#define RCU_FANOUT_2 (RCU_FANOUT_1 * CONFIG_RCU_FANOUT) -#define RCU_FANOUT_3 (RCU_FANOUT_2 * CONFIG_RCU_FANOUT) -#define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT) +#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT) +#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT) +#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT) #if NR_CPUS <= RCU_FANOUT_1 # define RCU_NUM_LVLS 1 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index eb460ec747ef..d7e505970f24 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -60,10 +60,10 @@ static void __init rcu_bootup_announce_oddness(void) { if (IS_ENABLED(CONFIG_RCU_TRACE)) pr_info("\tRCU debugfs-based tracing is enabled.\n"); - if ((IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || - (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)) + if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) || + (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32)) pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n", - CONFIG_RCU_FANOUT); + RCU_FANOUT); if (rcu_fanout_exact) pr_info("\tHierarchical RCU autobalancing is disabled.\n"); if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) -- cgit v1.2.3 From 47d631af58bb9b2f2dd3d0da8c98a79a5e75c738 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Apr 2015 09:12:13 -0700 Subject: rcu: Make RCU able to tolerate undefined CONFIG_RCU_FANOUT_LEAF This commit introduces an RCU_FANOUT_LEAF C-preprocessor macro so that RCU will build even when CONFIG_RCU_FANOUT_LEAF is undefined. 
The RCU_FANOUT_LEAF macro is set to the value of CONFIG_RCU_FANOUT_LEAF when defined, otherwise it is set to 32 for 32-bit systems and 64 for 64-bit systems. This commit then makes CONFIG_RCU_FANOUT_LEAF depend on CONFIG_RCU_EXPERT, so that Kconfig users won't be asked about CONFIG_RCU_FANOUT_LEAF unless they want to be. Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar --- init/Kconfig | 2 +- kernel/rcu/tree.c | 8 ++++---- kernel/rcu/tree.h | 12 +++++++++++- kernel/rcu/tree_plugin.h | 6 +++--- 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index fd2d4fb517ca..78176001f73b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -603,7 +603,7 @@ config RCU_FANOUT_LEAF int "Tree-based hierarchical RCU leaf-level fanout value" range 2 64 if 64BIT range 2 32 if !64BIT - depends on TREE_RCU || PREEMPT_RCU + depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT default 16 help This option controls the leaf-level fanout of hierarchical diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ba3f8d59d948..1edd11298224 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -119,8 +119,8 @@ module_param(dump_tree, bool, 0444); /* Control rcu_node-tree auto-balancing at boot time. */ static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); -/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ -static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; +/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */ +static int rcu_fanout_leaf = RCU_FANOUT_LEAF; module_param(rcu_fanout_leaf, int, 0444); int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */ @@ -4097,7 +4097,7 @@ static void __init rcu_init_geometry(void) jiffies_till_next_fqs = d; /* If the compile-time values are accurate, just leave. */ - if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF && + if (rcu_fanout_leaf == RCU_FANOUT_LEAF && nr_cpu_ids == NR_CPUS) return; pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n", @@ -4121,7 +4121,7 @@ static void __init rcu_init_geometry(void) * the configured number of CPUs. Complain and fall back to the * compile-time values if these limits are exceeded. 
*/ - if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF || + if (rcu_fanout_leaf < RCU_FANOUT_LEAF || rcu_fanout_leaf > sizeof(unsigned long) * 8 || n > rcu_capacity[MAX_RCU_LVLS]) { WARN_ON(1); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index ac3020fff028..7d949c186302 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -48,7 +48,17 @@ # endif #endif /* #else #ifdef CONFIG_RCU_FANOUT */ -#define RCU_FANOUT_1 (CONFIG_RCU_FANOUT_LEAF) +#ifdef CONFIG_RCU_FANOUT_LEAF +#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF +#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */ +# ifdef CONFIG_64BIT +# define RCU_FANOUT_LEAF 64 +# else +# define RCU_FANOUT_LEAF 32 +# endif +#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */ + +#define RCU_FANOUT_1 (RCU_FANOUT_LEAF) #define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT) #define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT) #define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index d7e505970f24..713503853841 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -76,10 +76,10 @@ static void __init rcu_bootup_announce_oddness(void) pr_info("\tAdditional per-CPU info printed with stalls.\n"); if (NUM_RCU_LVL_4 != 0) pr_info("\tFour-level hierarchy is enabled.\n"); - if (CONFIG_RCU_FANOUT_LEAF != 16) + if (RCU_FANOUT_LEAF != 16) pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", - CONFIG_RCU_FANOUT_LEAF); - if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF) + RCU_FANOUT_LEAF); + if (rcu_fanout_leaf != RCU_FANOUT_LEAF) pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf); if (nr_cpu_ids != NR_CPUS) pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids); -- cgit v1.2.3 From 26730f55c2842b4ee06a5307d58265db7dd26065 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Apr 2015 09:22:14 -0700 Subject: rcu: Make RCU able to tolerate undefined CONFIG_RCU_KTHREAD_PRIO This commit updates the initialization of the kthread_prio boot parameter so that RCU will build even when CONFIG_RCU_KTHREAD_PRIO is undefined. The kthread_prio boot parameter is set to CONFIG_RCU_KTHREAD_PRIO if that is defined, otherwise to 1 if CONFIG_RCU_BOOST is defined and to zero otherwise. This commit then makes CONFIG_RCU_KTHREAD_PRIO depend on CONFIG_RCU_EXPERT, so that Kconfig users won't be asked about CONFIG_RCU_KTHREAD_PRIO unless they want to be. Reported-by: Linus Torvalds Reported-by: Ingo Molnar Signed-off-by: Paul E. McKenney Reviewed-by: Pranith Kumar --- init/Kconfig | 1 + kernel/rcu/tree.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/init/Kconfig b/init/Kconfig index 78176001f73b..af2c93c4a105 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -671,6 +671,7 @@ config RCU_KTHREAD_PRIO range 0 99 if !RCU_BOOST default 1 if RCU_BOOST default 0 if !RCU_BOOST + depends on RCU_EXPERT help This option specifies the SCHED_FIFO priority value that will be assigned to the rcuc/n and rcub/n threads and is also the value diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 1edd11298224..0e9ce1272971 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -165,7 +165,11 @@ static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); /* rcuc/rcub kthread realtime priority */ +#ifdef CONFIG_RCU_KTHREAD_PRIO static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; +#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */ +static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 
1 : 0; +#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */ module_param(kthread_prio, int, 0644); /* Delay in jiffies for grace-period initialization delays, debug only. */ -- cgit v1.2.3 From e72aeafc66060d3fc6b4d2120db00058572b3186 Mon Sep 17 00:00:00 2001 From: Pranith Kumar Date: Tue, 21 Apr 2015 17:29:42 -0400 Subject: rcu: Remove prompt for RCU implementation The RCU implementation is chosen based on PREEMPT and SMP config options and is not really a user-selectable choice. This commit removes the menu entry, given that there is not much point in calling something a choice when there is in fact no choice. The TINY_RCU, TREE_RCU, and PREEMPT_RCU Kconfig options continue to be selected based solely on the values of the PREEMPT and SMP options. Signed-off-by: Pranith Kumar Signed-off-by: Paul E. McKenney --- init/Kconfig | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index af2c93c4a105..4c08197044f1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -465,13 +465,9 @@ endmenu # "CPU/Task time and stats accounting" menu "RCU Subsystem" -choice - prompt "RCU Implementation" - default TREE_RCU - config TREE_RCU - bool "Tree-based hierarchical RCU" - depends on !PREEMPT && SMP + bool + default y if !PREEMPT && SMP help This option selects the RCU implementation that is designed for very large SMP system with hundreds or @@ -479,8 +475,8 @@ config TREE_RCU smaller systems. config PREEMPT_RCU - bool "Preemptible tree-based hierarchical RCU" - depends on PREEMPT + bool + default y if PREEMPT help This option selects the RCU implementation that is designed for very large SMP systems with hundreds or @@ -491,16 +487,14 @@ config PREEMPT_RCU Select this option if you are unsure. config TINY_RCU - bool "UP-only small-memory-footprint RCU" - depends on !PREEMPT && !SMP + bool + default y if !PREEMPT && !SMP help This option selects the RCU implementation that is designed for UP systems from which real-time response is not required. This option greatly reduces the memory footprint of RCU. -endchoice - config RCU_EXPERT bool "Make expert-level adjustments to RCU configuration" default n -- cgit v1.2.3 From 1ce46ee597bc36fde6984e91aecc2d662a754199 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 5 May 2015 23:04:22 -0700 Subject: rcu: Conditionally compile RCU's eqs warnings This commit applies some warning-omission micro-optimizations to RCU's various extended-quiescent-state functions, which are on the kernel/user hotpath for CONFIG_NO_HZ_FULL=y. Reported-by: Rik van Riel Reported-by: Mike Galbraith Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 23 +++++++++++++++-------- lib/Kconfig.debug | 11 +++++++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0e9ce1272971..991fa5c5dc5e 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -620,7 +620,8 @@ static void rcu_eqs_enter_common(long long oldval, bool user) struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting); - if (!user && !is_idle_task(current)) { + if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + !user && !is_idle_task(current)) { struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); @@ -639,7 +640,8 @@ static void rcu_eqs_enter_common(long long oldval, bool user) smp_mb__before_atomic(); /* See above. */ atomic_inc(&rdtp->dynticks); smp_mb__after_atomic(); /* Force ordering with next sojourn.
*/ - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + atomic_read(&rdtp->dynticks) & 0x1); rcu_dynticks_task_enter(); /* @@ -665,7 +667,8 @@ static void rcu_eqs_enter(bool user) rdtp = this_cpu_ptr(&rcu_dynticks); oldval = rdtp->dynticks_nesting; - WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + (oldval & DYNTICK_TASK_NEST_MASK) == 0); if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) { rdtp->dynticks_nesting = 0; rcu_eqs_enter_common(oldval, user); @@ -738,7 +741,8 @@ void rcu_irq_exit(void) rdtp = this_cpu_ptr(&rcu_dynticks); oldval = rdtp->dynticks_nesting; rdtp->dynticks_nesting--; - WARN_ON_ONCE(rdtp->dynticks_nesting < 0); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + rdtp->dynticks_nesting < 0); if (rdtp->dynticks_nesting) trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting); else @@ -763,10 +767,12 @@ static void rcu_eqs_exit_common(long long oldval, int user) atomic_inc(&rdtp->dynticks); /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ smp_mb__after_atomic(); /* See above. */ - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + !(atomic_read(&rdtp->dynticks) & 0x1)); rcu_cleanup_after_idle(); trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); - if (!user && !is_idle_task(current)) { + if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + !user && !is_idle_task(current)) { struct task_struct *idle __maybe_unused = idle_task(smp_processor_id()); @@ -790,7 +796,7 @@ static void rcu_eqs_exit(bool user) rdtp = this_cpu_ptr(&rcu_dynticks); oldval = rdtp->dynticks_nesting; - WARN_ON_ONCE(oldval < 0); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0); if (oldval & DYNTICK_TASK_NEST_MASK) { rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; } else { @@ -863,7 +869,8 @@ void rcu_irq_enter(void) rdtp = this_cpu_ptr(&rcu_dynticks); oldval = rdtp->dynticks_nesting; rdtp->dynticks_nesting++; - WARN_ON_ONCE(rdtp->dynticks_nesting == 0); + WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && + rdtp->dynticks_nesting == 0); if (oldval) trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting); else diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c4e1cf04cf57..b908048f8d6a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1373,6 +1373,17 @@ config RCU_TRACE Say Y here if you want to enable RCU tracing Say N if you are unsure. +config RCU_EQS_DEBUG + bool "Use this when adding any sort of NO_HZ support to your arch" + depends on DEBUG_KERNEL + help + This option provides consistency checks in RCU's handling of + NO_HZ. These checks have proven quite helpful in detecting + bugs in arch-specific NO_HZ code. + + Say N here if you need ultimate kernel/user switch latencies. Say Y if you are unsure. + endmenu # "RCU Debugging" config DEBUG_BLOCK_EXT_DEVT -- cgit v1.2.3 From 51952bc633064311410b041fad38da1614f4539e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Apr 2015 11:15:30 -0700 Subject: rcu: Further shrink Tiny RCU by making empty functions static inlines The Tiny RCU counterparts to rcu_idle_enter(), rcu_idle_exit(), rcu_irq_enter(), and rcu_irq_exit() are empty functions, but each has EXPORT_SYMBOL_GPL(), which needlessly consumes extra memory, especially in kernels built with module support. This commit therefore moves these functions to static inlines in rcutiny.h, removing the need for exports.
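To see where that memory goes, recall roughly what each EXPORT_SYMBOL_GPL() emits in kernels of this era; the structure below is a simplified sketch of <linux/export.h>, not a verbatim copy (the real macro also handles symbol CRCs when MODVERSIONS is enabled):

	/* Simplified: one of these lands in the __ksymtab_gpl section per export. */
	struct kernel_symbol {
		unsigned long value;	/* address of the exported function */
		const char *name;	/* points into __ksymtab_strings */
	};

Each export therefore costs a table entry plus the name string -- pure dead weight for an empty function, and entirely avoided by a static inline, whose call sites compile away to nothing.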
This won't affect the size of the tiniest kernels, which are likely built without module support, but might help semi-tiny kernels that might include module support. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- include/linux/rcupdate.h | 4 ---- include/linux/rcutiny.h | 16 ++++++++++++++++ include/linux/rcutree.h | 5 +++++ kernel/rcu/tiny.c | 33 --------------------------------- 4 files changed, 21 insertions(+), 37 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 87bb0eee665b..1b3d7bcb3a6c 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -292,10 +292,6 @@ void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); struct notifier_block; -void rcu_idle_enter(void); -void rcu_idle_exit(void); -void rcu_irq_enter(void); -void rcu_irq_exit(void); int rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu); diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 937edaeb150d..3df6c1ec4e25 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void) { } +static inline void rcu_idle_enter(void) +{ +} + +static inline void rcu_idle_exit(void) +{ +} + +static inline void rcu_irq_enter(void) +{ +} + +static inline void rcu_irq_exit(void) +{ +} + static inline void exit_rcu(void) { } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d2e583a6aaca..f22d83f49e56 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -93,6 +93,11 @@ void rcu_force_quiescent_state(void); void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); +void rcu_idle_enter(void); +void rcu_idle_exit(void); +void rcu_irq_enter(void); +void rcu_irq_exit(void); + void exit_rcu(void); void rcu_scheduler_starting(void); diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 069742d61c68..a501b4ab9b1c 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -49,39 +49,6 @@ static void __call_rcu(struct rcu_head *head, #include "tiny_plugin.h" -/* - * Enter idle, which is an extended quiescent state if we have fully - * entered that mode. - */ -void rcu_idle_enter(void) -{ -} -EXPORT_SYMBOL_GPL(rcu_idle_enter); - -/* - * Exit an interrupt handler towards idle. - */ -void rcu_irq_exit(void) -{ -} -EXPORT_SYMBOL_GPL(rcu_irq_exit); - -/* - * Exit idle, so that we are no longer in an extended quiescent state. - */ -void rcu_idle_exit(void) -{ -} -EXPORT_SYMBOL_GPL(rcu_idle_exit); - -/* - * Enter an interrupt handler, moving away from idle. - */ -void rcu_irq_enter(void) -{ -} -EXPORT_SYMBOL_GPL(rcu_irq_enter); - #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) /* -- cgit v1.2.3 From f13bad9042dcf9b60b48a0137951b614a2ee24b5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 21 Apr 2015 12:11:23 -0700 Subject: rcutorture: Test both RCU-sched and RCU-bh for Tiny RCU Tiny RCU supports both RCU-sched and RCU-bh, but only RCU-sched is currently tested by the rcutorture scripts. This commit therefore changes the TINY02 configuration to test RCU-bh, with TINY01 continuing to test RCU-sched. This shortcoming of the current rcutorture tests was located by mutation testing by Iftekhar. The idea behind mutation testing is to automatically mutate the code under test. If a given mutant is not caught by testing, this is a hint that the testing might need to be improved, as was the case here. 
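For instance, a mutation-testing tool might flip a single comparison; the mutant below is hypothetical, not one actually generated during this work:

	-	if (list_empty(&head))
	+	if (!list_empty(&head))	/* mutant: inverted test */

A test suite that never exercises the affected path cannot tell this mutant from the original code, which is exactly the kind of gap that the added RCU-bh coverage closes.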
Note that this is only a hint because it is possible to mutate the code into something else that still works. For example, a mutation that removes (say) a WARN_ON() will not normally result in a test failure. This change resulted in the test failure caused by list mishandling, which is fixed by the next commit. Reported-by: "Ahmed, Iftekhar" Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot index 0f0802730014..6c1a292a65fb 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot @@ -1,2 +1,3 @@ rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1 +rcutorture.torture_type=rcu_bh -- cgit v1.2.3 From 6e91f8cb138625be96070b778d9ba71ce520ea7e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 11 May 2015 11:13:05 -0700 Subject: rcu: Correctly handle non-empty Tiny RCU callback list with none ready If, at the time __rcu_process_callbacks() is invoked, there are callbacks in Tiny RCU's callback list, but none of them are ready to be invoked, the current list-management code will knit the non-ready callbacks out of the list. This can result in hangs and possibly worse. This commit therefore inserts a check for there being no callbacks that can be invoked immediately. This bug is unlikely to occur -- you have to get a new callback between the time rcu_sched_qs() or rcu_bh_qs() was called, but before we get to __rcu_process_callbacks(). It was detected by the addition of RCU-bh testing to rcutorture, which in turn was instigated by Iftekhar Ahmed's mutation testing. Although this bug was made much more likely by 915e8a4fe45e (rcu: Remove fastpath from __rcu_process_callbacks()), this did not cause the bug, but rather made it much more probable. That said, it takes more than 40 hours of rcutorture testing, on average, for this bug to appear, so this fix cannot be considered an emergency. Signed-off-by: Paul E. McKenney Cc: Reviewed-by: Josh Triplett --- kernel/rcu/tiny.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index a501b4ab9b1c..591af0cb7b9f 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -137,6 +137,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp) /* Move the ready-to-invoke callbacks to a local list. */ local_irq_save(flags); + if (rcp->donetail == &rcp->rcucblist) { + /* No callbacks ready, so just leave. */ + local_irq_restore(flags); + return; + } RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1)); list = rcp->rcucblist; rcp->rcucblist = *rcp->donetail; -- cgit v1.2.3 From f548d99ef4f5ec8f7080e88ad07c44d16d058ddc Mon Sep 17 00:00:00 2001 From: Alexey Kodanev Date: Sat, 7 Mar 2015 03:06:53 +0300 Subject: locktorture: fix deadlock in 'rw_lock_irq' type torture_rwlock_read_unlock_irq() must use read_unlock_irqrestore() instead of write_unlock_irqrestore(). Signed-off-by: Alexey Kodanev Signed-off-by: Paul E.
McKenney Reviewed-by: Josh Triplett --- kernel/locking/locktorture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index ec8cce259779..6a2723c88c33 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -309,7 +309,7 @@ static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) static void torture_rwlock_read_unlock_irq(void) __releases(torture_rwlock) { - write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); + read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); } static struct lock_torture_ops rw_lock_irq_ops = { -- cgit v1.2.3 From f76b244e478f570eaa34daa4034c4380fdda82f6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 11 Mar 2015 15:25:53 -0700 Subject: rcutorture: Exchange TREE03 and TREE04 geometries Given that the combination of PREEMPT_RCU and HOTPLUG_CPU is producing the most bugs lately, this commit swaps the TREE03 and TREE04 rcu_node-tree geometries so that the test exercising PREEMPT_RCU and HOTPLUG_CPU has three-level rather than two-level rcu_node trees. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/TREE03 | 4 ++-- tools/testing/selftests/rcutorture/configs/rcu/TREE04 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 index 53f24e0a0ab6..4dc8339bab5e 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 @@ -9,8 +9,8 @@ CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=n CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y -CONFIG_RCU_FANOUT=4 -CONFIG_RCU_FANOUT_LEAF=4 +CONFIG_RCU_FANOUT=2 +CONFIG_RCU_FANOUT_LEAF=2 CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index 0f84db35b36d..4eb6d374a0ca 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -13,8 +13,8 @@ CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=n CONFIG_SUSPEND=n CONFIG_HIBERNATION=n -CONFIG_RCU_FANOUT=2 -CONFIG_RCU_FANOUT_LEAF=2 +CONFIG_RCU_FANOUT=4 +CONFIG_RCU_FANOUT_LEAF=4 CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n -- cgit v1.2.3 From 6530b3f4c5043aa8ef66faa8296291b1ea6ba2eb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 12 Mar 2015 11:42:48 -0700 Subject: rcutorture: Exchange TREE03 and TREE08 NR_CPUS, speed up CPU hotplug TREE03 has been especially effective at finding bugs lately. This commit makes it even more effective by speeding up its CPU hotplug testing and increasing its NR_CPUS from 8 to 16. TREE08's NR_CPUS is decreased from 16 to 8 in order to maintain the same test duration. Signed-off-by: Paul E.
McKenney Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/TREE03 | 2 +- tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE08 | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 index 4dc8339bab5e..05a81380a5c2 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 @@ -1,5 +1,5 @@ CONFIG_SMP=y -CONFIG_NR_CPUS=8 +CONFIG_NR_CPUS=16 CONFIG_PREEMPT_NONE=n CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot new file mode 100644 index 000000000000..120c0c88d100 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot @@ -0,0 +1 @@ +rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 index 5812027d6f9f..acce6552f7f0 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -1,5 +1,5 @@ CONFIG_SMP=y -CONFIG_NR_CPUS=16 +CONFIG_NR_CPUS=8 CONFIG_PREEMPT_NONE=n CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y -- cgit v1.2.3 From 3838cc1850ccd09f93e729e9047ec1995026f83e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 12 Mar 2015 13:55:48 -0700 Subject: rcutorture: Allow negative values of nreaders to oversubscribe By default, with rcutorture.nreaders equal to -1, rcutorture provisions N-1 reader kthreads, where N is the number of CPUs. This avoids rcutorture-induced stalls, but also avoids heavier levels of torture. This commit therefore allows negative values of rcutorture.nreaders to specify larger numbers of reader kthreads, so that for example rcutorture.nreaders=-2 provisions N kthreads and rcutorture.nreaders=-5 provisions N+3 kthreads. Signed-off-by: Paul E. McKenney [ paulmck: Update documentation, as suggested by Josh Triplett. ] --- Documentation/kernel-parameters.txt | 6 +++++- kernel/rcu/rcutorture.c | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 61ab1628a057..04b811086dca 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3101,7 +3101,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. test, hence the "fake". rcutorture.nreaders= [KNL] - Set number of RCU readers. + Set number of RCU readers. The value -1 selects + N-1, where N is the number of CPUs. A value + "n" less than -1 selects N-n-2, where N is again + the number of CPUs. For example, -2 selects N + (the number of CPUs), -3 selects N+1, and so on. rcutorture.object_debug= [KNL] Enable debug-object double-call_rcu() testing. 
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index a67ef6ff86b0..7294d605c481 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1701,7 +1701,7 @@ rcu_torture_init(void) if (nreaders >= 0) { nrealreaders = nreaders; } else { - nrealreaders = num_online_cpus() - 1; + nrealreaders = num_online_cpus() - 2 - nreaders; if (nrealreaders <= 0) nrealreaders = 1; } -- cgit v1.2.3 From 61d49d2f9888ace06ab127cf6f063bed7c3b2e53 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 1 Apr 2015 08:42:27 -0700 Subject: locktorture: Change longdelay_us to longdelay_ms The locktorture long delays are in milliseconds rather than microseconds, so this commit changes the name of the corresponding variable from longdelay_us to longdelay_ms. Reported-by: Ben Goodwyn Signed-off-by: Paul E. McKenney Cc: Davidlohr Bueso Reviewed-by: Josh Triplett --- kernel/locking/locktorture.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 6a2723c88c33..32244186f1f2 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -122,12 +122,12 @@ static int torture_lock_busted_write_lock(void) static void torture_lock_busted_write_delay(struct torture_random_state *trsp) { - const unsigned long longdelay_us = 100; + const unsigned long longdelay_ms = 100; /* We want a long delay occasionally to force massive contention. */ if (!(torture_random(trsp) % - (cxt.nrealwriters_stress * 2000 * longdelay_us))) - mdelay(longdelay_us); + (cxt.nrealwriters_stress * 2000 * longdelay_ms))) + mdelay(longdelay_ms); #ifdef CONFIG_PREEMPT if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) preempt_schedule(); /* Allow test to be preempted. */ @@ -160,14 +160,14 @@ static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) static void torture_spin_lock_write_delay(struct torture_random_state *trsp) { const unsigned long shortdelay_us = 2; - const unsigned long longdelay_us = 100; + const unsigned long longdelay_ms = 100; /* We want a short delay mostly to emulate likely code, and * we want a long delay occasionally to force massive contention. */ if (!(torture_random(trsp) % - (cxt.nrealwriters_stress * 2000 * longdelay_us))) - mdelay(longdelay_us); + (cxt.nrealwriters_stress * 2000 * longdelay_ms))) + mdelay(longdelay_ms); if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2 * shortdelay_us))) udelay(shortdelay_us); -- cgit v1.2.3 From 6c7ed42c81a2d9a7e0646240599552040375fa02 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 13 Apr 2015 11:58:08 -0700 Subject: rcutorture: Replace barriers with smp_store_release() and smp_load_acquire() The rcutorture.c file uses several explicit memory barriers that can easily be converted to smp_store_release() and smp_load_acquire(), which improves maintainability and also improves performance a bit. Signed-off-by: Paul E. 
McKenney Reviewed-by: Josh Triplett --- kernel/rcu/rcutorture.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 7294d605c481..90ff8dfc51e5 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -672,8 +672,8 @@ static void rcu_torture_boost_cb(struct rcu_head *head) struct rcu_boost_inflight *rbip = container_of(head, struct rcu_boost_inflight, rcu); - smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */ - rbip->inflight = 0; + /* Ensure RCU-core accesses precede clearing ->inflight */ + smp_store_release(&rbip->inflight, 0); } static int rcu_torture_boost(void *arg) @@ -710,9 +710,9 @@ static int rcu_torture_boost(void *arg) call_rcu_time = jiffies; while (ULONG_CMP_LT(jiffies, endtime)) { /* If we don't have a callback in flight, post one. */ - if (!rbi.inflight) { - smp_mb(); /* RCU core before ->inflight = 1. */ - rbi.inflight = 1; + if (!smp_load_acquire(&rbi.inflight)) { + /* RCU core before ->inflight = 1. */ + smp_store_release(&rbi.inflight, 1); call_rcu(&rbi.rcu, rcu_torture_boost_cb); if (jiffies - call_rcu_time > test_boost_duration * HZ - HZ / 2) { @@ -751,11 +751,10 @@ checkwait: stutter_wait("rcu_torture_boost"); } while (!torture_must_stop()); /* Clean up and exit. */ - while (!kthread_should_stop() || rbi.inflight) { + while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { torture_shutdown_absorb("rcu_torture_boost"); schedule_timeout_uninterruptible(1); } - smp_mb(); /* order accesses to ->inflight before stack-frame death. */ destroy_rcu_head_on_stack(&rbi.rcu); torture_kthread_stopping("rcu_torture_boost"); return 0; @@ -1413,12 +1412,15 @@ static int rcu_torture_barrier_cbs(void *arg) do { wait_event(barrier_cbs_wq[myid], (newphase = - READ_ONCE(barrier_phase)) != lastphase || + smp_load_acquire(&barrier_phase)) != lastphase || torture_must_stop()); lastphase = newphase; - smp_mb(); /* ensure barrier_phase load before ->call(). */ if (torture_must_stop()) break; + /* + * The above smp_load_acquire() ensures barrier_phase load + * is ordered before the following ->call(). + */ cur_ops->call(&rcu, rcu_torture_barrier_cbf); if (atomic_dec_and_test(&barrier_cbs_count)) wake_up(&barrier_wq); @@ -1439,8 +1441,8 @@ static int rcu_torture_barrier(void *arg) do { atomic_set(&barrier_cbs_invoked, 0); atomic_set(&barrier_cbs_count, n_barrier_cbs); - smp_mb(); /* Ensure barrier_phase after prior assignments. */ - barrier_phase = !barrier_phase; + /* Ensure barrier_phase ordered after prior assignments. */ + smp_store_release(&barrier_phase, !barrier_phase); for (i = 0; i < n_barrier_cbs; i++) wake_up(&barrier_cbs_wq[i]); wait_event(barrier_wq, -- cgit v1.2.3 From ca1d51ed9809a99d71c23a343b3acd3fd4ad8cbe Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 14 Apr 2015 12:28:22 -0700 Subject: rcutorture: Test SRCU cleanup code path The current rcutorture testing does not do any cleanup operations. This works because the srcu_struct is statically allocated, but it does represent a memory leak of the associated dynamically allocated ->per_cpu_ref per-CPU variables. However, rcutorture currently uses a statically allocated srcu_struct, which cannot legally be passed to cleanup_srcu_struct(). Therefore, this commit adds a second form of srcu (called srcud) that dynamically allocates and frees the associated per-CPU variables.
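The dynamic variant follows the kernel's usual init/cleanup pairing; here is a minimal sketch of the lifecycle, matching the srcu_torture_init() and srcu_torture_cleanup() hooks in the diff below:

	static struct srcu_struct srcu_ctld;		/* storage is static... */

	WARN_ON(init_srcu_struct(&srcu_ctld));		/* ...but ->per_cpu_ref is allocated here */
	/* ... srcu_read_lock()/srcu_read_unlock(), call_srcu(), and friends ... */
	cleanup_srcu_struct(&srcu_ctld);		/* frees the per-CPU state again */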
This commit also adds a ->cleanup() member to rcu_torture_ops that is invoked at the end of the test, after ->cb_barrier(). This ->cleanup() pointer is NULL for all existing tests, and is thus used only for srcud. Finally, the SRCU-P torture-test configuration selects srcud instead of srcu, with SRCU-N continuing to use srcu, thereby testing both static and dynamic srcu_struct structures. Reported-by: "Ahmed, Iftekhar" Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- kernel/rcu/rcutorture.c | 77 ++++++++++++++++------ .../selftests/rcutorture/configs/rcu/SRCU-P.boot | 2 +- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 90ff8dfc51e5..59e32684c23b 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -241,6 +241,7 @@ rcu_torture_free(struct rcu_torture *p) struct rcu_torture_ops { int ttype; void (*init)(void); + void (*cleanup)(void); int (*readlock)(void); void (*read_delay)(struct torture_random_state *rrsp); void (*readunlock)(int idx); @@ -477,10 +478,12 @@ static struct rcu_torture_ops rcu_busted_ops = { */ DEFINE_STATIC_SRCU(srcu_ctl); +static struct srcu_struct srcu_ctld; +static struct srcu_struct *srcu_ctlp = &srcu_ctl; -static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) +static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) { - return srcu_read_lock(&srcu_ctl); + return srcu_read_lock(srcu_ctlp); } static void srcu_read_delay(struct torture_random_state *rrsp) @@ -499,49 +502,49 @@ static void srcu_read_delay(struct torture_random_state *rrsp) rcu_read_delay(rrsp); } -static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl) +static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) { - srcu_read_unlock(&srcu_ctl, idx); + srcu_read_unlock(srcu_ctlp, idx); } static unsigned long srcu_torture_completed(void) { - return srcu_batches_completed(&srcu_ctl); + return srcu_batches_completed(srcu_ctlp); } static void srcu_torture_deferred_free(struct rcu_torture *rp) { - call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb); + call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); } static void srcu_torture_synchronize(void) { - synchronize_srcu(&srcu_ctl); + synchronize_srcu(srcu_ctlp); } static void srcu_torture_call(struct rcu_head *head, void (*func)(struct rcu_head *head)) { - call_srcu(&srcu_ctl, head, func); + call_srcu(srcu_ctlp, head, func); } static void srcu_torture_barrier(void) { - srcu_barrier(&srcu_ctl); + srcu_barrier(srcu_ctlp); } static void srcu_torture_stats(void) { int cpu; - int idx = srcu_ctl.completed & 0x1; + int idx = srcu_ctlp->completed & 0x1; pr_alert("%s%s per-CPU(idx=%d):", torture_type, TORTURE_FLAG, idx); for_each_possible_cpu(cpu) { long c0, c1; - c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx]; - c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]; + c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx]; + c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx]; pr_cont(" %d(%ld,%ld)", cpu, c0, c1); } pr_cont("\n"); @@ -549,7 +552,7 @@ static void srcu_torture_stats(void) static void srcu_torture_synchronize_expedited(void) { - synchronize_srcu_expedited(&srcu_ctl); + synchronize_srcu_expedited(srcu_ctlp); } static struct rcu_torture_ops srcu_ops = { @@ -569,6 +572,38 @@ static struct rcu_torture_ops srcu_ops = { .name = "srcu" }; +static void srcu_torture_init(void) +{ + rcu_sync_torture_init(); + WARN_ON(init_srcu_struct(&srcu_ctld)); + srcu_ctlp = &srcu_ctld; +} + +static void
srcu_torture_cleanup(void) +{ + cleanup_srcu_struct(&srcu_ctld); + srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ +} + +/* As above, but dynamically allocated. */ +static struct rcu_torture_ops srcud_ops = { + .ttype = SRCU_FLAVOR, + .init = srcu_torture_init, + .cleanup = srcu_torture_cleanup, + .readlock = srcu_torture_read_lock, + .read_delay = srcu_read_delay, + .readunlock = srcu_torture_read_unlock, + .started = NULL, + .completed = srcu_torture_completed, + .deferred_free = srcu_torture_deferred_free, + .sync = srcu_torture_synchronize, + .exp_sync = srcu_torture_synchronize_expedited, + .call = srcu_torture_call, + .cb_barrier = srcu_torture_barrier, + .stats = srcu_torture_stats, + .name = "srcud" +}; + /* * Definitions for sched torture testing. */ @@ -1053,7 +1088,7 @@ static void rcu_torture_timer(unsigned long unused) p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || rcu_read_lock_sched_held() || - srcu_read_lock_held(&srcu_ctl)); + srcu_read_lock_held(srcu_ctlp)); if (p == NULL) { /* Leave because rcu_torture_writer is not yet underway */ cur_ops->readunlock(idx); @@ -1127,7 +1162,7 @@ rcu_torture_reader(void *arg) p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || rcu_read_lock_sched_held() || - srcu_read_lock_held(&srcu_ctl)); + srcu_read_lock_held(srcu_ctlp)); if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ cur_ops->readunlock(idx); @@ -1590,10 +1625,14 @@ rcu_torture_cleanup(void) rcutorture_booster_cleanup(i); } - /* Wait for all RCU callbacks to fire. */ - + /* + * Wait for all RCU callbacks to fire, then do flavor-specific + * cleanup operations. + */ if (cur_ops->cb_barrier != NULL) cur_ops->cb_barrier(); + if (cur_ops->cleanup != NULL) + cur_ops->cleanup(); rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ @@ -1670,8 +1709,8 @@ rcu_torture_init(void) int cpu; int firsterr = 0; static struct rcu_torture_ops *torture_ops[] = { - &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops, - RCUTORTURE_TASKS_OPS + &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, + &sched_ops, RCUTORTURE_TASKS_OPS }; if (!torture_init_begin(torture_type, verbose, &torture_runnable)) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot index 238bfe3bd0cc..84a7d51b7481 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot @@ -1 +1 @@ -rcutorture.torture_type=srcu +rcutorture.torture_type=srcud -- cgit v1.2.3 From 9a8e7062f6ee38cf53e4be1d52556fd93fac8b00 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 06:12:16 -0700 Subject: rcutorture: TASKS_RCU set directly, so don't explicitly set it The TASKS01, TASKS02, and TASKS03 rcutorture config fragments currently set CONFIG_TASKS_RCU. However, now that the value of this Kconfig parameter is set via "select" statements, it is no longer necessary to set it explicitly. This commit therefore removes it from the Kconfig fragments. Signed-off-by: Paul E. 
McKenney Reviewed-by: Pranith Kumar Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/TASKS01 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TASKS02 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TASKS03 | 1 - 3 files changed, 3 deletions(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 index 97f0a0b27ef7..32c5de503316 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 @@ -6,4 +6,3 @@ CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_RCU=y -CONFIG_TASKS_RCU=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 index 696d2ea74d13..ad2be91e5ee7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS02 @@ -2,4 +2,3 @@ CONFIG_SMP=n CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n -CONFIG_TASKS_RCU=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 index 9c60da5b5d1d..111494018301 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 @@ -6,7 +6,6 @@ CONFIG_HIBERNATION=n CONFIG_PREEMPT_NONE=n CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y -CONFIG_TASKS_RCU=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y -- cgit v1.2.3 From c5e8e98cd11950f121e9df19f46a86892412763c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 10:41:13 -0700 Subject: rcutorture: Update configuration fragments for rcutree.rcu_fanout_exact This commit updates rcutorture's configuration-fragment files to account for the move from the CONFIG_RCU_FANOUT_EXACT Kconfig parameter to the new rcutree.rcu_fanout_exact= boot parameter. Signed-off-by: Paul E.
McKenney Reviewed-by: Pranith Kumar Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/TREE02 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE02-T | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE03 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE04 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE05 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE06 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE07 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE08 | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE08-T | 1 - tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot | 1 + 12 files changed, 3 insertions(+), 9 deletions(-) create mode 100644 tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 index 629122fb8b4a..ea131cc5f7dd 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 @@ -14,7 +14,6 @@ CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=3 CONFIG_RCU_FANOUT_LEAF=3 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T index a25de47888a4..2ac9e68ea3d1 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T @@ -14,7 +14,6 @@ CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=3 CONFIG_RCU_FANOUT_LEAF=3 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 index 05a81380a5c2..d75d986fa688 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 @@ -11,7 +11,6 @@ CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y CONFIG_RCU_FANOUT=2 CONFIG_RCU_FANOUT_LEAF=2 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index 4eb6d374a0ca..30b0a5679e48 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -15,7 +15,6 @@ CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=4 CONFIG_RCU_FANOUT_LEAF=4 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 index 212e3bfd2b2a..79572319896f 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 @@ -12,7 +12,6 @@ CONFIG_RCU_TRACE=n CONFIG_HOTPLUG_CPU=y CONFIG_RCU_FANOUT=6 CONFIG_RCU_FANOUT_LEAF=6 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=y CONFIG_RCU_NOCB_CPU_NONE=y CONFIG_DEBUG_LOCK_ALLOC=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 index 7eee63b44218..b94c400def4a 100644 --- 
a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 @@ -14,7 +14,6 @@ CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=6 CONFIG_RCU_FANOUT_LEAF=6 -CONFIG_RCU_FANOUT_EXACT=y CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot index da9a03a398db..dd90f28ed700 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot @@ -1,3 +1,4 @@ rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_bh=1 rcupdate.rcu_self_test_sched=1 +rcutree.rcu_fanout_exact=1 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 index 92a97fa97dec..d715f99c3297 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -15,7 +15,6 @@ CONFIG_RCU_TRACE=y CONFIG_HOTPLUG_CPU=y CONFIG_RCU_FANOUT=2 CONFIG_RCU_FANOUT_LEAF=2 -CONFIG_RCU_FANOUT_EXACT=n CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 index acce6552f7f0..5f77ebeec4d1 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -13,7 +13,6 @@ CONFIG_HOTPLUG_CPU=n CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=3 -CONFIG_RCU_FANOUT_EXACT=y CONFIG_RCU_FANOUT_LEAF=2 CONFIG_RCU_NOCB_CPU=y CONFIG_RCU_NOCB_CPU_ALL=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T index 3eaeccacb083..b2b8cea69dc9 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T @@ -13,7 +13,6 @@ CONFIG_HOTPLUG_CPU=n CONFIG_SUSPEND=n CONFIG_HIBERNATION=n CONFIG_RCU_FANOUT=3 -CONFIG_RCU_FANOUT_EXACT=y CONFIG_RCU_FANOUT_LEAF=2 CONFIG_RCU_NOCB_CPU=y CONFIG_RCU_NOCB_CPU_ALL=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot new file mode 100644 index 000000000000..883149b5f2d1 --- /dev/null +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot @@ -0,0 +1 @@ +rcutree.rcu_fanout_exact=1 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot index 2561daf605ad..fb066dc82769 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot @@ -1,3 +1,4 @@ rcutorture.torture_type=sched rcupdate.rcu_self_test=1 rcupdate.rcu_self_test_sched=1 +rcutree.rcu_fanout_exact=1 -- cgit v1.2.3 From c4295bfe7eb867d8159f6b755e96fda06b96c132 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 20 Apr 2015 12:36:10 -0700 Subject: rcutorture: Make rcutorture scripts force RCU_EXPERT This commit causes the rcutorture scripts to force RCU_EXPERT so that these scripts can cause rcutorture to torture RCU in the various required configurations. However, SRCU-P, TASKS03, and TREE09 retain !RCU_EXPERT in order to ensure testing of the vanilla configuration. Signed-off-by: Paul E. 
McKenney Reviewed-by: Pranith Kumar Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/configs/rcu/SRCU-N | 1 + tools/testing/selftests/rcutorture/configs/rcu/SRCU-P | 1 + tools/testing/selftests/rcutorture/configs/rcu/TASKS01 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TASKS03 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE01 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE02 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE03 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE04 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE05 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE06 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE07 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE08 | 1 + tools/testing/selftests/rcutorture/configs/rcu/TREE09 | 1 + 13 files changed, 13 insertions(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N index 9fbb41b9b314..1a087c3c8bb8 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-N @@ -5,3 +5,4 @@ CONFIG_HOTPLUG_CPU=y CONFIG_PREEMPT_NONE=y CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P index 4b6f272dba27..4837430a71c0 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P +++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P @@ -5,3 +5,4 @@ CONFIG_HOTPLUG_CPU=y CONFIG_PREEMPT_NONE=n CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y +#CHECK#CONFIG_RCU_EXPERT=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 index 32c5de503316..9318de8d5e88 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 @@ -6,3 +6,4 @@ CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_RCU=y +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 index 111494018301..c70c51d5ded1 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 @@ -10,3 +10,4 @@ CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=y CONFIG_NO_HZ_FULL_ALL=y +#CHECK#CONFIG_RCU_EXPERT=n diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 index f8a10a7500c6..8e9137f66831 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01 @@ -16,3 +16,4 @@ CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 index ea131cc5f7dd..aeea6a204d14 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02 @@ -20,3 +20,4 @@ CONFIG_PROVE_LOCKING=n CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 index d75d986fa688..72aa7d87ea99 100644 --- 
a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03 @@ -17,3 +17,4 @@ CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=y CONFIG_RCU_KTHREAD_PRIO=2 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index 30b0a5679e48..d34e4b05941f 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -19,3 +19,4 @@ CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=y CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 index 79572319896f..2f9b93a777dd 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 @@ -19,3 +19,4 @@ CONFIG_PROVE_LOCKING=y CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 index b94c400def4a..f7b2e87af79a 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 @@ -20,3 +20,4 @@ CONFIG_PROVE_LOCKING=y CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 index d715f99c3297..ce18d597b553 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -19,3 +19,4 @@ CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=y CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 index 5f77ebeec4d1..fc1fed642917 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -22,3 +22,4 @@ CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 index 6076b36f6c0b..aa4ed08d999d 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09 @@ -16,3 +16,4 @@ CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n +#CHECK#CONFIG_RCU_EXPERT=n -- cgit v1.2.3 From ccd60ad3f87f6a5f2974a7f206e77b49b251e94c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 22 Apr 2015 07:20:51 -0700 Subject: rcutorture: Update TREE_RCU-kconfig.txt This commit updates TREE_RCU-kconfig.txt to reflect changes in RCU's Kconfig setup. This commit also updates rcutorture's Kconfig fragments to account for Kconfig parameters that are now driven directly off of other Kconfig parameters. The #CHECK# prefix tells the rcutorture scripts to take no action to try to set the Kconfig parameter, but to check that it does in fact get set. This is useful for verifying that Kconfig parameters that are supposed to be automatically set do in fact get set to the required values. 
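For example, the TINY02 fragment below replaces its CONFIG_PROVE_RCU=y line with:

	#CHECK#CONFIG_PROVE_RCU=y

which asks the scripts to verify that PROVE_RCU ends up set (here via CONFIG_PROVE_LOCKING, to which it is hardwired) without attempting to set it themselves.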
Reported-by: Pranith Kumar Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- .../selftests/rcutorture/configs/rcu/TASKS01 | 3 +- .../selftests/rcutorture/configs/rcu/TINY02 | 2 +- .../selftests/rcutorture/configs/rcu/TREE04 | 2 +- .../selftests/rcutorture/configs/rcu/TREE05 | 2 +- .../selftests/rcutorture/configs/rcu/TREE06 | 2 +- .../selftests/rcutorture/configs/rcu/TREE07 | 2 +- .../selftests/rcutorture/configs/rcu/TREE08 | 2 +- .../selftests/rcutorture/doc/TREE_RCU-kconfig.txt | 36 ++++++++-------------- 8 files changed, 21 insertions(+), 30 deletions(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 index 9318de8d5e88..2cc0e60eba6e 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 @@ -5,5 +5,6 @@ CONFIG_PREEMPT_NONE=n CONFIG_PREEMPT_VOLUNTARY=n CONFIG_PREEMPT=y CONFIG_DEBUG_LOCK_ALLOC=y -CONFIG_PROVE_RCU=y +CONFIG_PROVE_LOCKING=n +#CHECK#CONFIG_PROVE_RCU=n CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 index 36e41df3d27a..f1892e0371c9 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TINY02 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TINY02 @@ -8,7 +8,7 @@ CONFIG_NO_HZ_IDLE=n CONFIG_NO_HZ_FULL=n CONFIG_RCU_TRACE=y CONFIG_PROVE_LOCKING=y -CONFIG_PROVE_RCU=y +#CHECK#CONFIG_PROVE_RCU=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_PREEMPT_COUNT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 index d34e4b05941f..3f5112751cda 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04 @@ -17,6 +17,6 @@ CONFIG_RCU_FANOUT=4 CONFIG_RCU_FANOUT_LEAF=4 CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n -CONFIG_RCU_CPU_STALL_INFO=y +CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 index 2f9b93a777dd..c04dfea6fd21 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05 @@ -16,7 +16,7 @@ CONFIG_RCU_NOCB_CPU=y CONFIG_RCU_NOCB_CPU_NONE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y -CONFIG_PROVE_RCU=y +#CHECK#CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 index f7b2e87af79a..f51d2c73a68e 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06 @@ -17,7 +17,7 @@ CONFIG_RCU_FANOUT_LEAF=6 CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y -CONFIG_PROVE_RCU=y +#CHECK#CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=y CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 index ce18d597b553..f422af4ff5a3 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07 @@ -17,6 +17,6 @@ CONFIG_RCU_FANOUT=2 CONFIG_RCU_FANOUT_LEAF=2 CONFIG_RCU_NOCB_CPU=n CONFIG_DEBUG_LOCK_ALLOC=n -CONFIG_RCU_CPU_STALL_INFO=y 
+CONFIG_RCU_CPU_STALL_INFO=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n CONFIG_RCU_EXPERT=y diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 index fc1fed642917..a24d2ca30646 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08 @@ -18,7 +18,7 @@ CONFIG_RCU_NOCB_CPU=y CONFIG_RCU_NOCB_CPU_ALL=y CONFIG_DEBUG_LOCK_ALLOC=n CONFIG_PROVE_LOCKING=y -CONFIG_PROVE_RCU=y +#CHECK#CONFIG_PROVE_RCU=y CONFIG_RCU_CPU_STALL_INFO=n CONFIG_RCU_BOOST=n CONFIG_DEBUG_OBJECTS_RCU_HEAD=n diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt index ec03c883db00..b24c0004fc49 100644 --- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt +++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt @@ -12,13 +12,12 @@ CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.) CONFIG_NO_HZ_FULL -- Do two, one with CONFIG_NO_HZ_FULL_SYSIDLE. CONFIG_NO_HZ_FULL_SYSIDLE -- Do one. CONFIG_PREEMPT -- Do half. (First three and #8.) -CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not. -CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING. +CONFIG_PROVE_LOCKING -- Do several, covering CONFIG_DEBUG_LOCK_ALLOC=y and not. +CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING. CONFIG_RCU_BOOST -- one of PREEMPT_RCU. CONFIG_RCU_KTHREAD_PRIO -- set to 2 for _BOOST testing. -CONFIG_RCU_CPU_STALL_INFO -- Do one. -CONFIG_RCU_FANOUT -- Cover hierarchy as currently, but overlap with others. -CONFIG_RCU_FANOUT_EXACT -- Do one. +CONFIG_RCU_CPU_STALL_INFO -- Now default, avoid at least twice. +CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others. CONFIG_RCU_FANOUT_LEAF -- Do one non-default. CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL. CONFIG_RCU_NOCB_CPU -- Do three, see below. @@ -27,28 +26,19 @@ CONFIG_RCU_NOCB_CPU_NONE -- Do one. CONFIG_RCU_NOCB_CPU_ZERO -- Do one. CONFIG_RCU_TRACE -- Do half. CONFIG_SMP -- Need one !SMP for PREEMPT_RCU. +!RCU_EXPERT -- Do a few, but these have to be vanilla configurations. RCU-bh: Do one with PREEMPT and one with !PREEMPT. RCU-sched: Do one with PREEMPT but not BOOST. -Hierarchy: - -TREE01. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=8, CONFIG_RCU_FANOUT_EXACT=n. -TREE02. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=n, - CONFIG_RCU_FANOUT_LEAF=3. -TREE03. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=4, CONFIG_RCU_FANOUT_EXACT=n, - CONFIG_RCU_FANOUT_LEAF=4. -TREE04. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n, - CONFIG_RCU_FANOUT_LEAF=2. -TREE05. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=n - CONFIG_RCU_FANOUT_LEAF=6. -TREE06. CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=y - CONFIG_RCU_FANOUT_LEAF=6. -TREE07. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n, - CONFIG_RCU_FANOUT_LEAF=2. -TREE08. CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=y, - CONFIG_RCU_FANOUT_LEAF=2. -TREE09. CONFIG_NR_CPUS=1. +Boot parameters: + +nohz_full - do at least one. +maxcpu -- do at least one. +rcupdate.rcu_self_test_bh -- Do at least one each, offloaded and not. +rcupdate.rcu_self_test_sched -- Do at least one each, offloaded and not. +rcupdate.rcu_self_test -- Do at least one each, offloaded and not. +rcutree.rcu_fanout_exact -- Do at least one. 
Kconfig Parameters Ignored: -- cgit v1.2.3 From 8ba8b664d4e43abf6bd896f3f614c8eb22384635 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 23 Apr 2015 12:55:54 -0700 Subject: rcutorture: Display "make oldconfig" errors The current rcutorture scripting fails to dump out errors from "make oldconfig", so this commit captures those errors in a separate Make.oldconfig.err file and dumps that file during results checking. Signed-off-by: Paul E. McKenney Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/bin/configinit.sh | 2 +- tools/testing/selftests/rcutorture/bin/kvm-recheck.sh | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh index 15f1a17ca96e..3f81a1095206 100755 --- a/tools/testing/selftests/rcutorture/bin/configinit.sh +++ b/tools/testing/selftests/rcutorture/bin/configinit.sh @@ -66,7 +66,7 @@ make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1 mv $builddir/.config $builddir/.config.sav sh $T/upd.sh < $builddir/.config.sav > $builddir/.config cp $builddir/.config $builddir/.config.new -yes '' | make $buildloc oldconfig > $builddir/Make.modconfig.out 2>&1 +yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err # verify new config matches specification. configcheck.sh $builddir/.config $c diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh index 4f5b20f367a9..d86bdd6b6cc2 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh @@ -43,6 +43,10 @@ do if test -f "$i/console.log" then configcheck.sh $i/.config $i/ConfigFragment + if test -r $i/Make.oldconfig.err + then + cat $i/Make.oldconfig.err + fi parse-build.sh $i/Make.out $configfile parse-torture.sh $i/console.log $configfile parse-console.sh $i/console.log $configfile -- cgit v1.2.3 From 7d3bb54adeb13bb98badd86c24648d51ebe28331 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 11 May 2015 13:55:47 -0700 Subject: rcutorture: Allow repetition factors in Kconfig-fragment lists Although it is currently possible to run the same test in parallel, '--config "TINY01 TINY01 TINY01"' can get a bit verbose, especially if you want to run 48 instances of TINY01 in parallel. This commit therefore allows prefixing the Kconfig fragment with a repeat count, for example, '--config "48*TINY01"' to run 48 instances in parallel, at least assuming that you have 48 CPUs and also passed '--cpus 48'. Signed-off-by: Paul E.
McKenney Reviewed-by: Josh Triplett --- tools/testing/selftests/rcutorture/bin/kvm.sh | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index dd2812ceb0ba..fbe2dbff1e21 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -55,7 +55,7 @@ usage () { echo " --bootargs kernel-boot-arguments" echo " --bootimage relative-path-to-kernel-boot-image" echo " --buildonly" - echo " --configs \"config-file list\"" + echo " --configs \"config-file list w/ repeat factor (3*TINY01)\"" echo " --cpus N" echo " --datestamp string" echo " --defconfig string" @@ -178,13 +178,26 @@ fi touch $T/cfgcpu for CF in $configs do - if test -f "$CONFIGFRAG/$CF" + case $CF in + [0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**) + config_reps=`echo $CF | sed -e 's/\*.*$//'` + CF1=`echo $CF | sed -e 's/^[^*]*\*//'` + ;; + *) + config_reps=1 + CF1=$CF + ;; + esac + if test -f "$CONFIGFRAG/$CF1" then - cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF` - cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF" "$cpu_count"` - echo $CF $cpu_count >> $T/cfgcpu + cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1` + cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"` + for ((cur_rep=0;cur_rep<$config_reps;cur_rep++)) + do + echo $CF1 $cpu_count >> $T/cfgcpu + done else - echo "The --configs file $CF does not exist, terminating." + echo "The --configs file $CF1 does not exist, terminating." exit 1 fi done -- cgit v1.2.3
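Worked example of the new parsing (the fragment names here are illustrative): given '--configs "3*TINY01 TREE04"', the case statement above yields config_reps=3 and CF1=TINY01 for the first entry, so "TINY01 $cpu_count" is appended to $T/cfgcpu three times, while the bare TREE04 falls through to the default of config_reps=1 and is queued once.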