From 90127d605f403d814f4986436871210bf8ceb335 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 May 2018 10:29:18 -0700 Subject: torture: Make online/offline messages appear only for verbose=2 Some bugs reproduce quickly only at high CPU-hotplug rates, so the rcutorture TREE03 scenario now has only 200 milliseconds spacing between CPU-hotplug operations. At this rate, the torture-test pair of console messages per operation becomes a bit voluminous. This commit therefore converts the torture-test set of "verbose" kernel-boot arguments from bool to int, and prints the extra console messages only when verbose=2. The default is still verbose=1. Signed-off-by: Paul E. McKenney
--- kernel/rcu/rcutorture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/rcu/rcutorture.c')
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 42fcb7f05fac..a5540bd831c4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -101,7 +101,7 @@ torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds."); torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs"); -torture_param(bool, verbose, true, +torture_param(int, verbose, 1, "Enable verbose debugging printk()s"); static char *torture_type = "rcu";
-- cgit v1.2.3
From 60500037637397dcc8ea3d3c2f16e05ea6695a86 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 15 May 2018 12:25:05 -0700 Subject: torture: Keep old-school dmesg format This commit adds "#define pr_fmt(fmt) fmt" to the torture-test files in order to keep the current dmesg format. Once Joe's commits have hit mainline, these definitions will be changed in order to automatically generate the dmesg line prefix that the scripts expect. This will have the beneficial side-effect of allowing printk() formats to be used more widely and of shortening some pr_*() lines. Signed-off-by: Paul E. McKenney Cc: Joe Perches
--- kernel/locking/locktorture.c | 3 +++ kernel/rcu/rcuperf.c | 3 +++ kernel/rcu/rcutorture.c | 3 +++ kernel/torture.c | 3 +++ 4 files changed, 12 insertions(+) (limited to 'kernel/rcu/rcutorture.c')
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 4a2e13870a9b..57bef4fbfb31 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -21,6 +21,9 @@ * Davidlohr Bueso * Based on kernel/rcu/torture.c. */ + +#define pr_fmt(fmt) fmt + #include #include #include
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index fb8094848906..df29119b2013 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -19,6 +19,9 @@ * * Authors: Paul E. McKenney */ + +#define pr_fmt(fmt) fmt + #include #include #include
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index a5540bd831c4..5604bfac8df4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -22,6 +22,9 @@ * * See also: Documentation/RCU/torture.txt */ + +#define pr_fmt(fmt) fmt + #include #include #include
diff --git a/kernel/torture.c b/kernel/torture.c index 840fd33c1cda..1ac24a826589 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -20,6 +20,9 @@ * Author: Paul E. McKenney * Based on kernel/rcu/torture.c. */ + +#define pr_fmt(fmt) fmt + #include #include #include
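
Since every file in this series gains a pr_fmt() definition, the mechanism is worth a moment: pr_info() and friends expand to printk(KERN_INFO pr_fmt(fmt), ...), so whatever pr_fmt() pastes in front of the format string becomes the dmesg prefix of every pr_*() call in that file. A minimal sketch of the two definitions in play (the demo function is hypothetical; the "rcu: " prefix anticipates Joe Perches's commit later in this series):

    #define pr_fmt(fmt) fmt               /* old-school: format string used verbatim */
    /* #define pr_fmt(fmt) "rcu: " fmt */ /* the later style: automatic "rcu: " prefix */

    #include <linux/printk.h>

    static void pr_fmt_demo(void)         /* hypothetical, for illustration only */
    {
            pr_info("grace period ended\n"); /* would log "rcu: grace period ended" under the second definition */
    }
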
-- cgit v1.2.3
From 17ef2fe97c8c8e754e4a702c42f8e5b0ffadf4dd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 27 Apr 2018 11:39:34 -0700 Subject: rcu: Make rcutorture's batches-completed API use ->gp_seq The rcutorture test invokes rcu_batches_started(), rcu_batches_completed(), rcu_batches_started_bh(), rcu_batches_completed_bh(), rcu_batches_started_sched(), and rcu_batches_completed_sched() to do grace-period consistency checks, and rcuperf uses the _completed variants for statistics. These functions use ->gpnum and ->completed. This commit therefore replaces them with rcu_get_gp_seq(), rcu_bh_get_gp_seq(), and rcu_sched_get_gp_seq(), adjusting rcutorture and rcuperf to make use of them. Signed-off-by: Paul E. McKenney
--- kernel/rcu/rcu.h | 18 ++++++----------- kernel/rcu/rcuperf.c | 26 +++++++++---------------- kernel/rcu/rcutorture.c | 50 +++++++++++++++++------------------------------- kernel/rcu/tree.c | 51 ++++++++++++------------------------------------- 4 files changed, 45 insertions(+), 100 deletions(-) (limited to 'kernel/rcu/rcutorture.c')
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index db0870acfdff..f0907f9f6cd0 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -463,12 +463,9 @@ void srcutorture_get_gp_data(enum rcutorture_type test_type, #endif #ifdef CONFIG_TINY_RCU -static inline unsigned long rcu_batches_started(void) { return 0; } -static inline unsigned long rcu_batches_started_bh(void) { return 0; } -static inline unsigned long rcu_batches_started_sched(void) { return 0; } -static inline unsigned long rcu_batches_completed(void) { return 0; } -static inline unsigned long rcu_batches_completed_bh(void) { return 0; } -static inline unsigned long rcu_batches_completed_sched(void) { return 0; } +static inline unsigned long rcu_get_gp_seq(void) { return 0; } +static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; } +static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; } static inline unsigned long rcu_exp_batches_completed(void) { return 0; } static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; } static inline unsigned long @@ -480,12 +477,9 @@ static inline void show_rcu_gp_kthreads(void) { } #else /* #ifdef CONFIG_TINY_RCU */ extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; -unsigned long rcu_batches_started(void); -unsigned long rcu_batches_started_bh(void); -unsigned long rcu_batches_started_sched(void); -unsigned long rcu_batches_completed(void); -unsigned long rcu_batches_completed_bh(void); -unsigned long rcu_batches_completed_sched(void); +unsigned long rcu_get_gp_seq(void); +unsigned long rcu_bh_get_gp_seq(void); +unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); unsigned long srcu_batches_completed(struct srcu_struct *sp);
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index df29119b2013..2b5a613afcf3 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -138,8 +138,7 @@ struct rcu_perf_ops { void (*cleanup)(void); int (*readlock)(void); void (*readunlock)(int idx); - unsigned long (*started)(void); - unsigned long (*completed)(void); + unsigned long (*get_gp_seq)(void); unsigned long (*exp_completed)(void); void (*async)(struct rcu_head *head, rcu_callback_t func); void (*gp_barrier)(void); @@ -179,8 +178,7 @@ static struct rcu_perf_ops rcu_ops = { .init = rcu_sync_perf_init, .readlock = rcu_perf_read_lock, .readunlock = rcu_perf_read_unlock, - .started = rcu_batches_started, - .completed = rcu_batches_completed, + .get_gp_seq = rcu_get_gp_seq,
.exp_completed = rcu_exp_batches_completed, .async = call_rcu, .gp_barrier = rcu_barrier, @@ -209,8 +207,7 @@ static struct rcu_perf_ops rcu_bh_ops = { .init = rcu_sync_perf_init, .readlock = rcu_bh_perf_read_lock, .readunlock = rcu_bh_perf_read_unlock, - .started = rcu_batches_started_bh, - .completed = rcu_batches_completed_bh, + .get_gp_seq = rcu_bh_get_gp_seq, .exp_completed = rcu_exp_batches_completed_sched, .async = call_rcu_bh, .gp_barrier = rcu_barrier_bh, @@ -266,8 +263,7 @@ static struct rcu_perf_ops srcu_ops = { .init = rcu_sync_perf_init, .readlock = srcu_perf_read_lock, .readunlock = srcu_perf_read_unlock, - .started = NULL, - .completed = srcu_perf_completed, + .get_gp_seq = srcu_perf_completed, .exp_completed = srcu_perf_completed, .async = srcu_call_rcu, .gp_barrier = srcu_rcu_barrier, @@ -295,8 +291,7 @@ static struct rcu_perf_ops srcud_ops = { .cleanup = srcu_sync_perf_cleanup, .readlock = srcu_perf_read_lock, .readunlock = srcu_perf_read_unlock, - .started = NULL, - .completed = srcu_perf_completed, + .get_gp_seq = srcu_perf_completed, .exp_completed = srcu_perf_completed, .async = srcu_call_rcu, .gp_barrier = srcu_rcu_barrier, @@ -325,8 +320,7 @@ static struct rcu_perf_ops sched_ops = { .init = rcu_sync_perf_init, .readlock = sched_perf_read_lock, .readunlock = sched_perf_read_unlock, - .started = rcu_batches_started_sched, - .completed = rcu_batches_completed_sched, + .get_gp_seq = rcu_sched_get_gp_seq, .exp_completed = rcu_exp_batches_completed_sched, .async = call_rcu_sched, .gp_barrier = rcu_barrier_sched, @@ -353,8 +347,7 @@ static struct rcu_perf_ops tasks_ops = { .init = rcu_sync_perf_init, .readlock = tasks_perf_read_lock, .readunlock = tasks_perf_read_unlock, - .started = rcu_no_completed, - .completed = rcu_no_completed, + .get_gp_seq = rcu_no_completed, .async = call_rcu_tasks, .gp_barrier = rcu_barrier_tasks, .sync = synchronize_rcu_tasks, @@ -447,8 +440,7 @@ rcu_perf_writer(void *arg) b_rcu_perf_writer_started = cur_ops->exp_completed() / 2; } else { - b_rcu_perf_writer_started = - cur_ops->completed(); + b_rcu_perf_writer_started = cur_ops->get_gp_seq(); } } @@ -505,7 +497,7 @@ retry: cur_ops->exp_completed() / 2; } else { b_rcu_perf_writer_finished = - cur_ops->completed(); + cur_ops->get_gp_seq(); } if (shutdown) { smp_mb(); /* Assign before wake. */ diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 5604bfac8df4..1f66597c7783 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -264,8 +264,7 @@ struct rcu_torture_ops { int (*readlock)(void); void (*read_delay)(struct torture_random_state *rrsp); void (*readunlock)(int idx); - unsigned long (*started)(void); - unsigned long (*completed)(void); + unsigned long (*get_gp_seq)(void); void (*deferred_free)(struct rcu_torture *p); void (*sync)(void); void (*exp_sync)(void); @@ -305,10 +304,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp) * force_quiescent_state. 
*/ if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { - started = cur_ops->completed(); + started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); mdelay(longdelay_ms); - completed = cur_ops->completed(); + completed = cur_ops->get_gp_seq(); do_trace_rcu_torture_read(cur_ops->name, NULL, ts, started, completed); } @@ -400,8 +399,7 @@ static struct rcu_torture_ops rcu_ops = { .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, .readunlock = rcu_torture_read_unlock, - .started = rcu_batches_started, - .completed = rcu_batches_completed, + .get_gp_seq = rcu_get_gp_seq, .deferred_free = rcu_torture_deferred_free, .sync = synchronize_rcu, .exp_sync = synchronize_rcu_expedited, @@ -442,8 +440,7 @@ static struct rcu_torture_ops rcu_bh_ops = { .readlock = rcu_bh_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = rcu_bh_torture_read_unlock, - .started = rcu_batches_started_bh, - .completed = rcu_batches_completed_bh, + .get_gp_seq = rcu_bh_get_gp_seq, .deferred_free = rcu_bh_torture_deferred_free, .sync = synchronize_rcu_bh, .exp_sync = synchronize_rcu_bh_expedited, @@ -486,8 +483,7 @@ static struct rcu_torture_ops rcu_busted_ops = { .readlock = rcu_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = rcu_torture_read_unlock, - .started = rcu_no_completed, - .completed = rcu_no_completed, + .get_gp_seq = rcu_no_completed, .deferred_free = rcu_busted_torture_deferred_free, .sync = synchronize_rcu_busted, .exp_sync = synchronize_rcu_busted, @@ -575,8 +571,7 @@ static struct rcu_torture_ops srcu_ops = { .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, .readunlock = srcu_torture_read_unlock, - .started = NULL, - .completed = srcu_torture_completed, + .get_gp_seq = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, .exp_sync = srcu_torture_synchronize_expedited, @@ -613,8 +608,7 @@ static struct rcu_torture_ops srcud_ops = { .readlock = srcu_torture_read_lock, .read_delay = srcu_read_delay, .readunlock = srcu_torture_read_unlock, - .started = NULL, - .completed = srcu_torture_completed, + .get_gp_seq = srcu_torture_completed, .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, .exp_sync = srcu_torture_synchronize_expedited, @@ -651,8 +645,7 @@ static struct rcu_torture_ops sched_ops = { .readlock = sched_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = sched_torture_read_unlock, - .started = rcu_batches_started_sched, - .completed = rcu_batches_completed_sched, + .get_gp_seq = rcu_sched_get_gp_seq, .deferred_free = rcu_sched_torture_deferred_free, .sync = synchronize_sched, .exp_sync = synchronize_sched_expedited, @@ -690,8 +683,7 @@ static struct rcu_torture_ops tasks_ops = { .readlock = tasks_torture_read_lock, .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ .readunlock = tasks_torture_read_unlock, - .started = rcu_no_completed, - .completed = rcu_no_completed, + .get_gp_seq = rcu_no_completed, .deferred_free = rcu_tasks_torture_deferred_free, .sync = synchronize_rcu_tasks, .exp_sync = synchronize_rcu_tasks, @@ -1104,10 +1096,7 @@ static void rcu_torture_timer(struct timer_list *unused) unsigned long long ts; idx = cur_ops->readlock(); - if (cur_ops->started) - started = cur_ops->started(); - else - started = cur_ops->completed(); + started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || @@ -1131,7 +1120,7 @@ static void rcu_torture_timer(struct timer_list *unused) /* Should not happen, but... */ pipe_count = RCU_TORTURE_PIPE_LEN; } - completed = cur_ops->completed(); + completed = cur_ops->get_gp_seq(); if (pipe_count > 1) { do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, started, completed); @@ -1139,8 +1128,8 @@ static void rcu_torture_timer(struct timer_list *unused) } __this_cpu_inc(rcu_torture_count[pipe_count]); completed = completed - started; - if (cur_ops->started) - completed++; + if (completed > ULONG_MAX >> 1) + completed = 0; /* Not all gp_seq have full range. */ if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... */ completed = RCU_TORTURE_PIPE_LEN; @@ -1187,10 +1176,7 @@ rcu_torture_reader(void *arg) mod_timer(&t, jiffies + 1); } idx = cur_ops->readlock(); - if (cur_ops->started) - started = cur_ops->started(); - else - started = cur_ops->completed(); + started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, rcu_read_lock_bh_held() || @@ -1212,7 +1198,7 @@ rcu_torture_reader(void *arg) /* Should not happen, but... */ pipe_count = RCU_TORTURE_PIPE_LEN; } - completed = cur_ops->completed(); + completed = cur_ops->get_gp_seq(); if (pipe_count > 1) { do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, started, completed); @@ -1220,8 +1206,8 @@ rcu_torture_reader(void *arg) } __this_cpu_inc(rcu_torture_count[pipe_count]); completed = completed - started; - if (cur_ops->started) - completed++; + if (completed > ULONG_MAX >> 1) + completed = 0; /* Not all gp_seq have full range. */ if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... */ completed = RCU_TORTURE_PIPE_LEN; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3c3af7e2758f..547112bec26a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -530,58 +530,31 @@ static void force_quiescent_state(struct rcu_state *rsp); static int rcu_pending(void); /* - * Return the number of RCU batches started thus far for debug & stats. + * Return the number of RCU GPs completed thus far for debug & stats. */ -unsigned long rcu_batches_started(void) +unsigned long rcu_get_gp_seq(void) { - return rcu_state_p->gpnum; + return rcu_seq_ctr(READ_ONCE(rcu_state_p->gp_seq)); } -EXPORT_SYMBOL_GPL(rcu_batches_started); +EXPORT_SYMBOL_GPL(rcu_get_gp_seq); /* - * Return the number of RCU-sched batches started thus far for debug & stats. + * Return the number of RCU-sched GPs completed thus far for debug & stats. */ -unsigned long rcu_batches_started_sched(void) +unsigned long rcu_sched_get_gp_seq(void) { - return rcu_sched_state.gpnum; + return rcu_seq_ctr(READ_ONCE(rcu_sched_state.gp_seq)); } -EXPORT_SYMBOL_GPL(rcu_batches_started_sched); +EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); /* - * Return the number of RCU BH batches started thus far for debug & stats. 
+ * Return the number of RCU-bh GPs completed thus far for debug & stats. */ -unsigned long rcu_batches_started_bh(void) +unsigned long rcu_bh_get_gp_seq(void) { - return rcu_bh_state.gpnum; + return rcu_seq_ctr(READ_ONCE(rcu_bh_state.gp_seq)); } -EXPORT_SYMBOL_GPL(rcu_batches_started_bh); - -/* - * Return the number of RCU batches completed thus far for debug & stats. - */ -unsigned long rcu_batches_completed(void) -{ - return rcu_state_p->completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed); - -/* - * Return the number of RCU-sched batches completed thus far for debug & stats. - */ -unsigned long rcu_batches_completed_sched(void) -{ - return rcu_sched_state.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); - -/* - * Return the number of RCU BH batches completed thus far for debug & stats. - */ -unsigned long rcu_batches_completed_bh(void) -{ - return rcu_bh_state.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); +EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); /* * Return the number of RCU expedited batches completed thus far for -- cgit v1.2.3 From aebc82644b2c8eafa15e8c481fbafc1b41f4fbf9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 1 May 2018 06:42:51 -0700 Subject: rcutorture: Convert rcutorture_get_gp_data() to ->gp_seq SRCU has long used ->srcu_gp_seq, and now RCU uses ->gp_seq. This commit therefore moves the rcutorture_get_gp_data() function from a ->gpnum / ->completed pair to ->gp_seq. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 17 ++++++----------- kernel/rcu/rcutorture.c | 24 ++++++++++-------------- kernel/rcu/srcutree.c | 5 ++--- kernel/rcu/tree.c | 5 ++--- 4 files changed, 20 insertions(+), 31 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 7568a3fd0815..003671825d62 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -425,7 +425,7 @@ enum rcutorture_type { #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, - unsigned long *gpnum, unsigned long *completed); + unsigned long *gp_seq); void rcutorture_record_test_transition(void); void rcutorture_record_progress(unsigned long vernum); void do_trace_rcu_torture_read(const char *rcutorturename, @@ -435,13 +435,10 @@ void do_trace_rcu_torture_read(const char *rcutorturename, unsigned long c); #else static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, - int *flags, - unsigned long *gpnum, - unsigned long *completed) + int *flags, unsigned long *gp_seq) { *flags = 0; - *gpnum = 0; - *completed = 0; + *gp_seq = 0; } static inline void rcutorture_record_test_transition(void) { } static inline void rcutorture_record_progress(unsigned long vernum) { } @@ -461,21 +458,19 @@ void do_trace_rcu_torture_read(const char *rcutorturename, static inline void srcutorture_get_gp_data(enum rcutorture_type test_type, struct srcu_struct *sp, int *flags, - unsigned long *gpnum, - unsigned long *completed) + unsigned long *gp_seq) { if (test_type != SRCU_FLAVOR) return; *flags = 0; - *completed = sp->srcu_idx; - *gpnum = *completed; + *gp_seq = sp->srcu_idx; } #elif defined(CONFIG_TREE_SRCU) void srcutorture_get_gp_data(enum rcutorture_type test_type, struct srcu_struct *sp, int *flags, - unsigned long *gpnum, unsigned long *completed); + unsigned long *gp_seq); #endif diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 1f66597c7783..81fb43530d64 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1313,18 
+1313,16 @@ rcu_torture_stats_print(void) if (rtcv_snap == rcu_torture_current_version && rcu_torture_current != NULL) { int __maybe_unused flags = 0; - unsigned long __maybe_unused gpnum = 0; - unsigned long __maybe_unused completed = 0; + unsigned long __maybe_unused gp_seq = 0; rcutorture_get_gp_data(cur_ops->ttype, - &flags, &gpnum, &completed); + &flags, &gp_seq); srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, - &flags, &gpnum, &completed); + &flags, &gp_seq); wtp = READ_ONCE(writer_task); - pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n", + pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", rcu_torture_writer_state_getname(), - rcu_torture_writer_state, - gpnum, completed, flags, + rcu_torture_writer_state, gp_seq, flags, wtp == NULL ? ~0UL : wtp->state, wtp == NULL ? -1 : (int)task_cpu(wtp)); if (!splatted && wtp) { @@ -1605,8 +1603,7 @@ static void rcu_torture_cleanup(void) { int flags = 0; - unsigned long gpnum = 0; - unsigned long completed = 0; + unsigned long gp_seq = 0; int i; rcutorture_record_test_transition(); @@ -1637,11 +1634,10 @@ rcu_torture_cleanup(void) fakewriter_tasks = NULL; } - rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed); - srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, - &flags, &gpnum, &completed); - pr_alert("%s: End-test grace-period state: g%lu c%lu f%#x\n", - cur_ops->name, gpnum, completed, flags); + rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); + srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); + pr_alert("%s: End-test grace-period state: g%lu f%#x\n", + cur_ops->name, gp_seq, flags); torture_stop_kthread(rcu_torture_stats, stats_task); torture_stop_kthread(rcu_torture_fqs, fqs_task); for (i = 0; i < ncbflooders; i++)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 5a1a9a07b407..d6d6ea9738c0 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1249,13 +1249,12 @@ static void process_srcu(struct work_struct *work) void srcutorture_get_gp_data(enum rcutorture_type test_type, struct srcu_struct *sp, int *flags, - unsigned long *gpnum, unsigned long *completed) + unsigned long *gp_seq) { if (test_type != SRCU_FLAVOR) return; *flags = 0; - *completed = rcu_seq_ctr(sp->srcu_gp_seq); - *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed); + *gp_seq = rcu_seq_current(&sp->srcu_gp_seq); } EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9e619c4878d3..4a528a062cd4 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -638,7 +638,7 @@ EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); * Send along grace-period-related data for rcutorture diagnostics. */ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, - unsigned long *gpnum, unsigned long *completed) + unsigned long *gp_seq) { struct rcu_state *rsp = NULL; @@ -658,8 +658,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, if (rsp == NULL) return; *flags = READ_ONCE(rsp->gp_flags); - *gpnum = READ_ONCE(rsp->gpnum); - *completed = READ_ONCE(rsp->completed); + *gp_seq = rcu_seq_current(&rsp->gp_seq); } EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
-- cgit v1.2.3
From d72193123c81ae6123d108b3be2096f3f13b25a6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 15 May 2018 15:24:41 -0700 Subject: rcutorture: Correctly handle grace-period sequence wrap The new ->gp_seq grace-period sequence numbers must be shifted down, which produces artifacts when these numbers wrap.
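
To see the artifact concretely, consider a small user-space sketch (not from the patch; RCU_SEQ_CTR_SHIFT is 2 in kernel/rcu/rcu.h, and the values are chosen to sit just below the unsigned-long wrap):

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT 2  /* low bits hold GP state, upper bits the GP counter */

    int main(void)
    {
            unsigned long old = ~0UL - 3;                           /* counter at maximum, state 0 */
            unsigned long new = old + (2UL << RCU_SEQ_CTR_SHIFT);   /* two grace periods later, wrapped */

            /* Shifting before subtracting discards the wrap: a huge bogus count. */
            printf("shift then subtract: %lu\n",
                   (new >> RCU_SEQ_CTR_SHIFT) - (old >> RCU_SEQ_CTR_SHIFT));
            /* Subtracting first lets the wrap cancel out: prints 2, as expected. */
            printf("subtract then shift: %lu\n",
                   (new - old) >> RCU_SEQ_CTR_SHIFT);
            return 0;
    }

The second computation is exactly what the rcu_seq_diff() helper added below does.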
This commit therefore enables rcutorture and rcuperf to handle grace-period sequence numbers even if they do wrap. It does this by allowing a special subtraction function to be specified, and this function subtracts before shifting. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu.h | 9 +++++++++ kernel/rcu/rcuperf.c | 18 ++++++++++++++++-- kernel/rcu/rcutorture.c | 19 +++++++++++++------ kernel/rcu/tree.c | 6 +++--- 4 files changed, 41 insertions(+), 11 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 1c5cbd9d7c97..aa215d6355f8 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -142,6 +142,15 @@ static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new) new); } +/* + * Roughly how many full grace periods have elapsed between the collection + * of the two specified grace periods? + */ +static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old) +{ + return (new - old) >> RCU_SEQ_CTR_SHIFT; +} + /* * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally * by call_rcu() and rcu callback execution, and are therefore not part of the diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index 2b5a613afcf3..b080bc4a4f45 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -139,6 +139,7 @@ struct rcu_perf_ops { int (*readlock)(void); void (*readunlock)(int idx); unsigned long (*get_gp_seq)(void); + unsigned long (*gp_diff)(unsigned long new, unsigned long old); unsigned long (*exp_completed)(void); void (*async)(struct rcu_head *head, rcu_callback_t func); void (*gp_barrier)(void); @@ -179,6 +180,7 @@ static struct rcu_perf_ops rcu_ops = { .readlock = rcu_perf_read_lock, .readunlock = rcu_perf_read_unlock, .get_gp_seq = rcu_get_gp_seq, + .gp_diff = rcu_seq_diff, .exp_completed = rcu_exp_batches_completed, .async = call_rcu, .gp_barrier = rcu_barrier, @@ -208,6 +210,7 @@ static struct rcu_perf_ops rcu_bh_ops = { .readlock = rcu_bh_perf_read_lock, .readunlock = rcu_bh_perf_read_unlock, .get_gp_seq = rcu_bh_get_gp_seq, + .gp_diff = rcu_seq_diff, .exp_completed = rcu_exp_batches_completed_sched, .async = call_rcu_bh, .gp_barrier = rcu_barrier_bh, @@ -264,6 +267,7 @@ static struct rcu_perf_ops srcu_ops = { .readlock = srcu_perf_read_lock, .readunlock = srcu_perf_read_unlock, .get_gp_seq = srcu_perf_completed, + .gp_diff = rcu_seq_diff, .exp_completed = srcu_perf_completed, .async = srcu_call_rcu, .gp_barrier = srcu_rcu_barrier, @@ -292,6 +296,7 @@ static struct rcu_perf_ops srcud_ops = { .readlock = srcu_perf_read_lock, .readunlock = srcu_perf_read_unlock, .get_gp_seq = srcu_perf_completed, + .gp_diff = rcu_seq_diff, .exp_completed = srcu_perf_completed, .async = srcu_call_rcu, .gp_barrier = srcu_rcu_barrier, @@ -321,6 +326,7 @@ static struct rcu_perf_ops sched_ops = { .readlock = sched_perf_read_lock, .readunlock = sched_perf_read_unlock, .get_gp_seq = rcu_sched_get_gp_seq, + .gp_diff = rcu_seq_diff, .exp_completed = rcu_exp_batches_completed_sched, .async = call_rcu_sched, .gp_barrier = rcu_barrier_sched, @@ -348,6 +354,7 @@ static struct rcu_perf_ops tasks_ops = { .readlock = tasks_perf_read_lock, .readunlock = tasks_perf_read_unlock, .get_gp_seq = rcu_no_completed, + .gp_diff = rcu_seq_diff, .async = call_rcu_tasks, .gp_barrier = rcu_barrier_tasks, .sync = synchronize_rcu_tasks, @@ -355,6 +362,13 @@ static struct rcu_perf_ops tasks_ops = { .name = "tasks" }; +static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old) +{ + if (!cur_ops->gp_diff) + return new 
- old; + return cur_ops->gp_diff(new, old); +} + static bool __maybe_unused torturing_tasks(void) { return cur_ops == &tasks_ops; @@ -577,8 +591,8 @@ rcu_perf_cleanup(void) t_rcu_perf_writer_finished - t_rcu_perf_writer_started, ngps, - b_rcu_perf_writer_finished - - b_rcu_perf_writer_started); + rcuperf_seq_diff(b_rcu_perf_writer_finished, + b_rcu_perf_writer_started)); for (i = 0; i < nrealwriters; i++) { if (!writer_durations) break; diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 81fb43530d64..0481c7286875 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -265,6 +265,7 @@ struct rcu_torture_ops { void (*read_delay)(struct torture_random_state *rrsp); void (*readunlock)(int idx); unsigned long (*get_gp_seq)(void); + unsigned long (*gp_diff)(unsigned long new, unsigned long old); void (*deferred_free)(struct rcu_torture *p); void (*sync)(void); void (*exp_sync)(void); @@ -400,6 +401,7 @@ static struct rcu_torture_ops rcu_ops = { .read_delay = rcu_read_delay, .readunlock = rcu_torture_read_unlock, .get_gp_seq = rcu_get_gp_seq, + .gp_diff = rcu_seq_diff, .deferred_free = rcu_torture_deferred_free, .sync = synchronize_rcu, .exp_sync = synchronize_rcu_expedited, @@ -441,6 +443,7 @@ static struct rcu_torture_ops rcu_bh_ops = { .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = rcu_bh_torture_read_unlock, .get_gp_seq = rcu_bh_get_gp_seq, + .gp_diff = rcu_seq_diff, .deferred_free = rcu_bh_torture_deferred_free, .sync = synchronize_rcu_bh, .exp_sync = synchronize_rcu_bh_expedited, @@ -646,6 +649,7 @@ static struct rcu_torture_ops sched_ops = { .read_delay = rcu_read_delay, /* just reuse rcu's version. */ .readunlock = sched_torture_read_unlock, .get_gp_seq = rcu_sched_get_gp_seq, + .gp_diff = rcu_seq_diff, .deferred_free = rcu_sched_torture_deferred_free, .sync = synchronize_sched, .exp_sync = synchronize_sched_expedited, @@ -695,6 +699,13 @@ static struct rcu_torture_ops tasks_ops = { .name = "tasks" }; +static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) +{ + if (!cur_ops->gp_diff) + return new - old; + return cur_ops->gp_diff(new, old); +} + static bool __maybe_unused torturing_tasks(void) { return cur_ops == &tasks_ops; @@ -1127,9 +1138,7 @@ static void rcu_torture_timer(struct timer_list *unused) rcu_ftrace_dump(DUMP_ALL); } __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = completed - started; - if (completed > ULONG_MAX >> 1) - completed = 0; /* Not all gp_seq have full range. */ + completed = rcutorture_seq_diff(completed, started); if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... */ completed = RCU_TORTURE_PIPE_LEN; @@ -1205,9 +1214,7 @@ rcu_torture_reader(void *arg) rcu_ftrace_dump(DUMP_ALL); } __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = completed - started; - if (completed > ULONG_MAX >> 1) - completed = 0; /* Not all gp_seq have full range. */ + completed = rcutorture_seq_diff(completed, started); if (completed > RCU_TORTURE_PIPE_LEN) { /* Should not happen, but... 
*/ completed = RCU_TORTURE_PIPE_LEN; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cbf2bcde5e60..fa219eea0ae7 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -532,7 +532,7 @@ static int rcu_pending(void); */ unsigned long rcu_get_gp_seq(void) { - return rcu_seq_ctr(READ_ONCE(rcu_state_p->gp_seq)); + return READ_ONCE(rcu_state_p->gp_seq); } EXPORT_SYMBOL_GPL(rcu_get_gp_seq); @@ -541,7 +541,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_seq); */ unsigned long rcu_sched_get_gp_seq(void) { - return rcu_seq_ctr(READ_ONCE(rcu_sched_state.gp_seq)); + return READ_ONCE(rcu_sched_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); @@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); */ unsigned long rcu_bh_get_gp_seq(void) { - return rcu_seq_ctr(READ_ONCE(rcu_bh_state.gp_seq)); + return READ_ONCE(rcu_bh_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); -- cgit v1.2.3 From a7538352da722fae5cc95ae6656ea2013f5b8b21 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 14 May 2018 13:27:33 -0700 Subject: rcu: Use pr_fmt to prefix "rcu: " to logging output This commit also adjusts some whitespace while in the area. Signed-off-by: Joe Perches Signed-off-by: Paul E. McKenney [ paulmck: Revert string-breaking %s as requested by Andy Shevchenko. ] --- kernel/rcu/rcuperf.c | 7 +++---- kernel/rcu/rcutorture.c | 4 ++-- kernel/rcu/srcutree.c | 5 ++++- kernel/rcu/tree.c | 8 +++++--- kernel/rcu/tree_plugin.h | 10 ++++++---- 5 files changed, 20 insertions(+), 14 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index b080bc4a4f45..00e395c0d7d0 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -680,12 +680,11 @@ rcu_perf_init(void) break; } if (i == ARRAY_SIZE(perf_ops)) { - pr_alert("rcu-perf: invalid perf type: \"%s\"\n", - perf_type); + pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type); pr_alert("rcu-perf types:"); for (i = 0; i < ARRAY_SIZE(perf_ops); i++) - pr_alert(" %s", perf_ops[i]->name); - pr_alert("\n"); + pr_cont(" %s", perf_ops[i]->name); + pr_cont("\n"); firsterr = -EINVAL; goto unwind; } diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0481c7286875..90a94fecdd73 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1755,8 +1755,8 @@ rcu_torture_init(void) torture_type); pr_alert("rcu-torture types:"); for (i = 0; i < ARRAY_SIZE(torture_ops); i++) - pr_alert(" %s", torture_ops[i]->name); - pr_alert("\n"); + pr_cont(" %s", torture_ops[i]->name); + pr_cont("\n"); firsterr = -EINVAL; goto unwind; } diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index d6d6ea9738c0..e526b56998af 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -26,6 +26,8 @@ * */ +#define pr_fmt(fmt) "rcu: " fmt + #include #include #include @@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) } if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || WARN_ON(srcu_readers_active(sp))) { - pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); + pr_info("%s: Active srcu_struct %p state: %d\n", + __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); return; /* Caller forgot to stop doing call_srcu()? 
*/ } free_percpu(sp->sda); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 19beabe73629..6f2922168216 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -27,6 +27,9 @@ * For detailed explanation of Read-Copy Update mechanism see - * Documentation/RCU */ + +#define pr_fmt(fmt) "rcu: " fmt + #include #include #include @@ -1374,8 +1377,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq) * See Documentation/RCU/stallwarn.txt for info on how to debug * RCU CPU stall warnings. */ - pr_err("INFO: %s detected stalls on CPUs/tasks:", - rsp->name); + pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name); print_cpu_stall_info_begin(); rcu_for_each_leaf_node(rsp, rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -4048,7 +4050,7 @@ static void __init rcu_init_geometry(void) if (rcu_fanout_leaf == RCU_FANOUT_LEAF && nr_cpu_ids == NR_CPUS) return; - pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", + pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", rcu_fanout_leaf, nr_cpu_ids); /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 54a251640f53..dbfe90191e19 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void) pr_info("\tRCU event tracing is enabled.\n"); if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) || (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32)) - pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n", - RCU_FANOUT); + pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n", + RCU_FANOUT); if (rcu_fanout_exact) pr_info("\tHierarchical RCU autobalancing is disabled.\n"); if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) @@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void) pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", RCU_FANOUT_LEAF); if (rcu_fanout_leaf != RCU_FANOUT_LEAF) - pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf); + pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", + rcu_fanout_leaf); if (nr_cpu_ids != NR_CPUS) pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids); #ifdef CONFIG_RCU_BOOST - pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY); + pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", + kthread_prio, CONFIG_RCU_BOOST_DELAY); #endif if (blimit != DEFAULT_RCU_BLIMIT) pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); -- cgit v1.2.3 From eac45e586cd38a1b56aa716560002e68741b78a7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 17 May 2018 11:33:17 -0700 Subject: rcu: Remove "inline" from rcu_torture_print_module_parms() This function is in rcutorture.c, which is not an include file, so there is no problem dropping the "inline", especially given that this function is invoked only twice per rcutorture run. This commit therefore delegates the inlining decision to the compiler by dropping the "inline". Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 90a94fecdd73..57a4277ccc63 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1359,7 +1359,7 @@ rcu_torture_stats(void *arg) return 0; } -static inline void +static void rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) { pr_alert("%s" TORTURE_FLAG -- cgit v1.2.3 From 028be12b294e3a059e6fc06852d458fdc82717ed Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 May 2018 09:20:34 -0700 Subject: rcutorture: Change units of onoff_interval to jiffies Some RCU bugs have been sensitive to the frequency of CPU-hotplug operations, which have been gradually increased over time. But this frequency is now at the one-second lower limit that can be specified using the rcutorture.onoff_interval kernel parameter. This commit therefore changes the units of rcutorture.onoff_interval from seconds to jiffies, and also sets the value specified for this kernel parameter in the TREE03 rcutorture scenario to 200, which is 200 milliseconds for HZ=1000. Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 4 ++-- kernel/rcu/rcutorture.c | 4 ++-- tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot | 2 +- tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index efc7aa7a0670..77bd3e635313 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3632,8 +3632,8 @@ Set time (s) after boot for CPU-hotplug testing. rcutorture.onoff_interval= [KNL] - Set time (s) between CPU-hotplug operations, or - zero to disable CPU-hotplug testing. + Set time (jiffies) between CPU-hotplug operations, + or zero to disable CPU-hotplug testing. rcutorture.shuffle_interval= [KNL] Set task-shuffle interval (s). 
Shuffling tasks diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0481c7286875..eb6d4915b4e6 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -87,7 +87,7 @@ torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing"); torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); torture_param(int, onoff_interval, 0, - "Time between CPU hotplugs (s), 0=disable"); + "Time between CPU hotplugs (jiffies), 0=disable"); torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); @@ -1889,7 +1889,7 @@ rcu_torture_init(void) firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); if (firsterr) goto unwind; - firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ); + firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval); if (firsterr) goto unwind; firsterr = rcu_torture_stall_init(); diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot index b79ddb9eb9e8..5c3213cc3ad7 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot @@ -1,4 +1,4 @@ -rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30 +rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30 rcutree.gp_preinit_delay=12 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh index 24ec91041957..7bab8246392b 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh @@ -39,7 +39,7 @@ rcutorture_param_onoff () { if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2" then echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2 - echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30 + echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30 fi } -- cgit v1.2.3 From 6bea2cc5a97b7e9677088b1a93e27edb74ae0e55 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 16 May 2018 15:30:36 -0700 Subject: rcu: Remove rcutorture test version and sequence number Back when RCU had a debugfs interface, there was a test version and sequence number that allowed associating debugfs data with a particular test run, where the test run started with modprobe and ended with rmmod, which was how tests were run back on the old ABAT system within IBM. But rcutorture testing no longer runs on ABAT, and there is no longer an RCU debugfs interface, so there is no longer any need for test versions and sequence numbers. This commit therefore removes the rcutorture_record_test_transition() and rcutorture_record_progress() functions, and along with them the rcutorture_testseq and rcutorture_vernum variables that they update. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu.h | 4 ---- kernel/rcu/rcutorture.c | 4 +--- kernel/rcu/tree.c | 37 ------------------------------------- 3 files changed, 1 insertion(+), 44 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index aa215d6355f8..0453a7d12b3f 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -444,7 +444,6 @@ enum rcutorture_type { #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, unsigned long *gp_seq); -void rcutorture_record_test_transition(void); void rcutorture_record_progress(unsigned long vernum); void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, @@ -458,7 +457,6 @@ static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, *flags = 0; *gp_seq = 0; } -static inline void rcutorture_record_test_transition(void) { } static inline void rcutorture_record_progress(unsigned long vernum) { } #ifdef CONFIG_RCU_TRACE void do_trace_rcu_torture_read(const char *rcutorturename, @@ -505,8 +503,6 @@ static inline void rcu_bh_force_quiescent_state(void) { } static inline void rcu_sched_force_quiescent_state(void) { } static inline void show_rcu_gp_kthreads(void) { } #else /* #ifdef CONFIG_TINY_RCU */ -extern unsigned long rcutorture_testseq; -extern unsigned long rcutorture_vernum; unsigned long rcu_get_gp_seq(void); unsigned long rcu_bh_get_gp_seq(void); unsigned long rcu_sched_get_gp_seq(void); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index eb6d4915b4e6..335387fabac2 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1016,7 +1016,7 @@ rcu_torture_writer(void *arg) break; } } - rcutorture_record_progress(++rcu_torture_current_version); + rcu_torture_current_version++; /* Cycle through nesting levels of rcu_expedite_gp() calls. */ if (can_expedite && !(torture_random(&rand) & 0xff & (!!expediting - 1))) { @@ -1613,7 +1613,6 @@ rcu_torture_cleanup(void) unsigned long gp_seq = 0; int i; - rcutorture_record_test_transition(); if (torture_cleanup_begin()) { if (cur_ops->cb_barrier != NULL) cur_ops->cb_barrier(); @@ -1918,7 +1917,6 @@ rcu_torture_init(void) goto unwind; } } - rcutorture_record_test_transition(); torture_init_end(); return 0; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index d3333ee2c6f5..65abb399b08d 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -191,18 +191,6 @@ module_param(gp_cleanup_delay, int, 0444); */ #define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ -/* - * Track the rcutorture test sequence number and the update version - * number within a given test. The rcutorture_testseq is incremented - * on every rcutorture module load and unload, so has an odd value - * when a test is running. The rcutorture_vernum is set to zero - * when rcutorture starts and is incremented on each rcutorture update. - * These variables enable correlating rcutorture output with the - * RCU tracing information. - */ -unsigned long rcutorture_testseq; -unsigned long rcutorture_vernum; - /* * Compute the mask of online CPUs for the specified rcu_node structure. * This will not be stable unless the rcu_node structure's ->lock is @@ -622,20 +610,6 @@ void show_rcu_gp_kthreads(void) } EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); -/* - * Record the number of times rcutorture tests have been initiated and - * terminated. 
This information allows the debugfs tracing stats to be - * correlated to the rcutorture messages, even when the rcutorture module - * is being repeatedly loaded and unloaded. In other words, we cannot - * store this state in rcutorture itself. - */ -void rcutorture_record_test_transition(void) -{ - rcutorture_testseq++; - rcutorture_vernum = 0; -} -EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); - /* * Send along grace-period-related data for rcutorture diagnostics. */ @@ -664,17 +638,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, } EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); -/* - * Record the number of writer passes through the current rcutorture test. - * This is also used to correlate debugfs tracing stats with the rcutorture - * messages. - */ -void rcutorture_record_progress(unsigned long vernum) -{ - rcutorture_vernum++; -} -EXPORT_SYMBOL_GPL(rcutorture_record_progress); - /* * Return the root node of the specified rcu_state structure. */ -- cgit v1.2.3 From 6b06aa723ed705102f3c63a494ac45352ccc0e7c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 22 May 2018 10:56:05 -0700 Subject: rcutorture: Extract common code from rcu_torture_reader() This commit extracts the code executed on each pass through the loop in rcu_torture_reader() into a new rcu_torture_one_read() function. This new function will also be used by rcu_torture_timer(). Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 98 +++++++++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 43 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 335387fabac2..971e31ae9bcf 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1089,6 +1089,60 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp) kfree(rhp); } +/* + * Do one read-side critical section, returning false if there was + * no data to read. Can be invoked both from process context and + * from a timer handler. + */ +static bool rcu_torture_one_read(struct torture_random_state *trsp) +{ + int idx; + unsigned long started; + unsigned long completed; + struct rcu_torture *p; + int pipe_count; + unsigned long long ts; + + idx = cur_ops->readlock(); + started = cur_ops->get_gp_seq(); + ts = rcu_trace_clock_local(); + p = rcu_dereference_check(rcu_torture_current, + rcu_read_lock_bh_held() || + rcu_read_lock_sched_held() || + srcu_read_lock_held(srcu_ctlp) || + torturing_tasks()); + if (p == NULL) { + /* Wait for rcu_torture_writer to get underway */ + cur_ops->readunlock(idx); + return false; + } + if (p->rtort_mbtest == 0) + atomic_inc(&n_rcu_torture_mberror); + cur_ops->read_delay(trsp); + preempt_disable(); + pipe_count = p->rtort_pipe_count; + if (pipe_count > RCU_TORTURE_PIPE_LEN) { + /* Should not happen, but... */ + pipe_count = RCU_TORTURE_PIPE_LEN; + } + completed = cur_ops->get_gp_seq(); + if (pipe_count > 1) { + do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, + ts, started, completed); + rcu_ftrace_dump(DUMP_ALL); + } + __this_cpu_inc(rcu_torture_count[pipe_count]); + completed = rcutorture_seq_diff(completed, started); + if (completed > RCU_TORTURE_PIPE_LEN) { + /* Should not happen, but... */ + completed = RCU_TORTURE_PIPE_LEN; + } + __this_cpu_inc(rcu_torture_batch[completed]); + preempt_enable(); + cur_ops->readunlock(idx); + return true; +} + /* * RCU torture reader from timer handler. Dereferences rcu_torture_current, * incrementing the corresponding element of the pipeline array. 
The @@ -1165,14 +1219,8 @@ static void rcu_torture_timer(struct timer_list *unused) static int rcu_torture_reader(void *arg) { - unsigned long started; - unsigned long completed; - int idx; DEFINE_TORTURE_RANDOM(rand); - struct rcu_torture *p; - int pipe_count; struct timer_list t; - unsigned long long ts; VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); set_user_nice(current, MAX_NICE); @@ -1184,44 +1232,8 @@ rcu_torture_reader(void *arg) if (!timer_pending(&t)) mod_timer(&t, jiffies + 1); } - idx = cur_ops->readlock(); - started = cur_ops->get_gp_seq(); - ts = rcu_trace_clock_local(); - p = rcu_dereference_check(rcu_torture_current, - rcu_read_lock_bh_held() || - rcu_read_lock_sched_held() || - srcu_read_lock_held(srcu_ctlp) || - torturing_tasks()); - if (p == NULL) { - /* Wait for rcu_torture_writer to get underway */ - cur_ops->readunlock(idx); + if (!rcu_torture_one_read(&rand)) schedule_timeout_interruptible(HZ); - continue; - } - if (p->rtort_mbtest == 0) - atomic_inc(&n_rcu_torture_mberror); - cur_ops->read_delay(&rand); - preempt_disable(); - pipe_count = p->rtort_pipe_count; - if (pipe_count > RCU_TORTURE_PIPE_LEN) { - /* Should not happen, but... */ - pipe_count = RCU_TORTURE_PIPE_LEN; - } - completed = cur_ops->get_gp_seq(); - if (pipe_count > 1) { - do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, - ts, started, completed); - rcu_ftrace_dump(DUMP_ALL); - } - __this_cpu_inc(rcu_torture_count[pipe_count]); - completed = rcutorture_seq_diff(completed, started); - if (completed > RCU_TORTURE_PIPE_LEN) { - /* Should not happen, but... */ - completed = RCU_TORTURE_PIPE_LEN; - } - __this_cpu_inc(rcu_torture_batch[completed]); - preempt_enable(); - cur_ops->readunlock(idx); stutter_wait("rcu_torture_reader"); } while (!torture_must_stop()); if (irqreader && cur_ops->irq_capable) { -- cgit v1.2.3 From 8da9a59523b6608f4b21f3e489578d0993c0779f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 22 May 2018 11:17:51 -0700 Subject: rcutorture: Use atomic increment for n_rcu_torture_timers Currently, rcu_torture_timer() relies on a lock to guard updates to n_rcu_torture_timers. Unfortunately, consolidating code with rcu_torture_reader() will dispense with this lock. This commit therefore makes n_rcu_torture_timers be an atomic_long_t and uses atomic_long_inc() to carry out the update. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 971e31ae9bcf..2452e4a29923 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -151,7 +151,7 @@ static long n_rcu_torture_boost_ktrerror; static long n_rcu_torture_boost_rterror; static long n_rcu_torture_boost_failure; static long n_rcu_torture_boosts; -static long n_rcu_torture_timers; +static atomic_long_t n_rcu_torture_timers; static long n_barrier_attempts; static long n_barrier_successes; static atomic_long_t n_cbfloods; @@ -1160,6 +1160,7 @@ static void rcu_torture_timer(struct timer_list *unused) int pipe_count; unsigned long long ts; + atomic_long_inc(&n_rcu_torture_timers); idx = cur_ops->readlock(); started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); @@ -1177,7 +1178,6 @@ static void rcu_torture_timer(struct timer_list *unused) atomic_inc(&n_rcu_torture_mberror); spin_lock(&rand_lock); cur_ops->read_delay(&rand); - n_rcu_torture_timers++; spin_unlock(&rand_lock); preempt_disable(); pipe_count = p->rtort_pipe_count; @@ -1290,7 +1290,7 @@ rcu_torture_stats_print(void) pr_cont("rtbf: %ld rtb: %ld nt: %ld ", n_rcu_torture_boost_failure, n_rcu_torture_boosts, - n_rcu_torture_timers); + atomic_long_read(&n_rcu_torture_timers)); torture_onoff_stats(); pr_cont("barrier: %ld/%ld:%ld ", n_barrier_successes, -- cgit v1.2.3 From 3025520ec424df8b0fd5cdc319ad6b83406d9954 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 22 May 2018 11:38:47 -0700 Subject: rcutorture: Use per-CPU random state for rcu_torture_timer() Currently, the rcu_torture_timer() function uses a single global torture_random_state structure protected by a single global lock. This conflicts to some extent with performance and scalability, but even more with the goal of consolidating read-side testing with rcu_torture_reader(). This commit therefore creates a per-CPU torture_random_state structure for use by rcu_torture_timer() and eliminates the lock. Signed-off-by: Paul E. McKenney [ paulmck: Make rcu_torture_timer_rand static, per 0day Test Robot report. ] --- include/linux/torture.h | 2 ++ kernel/rcu/rcutorture.c | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/include/linux/torture.h b/include/linux/torture.h index a55e80817dae..61dfd93b6ee4 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -64,6 +64,8 @@ struct torture_random_state { long trs_count; }; #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 } +#define DEFINE_TORTURE_RANDOM_PERCPU(name) \ + DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); /* Task shuffler, which causes CPUs to occasionally go idle. */ diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 2452e4a29923..d5a5465d2507 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1143,6 +1143,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) return true; } +static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); + /* * RCU torture reader from timer handler. Dereferences rcu_torture_current, * incrementing the corresponding element of the pipeline array. 
The @@ -1154,12 +1156,12 @@ static void rcu_torture_timer(struct timer_list *unused) int idx; unsigned long started; unsigned long completed; - static DEFINE_TORTURE_RANDOM(rand); - static DEFINE_SPINLOCK(rand_lock); struct rcu_torture *p; int pipe_count; + struct torture_random_state *trsp; unsigned long long ts; + trsp = this_cpu_ptr(&rcu_torture_timer_rand); atomic_long_inc(&n_rcu_torture_timers); idx = cur_ops->readlock(); started = cur_ops->get_gp_seq(); @@ -1176,9 +1178,7 @@ static void rcu_torture_timer(struct timer_list *unused) } if (p->rtort_mbtest == 0) atomic_inc(&n_rcu_torture_mberror); - spin_lock(&rand_lock); - cur_ops->read_delay(&rand); - spin_unlock(&rand_lock); + cur_ops->read_delay(trsp); preempt_disable(); pipe_count = p->rtort_pipe_count; if (pipe_count > RCU_TORTURE_PIPE_LEN) {
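
The conversion relies on the fact that a timer handler runs in softirq context and therefore cannot migrate between CPUs mid-execution, so each CPU's copy of the state is private for the duration and needs no lock. A minimal sketch of the resulting pattern (identifier names are illustrative, not from the patch):

    static DEFINE_TORTURE_RANDOM_PERCPU(example_rand);      /* one generator per CPU */

    static void example_timer_body(void)                    /* hypothetical timer-context caller */
    {
            /* Softirq context: no migration, so this CPU's copy is private. */
            struct torture_random_state *trsp = this_cpu_ptr(&example_rand);

            (void)torture_random(trsp);                     /* advances only this CPU's state */
    }
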
McKenney" Date: Fri, 25 May 2018 07:29:25 -0700 Subject: rcutorture: Handle extended read-side critical sections This commit enables rcutorture to test whether RCU properly aggregates different types of read-side critical sections into a larger section covering the set. It does this by extending an initial read-side critical section randomly for a random number of extensions. There is a new rcu_torture_ops field ->extendable that specifies what extensions are permitted for a given flavor of RCU (for example, SRCU does not permit any extensions, while RCU-sched permits all types). Note that if a given operation (for example, local_bh_disable()) extends an RCU read-side critical section, then rcutorture feels free to also start and end the critical section with that operation's type of disabling. Disabling operations include local_bh_disable(), local_irq_disable(), and preempt_disable(). This commit also adds a new "busted_srcud" torture type, which verifies rcutorture's ability to detect extensions of RCU read-side critical sections that are not handled. Gotta test the test, after all! Note that it is not legal to invoke local_bh_disable() with interrupts disabled, and this transition is avoided by overriding the random-number generator when it wants to call local_bh_disable() while interrupts are disabled. The code instead leaves both interrupts and bh/softirq disabled in this case. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 152 insertions(+), 6 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index ac700aa6dcaf..f97757755207 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -62,6 +62,18 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Paul E. McKenney and Josh Triplett "); +/* Bits for ->extendables field, extendables param, and related definitions. */ +#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ +#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) +#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */ +#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */ +#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */ +#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */ +#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \ + RCUTORTURE_RDR_PREEMPT) +#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ + /* Must be power of two minus one. 
*/ + torture_param(int, cbflood_inter_holdoff, HZ, "Holdoff between floods (jiffies)"); torture_param(int, cbflood_intra_holdoff, 1, @@ -69,6 +81,8 @@ torture_param(int, cbflood_intra_holdoff, 1, torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable"); torture_param(int, cbflood_n_per_burst, 20000, "# callbacks per burst in flood"); +torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, + "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable"); torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); @@ -277,6 +291,8 @@ struct rcu_torture_ops { void (*stats)(void); int irq_capable; int can_boost; + int extendables; + int ext_irq_conflict; const char *name; }; @@ -452,6 +468,8 @@ static struct rcu_torture_ops rcu_bh_ops = { .fqs = rcu_bh_force_quiescent_state, .stats = NULL, .irq_capable = 1, + .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ), + .ext_irq_conflict = RCUTORTURE_RDR_RCU, .name = "rcu_bh" }; @@ -622,6 +640,26 @@ static struct rcu_torture_ops srcud_ops = { .name = "srcud" }; +/* As above, but broken due to inappropriate reader extension. */ +static struct rcu_torture_ops busted_srcud_ops = { + .ttype = SRCU_FLAVOR, + .init = srcu_torture_init, + .cleanup = srcu_torture_cleanup, + .readlock = srcu_torture_read_lock, + .read_delay = rcu_read_delay, + .readunlock = srcu_torture_read_unlock, + .get_gp_seq = srcu_torture_completed, + .deferred_free = srcu_torture_deferred_free, + .sync = srcu_torture_synchronize, + .exp_sync = srcu_torture_synchronize_expedited, + .call = srcu_torture_call, + .cb_barrier = srcu_torture_barrier, + .stats = srcu_torture_stats, + .irq_capable = 1, + .extendables = RCUTORTURE_MAX_EXTEND, + .name = "busted_srcud" +}; + /* * Definitions for sched torture testing. */ @@ -660,6 +698,7 @@ static struct rcu_torture_ops sched_ops = { .fqs = rcu_sched_force_quiescent_state, .stats = NULL, .irq_capable = 1, + .extendables = RCUTORTURE_MAX_EXTEND, .name = "sched" }; @@ -1089,6 +1128,110 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp) kfree(rhp); } +/* + * Do one extension of an RCU read-side critical section using the + * current reader state in readstate (set to zero for initial entry + * to extended critical section), set the new state as specified by + * newstate (set to zero for final exit from extended critical section), + * and random-number-generator state in trsp. If this is neither the + * beginning or end of the critical section and if there was actually a + * change, do a ->read_delay(). + */ +static void rcutorture_one_extend(int *readstate, int newstate, + struct torture_random_state *trsp) +{ + int idxnew = -1; + int idxold = *readstate; + int statesnew = ~*readstate & newstate; + int statesold = *readstate & ~newstate; + + WARN_ON_ONCE(idxold < 0); + WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); + + /* First, put new protection in place to avoid critical-section gap. */ + if (statesnew & RCUTORTURE_RDR_BH) + local_bh_disable(); + if (statesnew & RCUTORTURE_RDR_IRQ) + local_irq_disable(); + if (statesnew & RCUTORTURE_RDR_PREEMPT) + preempt_disable(); + if (statesnew & RCUTORTURE_RDR_RCU) + idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; + + /* Next, remove old protection, irq first due to bh conflict. 
*/ + if (statesold & RCUTORTURE_RDR_IRQ) + local_irq_enable(); + if (statesold & RCUTORTURE_RDR_BH) + local_bh_enable(); + if (statesold & RCUTORTURE_RDR_PREEMPT) + preempt_enable(); + if (statesold & RCUTORTURE_RDR_RCU) + cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); + + /* Delay if neither beginning nor end and there was a change. */ + if ((statesnew || statesold) && *readstate && newstate) + cur_ops->read_delay(trsp); + + /* Update the reader state. */ + if (idxnew == -1) + idxnew = idxold & ~RCUTORTURE_RDR_MASK; + WARN_ON_ONCE(idxnew < 0); + WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); + *readstate = idxnew | newstate; + WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); + WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); +} + +/* Return the biggest extendables mask given current RCU and boot parameters. */ +static int rcutorture_extend_mask_max(void) +{ + int mask; + + WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); + mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; + mask = mask | RCUTORTURE_RDR_RCU; + return mask; +} + +/* Return a random protection state mask, but with at least one bit set. */ +static int +rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) +{ + int mask = rcutorture_extend_mask_max(); + + WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); + mask = mask & (torture_random(trsp) >> RCUTORTURE_RDR_SHIFT); + if ((mask & RCUTORTURE_RDR_IRQ) && + !(mask & RCUTORTURE_RDR_BH) && + (oldmask & RCUTORTURE_RDR_BH)) + mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */ + if ((mask & RCUTORTURE_RDR_IRQ) && + !(mask & cur_ops->ext_irq_conflict) && + (oldmask & cur_ops->ext_irq_conflict)) + mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */ + return mask ?: RCUTORTURE_RDR_RCU; +} + +/* + * Do a randomly selected number of extensions of an existing RCU read-side + * critical section. + */ +static void rcutorture_loop_extend(int *readstate, + struct torture_random_state *trsp) +{ + int i; + int mask = rcutorture_extend_mask_max(); + + WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ + if (!((mask - 1) & mask)) + return; /* Current RCU flavor not extendable. */ + i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS; + while (i--) { + mask = rcutorture_extend_mask(*readstate, trsp); + rcutorture_one_extend(readstate, mask, trsp); + } +} + /* * Do one read-side critical section, returning false if there was * no data to read. 
Can be invoked both from process context and @@ -1096,14 +1239,16 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp) */ static bool rcu_torture_one_read(struct torture_random_state *trsp) { - int idx; unsigned long started; unsigned long completed; + int newstate; struct rcu_torture *p; int pipe_count; + int readstate = 0; unsigned long long ts; - idx = cur_ops->readlock(); + newstate = rcutorture_extend_mask(readstate, trsp); + rcutorture_one_extend(&readstate, newstate, trsp); started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, @@ -1113,12 +1258,12 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) torturing_tasks()); if (p == NULL) { /* Wait for rcu_torture_writer to get underway */ - cur_ops->readunlock(idx); + rcutorture_one_extend(&readstate, 0, trsp); return false; } if (p->rtort_mbtest == 0) atomic_inc(&n_rcu_torture_mberror); - cur_ops->read_delay(trsp); + rcutorture_loop_extend(&readstate, trsp); preempt_disable(); pipe_count = p->rtort_pipe_count; if (pipe_count > RCU_TORTURE_PIPE_LEN) { @@ -1139,7 +1284,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) } __this_cpu_inc(rcu_torture_batch[completed]); preempt_enable(); - cur_ops->readunlock(idx); + rcutorture_one_extend(&readstate, 0, trsp); + WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); return true; } @@ -1704,7 +1850,7 @@ rcu_torture_init(void) int firsterr = 0; static struct rcu_torture_ops *torture_ops[] = { &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, - &sched_ops, &tasks_ops, + &busted_srcud_ops, &sched_ops, &tasks_ops, }; if (!torture_init_begin(torture_type, verbose)) -- cgit v1.2.3 From bf1bef50bee13b2292929f4b86118302a3827a32 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 10 Jun 2018 08:50:09 -0700 Subject: rcutorture: Emphasize testing of single reader protection type For RCU implementations supporting multiple types of reader protection, rcutorture currently randomly selects the combinations of types of protection for each phase of each reader. The problem with this is that, given the four kinds of protection for RCU-sched (local_irq_disable(), local_bh_disable(), preempt_disable(), and rcu_read_lock_sched()), the reader will be protected by a single mechanism only 25% of the time. We really need heavier testing of single read-side mechanisms. This commit therefore uses only a single mechanism about 60% of the time, half of the time explicitly and one-eighth of the time by chance. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index f97757755207..aa0be7ec2a26 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -69,6 +69,7 @@ MODULE_AUTHOR("Paul E. McKenney and Josh Triplett > 8; + unsigned long randmask2 = randmask1 >> 1; WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); - mask = mask & (torture_random(trsp) >> RCUTORTURE_RDR_SHIFT); + /* Half the time lots of bits, half the time only one bit. 
*/ + if (randmask1 & 0x1) + mask = mask & randmask2; + else + mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); if ((mask & RCUTORTURE_RDR_IRQ) && !(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) -- cgit v1.2.3 From 450efca7182a516a12dfcc0311abfd242bde42b2 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 10 Jun 2018 16:45:43 -0700 Subject: rcutorture: Disable RT throttling for boost tests Currently rcutorture is not able to torture RCU boosting properly. This is because rcutorture's boost threads, which are doing the torturing, may be throttled due to RT throttling. This patch makes rcutorture use the right torture technique (unthrottled rcutorture boost tasks) for torturing RCU so that the test fails correctly when no boost is available. Currently this requires accessing sysctl_sched_rt_runtime directly, but that should be OK since rcutorture is test code. Such direct access is also only possible if rcutorture is used as a built-in, so make it conditional on that. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index aa0be7ec2a26..74e47d0a618c 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -55,6 +55,7 @@ #include #include #include +#include #include "rcu.h" @@ -772,6 +773,32 @@ static void rcu_torture_boost_cb(struct rcu_head *head) smp_store_release(&rbip->inflight, 0); } +static int old_rt_runtime = -1; + +static void rcu_torture_disable_rt_throttle(void) +{ + /* + * Disable RT throttling so that rcutorture's boost threads don't get + * throttled. Only possible if rcutorture is built-in otherwise the + * user should manually do this by setting the sched_rt_period_us and + * sched_rt_runtime sysctls. + */ + if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) + return; + + old_rt_runtime = sysctl_sched_rt_runtime; + sysctl_sched_rt_runtime = -1; +} + +static void rcu_torture_enable_rt_throttle(void) +{ + if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) + return; + + sysctl_sched_rt_runtime = old_rt_runtime; + old_rt_runtime = -1; +} + static int rcu_torture_boost(void *arg) { unsigned long call_rcu_time; @@ -1511,6 +1538,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu) mutex_lock(&boost_mutex); t = boost_tasks[cpu]; boost_tasks[cpu] = NULL; + rcu_torture_enable_rt_throttle(); mutex_unlock(&boost_mutex); /* This must be outside of the mutex, otherwise deadlock! */ @@ -1527,6 +1555,7 @@ static int rcutorture_booster_init(unsigned int cpu) /* Don't allow time recalculation while creating a new task. */ mutex_lock(&boost_mutex); + rcu_torture_disable_rt_throttle(); VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, cpu_to_node(cpu), -- cgit v1.2.3 From 3b745c8969c752601cb68c82a06735363563ab42 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sun, 10 Jun 2018 16:45:44 -0700 Subject: rcutorture: Make boost test more robust Currently, with RCU_BOOST disabled, I get no failures when forcing rcutorture to test RCU boost priority inversion. The reason seems to be that we don't check for failures if the callback never ran at all for the duration of the boost-test loop. Further, the 'rtb' and 'rtbf' counters seem to be used inconsistently. 
'rtb' is incremented at the start of each test and 'rtbf' is incremented per-cpu on each failure of call_rcu(). So it's possible 'rtbf' > 'rtb'. To test the boost with rcutorture, I did the following on a 4-CPU x86 machine: modprobe rcutorture test_boost=2 sleep 20 rmmod rcutorture With patch: rtbf: 8 rtb: 12 Without patch: rtbf: 0 rtb: 2 In summary this patch: - Increments failed and total test counters once per boost-test. - Checks for failure cases correctly. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 45 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 6 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 74e47d0a618c..36b9b8266213 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -799,6 +799,18 @@ static void rcu_torture_enable_rt_throttle(void) old_rt_runtime = -1; } +static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) +{ + if (end - start > test_boost_duration * HZ - HZ / 2) { + VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); + n_rcu_torture_boost_failure++; + + return true; /* failed */ + } + + return false; /* passed */ +} + static int rcu_torture_boost(void *arg) { unsigned long call_rcu_time; @@ -819,6 +831,21 @@ static int rcu_torture_boost(void *arg) init_rcu_head_on_stack(&rbi.rcu); /* Each pass through the following loop does one boost-test cycle. */ do { + /* Track if the test failed already in this test interval? */ + bool failed = false; + + /* Increment n_rcu_torture_boosts once per boost-test */ + while (!kthread_should_stop()) { + if (mutex_trylock(&boost_mutex)) { + n_rcu_torture_boosts++; + mutex_unlock(&boost_mutex); + break; + } + schedule_timeout_uninterruptible(1); + } + if (kthread_should_stop()) + goto checkwait; + /* Wait for the next test interval. */ oldstarttime = boost_starttime; while (ULONG_CMP_LT(jiffies, oldstarttime)) { @@ -837,11 +864,10 @@ static int rcu_torture_boost(void *arg) /* RCU core before ->inflight = 1. */ smp_store_release(&rbi.inflight, 1); call_rcu(&rbi.rcu, rcu_torture_boost_cb); - if (jiffies - call_rcu_time > - test_boost_duration * HZ - HZ / 2) { - VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); - n_rcu_torture_boost_failure++; - } + /* Check if the boost test failed */ + failed = failed || + rcu_torture_boost_failed(call_rcu_time, + jiffies); call_rcu_time = jiffies; } stutter_wait("rcu_torture_boost"); @@ -849,6 +875,14 @@ static int rcu_torture_boost(void *arg) goto checkwait; } + /* + * If boost never happened, then inflight will always be 1, in + * this case the boost check would never happen in the above + * loop so do another one here. + */ + if (!failed && smp_load_acquire(&rbi.inflight)) + rcu_torture_boost_failed(call_rcu_time, jiffies); + /* * Set the start time of the next test interval. * Yes, this is vulnerable to long delays, but such @@ -861,7 +895,6 @@ static int rcu_torture_boost(void *arg) if (mutex_trylock(&boost_mutex)) { boost_starttime = jiffies + test_boost_interval * HZ; - n_rcu_torture_boosts++; mutex_unlock(&boost_mutex); break; } -- cgit v1.2.3 From 622be33fcbc93e9b672b99ed338369eb5e843ac3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 18 Jun 2018 16:47:34 +0200 Subject: rcutorture: Use monotonic timestamp for stall detection The get_seconds() call is deprecated because it overflows on 32-bit architectures. 
The algorithm in rcu_torture_stall() can deal with the overflow, but another problem here is that using a CLOCK_REALTIME stamp can lead to a false-positive stall warning when a settimeofday() happens concurrently. Using ktime_get_seconds() instead avoids those issues and will never overflow. The added cast to 'unsigned long' however is necessary to make ULONG_CMP_LT() work correctly. Signed-off-by: Arnd Bergmann Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 36b9b8266213..049b3735dba8 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1622,7 +1622,7 @@ static int rcu_torture_stall(void *args) VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); } if (!kthread_should_stop()) { - stop_at = get_seconds() + stall_cpu; + stop_at = ktime_get_seconds() + stall_cpu; /* RCU CPU stall is expected behavior in following code. */ rcu_read_lock(); if (stall_cpu_irqsoff) @@ -1631,7 +1631,8 @@ static int rcu_torture_stall(void *args) preempt_disable(); pr_alert("rcu_torture_stall start on CPU %d.\n", smp_processor_id()); - while (ULONG_CMP_LT(get_seconds(), stop_at)) + while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), + stop_at)) continue; /* Induce RCU CPU stall warning. */ if (stall_cpu_irqsoff) local_irq_enable(); -- cgit v1.2.3 From 4babd855fd6137f9792117eb73b096c221a49d3c Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Tue, 19 Jun 2018 15:14:18 -0700 Subject: rcutorture: Add support to detect if boost kthread prio is too low When rcutorture is built in to the kernel, an earlier patch detects that and raises the priority of RCU's kthreads to allow rcutorture's RCU priority boosting tests to succeed. However, if rcutorture is built as a module, those priorities must be raised manually via the rcutree.kthread_prio kernel boot parameter. If this manual step is not taken, rcutorture's RCU priority boosting tests will fail due to kthread starvation. One approach would be to raise the default priority, but that risks breaking existing users. Another approach would be to allow runtime adjustment of RCU's kthread priorities, but that introduces numerous "interesting" race conditions. This patch therefore instead detects too-low priorities, and prints a message and disables the RCU priority boosting tests in that case. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu.h | 2 ++ kernel/rcu/rcutorture.c | 32 ++++++++++++++++++++++++++++---- kernel/rcu/tree.c | 7 +++++++ 3 files changed, 37 insertions(+), 4 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index 0453a7d12b3f..bee070979970 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -502,6 +502,7 @@ static inline void rcu_force_quiescent_state(void) { } static inline void rcu_bh_force_quiescent_state(void) { } static inline void rcu_sched_force_quiescent_state(void) { } static inline void show_rcu_gp_kthreads(void) { } +static inline int rcu_get_gp_kthreads_prio(void) { return 0; } #else /* #ifdef CONFIG_TINY_RCU */ unsigned long rcu_get_gp_seq(void); unsigned long rcu_bh_get_gp_seq(void); @@ -510,6 +511,7 @@ unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); unsigned long srcu_batches_completed(struct srcu_struct *sp); void show_rcu_gp_kthreads(void); +int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 049b3735dba8..e3d2d4f1d928 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1787,6 +1787,32 @@ static void rcu_torture_barrier_cleanup(void) } } +static bool rcu_torture_can_boost(void) +{ + static int boost_warn_once; + int prio; + + if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) + return false; + + prio = rcu_get_gp_kthreads_prio(); + if (!prio) + return false; + + if (prio < 2) { + if (boost_warn_once == 1) + return false; + + pr_alert("%s: WARN: RCU kthread priority too low to test boosting. " "Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 " "on the kernel command line.\n", KBUILD_MODNAME); + boost_warn_once = 1; + return false; + } + + return true; +} + static enum cpuhp_state rcutor_hp; static void @@ -1831,8 +1857,7 @@ rcu_torture_cleanup(void) torture_stop_kthread(rcu_torture_fqs, fqs_task); for (i = 0; i < ncbflooders; i++) torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]); - if ((test_boost == 1 && cur_ops->can_boost) || - test_boost == 2) + if (rcu_torture_can_boost()) cpuhp_remove_state(rcutor_hp); /* @@ -2056,8 +2081,7 @@ rcu_torture_init(void) test_boost_interval = 1; if (test_boost_duration < 2) test_boost_duration = 2; - if ((test_boost == 1 && cur_ops->can_boost) || - test_boost == 2) { + if (rcu_torture_can_boost()) { boost_starttime = jiffies + test_boost_interval * HZ; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 65abb399b08d..b4bcb5e21ca6 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -180,6 +180,13 @@ module_param(gp_init_delay, int, 0444); static int gp_cleanup_delay; module_param(gp_cleanup_delay, int, 0444); +/* Retrieve RCU kthreads priority for rcutorture */ +int rcu_get_gp_kthreads_prio(void) +{ + return kthread_prio; +} +EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio); + /* * Number of grace periods between delays, normalized by the duration of * the delay. The longer the delay, the more the grace periods between -- cgit v1.2.3 From bf5b64355a3ce41752856b66c4efad4d7a88e84b Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Tue, 19 Jun 2018 15:14:19 -0700 Subject: rcutorture: Fix rcu_barrier successes counter The rcutorture test module currently increments both the success and error counters for the barrier test upon error, which results in misleading statistics being printed. 
This commit therefore changes the code to increment the success counter only when the test actually passes. This change was tested by returning from the barrier callback without incrementing the callback counter, thus introducing what appeared to rcutorture to be rcu_barrier() failures. Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel/rcu/rcutorture.c') diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index e3d2d4f1d928..bdc86cdf3b8b 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -169,7 +169,7 @@ static long n_rcu_torture_boost_failure; static long n_rcu_torture_boosts; static atomic_long_t n_rcu_torture_timers; static long n_barrier_attempts; -static long n_barrier_successes; +static long n_barrier_successes; /* did rcu_barrier test succeed? */ static atomic_long_t n_cbfloods; static struct list_head rcu_torture_removed; @@ -1723,8 +1723,9 @@ static int rcu_torture_barrier(void *arg) atomic_read(&barrier_cbs_invoked), n_barrier_cbs); WARN_ON_ONCE(1); + } else { + n_barrier_successes++; } - n_barrier_successes++; schedule_timeout_interruptible(HZ / 10); } while (!torture_must_stop()); torture_kthread_stopping("rcu_torture_barrier"); -- cgit v1.2.3
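
As a stand-alone illustration of the reader-extension machinery added by the patches above, the following user-space C sketch mimics the mask-selection logic of rcutorture_extend_mask(), including the single-bit bias from the "Emphasize testing of single reader protection type" commit and the bh/irq override. This is a sketch under simplifying assumptions, not kernel code: rand() stands in for torture_random(), a plain variable models the ->extendables field, the SRCU index shifting and ->ext_irq_conflict handling are omitted, and the RDR_* and extend_mask() names are illustrative rather than kernel symbols.

#include <stdio.h>
#include <stdlib.h>

#define RDR_BH      0x1                 /* Extend readers by disabling bh. */
#define RDR_IRQ     0x2                 /*  ... disabling interrupts. */
#define RDR_PREEMPT 0x4                 /*  ... disabling preemption. */
#define RDR_RCU     0x8                 /*  ... entering another RCU reader. */
#define RDR_NBITS   3                   /* Number of disabling bits above. */
#define MAX_EXTEND  (RDR_BH | RDR_IRQ | RDR_PREEMPT)

static int extendables = MAX_EXTEND;    /* Models the ->extendables field. */

/* Return a random protection mask, but with at least one bit set. */
static int extend_mask(int oldmask)
{
	int mask = extendables & MAX_EXTEND;
	unsigned long randmask1 = (unsigned long)rand();
	unsigned long randmask2 = randmask1 >> 1;

	/* Half the time lots of bits, half the time only one bit. */
	if (randmask1 & 0x1)
		mask &= randmask2;
	else
		mask &= 1 << (randmask2 % RDR_NBITS);
	/* Can't enable bh while interrupts are disabled, so keep bh off. */
	if ((mask & RDR_IRQ) && !(mask & RDR_BH) && (oldmask & RDR_BH))
		mask |= RDR_BH;
	return mask ? mask : RDR_RCU;
}

int main(void)
{
	int oldmask = 0;
	int i;

	srand(42);                      /* Deterministic run for illustration. */
	for (i = 0; i < 8; i++) {
		int newmask = extend_mask(oldmask);

		printf("reader state: %#x -> %#x\n", oldmask, newmask);
		oldmask = newmask;
	}
	return 0;
}

Built with "gcc -o extend-sketch extend-sketch.c" and run, this prints eight reader-state transitions: single-bit masks dominate, reflecting the bias described in the commit log, and any transition that would re-enable bh while interrupts remain disabled keeps the bh bit set instead, mirroring the override in the kernel code.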