| author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-05-15 15:24:41 -0700 |
|---|---|---|
| committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2018-07-12 15:38:55 -0700 |
| commit | d72193123c81ae6123d108b3be2096f3f13b25a6 (patch) | |
| tree | 2ec9801c5c3b7c1f9411ba3a1bc6005d1f154198 /kernel/rcu/rcuperf.c | |
| parent | 2e3e5e55010105f9d4351f68e15dbc43402a7794 (diff) | |
| download | linux-d72193123c81ae6123d108b3be2096f3f13b25a6.tar.bz2 | |
rcutorture: Correctly handle grace-period sequence wrap
The new ->gp_seq grace-period sequence numbers must be shifted down,
which gives artifacts when these numbers wrap. This commit therefore
enables rcutorture and rcuperf to handle grace-period sequence numbers
even if they do wrap. It does this by allowing a special subtraction
function to be specified, and this function subtracts before shifting.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
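To see why subtracting before shifting matters, note that the low bits of a sequence number carry state and the bits above them count grace periods. The following is a minimal standalone sketch, not kernel code; it assumes RCU_SEQ_CTR_SHIFT == 2 (the value in kernel/rcu/rcu.h of this era) and shows that shift-then-subtract produces a huge bogus difference across a counter wrap, while subtract-then-shift recovers the true count:

```c
#include <stdio.h>
#include <limits.h>

/* Assumed value: the low two bits hold state, the rest count grace periods. */
#define RCU_SEQ_CTR_SHIFT 2

/* Wrap-safe difference: modular subtraction first, then shift. */
static unsigned long seq_diff(unsigned long new, unsigned long old)
{
	return (new - old) >> RCU_SEQ_CTR_SHIFT;
}

int main(void)
{
	unsigned long old = ULONG_MAX - 3;	/* sequence counter about to wrap */
	unsigned long new = 8;			/* three grace periods later */

	/* Shifting first discards the modular relationship, so the
	 * difference across the wrap is enormous... */
	printf("shift, then subtract: %lu\n",
	       (new >> RCU_SEQ_CTR_SHIFT) - (old >> RCU_SEQ_CTR_SHIFT));

	/* ...whereas subtracting first lets the defined unsigned
	 * wraparound cancel, recovering the true count of 3. */
	printf("subtract, then shift: %lu\n", seq_diff(new, old));
	return 0;
}
```

This works because unsigned overflow is well-defined in C: new - old is exact modulo ULONG_MAX + 1, so the counter portion of the difference survives the wrap intact.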
Diffstat (limited to 'kernel/rcu/rcuperf.c')
| -rw-r--r-- | kernel/rcu/rcuperf.c | 18 |

1 file changed, 16 insertions, 2 deletions
```diff
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 2b5a613afcf3..b080bc4a4f45 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -139,6 +139,7 @@ struct rcu_perf_ops {
 	int (*readlock)(void);
 	void (*readunlock)(int idx);
 	unsigned long (*get_gp_seq)(void);
+	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
 	unsigned long (*exp_completed)(void);
 	void (*async)(struct rcu_head *head, rcu_callback_t func);
 	void (*gp_barrier)(void);
@@ -179,6 +180,7 @@ static struct rcu_perf_ops rcu_ops = {
 	.readlock = rcu_perf_read_lock,
 	.readunlock = rcu_perf_read_unlock,
 	.get_gp_seq = rcu_get_gp_seq,
+	.gp_diff = rcu_seq_diff,
 	.exp_completed = rcu_exp_batches_completed,
 	.async = call_rcu,
 	.gp_barrier = rcu_barrier,
@@ -208,6 +210,7 @@ static struct rcu_perf_ops rcu_bh_ops = {
 	.readlock = rcu_bh_perf_read_lock,
 	.readunlock = rcu_bh_perf_read_unlock,
 	.get_gp_seq = rcu_bh_get_gp_seq,
+	.gp_diff = rcu_seq_diff,
 	.exp_completed = rcu_exp_batches_completed_sched,
 	.async = call_rcu_bh,
 	.gp_barrier = rcu_barrier_bh,
@@ -264,6 +267,7 @@ static struct rcu_perf_ops srcu_ops = {
 	.readlock = srcu_perf_read_lock,
 	.readunlock = srcu_perf_read_unlock,
 	.get_gp_seq = srcu_perf_completed,
+	.gp_diff = rcu_seq_diff,
 	.exp_completed = srcu_perf_completed,
 	.async = srcu_call_rcu,
 	.gp_barrier = srcu_rcu_barrier,
@@ -292,6 +296,7 @@ static struct rcu_perf_ops srcud_ops = {
 	.readlock = srcu_perf_read_lock,
 	.readunlock = srcu_perf_read_unlock,
 	.get_gp_seq = srcu_perf_completed,
+	.gp_diff = rcu_seq_diff,
 	.exp_completed = srcu_perf_completed,
 	.async = srcu_call_rcu,
 	.gp_barrier = srcu_rcu_barrier,
@@ -321,6 +326,7 @@ static struct rcu_perf_ops sched_ops = {
 	.readlock = sched_perf_read_lock,
 	.readunlock = sched_perf_read_unlock,
 	.get_gp_seq = rcu_sched_get_gp_seq,
+	.gp_diff = rcu_seq_diff,
 	.exp_completed = rcu_exp_batches_completed_sched,
 	.async = call_rcu_sched,
 	.gp_barrier = rcu_barrier_sched,
@@ -348,6 +354,7 @@ static struct rcu_perf_ops tasks_ops = {
 	.readlock = tasks_perf_read_lock,
 	.readunlock = tasks_perf_read_unlock,
 	.get_gp_seq = rcu_no_completed,
+	.gp_diff = rcu_seq_diff,
 	.async = call_rcu_tasks,
 	.gp_barrier = rcu_barrier_tasks,
 	.sync = synchronize_rcu_tasks,
@@ -355,6 +362,13 @@ static struct rcu_perf_ops tasks_ops = {
 	.name = "tasks"
 };
 
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
+{
+	if (!cur_ops->gp_diff)
+		return new - old;
+	return cur_ops->gp_diff(new, old);
+}
+
 static bool __maybe_unused torturing_tasks(void)
 {
 	return cur_ops == &tasks_ops;
@@ -577,8 +591,8 @@ rcu_perf_cleanup(void)
 		 t_rcu_perf_writer_finished -
 		 t_rcu_perf_writer_started,
 		 ngps,
-		 b_rcu_perf_writer_finished -
-		 b_rcu_perf_writer_started);
+		 rcuperf_seq_diff(b_rcu_perf_writer_finished,
+				  b_rcu_perf_writer_started));
 	for (i = 0; i < nrealwriters; i++) {
 		if (!writer_durations)
 			break;
```
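The rcuperf_seq_diff() wrapper added above dispatches through the per-flavor ops table and falls back to plain subtraction when no ->gp_diff hook is supplied. The following userspace sketch mirrors that dispatch pattern; the mock_* names and the two sample ops tables are invented for illustration and are not part of the kernel source:

```c
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT 2	/* assumed value, as in the sketch above */

/* Hypothetical stand-in for the per-flavor rcu_perf_ops table. */
struct mock_ops {
	const char *name;
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
};

static unsigned long mock_rcu_seq_diff(unsigned long new, unsigned long old)
{
	return (new - old) >> RCU_SEQ_CTR_SHIFT;
}

static struct mock_ops shifted_ops = { .name = "shifted", .gp_diff = mock_rcu_seq_diff };
static struct mock_ops plain_ops = { .name = "plain" };	/* no ->gp_diff hook */
static struct mock_ops *cur_ops = &shifted_ops;

/* Mirrors rcuperf_seq_diff(): use the hook if present, else raw subtraction. */
static unsigned long mock_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

int main(void)
{
	printf("%s: %lu\n", cur_ops->name, mock_seq_diff(20, 4));	/* (20 - 4) >> 2 == 4 */
	cur_ops = &plain_ops;
	printf("%s: %lu\n", cur_ops->name, mock_seq_diff(20, 4));	/* 20 - 4 == 16 */
	return 0;
}
```

The NULL check keeps the wrapper safe for any flavor that never registers a ->gp_diff hook, while flavors whose counters carry state bits get the wrap-safe subtract-before-shift behavior.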