diff options
author:    Paul E. McKenney <paul.mckenney@linaro.org>   2012-10-11 16:18:09 -0700
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2012-11-08 11:50:13 -0800
commit:    a30489c5228fba6f16b4c740a0292879ef13371e (patch)
tree:      2a28a2d6180f8315a8c1cee12cdb415a90b167f0 /kernel
parent:    40694d6644d5cca28531707559466122eb212d8b (diff)
download:  linux-a30489c5228fba6f16b4c740a0292879ef13371e.tar.bz2
rcu: Instrument synchronize_rcu_expedited() for debugfs tracing
This commit adds the counters to rcu_state and updates them in
synchronize_rcu_expedited() to provide the data needed for debugfs
tracing.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutree.c | 18
-rw-r--r--  kernel/rcutree.h |  9
2 files changed, 24 insertions(+), 3 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 3c72e5e5528c..b966d56ebb51 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -2321,6 +2321,7 @@ void synchronize_sched_expedited(void) (ulong)atomic_long_read(&rsp->expedited_done) + ULONG_MAX / 8)) { synchronize_sched(); + atomic_long_inc(&rsp->expedited_wrap); return; } @@ -2341,11 +2342,14 @@ void synchronize_sched_expedited(void) synchronize_sched_expedited_cpu_stop, NULL) == -EAGAIN) { put_online_cpus(); + atomic_long_inc(&rsp->expedited_tryfail); /* Check to see if someone else did our work for us. */ s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { - smp_mb(); /* ensure test happens before caller kfree */ + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ + atomic_long_inc(&rsp->expedited_workdone1); return; } @@ -2354,13 +2358,16 @@ void synchronize_sched_expedited(void) udelay(trycount * num_online_cpus()); } else { synchronize_sched(); + atomic_long_inc(&rsp->expedited_normal); return; } /* Recheck to see if someone else did our work for us. */ s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { - smp_mb(); /* ensure test happens before caller kfree */ + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ + atomic_long_inc(&rsp->expedited_workdone2); return; } @@ -2375,6 +2382,7 @@ void synchronize_sched_expedited(void) snap = atomic_long_read(&rsp->expedited_start); smp_mb(); /* ensure read is before try_stop_cpus(). */ } + atomic_long_inc(&rsp->expedited_stoppedcpus); /* * Everyone up to our most recent fetch is covered by our grace @@ -2383,12 +2391,16 @@ void synchronize_sched_expedited(void) * than we did already did their update. 
*/ do { + atomic_long_inc(&rsp->expedited_done_tries); s = atomic_long_read(&rsp->expedited_done); if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { - smp_mb(); /* ensure test happens before caller kfree */ + /* ensure test happens before caller kfree */ + smp_mb__before_atomic_inc(); /* ^^^ */ + atomic_long_inc(&rsp->expedited_done_lost); break; } } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s); + atomic_long_inc(&rsp->expedited_done_exit); put_online_cpus(); } diff --git a/kernel/rcutree.h b/kernel/rcutree.h index 88f3d9d5971d..d274af357210 100644 --- a/kernel/rcutree.h +++ b/kernel/rcutree.h @@ -406,6 +406,15 @@ struct rcu_state { atomic_long_t expedited_start; /* Starting ticket. */ atomic_long_t expedited_done; /* Done ticket. */ + atomic_long_t expedited_wrap; /* # near-wrap incidents. */ + atomic_long_t expedited_tryfail; /* # acquisition failures. */ + atomic_long_t expedited_workdone1; /* # done by others #1. */ + atomic_long_t expedited_workdone2; /* # done by others #2. */ + atomic_long_t expedited_normal; /* # fallbacks to normal. */ + atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */ + atomic_long_t expedited_done_tries; /* # tries to update _done. */ + atomic_long_t expedited_done_lost; /* # times beaten to _done. */ + atomic_long_t expedited_done_exit; /* # times exited _done loop. */ unsigned long jiffies_force_qs; /* Time at which to invoke */ /* force_quiescent_state(). */ |