author		Jiri Olsa <jolsa@kernel.org>	2014-11-21 10:31:14 +0100
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2014-12-01 20:00:30 -0300
commit		779d0b997e0787fc5f80110159b6c18ae0fae395 (patch)
tree		5834910f5c9e0640ab0764351b718b79b530b522 /tools/perf/builtin-stat.c
parent		a5a7fd76b55a6e6916ff22e5c8fdb39a8381be2c (diff)
download	linux-779d0b997e0787fc5f80110159b6c18ae0fae395.tar.bz2
perf stat: Add support for per-pkg counters
The .per-pkg file indicates that all but one value per socket should be
discarded. Add the logic to skip the rest of the socket's CPUs once the
first value has been read.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Matt Fleming <matt.fleming@intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1416562275-12404-11-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
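The skip decision in the patch boils down to a per-socket bitmask that is cleared before each read pass and set by the first CPU seen on every socket. Below is a minimal, standalone userspace sketch of that idea, not part of the patch: socket_of() is a hypothetical stand-in for cpu_map__get_socket(), and a plain byte array replaces the kernel-style bitmap and test_and_set_bit().

/*
 * Not part of the patch: a minimal sketch of the per-pkg skip logic.
 * socket_of() is a hypothetical stand-in for cpu_map__get_socket();
 * a byte array replaces the kernel bitmap + test_and_set_bit().
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS		8
#define MAX_SOCKETS	64

/* Hypothetical CPU-to-socket mapping: assume 4 CPUs per socket. */
static int socket_of(int cpu)
{
	return cpu / 4;
}

/*
 * Return true if this CPU's socket has already contributed a value.
 * The first CPU seen on each socket marks the socket and is kept;
 * every later CPU on that socket is skipped.
 */
static bool should_skip(unsigned char *mask, int cpu)
{
	int s = socket_of(cpu);
	bool seen = mask[s];

	mask[s] = 1;
	return seen;
}

int main(void)
{
	unsigned char mask[MAX_SOCKETS];
	int cpu;

	/* Like zero_per_pkg(): reset the mask before each read pass. */
	memset(mask, 0, sizeof(mask));

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: %s\n", cpu,
		       should_skip(mask, cpu) ? "skip (socket already counted)"
					      : "read value");
	return 0;
}

Note that the patch itself does not drop skipped CPUs from the read loop; read_cb() substitutes a static zero-valued perf_counts_values instead, so the existing aggregation paths stay unchanged and each socket contributes exactly one value.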
Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--	tools/perf/builtin-stat.c	49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b24a7a08bd1d..860e8ad06616 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -388,10 +388,56 @@ static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
 		update_stats(&runtime_itlb_cache_stats[0], count[0]);
 }
 
+static void zero_per_pkg(struct perf_evsel *counter)
+{
+	if (counter->per_pkg_mask)
+		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
+}
+
+static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+{
+	unsigned long *mask = counter->per_pkg_mask;
+	struct cpu_map *cpus = perf_evsel__cpus(counter);
+	int s;
+
+	*skip = false;
+
+	if (!counter->per_pkg)
+		return 0;
+
+	if (cpu_map__empty(cpus))
+		return 0;
+
+	if (!mask) {
+		mask = zalloc(MAX_NR_CPUS);
+		if (!mask)
+			return -ENOMEM;
+
+		counter->per_pkg_mask = mask;
+	}
+
+	s = cpu_map__get_socket(cpus, cpu);
+	if (s < 0)
+		return -1;
+
+	*skip = test_and_set_bit(s, mask) == 1;
+	return 0;
+}
+
 static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
 		   struct perf_counts_values *count)
 {
 	struct perf_counts_values *aggr = &evsel->counts->aggr;
+	static struct perf_counts_values zero;
+	bool skip = false;
+
+	if (check_per_pkg(evsel, cpu, &skip)) {
+		pr_err("failed to read per-pkg counter\n");
+		return -1;
+	}
+
+	if (skip)
+		count = &zero;
 
 	switch (aggr_mode) {
 	case AGGR_CORE:
@@ -465,6 +511,9 @@ static int read_counter(struct perf_evsel *counter)
 	if (counter->system_wide)
 		nthreads = 1;
 
+	if (counter->per_pkg)
+		zero_per_pkg(counter);
+
 	for (thread = 0; thread < nthreads; thread++) {
 		for (cpu = 0; cpu < ncpus; cpu++) {
 			if (perf_evsel__read_cb(counter, cpu, thread, read_cb))