author     Arnaldo Carvalho de Melo <acme@redhat.com>  2011-01-04 11:55:27 -0200
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2011-01-10 22:03:26 -0200
commit     0252208eb52f6fe8731a47804eddc7ba93f60a87
tree       deaddeda57a630a1d6813ea565fc972c699e2906  /tools/perf/util/evsel.c
parent     12f7e0364375ba1ba55abcc5ac082b68fb526c80
perf evsel: Support perf_evsel__open(cpus > 1 && threads > 1)
And a test for it:

  [acme@felicio linux]$ perf test
   1: vmlinux symtab matches kallsyms: Ok
   2: detect open syscall event: Ok
   3: detect open syscall event on all cpus: Ok
  [acme@felicio linux]$

Translated to C, the test:

 1. generates a different number of open syscalls on each CPU by using
    sched_setaffinity
 2. verifies that the expected number of events is generated on each CPU

It works as expected.

LKML-Reference: <new-submission>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
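For illustration only (this is not the test code added in tools/perf/builtin-test.c
as part of this series; this page shows only the evsel.c part): a minimal standalone
sketch of step 1, the workload side, assuming an arbitrary "CPU n performs n + 1
opens" rule and an arbitrary file to open. The real test then opens the open-syscall
tracepoint event on all CPUs through the evsel API changed below and compares the
per-CPU counts against these expectations.

	/*
	 * Sketch: pin the task to each CPU in turn and issue a known,
	 * per-CPU-distinct number of open() syscalls.
	 */
	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int ncpus = sysconf(_SC_NPROCESSORS_ONLN);

		for (int cpu = 0; cpu < ncpus; cpu++) {
			cpu_set_t set;
			int expected = cpu + 1;	/* arbitrary per-CPU count */

			CPU_ZERO(&set);
			CPU_SET(cpu, &set);
			if (sched_setaffinity(0, sizeof(set), &set) < 0) {
				perror("sched_setaffinity");
				return 1;
			}

			for (int i = 0; i < expected; i++) {
				int fd = open("/etc/passwd", O_RDONLY);

				if (fd >= 0)
					close(fd);
			}
			/*
			 * Step 2 (omitted to keep the sketch standalone):
			 * read back the counter opened on this CPU, e.g.
			 * with perf_evsel__read_on_cpu(), and compare it
			 * with 'expected'.
			 */
		}
		return 0;
	}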
Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c | 82
1 files changed, 49 insertions, 33 deletions
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1a5591d7a245..f5cfed60af98 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -127,59 +127,75 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr	= 1,
+	.cpus	= { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr	 = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
 	}
 
-	return 0;
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
 
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
+	return __perf_evsel__open(evsel, cpus, threads);
 }
 
-int perf_evsel__open(struct perf_evsel *evsel,
-		     struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
 {
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
 
-	return perf_evsel__open_per_thread(evsel, threads);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+{
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
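After this change perf_evsel__open() is the single entry point: a NULL cpu_map or
thread_map falls back to the embedded empty_cpu_map/empty_thread_map (a single -1
entry, which sys_perf_event_open() treats as "any CPU" for the cpu argument and
"all tasks" for the pid argument), and the per-cpu/per-thread variants become thin
wrappers around __perf_evsel__open(). Below is a rough caller-side sketch, not part
of this patch; the helper names and signatures (perf_evsel__new, cpu_map__new,
thread_map__new, perf_evsel__read_on_cpu, perf_evsel__delete) and the include paths
are assumed from the tools/perf utility code of this period and should be verified
against the headers before use.

	/*
	 * Caller-side sketch only (not from this patch); builds only inside
	 * the perf tool tree, and the includes/signatures are assumptions.
	 */
	#include <unistd.h>
	#include "util/evsel.h"
	#include "util/cpumap.h"
	#include "util/thread.h"	/* thread_map__new() assumed to live here */

	static int count_on_all_cpus_for_this_task(struct perf_event_attr *attr)
	{
		struct cpu_map *cpus = cpu_map__new(NULL);	/* all online CPUs */
		struct thread_map *threads = thread_map__new(-1, getpid());
		struct perf_evsel *evsel = perf_evsel__new(attr, 0);
		int cpu, err = -1;

		if (cpus == NULL || threads == NULL || evsel == NULL)
			return -1;

		/*
		 * One counter fd per (cpu, thread) pair, cpus->nr * threads->nr
		 * in total.  Passing NULL for either map now selects the
		 * embedded { -1 } map, i.e. the old per-thread/per-cpu cases.
		 */
		if (perf_evsel__open(evsel, cpus, threads) < 0)
			goto out;

		for (cpu = 0; cpu < cpus->nr; cpu++)
			if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0)
				goto out;

		err = 0;
	out:
		perf_evsel__delete(evsel);	/* map teardown omitted for brevity */
		return err;
	}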