path: root/tools/perf/builtin-trace.c
Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--	tools/perf/builtin-trace.c | 92
1 file changed, 75 insertions(+), 17 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 835619476370..8e3c3f74a3a4 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -18,6 +18,7 @@
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
+#include <bpf/bpf.h>
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
@@ -99,6 +100,7 @@ struct trace {
struct {
size_t nr;
pid_t *entries;
+ struct bpf_map *map;
} filter_pids;
double duration_filter;
double runtime_ms;
@@ -2565,9 +2567,27 @@ out_enomem:
goto out;
}
+static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
+ size_t npids __maybe_unused, pid_t *pids __maybe_unused)
+{
+ int err = 0;
+#ifdef HAVE_LIBBPF_SUPPORT
+ bool value = true;
+ int map_fd = bpf_map__fd(map);
+ size_t i;
+
+ for (i = 0; i < npids; ++i) {
+ err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
+ if (err)
+ break;
+ }
+#endif
+ return err;
+}
+
static int trace__set_filter_loop_pids(struct trace *trace)
{
- unsigned int nr = 1;
+ unsigned int nr = 1, err;
pid_t pids[32] = {
getpid(),
};
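
The bpf_map__set_filter_pids() helper added in the hunk above fills a BPF hash map with the pids that should be dropped, one bool 'true' entry per pid, using bpf_map__fd() and bpf_map_update_elem() from libbpf. A minimal sketch of the BPF-program side that would consume such a map follows; only the map name "pids_filtered" (looked up later via trace__set_bpf_map_filtered_pids()) comes from this patch, while the header and map-definition style are assumptions that vary across libbpf versions:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>	/* SEC(), bpf_map_lookup_elem(); assumed header location */

	struct bpf_map_def SEC("maps") pids_filtered = {
		.type        = BPF_MAP_TYPE_HASH,
		.key_size    = sizeof(int),	/* same width as the pid_t keys the tool writes */
		.value_size  = sizeof(char),	/* the tool stores a bool 'true' */
		.max_entries = 64,
	};

	static inline int pid_filtered(int pid)
	{
		/* an entry inserted by bpf_map__set_filter_pids() means "drop events for this pid" */
		char *dropped = bpf_map_lookup_elem(&pids_filtered, &pid);

		return dropped && *dropped;
	}
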
@@ -2586,7 +2606,34 @@ static int trace__set_filter_loop_pids(struct trace *trace)
thread = parent;
}
- return perf_evlist__set_filter_pids(trace->evlist, nr, pids);
+ err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
+ if (!err && trace->filter_pids.map)
+ err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
+
+ return err;
+}
+
+static int trace__set_filter_pids(struct trace *trace)
+{
+ int err = 0;
+ /*
+ * Better not use !target__has_task() here because we need to cover the
+ * case where no threads were specified in the command line, but a
+ * workload was, and in that case we will fill in the thread_map when
+ * we fork the workload in perf_evlist__prepare_workload.
+ */
+ if (trace->filter_pids.nr > 0) {
+ err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
+ trace->filter_pids.entries);
+ if (!err && trace->filter_pids.map) {
+ err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
+ trace->filter_pids.entries);
+ }
+ } else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
+ err = trace__set_filter_loop_pids(trace);
+ }
+
+ return err;
}
static int trace__run(struct trace *trace, int argc, const char **argv)
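
For reference, a minimal user-space sketch (not part of the patch; the helper names are hypothetical) of reading back an entry written by bpf_map__set_filter_pids(), using the same <bpf/bpf.h> and bpf_map__fd() interfaces the patch relies on:

	#include <stdbool.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	/* true if 'pid' was marked for filtering in 'map' */
	static bool pid_is_filtered(struct bpf_map *map, pid_t pid)
	{
		bool value = false;
		int fd = bpf_map__fd(map);

		return fd >= 0 && bpf_map_lookup_elem(fd, &pid, &value) == 0 && value;
	}

	static void report_pid(struct bpf_map *map, pid_t pid)
	{
		printf("pid %d %s be filtered by the BPF program\n",
		       pid, pid_is_filtered(map, pid) ? "will" : "will not");
	}
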
@@ -2695,17 +2742,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_error_open;
}
- /*
- * Better not use !target__has_task() here because we need to cover the
- * case where no threads were specified in the command line, but a
- * workload was, and in that case we will fill in the thread_map when
- * we fork the workload in perf_evlist__prepare_workload.
- */
- if (trace->filter_pids.nr > 0)
- err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
- else if (thread_map__pid(evlist->threads, 0) == -1)
- err = trace__set_filter_loop_pids(trace);
-
+ err = trace__set_filter_pids(trace);
if (err < 0)
goto out_error_mem;
@@ -3104,8 +3141,8 @@ static int trace__set_duration(const struct option *opt, const char *str,
return 0;
}
-static int trace__set_filter_pids(const struct option *opt, const char *str,
- int unset __maybe_unused)
+static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
+ int unset __maybe_unused)
{
int ret = -1;
size_t i;
@@ -3315,6 +3352,25 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
return 0;
}
+static struct bpf_map *bpf__find_map_by_name(const char *name)
+{
+ struct bpf_object *obj, *tmp;
+
+ bpf_object__for_each_safe(obj, tmp) {
+ struct bpf_map *map = bpf_object__find_map_by_name(obj, name);
+ if (map)
+ return map;
+
+ }
+
+ return NULL;
+}
+
+static void trace__set_bpf_map_filtered_pids(struct trace *trace)
+{
+ trace->filter_pids.map = bpf__find_map_by_name("pids_filtered");
+}
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
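
bpf__find_map_by_name() above walks every loaded BPF object with bpf_object__for_each_safe() because the tool does not track which object was loaded for the augmented syscalls event. If a specific bpf_object handle were kept around, the lookup could target it directly, as in this sketch (the 'obj' handle and helper name are hypothetical; only bpf_object__find_map_by_name() is the actual libbpf API):

	#include <bpf/libbpf.h>

	/* direct lookup when the owning BPF object is already known */
	static struct bpf_map *find_pids_filtered(struct bpf_object *obj)
	{
		return obj ? bpf_object__find_map_by_name(obj, "pids_filtered") : NULL;
	}
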
@@ -3363,7 +3419,7 @@ int cmd_trace(int argc, const char **argv)
OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
- "pids to filter (by the kernel)", trace__set_filter_pids),
+ "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
@@ -3451,8 +3507,10 @@ int cmd_trace(int argc, const char **argv)
goto out;
}
- if (evsel)
+ if (evsel) {
trace.syscalls.events.augmented = evsel;
+ trace__set_bpf_map_filtered_pids(&trace);
+ }
err = bpf__setup_stdout(trace.evlist);
if (err) {