Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c  153
1 file changed, 33 insertions(+), 120 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d3c757769b96..86e06f136f40 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -15,7 +15,6 @@
*/
#include "util/record.h"
-#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
@@ -80,6 +79,10 @@
#include <linux/ctype.h>
#include <perf/mmap.h>
+#ifdef HAVE_LIBTRACEEVENT
+#include <traceevent/event-parse.h>
+#endif
+
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif
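
A minimal sketch of the conditional-compilation pattern this hunk adopts, assuming a hypothetical print_field() helper (not part of builtin-trace.c): code that touches libtraceevent builds only when HAVE_LIBTRACEEVENT is defined, with a graceful fallback otherwise.

#include <stdio.h>

struct tep_event;	/* opaque when libtraceevent is absent */

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>

static void print_field(struct tep_event *event, const char *name)
{
	struct tep_format_field *field = tep_find_field(event, name);

	if (field)
		printf("%s: offset=%d size=%d\n", name, field->offset, field->size);
}
#else
static void print_field(struct tep_event *event, const char *name)
{
	(void)event;
	printf("%s: libtraceevent support not compiled in\n", name);
}
#endif

Callers stay identical in both configurations; only the definition behind the guard changes.
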
@@ -88,6 +91,8 @@
# define F_LINUX_SPECIFIC_BASE 1024
#endif
+#define RAW_SYSCALL_ARGS_NUM 6
+
/*
* strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
*/
@@ -108,7 +113,7 @@ struct syscall_fmt {
const char *sys_enter,
*sys_exit;
} bpf_prog_name;
- struct syscall_arg_fmt arg[6];
+ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
u8 nr_args;
bool errpid;
bool timeout;
@@ -120,7 +125,6 @@ struct trace {
struct syscalltbl *sctbl;
struct {
struct syscall *table;
- struct bpf_map *map;
struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
struct bpf_map *sys_enter,
*sys_exit;
@@ -924,6 +928,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
{ .name = "clock_gettime",
.arg = { [0] = STRARRAY(clk_id, clockid), }, },
+ { .name = "clock_nanosleep",
+ .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
{ .name = "clone", .errpid = true, .nr_args = 5,
.arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
[1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
@@ -1053,7 +1059,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "perf_event_open",
- .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
+ .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
+ [2] = { .scnprintf = SCA_INT, /* cpu */ },
[3] = { .scnprintf = SCA_FD, /* group_fd */ },
[4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
{ .name = "pipe2",
@@ -1220,16 +1227,6 @@ struct syscall {
};
/*
- * Must match what is in the BPF program:
- *
- * tools/perf/examples/bpf/augmented_raw_syscalls.c
- */
-struct bpf_map_syscall_entry {
- bool enabled;
- u16 string_args_len[6];
-};
-
-/*
* We need to have this 'calculated' boolean because in some cases we really
* don't know what is the duration of a syscall, for instance, when we start
* a session and some threads are waiting for a syscall to finish, say 'poll',
@@ -1535,8 +1532,8 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
}
static pid_t workload_pid = -1;
-static bool done = false;
-static bool interrupted = false;
+static volatile sig_atomic_t done = false;
+static volatile sig_atomic_t interrupted = false;
static void sighandler_interrupt(int sig __maybe_unused)
{
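
The bool -> volatile sig_atomic_t change above is not cosmetic: C guarantees defined behaviour only for writes to volatile sig_atomic_t objects from an asynchronous signal handler, and 'volatile' stops the compiler from caching the flag in a register across the wait loop. A self-contained sketch of the pattern (plain C, not perf code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t done;

static void sighandler(int sig)
{
	(void)sig;
	done = 1;	/* the only kind of write a handler can safely make */
}

int main(void)
{
	signal(SIGINT, sighandler);
	while (!done)
		pause();	/* sleep until a signal arrives */
	printf("interrupted, exiting cleanly\n");
	return 0;
}
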
@@ -1658,7 +1655,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
+ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
nr_args = sc->fmt->nr_args;
sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
@@ -1730,7 +1727,7 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
/*
* /sys/kernel/tracing/events/syscalls/sys_enter*
- * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
+ * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
* 65 int
* 23 unsigned int
* 7 unsigned long
@@ -1791,11 +1788,11 @@ static int trace__read_syscall_info(struct trace *trace, int id)
#endif
sc = trace->syscalls.table + id;
if (sc->nonexistent)
- return 0;
+ return -EEXIST;
if (name == NULL) {
sc->nonexistent = true;
- return 0;
+ return -EEXIST;
}
sc->name = name;
@@ -1809,11 +1806,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->tp_format = trace_event__tp_format("syscalls", tp_name);
}
- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
- return -ENOMEM;
-
- if (IS_ERR(sc->tp_format))
+ /*
+ * If reading the trace point format via the sysfs node fails, the
+ * trace point doesn't exist. Set the 'nonexistent' flag to true.
+ */
+ if (IS_ERR(sc->tp_format)) {
+ sc->nonexistent = true;
return PTR_ERR(sc->tp_format);
+ }
+
+ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
+ return -ENOMEM;
sc->args = sc->tp_format->format.fields;
/*
@@ -2131,11 +2135,8 @@ static struct syscall *trace__syscall_info(struct trace *trace,
(err = trace__read_syscall_info(trace, id)) != 0)
goto out_cant_read;
- if (trace->syscalls.table[id].name == NULL) {
- if (trace->syscalls.table[id].nonexistent)
- return NULL;
+ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
goto out_cant_read;
- }
return &trace->syscalls.table[id];
@@ -2728,8 +2729,10 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
offset = format_field__intval(field, sample, evsel->needs_swap);
syscall_arg.len = offset >> 16;
offset &= 0xffff;
+#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
if (field->flags & TEP_FIELD_IS_RELATIVE)
offset += field->offset + field->size;
+#endif
}
val = (uintptr_t)(sample->raw_data + offset);
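
For context on the decoding above: a tracefs dynamic array ('__data_loc') field stores a 32-bit descriptor with the payload length in the high 16 bits and the offset into the raw sample in the low 16 bits; when libtraceevent defines TEP_FIELD_IS_RELATIVE, that offset is measured from the end of the field itself, hence the rebase. A sketch under those assumptions (decode_data_loc() is an illustrative name, not a perf function):

#include <traceevent/event-parse.h>	/* assumes libtraceevent headers are available */

static const void *decode_data_loc(const void *raw_data, unsigned int loc,
				   const struct tep_format_field *field,
				   unsigned int *len)
{
	unsigned int offset = loc & 0xffff;

	*len = loc >> 16;		/* payload length lives in the high 16 bits */
#ifdef HAVE_LIBTRACEEVENT_TEP_FIELD_IS_RELATIVE
	if (field->flags & TEP_FIELD_IS_RELATIVE)
		offset += field->offset + field->size;	/* rebase relative offsets */
#endif
	return (const char *)raw_data + offset;
}
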
@@ -3250,7 +3253,6 @@ static void trace__set_bpf_map_filtered_pids(struct trace *trace)
static void trace__set_bpf_map_syscalls(struct trace *trace)
{
- trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
}
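
With the catch-all 'syscalls' map gone, per-syscall dispatch rests entirely on the two BPF_MAP_TYPE_PROG_ARRAY maps, which the BPF side indexes via bpf_tail_call() using the syscall id. A hedged sketch of how user space could wire a program into such a map with libbpf (prog_array__set() is an illustrative name, not a perf helper):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/*
 * Point slot 'id' of a BPF_MAP_TYPE_PROG_ARRAY at 'prog', so that
 * bpf_tail_call(ctx, &prog_array, id) on the kernel side jumps to it.
 */
static int prog_array__set(struct bpf_map *prog_array, int id, struct bpf_program *prog)
{
	int fd = bpf_program__fd(prog);

	return bpf_map_update_elem(bpf_map__fd(prog_array), &id, &fd, BPF_ANY);
}
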
@@ -3330,80 +3332,6 @@ static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}
-static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
-{
- struct syscall *sc = trace__syscall_info(trace, NULL, id);
- int arg = 0;
-
- if (sc == NULL)
- goto out;
-
- for (; arg < sc->nr_args; ++arg) {
- entry->string_args_len[arg] = 0;
- if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
- /* Should be set like strace -s strsize */
- entry->string_args_len[arg] = PATH_MAX;
- }
- }
-out:
- for (; arg < 6; ++arg)
- entry->string_args_len[arg] = 0;
-}
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
-{
- int fd = bpf_map__fd(trace->syscalls.map);
- struct bpf_map_syscall_entry value = {
- .enabled = !trace->not_ev_qualifier,
- };
- int err = 0;
- size_t i;
-
- for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
- int key = trace->ev_qualifier_ids.entries[i];
-
- if (value.enabled) {
- trace__init_bpf_map_syscall_args(trace, key, &value);
- trace__init_syscall_bpf_progs(trace, key);
- }
-
- err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
-{
- int fd = bpf_map__fd(trace->syscalls.map);
- struct bpf_map_syscall_entry value = {
- .enabled = enabled,
- };
- int err = 0, key;
-
- for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
- if (enabled)
- trace__init_bpf_map_syscall_args(trace, key, &value);
-
- err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int trace__init_syscalls_bpf_map(struct trace *trace)
-{
- bool enabled = true;
-
- if (trace->ev_qualifier_ids.nr)
- enabled = trace->not_ev_qualifier;
-
- return __trace__init_syscalls_bpf_map(trace, enabled);
-}
-
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
struct tep_format_field *field, *candidate_field;
@@ -3618,16 +3546,6 @@ static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
{
}
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
-{
- return 0;
-}
-
-static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
-{
- return 0;
-}
-
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
const char *name __maybe_unused)
{
@@ -3661,8 +3579,6 @@ static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
- if (trace->syscalls.map)
- return trace__set_ev_qualifier_bpf_filter(trace);
if (trace->syscalls.events.sys_enter)
return trace__set_ev_qualifier_tp_filter(trace);
return 0;
@@ -4036,9 +3952,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_mem;
- if (trace->syscalls.map)
- trace__init_syscalls_bpf_map(trace);
-
if (trace->syscalls.prog_array.sys_enter)
trace__init_syscalls_bpf_prog_array_maps(trace);
@@ -4092,8 +4005,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
}
trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
- evlist->core.threads->nr > 1 ||
- evlist__first(evlist)->core.attr.inherit;
+ perf_thread_map__nr(evlist->core.threads) > 1 ||
+ evlist__first(evlist)->core.attr.inherit;
/*
* Now that we already used evsel->core.attr to ask the kernel to setup the