Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build | 2
-rw-r--r--  tools/perf/util/annotate.c | 8
-rw-r--r--  tools/perf/util/annotate.h | 1
-rw-r--r--  tools/perf/util/arm-spe-decoder/arm-spe-decoder.c | 10
-rw-r--r--  tools/perf/util/arm-spe-decoder/arm-spe-decoder.h | 8
-rw-r--r--  tools/perf/util/arm-spe.c | 133
-rw-r--r--  tools/perf/util/auxtrace.c | 15
-rw-r--r--  tools/perf/util/auxtrace.h | 6
-rw-r--r--  tools/perf/util/bpf_counter.c | 314
-rw-r--r--  tools/perf/util/bpf_counter.h | 72
-rw-r--r--  tools/perf/util/bpf_skel/.gitignore | 3
-rw-r--r--  tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c | 93
-rw-r--r--  tools/perf/util/build-id.c | 5
-rw-r--r--  tools/perf/util/build-id.h | 4
-rw-r--r--  tools/perf/util/cgroup.c | 8
-rw-r--r--  tools/perf/util/config.c | 123
-rw-r--r--  tools/perf/util/config.h | 7
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 15
-rw-r--r--  tools/perf/util/data-convert-bt.c | 2
-rw-r--r--  tools/perf/util/db-export.c | 2
-rw-r--r--  tools/perf/util/debug.c | 34
-rw-r--r--  tools/perf/util/debug.h | 1
-rw-r--r--  tools/perf/util/demangle-ocaml.c | 80
-rw-r--r--  tools/perf/util/demangle-ocaml.h | 7
-rw-r--r--  tools/perf/util/event.c | 67
-rw-r--r--  tools/perf/util/event.h | 18
-rw-r--r--  tools/perf/util/evlist.c | 125
-rw-r--r--  tools/perf/util/evlist.h | 12
-rw-r--r--  tools/perf/util/evsel.c | 63
-rw-r--r--  tools/perf/util/evsel.h | 9
-rw-r--r--  tools/perf/util/evsel_fprintf.c | 2
-rw-r--r--  tools/perf/util/header.c | 2
-rw-r--r--  tools/perf/util/hist.c | 15
-rw-r--r--  tools/perf/util/hist.h | 4
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 334
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.h | 7
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c | 15
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h | 1
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c | 12
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h | 2
-rw-r--r--  tools/perf/util/intel-pt.c | 214
-rw-r--r--  tools/perf/util/intlist.c | 27
-rw-r--r--  tools/perf/util/intlist.h | 10
-rw-r--r--  tools/perf/util/jit.h | 2
-rw-r--r--  tools/perf/util/jitdump.c | 84
-rw-r--r--  tools/perf/util/machine.c | 51
-rw-r--r--  tools/perf/util/machine.h | 2
-rw-r--r--  tools/perf/util/map.c | 8
-rw-r--r--  tools/perf/util/map.h | 3
-rw-r--r--  tools/perf/util/mem-events.c | 36
-rw-r--r--  tools/perf/util/mem-events.h | 5
-rw-r--r--  tools/perf/util/metricgroup.c | 2
-rw-r--r--  tools/perf/util/namespaces.c | 23
-rw-r--r--  tools/perf/util/namespaces.h | 3
-rw-r--r--  tools/perf/util/parse-events.l | 1
-rw-r--r--  tools/perf/util/perf_api_probe.c | 10
-rw-r--r--  tools/perf/util/perf_api_probe.h | 1
-rw-r--r--  tools/perf/util/perf_event_attr_fprintf.c | 5
-rw-r--r--  tools/perf/util/perf_regs.h | 7
-rw-r--r--  tools/perf/util/probe-event.c | 12
-rw-r--r--  tools/perf/util/probe-file.c | 38
-rw-r--r--  tools/perf/util/probe-finder.c | 8
-rw-r--r--  tools/perf/util/python-ext-sources | 1
-rw-r--r--  tools/perf/util/python.c | 21
-rw-r--r--  tools/perf/util/record.c | 9
-rw-r--r--  tools/perf/util/record.h | 2
-rw-r--r--  tools/perf/util/session.c | 54
-rw-r--r--  tools/perf/util/setup.py | 2
-rw-r--r--  tools/perf/util/sort.c | 109
-rw-r--r--  tools/perf/util/sort.h | 6
-rw-r--r--  tools/perf/util/stat-display.c | 4
-rw-r--r--  tools/perf/util/stat-shadow.c | 92
-rw-r--r--  tools/perf/util/stat.c | 6
-rw-r--r--  tools/perf/util/stat.h | 9
-rw-r--r--  tools/perf/util/string.c | 9
-rw-r--r--  tools/perf/util/string2.h | 2
-rw-r--r--  tools/perf/util/symbol-elf.c | 25
-rw-r--r--  tools/perf/util/symbol.c | 73
-rw-r--r--  tools/perf/util/symbol_conf.h | 7
-rw-r--r--  tools/perf/util/synthetic-events.c | 225
-rw-r--r--  tools/perf/util/target.c | 34
-rw-r--r--  tools/perf/util/target.h | 10
-rw-r--r--  tools/perf/util/trace-event-info.c | 10
-rw-r--r--  tools/perf/util/unwind-libdw.c | 11
-rw-r--r--  tools/perf/util/xyarray.c | 33
85 files changed, 2507 insertions, 440 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e2563d0154eb..e3e12f9d4733 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -135,6 +135,7 @@ perf-y += clockid.o
perf-$(CONFIG_LIBBPF) += bpf-loader.o
perf-$(CONFIG_LIBBPF) += bpf_map.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
@@ -172,6 +173,7 @@ perf-$(CONFIG_ZSTD) += zstd.o
perf-$(CONFIG_LIBCAP) += cap.o
+perf-y += demangle-ocaml.o
perf-y += demangle-java.o
perf-y += demangle-rust.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ce8c07bc8c56..e60841b86d27 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -321,12 +321,18 @@ bool ins__is_call(const struct ins *ins)
/*
* Prevents from matching commas in the comment section, e.g.:
* ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
+ *
+ * and skip comma as part of function arguments, e.g.:
+ * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
*/
static inline const char *validate_comma(const char *c, struct ins_operands *ops)
{
if (ops->raw_comment && c > ops->raw_comment)
return NULL;
+ if (ops->raw_func_start && c > ops->raw_func_start)
+ return NULL;
+
return c;
}
@@ -341,6 +347,8 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s
u64 start, end;
ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ ops->raw_func_start = strchr(ops->raw, '<');
+
c = validate_comma(c, ops);
/*
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 0a0cd4f32175..096cdaf21b01 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -32,6 +32,7 @@ struct ins {
struct ins_operands {
char *raw;
char *raw_comment;
+ char *raw_func_start;
struct {
char *raw;
char *name;
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 90d575cee1b9..32fe41835fa6 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -172,12 +172,22 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
decoder->record.from_ip = ip;
else if (idx == SPE_ADDR_PKT_HDR_INDEX_BRANCH)
decoder->record.to_ip = ip;
+ else if (idx == SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT)
+ decoder->record.virt_addr = ip;
+ else if (idx == SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS)
+ decoder->record.phys_addr = ip;
break;
case ARM_SPE_COUNTER:
break;
case ARM_SPE_CONTEXT:
break;
case ARM_SPE_OP_TYPE:
+ if (idx == SPE_OP_PKT_HDR_CLASS_LD_ST_ATOMIC) {
+ if (payload & 0x1)
+ decoder->record.op = ARM_SPE_ST;
+ else
+ decoder->record.op = ARM_SPE_LD;
+ }
break;
case ARM_SPE_EVENTS:
if (payload & BIT(EV_L1D_REFILL))
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
index 24727b8ca7ff..59bdb7309674 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -24,12 +24,20 @@ enum arm_spe_sample_type {
ARM_SPE_REMOTE_ACCESS = 1 << 7,
};
+enum arm_spe_op_type {
+ ARM_SPE_LD = 1 << 0,
+ ARM_SPE_ST = 1 << 1,
+};
+
struct arm_spe_record {
enum arm_spe_sample_type type;
int err;
+ u32 op;
u64 from_ip;
u64 to_ip;
u64 timestamp;
+ u64 virt_addr;
+ u64 phys_addr;
};
struct arm_spe_insn;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 8901a1656a41..2539d4baec44 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -53,6 +53,7 @@ struct arm_spe {
u8 sample_tlb;
u8 sample_branch;
u8 sample_remote_access;
+ u8 sample_memory;
u64 l1d_miss_id;
u64 l1d_access_id;
@@ -62,6 +63,7 @@ struct arm_spe {
u64 tlb_access_id;
u64 branch_miss_id;
u64 remote_access_id;
+ u64 memory_id;
u64 kernel_start;
@@ -235,7 +237,6 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
sample->pid = speq->pid;
sample->tid = speq->tid;
- sample->addr = record->to_ip;
sample->period = 1;
sample->cpu = speq->cpu;
@@ -259,11 +260,11 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
return ret;
}
-static int
-arm_spe_synth_spe_events_sample(struct arm_spe_queue *speq,
- u64 spe_events_id)
+static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id, u64 data_src)
{
struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
union perf_event *event = speq->event_buf;
struct perf_sample sample = { .ip = 0, };
@@ -271,27 +272,102 @@ arm_spe_synth_spe_events_sample(struct arm_spe_queue *speq,
sample.id = spe_events_id;
sample.stream_id = spe_events_id;
+ sample.addr = record->virt_addr;
+ sample.phys_addr = record->phys_addr;
+ sample.data_src = data_src;
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
+static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id)
+{
+ struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record = &speq->decoder->record;
+ union perf_event *event = speq->event_buf;
+ struct perf_sample sample = { .ip = 0, };
+
+ arm_spe_prep_sample(spe, speq, event, &sample);
+
+ sample.id = spe_events_id;
+ sample.stream_id = spe_events_id;
+ sample.addr = record->to_ip;
+
+ return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+}
+
+#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \
+ ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \
+ ARM_SPE_REMOTE_ACCESS)
+
+static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
+{
+ if (type & SPE_MEM_TYPE)
+ return true;
+
+ return false;
+}
+
+static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
+{
+ union perf_mem_data_src data_src = { 0 };
+
+ if (record->op == ARM_SPE_LD)
+ data_src.mem_op = PERF_MEM_OP_LOAD;
+ else
+ data_src.mem_op = PERF_MEM_OP_STORE;
+
+ if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
+ data_src.mem_lvl = PERF_MEM_LVL_L3;
+
+ if (record->type & ARM_SPE_LLC_MISS)
+ data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+ else
+ data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+ } else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
+ data_src.mem_lvl = PERF_MEM_LVL_L1;
+
+ if (record->type & ARM_SPE_L1D_MISS)
+ data_src.mem_lvl |= PERF_MEM_LVL_MISS;
+ else
+ data_src.mem_lvl |= PERF_MEM_LVL_HIT;
+ }
+
+ if (record->type & ARM_SPE_REMOTE_ACCESS)
+ data_src.mem_lvl |= PERF_MEM_LVL_REM_CCE1;
+
+ if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
+ data_src.mem_dtlb = PERF_MEM_TLB_WK;
+
+ if (record->type & ARM_SPE_TLB_MISS)
+ data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
+ else
+ data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
+ }
+
+ return data_src.val;
+}
+
static int arm_spe_sample(struct arm_spe_queue *speq)
{
const struct arm_spe_record *record = &speq->decoder->record;
struct arm_spe *spe = speq->spe;
+ u64 data_src;
int err;
+ data_src = arm_spe__synth_data_source(record);
+
if (spe->sample_flc) {
if (record->type & ARM_SPE_L1D_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->l1d_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_L1D_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->l1d_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
+ data_src);
if (err)
return err;
}
@@ -299,15 +375,15 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
if (spe->sample_llc) {
if (record->type & ARM_SPE_LLC_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->llc_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_LLC_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->llc_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
+ data_src);
if (err)
return err;
}
@@ -315,31 +391,36 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
if (spe->sample_tlb) {
if (record->type & ARM_SPE_TLB_MISS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->tlb_miss_id);
+ err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
+ data_src);
if (err)
return err;
}
if (record->type & ARM_SPE_TLB_ACCESS) {
- err = arm_spe_synth_spe_events_sample(
- speq, spe->tlb_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
+ data_src);
if (err)
return err;
}
}
if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
- err = arm_spe_synth_spe_events_sample(speq,
- spe->branch_miss_id);
+ err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);
if (err)
return err;
}
if (spe->sample_remote_access &&
(record->type & ARM_SPE_REMOTE_ACCESS)) {
- err = arm_spe_synth_spe_events_sample(speq,
- spe->remote_access_id);
+ err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
+ data_src);
+ if (err)
+ return err;
+ }
+
+ if (spe->sample_memory && arm_spe__is_memory_event(record->type)) {
+ err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
if (err)
return err;
}
@@ -803,7 +884,7 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
attr.type = PERF_TYPE_HARDWARE;
attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
- PERF_SAMPLE_PERIOD;
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC;
if (spe->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
@@ -907,6 +988,16 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
id += 1;
}
+ if (spe->synth_opts.mem) {
+ spe->sample_memory = true;
+
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->memory_id = id;
+ arm_spe_set_event_name(evlist, id, "memory");
+ }
+
return 0;
}
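
The new arm_spe__synth_data_source() above packs each SPE record into a perf_mem_data_src value, so anything consuming the synthesized memory samples can decode it with the standard PERF_MEM_* bits. A minimal consumer sketch follows; the helper name is hypothetical, while the union and flag names come from the perf_event uapi:

#include <linux/perf_event.h>
#include <stdio.h>

/* Hypothetical consumer: summarize a data_src value synthesized from SPE. */
static void print_spe_data_src(__u64 val)
{
	union perf_mem_data_src src = { .val = val };

	printf("%s %s at %s, dTLB %s\n",
	       (src.mem_op & PERF_MEM_OP_LOAD) ? "load" : "store",
	       (src.mem_lvl & PERF_MEM_LVL_MISS) ? "miss" : "hit",
	       (src.mem_lvl & PERF_MEM_LVL_L3) ? "LLC" : "L1D",
	       (src.mem_dtlb & PERF_MEM_TLB_MISS) ? "miss" : "hit");
}
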
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a60878498139..953f4afacd3b 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -788,6 +788,21 @@ no_opt:
return auxtrace_validate_aux_sample_size(evlist, opts);
}
+void auxtrace_regroup_aux_output(struct evlist *evlist)
+{
+ struct evsel *evsel, *aux_evsel = NULL;
+ struct evsel_config_term *term;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_aux_event(evsel))
+ aux_evsel = evsel;
+ term = evsel__get_config_term(evsel, AUX_OUTPUT);
+ /* If possible, group with the AUX event */
+ if (term && aux_evsel)
+ evlist__regroup(evlist, aux_evsel, evsel);
+ }
+}
+
struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 7e5c9e1552bd..a4fbb33b7245 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -559,6 +559,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts, const char *str);
+void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts);
@@ -741,6 +742,11 @@ int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
}
static inline
+void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
+{
+}
+
+static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
new file mode 100644
index 000000000000..04f89120b323
--- /dev/null
+++ b/tools/perf/util/bpf_counter.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2019 Facebook */
+
+#include <assert.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+
+#include "bpf_counter.h"
+#include "counts.h"
+#include "debug.h"
+#include "evsel.h"
+#include "target.h"
+
+#include "bpf_skel/bpf_prog_profiler.skel.h"
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *)(unsigned long)ptr;
+}
+
+static void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static struct bpf_counter *bpf_counter_alloc(void)
+{
+ struct bpf_counter *counter;
+
+ counter = zalloc(sizeof(*counter));
+ if (counter)
+ INIT_LIST_HEAD(&counter->list);
+ return counter;
+}
+
+static int bpf_program_profiler__destroy(struct evsel *evsel)
+{
+ struct bpf_counter *counter, *tmp;
+
+ list_for_each_entry_safe(counter, tmp,
+ &evsel->bpf_counter_list, list) {
+ list_del_init(&counter->list);
+ bpf_prog_profiler_bpf__destroy(counter->skel);
+ free(counter);
+ }
+ assert(list_empty(&evsel->bpf_counter_list));
+
+ return 0;
+}
+
+static char *bpf_target_prog_name(int tgt_fd)
+{
+ struct bpf_prog_info_linear *info_linear;
+ struct bpf_func_info *func_info;
+ const struct btf_type *t;
+ char *name = NULL;
+ struct btf *btf;
+
+ info_linear = bpf_program__get_prog_info_linear(
+ tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
+ if (IS_ERR_OR_NULL(info_linear)) {
+ pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
+ return NULL;
+ }
+
+ if (info_linear->info.btf_id == 0 ||
+ btf__get_from_id(info_linear->info.btf_id, &btf)) {
+ pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
+ goto out;
+ }
+
+ func_info = u64_to_ptr(info_linear->info.func_info);
+ t = btf__type_by_id(btf, func_info[0].type_id);
+ if (!t) {
+ pr_debug("btf %d doesn't have type %d\n",
+ info_linear->info.btf_id, func_info[0].type_id);
+ goto out;
+ }
+ name = strdup(btf__name_by_offset(btf, t->name_off));
+out:
+ free(info_linear);
+ return name;
+}
+
+static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
+{
+ struct bpf_prog_profiler_bpf *skel;
+ struct bpf_counter *counter;
+ struct bpf_program *prog;
+ char *prog_name;
+ int prog_fd;
+ int err;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0) {
+ pr_err("Failed to open fd for bpf prog %u\n", prog_id);
+ return -1;
+ }
+ counter = bpf_counter_alloc();
+ if (!counter) {
+ close(prog_fd);
+ return -1;
+ }
+
+ skel = bpf_prog_profiler_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open bpf skeleton\n");
+ goto err_out;
+ }
+
+ skel->rodata->num_cpu = evsel__nr_cpus(evsel);
+
+ bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
+ bpf_map__resize(skel->maps.fentry_readings, 1);
+ bpf_map__resize(skel->maps.accum_readings, 1);
+
+ prog_name = bpf_target_prog_name(prog_fd);
+ if (!prog_name) {
+ pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
+ goto err_out;
+ }
+
+ bpf_object__for_each_program(prog, skel->obj) {
+ err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
+ if (err) {
+ pr_err("bpf_program__set_attach_target failed.\n"
+ "Does bpf prog %u have BTF?\n", prog_id);
+ goto err_out;
+ }
+ }
+ set_max_rlimit();
+ err = bpf_prog_profiler_bpf__load(skel);
+ if (err) {
+ pr_err("bpf_prog_profiler_bpf__load failed\n");
+ goto err_out;
+ }
+
+ assert(skel != NULL);
+ counter->skel = skel;
+ list_add(&counter->list, &evsel->bpf_counter_list);
+ close(prog_fd);
+ return 0;
+err_out:
+ bpf_prog_profiler_bpf__destroy(skel);
+ free(counter);
+ close(prog_fd);
+ return -1;
+}
+
+static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
+{
+ char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
+ u32 prog_id;
+ int ret;
+
+ bpf_str_ = bpf_str = strdup(target->bpf_str);
+ if (!bpf_str)
+ return -1;
+
+ while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
+ prog_id = strtoul(tok, &p, 10);
+ if (prog_id == 0 || prog_id == UINT_MAX ||
+ (*p != '\0' && *p != ',')) {
+ pr_err("Failed to parse bpf prog ids %s\n",
+ target->bpf_str);
+ return -1;
+ }
+
+ ret = bpf_program_profiler_load_one(evsel, prog_id);
+ if (ret) {
+ bpf_program_profiler__destroy(evsel);
+ free(bpf_str_);
+ return -1;
+ }
+ bpf_str = NULL;
+ }
+ free(bpf_str_);
+ return 0;
+}
+
+static int bpf_program_profiler__enable(struct evsel *evsel)
+{
+ struct bpf_counter *counter;
+ int ret;
+
+ list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+ assert(counter->skel != NULL);
+ ret = bpf_prog_profiler_bpf__attach(counter->skel);
+ if (ret) {
+ bpf_program_profiler__destroy(evsel);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int bpf_program_profiler__read(struct evsel *evsel)
+{
+ // perf_cpu_map uses /sys/devices/system/cpu/online
+ int num_cpu = evsel__nr_cpus(evsel);
+ // BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
+ // Sometimes possible > online, like on a Ryzen 3900X that has 24
+ // threads but its possible showed 0-31 -acme
+ int num_cpu_bpf = libbpf_num_possible_cpus();
+ struct bpf_perf_event_value values[num_cpu_bpf];
+ struct bpf_counter *counter;
+ int reading_map_fd;
+ __u32 key = 0;
+ int err, cpu;
+
+ if (list_empty(&evsel->bpf_counter_list))
+ return -EAGAIN;
+
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ perf_counts(evsel->counts, cpu, 0)->val = 0;
+ perf_counts(evsel->counts, cpu, 0)->ena = 0;
+ perf_counts(evsel->counts, cpu, 0)->run = 0;
+ }
+ list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+ struct bpf_prog_profiler_bpf *skel = counter->skel;
+
+ assert(skel != NULL);
+ reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
+
+ err = bpf_map_lookup_elem(reading_map_fd, &key, values);
+ if (err) {
+ pr_err("failed to read value\n");
+ return err;
+ }
+
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
+ perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
+ perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
+ }
+ }
+ return 0;
+}
+
+static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
+ int fd)
+{
+ struct bpf_prog_profiler_bpf *skel;
+ struct bpf_counter *counter;
+ int ret;
+
+ list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+ skel = counter->skel;
+ assert(skel != NULL);
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
+ &cpu, &fd, BPF_ANY);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct bpf_counter_ops bpf_program_profiler_ops = {
+ .load = bpf_program_profiler__load,
+ .enable = bpf_program_profiler__enable,
+ .read = bpf_program_profiler__read,
+ .destroy = bpf_program_profiler__destroy,
+ .install_pe = bpf_program_profiler__install_pe,
+};
+
+int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
+{
+ if (list_empty(&evsel->bpf_counter_list))
+ return 0;
+ return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
+}
+
+int bpf_counter__load(struct evsel *evsel, struct target *target)
+{
+ if (target__has_bpf(target))
+ evsel->bpf_counter_ops = &bpf_program_profiler_ops;
+
+ if (evsel->bpf_counter_ops)
+ return evsel->bpf_counter_ops->load(evsel, target);
+ return 0;
+}
+
+int bpf_counter__enable(struct evsel *evsel)
+{
+ if (list_empty(&evsel->bpf_counter_list))
+ return 0;
+ return evsel->bpf_counter_ops->enable(evsel);
+}
+
+int bpf_counter__read(struct evsel *evsel)
+{
+ if (list_empty(&evsel->bpf_counter_list))
+ return -EAGAIN;
+ return evsel->bpf_counter_ops->read(evsel);
+}
+
+void bpf_counter__destroy(struct evsel *evsel)
+{
+ if (list_empty(&evsel->bpf_counter_list))
+ return;
+ evsel->bpf_counter_ops->destroy(evsel);
+ evsel->bpf_counter_ops = NULL;
+}
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
new file mode 100644
index 000000000000..2eca210e5dc1
--- /dev/null
+++ b/tools/perf/util/bpf_counter.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_BPF_COUNTER_H
+#define __PERF_BPF_COUNTER_H 1
+
+#include <linux/list.h>
+
+struct evsel;
+struct target;
+struct bpf_counter;
+
+typedef int (*bpf_counter_evsel_op)(struct evsel *evsel);
+typedef int (*bpf_counter_evsel_target_op)(struct evsel *evsel,
+ struct target *target);
+typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
+ int cpu,
+ int fd);
+
+struct bpf_counter_ops {
+ bpf_counter_evsel_target_op load;
+ bpf_counter_evsel_op enable;
+ bpf_counter_evsel_op read;
+ bpf_counter_evsel_op destroy;
+ bpf_counter_evsel_install_pe_op install_pe;
+};
+
+struct bpf_counter {
+ void *skel;
+ struct list_head list;
+};
+
+#ifdef HAVE_BPF_SKEL
+
+int bpf_counter__load(struct evsel *evsel, struct target *target);
+int bpf_counter__enable(struct evsel *evsel);
+int bpf_counter__read(struct evsel *evsel);
+void bpf_counter__destroy(struct evsel *evsel);
+int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
+
+#else /* HAVE_BPF_SKEL */
+
+#include<linux/err.h>
+
+static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
+ struct target *target __maybe_unused)
+{
+ return 0;
+}
+
+static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
+{
+ return 0;
+}
+
+static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
+{
+ return -EAGAIN;
+}
+
+static inline void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
+{
+}
+
+static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
+ int cpu __maybe_unused,
+ int fd __maybe_unused)
+{
+ return 0;
+}
+
+#endif /* HAVE_BPF_SKEL */
+
+#endif /* __PERF_BPF_COUNTER_H */
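
For orientation, the header above implies a load/install/enable/read/destroy lifecycle that evsel and perf-stat are expected to drive. A sketch of that call order only, assuming an evsel whose target carries BPF program ids; error paths are trimmed and the wrapper function is illustrative:

/* Illustrative call order; "evsel" and "target" are assumed to be set up
 * by the caller, as perf-stat would do. */
static int count_with_bpf(struct evsel *evsel, struct target *target)
{
	int err;

	err = bpf_counter__load(evsel, target);	/* selects bpf_program_profiler_ops */
	if (err)
		return err;
	/* evsel__open() installs each opened event fd via bpf_counter__install_pe() */
	err = bpf_counter__enable(evsel);	/* attaches the fentry/fexit programs */
	if (err)
		return err;
	/* ... profiled programs run ... */
	err = bpf_counter__read(evsel);		/* folds accum_readings into evsel->counts */
	bpf_counter__destroy(evsel);
	return err;
}
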
diff --git a/tools/perf/util/bpf_skel/.gitignore b/tools/perf/util/bpf_skel/.gitignore
new file mode 100644
index 000000000000..5263e9e6c5d8
--- /dev/null
+++ b/tools/perf/util/bpf_skel/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+.tmp
+*.skel.h
\ No newline at end of file
diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
new file mode 100644
index 000000000000..c7cec92d0236
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2020 Facebook
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* map of perf event fds, num_cpu * num_metric entries */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+/* readings at fentry */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} fentry_readings SEC(".maps");
+
+/* accumulated readings */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} accum_readings SEC(".maps");
+
+const volatile __u32 num_cpu = 1;
+
+SEC("fentry/XXX")
+int BPF_PROG(fentry_XXX)
+{
+ __u32 key = bpf_get_smp_processor_id();
+ struct bpf_perf_event_value *ptr;
+ __u32 zero = 0;
+ long err;
+
+ /* look up before reading, to reduce error */
+ ptr = bpf_map_lookup_elem(&fentry_readings, &zero);
+ if (!ptr)
+ return 0;
+
+ err = bpf_perf_event_read_value(&events, key, ptr, sizeof(*ptr));
+ if (err)
+ return 0;
+
+ return 0;
+}
+
+static inline void
+fexit_update_maps(struct bpf_perf_event_value *after)
+{
+ struct bpf_perf_event_value *before, diff, *accum;
+ __u32 zero = 0;
+
+ before = bpf_map_lookup_elem(&fentry_readings, &zero);
+ /* only account samples with a valid fentry_reading */
+ if (before && before->counter) {
+ struct bpf_perf_event_value *accum;
+
+ diff.counter = after->counter - before->counter;
+ diff.enabled = after->enabled - before->enabled;
+ diff.running = after->running - before->running;
+
+ accum = bpf_map_lookup_elem(&accum_readings, &zero);
+ if (accum) {
+ accum->counter += diff.counter;
+ accum->enabled += diff.enabled;
+ accum->running += diff.running;
+ }
+ }
+}
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value reading;
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u32 one = 1, zero = 0;
+ int err;
+
+ /* read all events before updating the maps, to reduce error */
+ err = bpf_perf_event_read_value(&events, cpu, &reading, sizeof(reading));
+ if (err)
+ return 0;
+
+ fexit_update_maps(&reading);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 02df36b30ac5..e32e8f2ff3bd 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -448,7 +448,8 @@ static bool lsdir_bid_tail_filter(const char *name __maybe_unused,
int i = 0;
while (isxdigit(d->d_name[i]) && i < SBUILD_ID_SIZE - 3)
i++;
- return (i == SBUILD_ID_SIZE - 3) && (d->d_name[i] == '\0');
+ return (i >= SBUILD_ID_MIN_SIZE - 3) && (i <= SBUILD_ID_SIZE - 3) &&
+ (d->d_name[i] == '\0');
}
struct strlist *build_id_cache__list_all(bool validonly)
@@ -490,7 +491,7 @@ struct strlist *build_id_cache__list_all(bool validonly)
}
strlist__for_each_entry(nd2, linklist) {
if (snprintf(sbuild_id, SBUILD_ID_SIZE, "%s%s",
- nd->s, nd2->s) != SBUILD_ID_SIZE - 1)
+ nd->s, nd2->s) > SBUILD_ID_SIZE - 1)
goto err_out;
if (validonly && !build_id_cache__valid_id(sbuild_id))
continue;
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 02613f4b2c29..c19617151670 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -2,8 +2,10 @@
#ifndef PERF_BUILD_ID_H_
#define PERF_BUILD_ID_H_ 1
-#define BUILD_ID_SIZE 20
+#define BUILD_ID_SIZE 20 /* SHA-1 length in bytes */
+#define BUILD_ID_MIN_SIZE 16 /* MD5/UUID/GUID length in bytes */
#define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1)
+#define SBUILD_ID_MIN_SIZE (BUILD_ID_MIN_SIZE * 2 + 1)
#include "machine.h"
#include "tool.h"
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 5dff7e489921..f24ab4585553 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -161,7 +161,7 @@ void evlist__set_default_cgroup(struct evlist *evlist, struct cgroup *cgroup)
/* helper function for ftw() in match_cgroups and list_cgroups */
static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unused,
- int typeflag)
+ int typeflag, struct FTW *ftwbuf __maybe_unused)
{
struct cgroup_name *cn;
@@ -209,12 +209,12 @@ static int list_cgroups(const char *str)
if (!s)
return -1;
/* pretend if it's added by ftw() */
- ret = add_cgroup_name(s, NULL, FTW_D);
+ ret = add_cgroup_name(s, NULL, FTW_D, NULL);
free(s);
if (ret)
return -1;
} else {
- if (add_cgroup_name("", NULL, FTW_D) < 0)
+ if (add_cgroup_name("", NULL, FTW_D, NULL) < 0)
return -1;
}
@@ -247,7 +247,7 @@ static int match_cgroups(const char *str)
prefix_len = strlen(mnt);
/* collect all cgroups in the cgroup_list */
- if (ftw(mnt, add_cgroup_name, 20) < 0)
+ if (nftw(mnt, add_cgroup_name, 20, 0) < 0)
return -1;
for (;;) {
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 6969f82843ee..6984c77068a3 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -489,7 +489,7 @@ int perf_default_config(const char *var, const char *value,
return 0;
}
-int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
+static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
{
int ret;
FILE *f = fopen(filename, "r");
@@ -521,16 +521,66 @@ static int perf_env_bool(const char *k, int def)
return v ? perf_config_bool(k, v) : def;
}
-static int perf_config_system(void)
+int perf_config_system(void)
{
return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
}
-static int perf_config_global(void)
+int perf_config_global(void)
{
return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
}
+static char *home_perfconfig(void)
+{
+ const char *home = NULL;
+ char *config;
+ struct stat st;
+
+ home = getenv("HOME");
+
+ /*
+ * Skip reading user config if:
+ * - there is no place to read it from (HOME)
+ * - we are asked not to (PERF_CONFIG_NOGLOBAL=1)
+ */
+ if (!home || !*home || !perf_config_global())
+ return NULL;
+
+ config = strdup(mkpath("%s/.perfconfig", home));
+ if (config == NULL) {
+ pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.", home);
+ return NULL;
+ }
+
+ if (stat(config, &st) < 0)
+ goto out_free;
+
+ if (st.st_uid && (st.st_uid != geteuid())) {
+ pr_warning("File %s not owned by current user or root, ignoring it.", config);
+ goto out_free;
+ }
+
+ if (st.st_size)
+ return config;
+
+out_free:
+ free(config);
+ return NULL;
+}
+
+const char *perf_home_perfconfig(void)
+{
+ static const char *config;
+ static bool failed;
+
+ config = failed ? NULL : home_perfconfig();
+ if (!config)
+ failed = true;
+
+ return config;
+}
+
static struct perf_config_section *find_section(struct list_head *sections,
const char *section_name)
{
@@ -676,9 +726,6 @@ int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
static int perf_config_set__init(struct perf_config_set *set)
{
int ret = -1;
- const char *home = NULL;
- char *user_config;
- struct stat st;
/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
if (config_exclusive_filename)
@@ -687,41 +734,11 @@ static int perf_config_set__init(struct perf_config_set *set)
if (perf_config_from_file(collect_config, perf_etc_perfconfig(), set) < 0)
goto out;
}
-
- home = getenv("HOME");
-
- /*
- * Skip reading user config if:
- * - there is no place to read it from (HOME)
- * - we are asked not to (PERF_CONFIG_NOGLOBAL=1)
- */
- if (!home || !*home || !perf_config_global())
- return 0;
-
- user_config = strdup(mkpath("%s/.perfconfig", home));
- if (user_config == NULL) {
- pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.", home);
- goto out;
- }
-
- if (stat(user_config, &st) < 0) {
- if (errno == ENOENT)
- ret = 0;
- goto out_free;
- }
-
- ret = 0;
-
- if (st.st_uid && (st.st_uid != geteuid())) {
- pr_warning("File %s not owned by current user or root, ignoring it.", user_config);
- goto out_free;
+ if (perf_config_global() && perf_home_perfconfig()) {
+ if (perf_config_from_file(collect_config, perf_home_perfconfig(), set) < 0)
+ goto out;
}
- if (st.st_size)
- ret = perf_config_from_file(collect_config, user_config, set);
-
-out_free:
- free(user_config);
out:
return ret;
}
@@ -738,6 +755,18 @@ struct perf_config_set *perf_config_set__new(void)
return set;
}
+struct perf_config_set *perf_config_set__load_file(const char *file)
+{
+ struct perf_config_set *set = zalloc(sizeof(*set));
+
+ if (set) {
+ INIT_LIST_HEAD(&set->sections);
+ perf_config_from_file(collect_config, file, set);
+ }
+
+ return set;
+}
+
static int perf_config__init(void)
{
if (config_set == NULL)
@@ -746,17 +775,15 @@ static int perf_config__init(void)
return config_set == NULL;
}
-int perf_config(config_fn_t fn, void *data)
+int perf_config_set(struct perf_config_set *set,
+ config_fn_t fn, void *data)
{
int ret = 0;
char key[BUFSIZ];
struct perf_config_section *section;
struct perf_config_item *item;
- if (config_set == NULL && perf_config__init())
- return -1;
-
- perf_config_set__for_each_entry(config_set, section, item) {
+ perf_config_set__for_each_entry(set, section, item) {
char *value = item->value;
if (value) {
@@ -778,6 +805,14 @@ out:
return ret;
}
+int perf_config(config_fn_t fn, void *data)
+{
+ if (config_set == NULL && perf_config__init())
+ return -1;
+
+ return perf_config_set(config_set, fn, data);
+}
+
void perf_config__exit(void)
{
perf_config_set__delete(config_set);
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index 8c881e3a3ec3..2fd77aaff4d2 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -27,17 +27,22 @@ extern const char *config_exclusive_filename;
typedef int (*config_fn_t)(const char *, const char *, void *);
-int perf_config_from_file(config_fn_t fn, const char *filename, void *data);
int perf_default_config(const char *, const char *, void *);
int perf_config(config_fn_t fn, void *);
+int perf_config_set(struct perf_config_set *set,
+ config_fn_t fn, void *data);
int perf_config_int(int *dest, const char *, const char *);
int perf_config_u8(u8 *dest, const char *name, const char *value);
int perf_config_u64(u64 *dest, const char *, const char *);
int perf_config_bool(const char *, const char *);
int config_error_nonbool(const char *);
const char *perf_etc_perfconfig(void);
+const char *perf_home_perfconfig(void);
+int perf_config_system(void);
+int perf_config_global(void);
struct perf_config_set *perf_config_set__new(void);
+struct perf_config_set *perf_config_set__load_file(const char *file);
void perf_config_set__delete(struct perf_config_set *set);
int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
const char *var, const char *value);
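
The newly exported perf_config_set__load_file() and perf_config_set() make it possible to walk an arbitrary config file instead of only the implicit system/user set. A minimal sketch, assuming a trivial callback that just prints every variable:

/* Sketch: dump all variables from a user-supplied perfconfig-style file. */
static int show_var(const char *var, const char *value, void *data __maybe_unused)
{
	printf("%s = %s\n", var, value ?: "(unset)");
	return 0;
}

static void dump_config_file(const char *path)
{
	struct perf_config_set *set = perf_config_set__load_file(path);

	if (!set)
		return;
	perf_config_set(set, show_var, NULL);
	perf_config_set__delete(set);
}
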
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index cd007cc9c283..3f4bc4050477 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -419,19 +419,10 @@ cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
packet->last_instr_subtype = elem->last_i_subtype;
packet->last_instr_cond = elem->last_instr_cond;
- switch (elem->last_i_type) {
- case OCSD_INSTR_BR:
- case OCSD_INSTR_BR_INDIRECT:
+ if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
packet->last_instr_taken_branch = elem->last_instr_exec;
- break;
- case OCSD_INSTR_ISB:
- case OCSD_INSTR_DSB_DMB:
- case OCSD_INSTR_WFI_WFE:
- case OCSD_INSTR_OTHER:
- default:
+ else
packet->last_instr_taken_branch = false;
- break;
- }
packet->last_instr_size = elem->last_instr_sz;
@@ -572,6 +563,8 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
case OCSD_GEN_TRC_ELEM_EVENT:
case OCSD_GEN_TRC_ELEM_SWTRACE:
case OCSD_GEN_TRC_ELEM_CUSTOM:
+ case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
+ case OCSD_GEN_TRC_ELEM_MEMTRANS:
default:
break;
}
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 27c5fef9ad54..8b67bd97d122 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -948,7 +948,7 @@ static char *change_name(char *name, char *orig_name, int dup)
goto out;
/*
* Add '_' prefix to potential keywork. According to
- * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
+ * Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
* futher CTF spec updating may require us to use '$'.
*/
if (dup < 0)
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index db7447154622..5cd189172525 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -438,6 +438,8 @@ static struct {
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "transaction abort"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "trace begin"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "trace end"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vm entry"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vm exit"},
{0, NULL}
};
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 50fd6a4be4e0..2c06abf6dcd2 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -10,6 +10,7 @@
#include <api/debug.h>
#include <linux/kernel.h>
#include <linux/time64.h>
+#include <sys/time.h>
#ifdef HAVE_BACKTRACE_SUPPORT
#include <execinfo.h>
#endif
@@ -31,21 +32,48 @@ int debug_ordered_events;
static int redirect_to_stderr;
int debug_data_convert;
static FILE *debug_file;
+bool debug_display_time;
void debug_set_file(FILE *file)
{
debug_file = file;
}
+void debug_set_display_time(bool set)
+{
+ debug_display_time = set;
+}
+
+static int fprintf_time(FILE *file)
+{
+ struct timeval tod;
+ struct tm ltime;
+ char date[64];
+
+ if (!debug_display_time)
+ return 0;
+
+ if (gettimeofday(&tod, NULL) != 0)
+ return 0;
+
+ if (localtime_r(&tod.tv_sec, &ltime) == NULL)
+ return 0;
+
+ strftime(date, sizeof(date), "%F %H:%M:%S", &ltime);
+ return fprintf(file, "[%s.%06lu] ", date, (long)tod.tv_usec);
+}
+
int veprintf(int level, int var, const char *fmt, va_list args)
{
int ret = 0;
if (var >= level) {
- if (use_browser >= 1 && !redirect_to_stderr)
+ if (use_browser >= 1 && !redirect_to_stderr) {
ui_helpline__vshow(fmt, args);
- else
- ret = vfprintf(debug_file, fmt, args);
+ } else {
+ ret = fprintf_time(debug_file);
+ ret += vfprintf(debug_file, fmt, args);
+ }
}
return ret;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 43f712295645..48f631966067 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -64,6 +64,7 @@ int veprintf(int level, int var, const char *fmt, va_list args);
int perf_debug_option(const char *str);
void debug_set_file(FILE *file);
+void debug_set_display_time(bool set);
void perf_debug_setup(void);
int perf_quiet_option(void);
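
With the hooks above, every debug line gains a "[YYYY-MM-DD HH:MM:SS.usec]" prefix once debug_set_display_time(true) is in effect. A small sketch of wiring it up; the log file path and the use of perf's global "verbose" level are illustrative:

/* Sketch: timestamped debug output redirected to a file. */
static void enable_timestamped_debug(void)
{
	FILE *log = fopen("perf-debug.log", "w");	/* hypothetical path */

	if (!log)
		return;
	debug_set_file(log);
	debug_set_display_time(true);
	verbose = 1;
	pr_debug("debug log opened\n");
	/* -> "[2021-02-08 10:15:42.123456] debug log opened" */
}
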
diff --git a/tools/perf/util/demangle-ocaml.c b/tools/perf/util/demangle-ocaml.c
new file mode 100644
index 000000000000..3df14e67c622
--- /dev/null
+++ b/tools/perf/util/demangle-ocaml.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <stdlib.h>
+#include "util/string2.h"
+
+#include "demangle-ocaml.h"
+
+#include <linux/ctype.h>
+
+static const char *caml_prefix = "caml";
+static const size_t caml_prefix_len = 4;
+
+/* mangled OCaml symbols start with "caml" followed by an upper-case letter */
+static bool
+ocaml_is_mangled(const char *sym)
+{
+ return 0 == strncmp(sym, caml_prefix, caml_prefix_len)
+ && isupper(sym[caml_prefix_len]);
+}
+
+/*
+ * input:
+ * sym: a symbol which may have been mangled by the OCaml compiler
+ * return:
+ * if the input doesn't look like a mangled OCaml symbol, NULL is returned
+ * otherwise, a newly allocated string containing the demangled symbol is returned
+ */
+char *
+ocaml_demangle_sym(const char *sym)
+{
+ char *result;
+ int j = 0;
+ int i;
+ int len;
+
+ if (!ocaml_is_mangled(sym)) {
+ return NULL;
+ }
+
+ len = strlen(sym);
+
+ /* the demangled symbol is always smaller than the mangled symbol */
+ result = malloc(len + 1);
+ if (!result)
+ return NULL;
+
+ /* skip "caml" prefix */
+ i = caml_prefix_len;
+
+ while (i < len) {
+ if (sym[i] == '_' && sym[i + 1] == '_') {
+ /* "__" -> "." */
+ result[j++] = '.';
+ i += 2;
+ }
+ else if (sym[i] == '$' && isxdigit(sym[i + 1]) && isxdigit(sym[i + 2])) {
+ /* "$xx" is a hex-encoded character */
+ result[j++] = (hex(sym[i + 1]) << 4) | hex(sym[i + 2]);
+ i += 3;
+ }
+ else {
+ result[j++] = sym[i++];
+ }
+ }
+ result[j] = '\0';
+
+ /* scan backwards to remove an "_" followed by decimal digits */
+ if (j != 0 && isdigit(result[j - 1])) {
+ while (--j) {
+ if (!isdigit(result[j])) {
+ break;
+ }
+ }
+ if (result[j] == '_') {
+ result[j] = '\0';
+ }
+ }
+
+ return result;
+}
diff --git a/tools/perf/util/demangle-ocaml.h b/tools/perf/util/demangle-ocaml.h
new file mode 100644
index 000000000000..843cc4fa10a6
--- /dev/null
+++ b/tools/perf/util/demangle-ocaml.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_DEMANGLE_OCAML
+#define __PERF_DEMANGLE_OCAML 1
+
+char * ocaml_demangle_sym(const char *str);
+
+#endif /* __PERF_DEMANGLE_OCAML */
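
Taken together, the two new files let the symbol code offer OCaml demangling alongside the existing Java and Rust support. A standalone sketch of the transformation, with the input symbol chosen purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include "demangle-ocaml.h"

int main(void)
{
	/* "caml" prefix, "__" separators and a trailing "_<digits>" suffix */
	char *name = ocaml_demangle_sym("camlStdlib__array__fold_left_475");

	if (name) {
		printf("%s\n", name);	/* prints "Stdlib.array.fold_left" */
		free(name);
	}
	return 0;
}
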
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 05616d4138a9..ac706304afe9 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -288,17 +288,36 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
- return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
- " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
- event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
- event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
- event->mmap2.min, event->mmap2.ino,
- event->mmap2.ino_generation,
- (event->mmap2.prot & PROT_READ) ? 'r' : '-',
- (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
- (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
- (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
- event->mmap2.filename);
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ char sbuild_id[SBUILD_ID_SIZE];
+ struct build_id bid;
+
+ build_id__init(&bid, event->mmap2.build_id,
+ event->mmap2.build_id_size);
+ build_id__sprintf(&bid, sbuild_id);
+
+ return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
+ " <%s>]: %c%c%c%c %s\n",
+ event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
+ event->mmap2.len, event->mmap2.pgoff, sbuild_id,
+ (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+ (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+ (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+ (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
+ event->mmap2.filename);
+ } else {
+ return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
+ " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
+ event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
+ event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
+ event->mmap2.min, event->mmap2.ino,
+ event->mmap2.ino_generation,
+ (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+ (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+ (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+ (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
+ event->mmap2.filename);
+ }
}
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
@@ -626,6 +645,19 @@ struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
return al->sym;
}
+static bool check_address_range(struct intlist *addr_list, int addr_range,
+ unsigned long addr)
+{
+ struct int_node *pos;
+
+ intlist__for_each_entry(pos, addr_list) {
+ if (addr >= pos->i && addr < pos->i + addr_range)
+ return true;
+ }
+
+ return false;
+}
+
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
@@ -673,6 +705,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
}
al->sym = map__find_symbol(al->map, al->addr);
+ } else if (symbol_conf.dso_list) {
+ al->filtered |= (1 << HIST_FILTER__DSO);
}
if (symbol_conf.sym_list) {
@@ -690,6 +724,17 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
ret = strlist__has_entry(symbol_conf.sym_list,
al_addr_str);
}
+ if (!ret && symbol_conf.addr_list && al->map) {
+ unsigned long addr = al->map->unmap_ip(al->map, al->addr);
+
+ ret = intlist__has_entry(symbol_conf.addr_list, addr);
+ if (!ret && symbol_conf.addr_range) {
+ ret = check_address_range(symbol_conf.addr_list,
+ symbol_conf.addr_range,
+ addr);
+ }
+ }
+
if (!ret)
al->filtered |= (1 << HIST_FILTER__SYMBOL);
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index ff403ea578e1..f603edbbbc6f 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -96,6 +96,8 @@ enum {
PERF_IP_FLAG_TRACE_BEGIN = 1ULL << 8,
PERF_IP_FLAG_TRACE_END = 1ULL << 9,
PERF_IP_FLAG_IN_TX = 1ULL << 10,
+ PERF_IP_FLAG_VMENTRY = 1ULL << 11,
+ PERF_IP_FLAG_VMEXIT = 1ULL << 12,
};
#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
@@ -110,7 +112,9 @@ enum {
PERF_IP_FLAG_INTERRUPT |\
PERF_IP_FLAG_TX_ABORT |\
PERF_IP_FLAG_TRACE_BEGIN |\
- PERF_IP_FLAG_TRACE_END)
+ PERF_IP_FLAG_TRACE_END |\
+ PERF_IP_FLAG_VMENTRY |\
+ PERF_IP_FLAG_VMEXIT)
#define MAX_INSN 16
@@ -136,11 +140,13 @@ struct perf_sample {
u64 data_src;
u64 phys_addr;
u64 data_page_size;
+ u64 code_page_size;
u64 cgroup;
u32 flags;
u16 insn_len;
u8 cpumode;
u16 misc;
+ u16 ins_lat;
bool no_hw_idx; /* No hw_idx collected in branch_stack */
char insn[MAX_INSN];
void *raw_data;
@@ -171,6 +177,7 @@ enum perf_synth_id {
PERF_SYNTH_INTEL_EXSTOP,
PERF_SYNTH_INTEL_PWRX,
PERF_SYNTH_INTEL_CBR,
+ PERF_SYNTH_INTEL_PSB,
};
/*
@@ -263,6 +270,12 @@ struct perf_synth_intel_cbr {
u32 reserved3;
};
+struct perf_synth_intel_psb {
+ u32 padding;
+ u32 reserved;
+ u64 offset;
+};
+
/*
* raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
* 8-byte alignment.
@@ -412,4 +425,7 @@ extern unsigned int proc_map_timeout;
#define PAGE_SIZE_NAME_LEN 32
char *get_page_size_name(u64 size, char *str);
+void arch_perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type);
+void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *array, u64 type);
+
#endif /* __PERF_RECORD_H */
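
arch_perf_parse_sample_weight() and its synthesize counterpart are weak hooks, so an architecture can split the PERF_SAMPLE_WEIGHT_STRUCT payload into the classic weight plus the new ins_lat field. A sketch of what such an override could look like, assuming the uapi union perf_sample_weight layout:

/* Sketch of a possible arch override; field names follow the uapi
 * union perf_sample_weight (var1_dw = weight, var2_w = instruction latency). */
void arch_perf_parse_sample_weight(struct perf_sample *data,
				   const __u64 *array, u64 type)
{
	union perf_sample_weight weight;

	weight.full = *array;
	if (type & PERF_SAMPLE_WEIGHT)
		data->weight = weight.full;
	else {
		data->weight = weight.var1_dw;
		data->ins_lat = weight.var2_w;
	}
}
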
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 05363a7247c4..5121b4db66fe 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -24,6 +24,7 @@
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
+#include "util/evsel_fprintf.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -303,6 +304,11 @@ int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *a
return evlist__add_attrs(evlist, attrs, nr_attrs);
}
+__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
+{
+ return 0;
+}
+
struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
struct evsel *evsel;
@@ -572,6 +578,14 @@ int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}
+#ifdef HAVE_EVENTFD_SUPPORT
+int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
+{
+ return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
+ fdarray_flag__nonfilterable);
+}
+#endif
+
int evlist__poll(struct evlist *evlist, int timeout)
{
return perf_evlist__poll(&evlist->core, timeout);
@@ -1936,6 +1950,15 @@ static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
(sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
*cmd = EVLIST_CTL_CMD_SNAPSHOT;
pr_debug("is snapshot\n");
+ } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
+ (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
+ *cmd = EVLIST_CTL_CMD_EVLIST;
+ } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
+ (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
+ *cmd = EVLIST_CTL_CMD_STOP;
+ } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
+ (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
+ *cmd = EVLIST_CTL_CMD_PING;
}
}
@@ -1957,6 +1980,98 @@ int evlist__ctlfd_ack(struct evlist *evlist)
return err;
}
+static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
+{
+ char *data = cmd_data + cmd_size;
+
+ /* no argument */
+ if (!*data)
+ return 0;
+
+ /* there's argument */
+ if (*data == ' ') {
+ *arg = data + 1;
+ return 1;
+ }
+
+ /* malformed */
+ return -1;
+}
+
+static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
+{
+ struct evsel *evsel;
+ char *name;
+ int err;
+
+ err = get_cmd_arg(cmd_data,
+ enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
+ sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
+ &name);
+ if (err < 0) {
+ pr_info("failed: wrong command\n");
+ return -1;
+ }
+
+ if (err) {
+ evsel = evlist__find_evsel_by_str(evlist, name);
+ if (evsel) {
+ if (enable)
+ evlist__enable_evsel(evlist, name);
+ else
+ evlist__disable_evsel(evlist, name);
+ pr_info("Event %s %s\n", evsel->name,
+ enable ? "enabled" : "disabled");
+ } else {
+ pr_info("failed: can't find '%s' event\n", name);
+ }
+ } else {
+ if (enable) {
+ evlist__enable(evlist);
+ pr_info(EVLIST_ENABLED_MSG);
+ } else {
+ evlist__disable(evlist);
+ pr_info(EVLIST_DISABLED_MSG);
+ }
+ }
+
+ return 0;
+}
+
+static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
+{
+ struct perf_attr_details details = { .verbose = false, };
+ struct evsel *evsel;
+ char *arg;
+ int err;
+
+ err = get_cmd_arg(cmd_data,
+ sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
+ &arg);
+ if (err < 0) {
+ pr_info("failed: wrong command\n");
+ return -1;
+ }
+
+ if (err) {
+ if (!strcmp(arg, "-v")) {
+ details.verbose = true;
+ } else if (!strcmp(arg, "-g")) {
+ details.event_group = true;
+ } else if (!strcmp(arg, "-F")) {
+ details.freq = true;
+ } else {
+ pr_info("failed: wrong command\n");
+ return -1;
+ }
+ }
+
+ evlist__for_each_entry(evlist, evsel)
+ evsel__fprintf(evsel, &details, stderr);
+
+ return 0;
+}
+
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
int err = 0;
@@ -1973,12 +2088,16 @@ int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
if (err > 0) {
switch (*cmd) {
case EVLIST_CTL_CMD_ENABLE:
- evlist__enable(evlist);
- break;
case EVLIST_CTL_CMD_DISABLE:
- evlist__disable(evlist);
+ err = evlist__ctlfd_enable(evlist, cmd_data,
+ *cmd == EVLIST_CTL_CMD_ENABLE);
+ break;
+ case EVLIST_CTL_CMD_EVLIST:
+ err = evlist__ctlfd_list(evlist, cmd_data);
break;
case EVLIST_CTL_CMD_SNAPSHOT:
+ case EVLIST_CTL_CMD_STOP:
+ case EVLIST_CTL_CMD_PING:
break;
case EVLIST_CTL_CMD_ACK:
case EVLIST_CTL_CMD_UNSUPPORTED:
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1aae75895dea..b695ffaae519 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -110,6 +110,8 @@ int __evlist__add_default_attrs(struct evlist *evlist,
#define evlist__add_default_attrs(evlist, array) \
__evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
+int arch_evlist__add_default_attrs(struct evlist *evlist);
+
int evlist__add_dummy(struct evlist *evlist);
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
@@ -142,6 +144,10 @@ struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char
int evlist__add_pollfd(struct evlist *evlist, int fd);
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);
+#ifdef HAVE_EVENTFD_SUPPORT
+int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd);
+#endif
+
int evlist__poll(struct evlist *evlist, int timeout);
struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id);
@@ -330,6 +336,9 @@ struct evsel *evlist__reset_weak_group(struct evlist *evlist, struct evsel *evse
#define EVLIST_CTL_CMD_DISABLE_TAG "disable"
#define EVLIST_CTL_CMD_ACK_TAG "ack\n"
#define EVLIST_CTL_CMD_SNAPSHOT_TAG "snapshot"
+#define EVLIST_CTL_CMD_EVLIST_TAG "evlist"
+#define EVLIST_CTL_CMD_STOP_TAG "stop"
+#define EVLIST_CTL_CMD_PING_TAG "ping"
#define EVLIST_CTL_CMD_MAX_LEN 64
@@ -339,6 +348,9 @@ enum evlist_ctl_cmd {
EVLIST_CTL_CMD_DISABLE,
EVLIST_CTL_CMD_ACK,
EVLIST_CTL_CMD_SNAPSHOT,
+ EVLIST_CTL_CMD_EVLIST,
+ EVLIST_CTL_CMD_STOP,
+ EVLIST_CTL_CMD_PING,
};
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close);
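
The new EVLIST_CTL_CMD_EVLIST/STOP/PING tags extend the control-fd protocol already used with perf record/stat --control. A controller-side sketch that asks a running session for its event list and waits for the "ack\n" reply; the fifo names and the --control spelling are assumptions for illustration:

#include <fcntl.h>
#include <unistd.h>

/* Sketch: external controller writing to the fifos handed to perf via
 * something like --control=fifo:perf.ctl,perf.ack (paths are made up). */
static void request_evlist(void)
{
	char ack[8] = { 0 };
	int ctl = open("perf.ctl", O_WRONLY);
	int ackfd = open("perf.ack", O_RDONLY);

	if (ctl >= 0 && ackfd >= 0 &&
	    write(ctl, "evlist -v", 9) == 9)		/* EVLIST_CTL_CMD_EVLIST_TAG + option */
		read(ackfd, ack, sizeof(ack) - 1);	/* perf answers with "ack\n" */

	if (ctl >= 0)
		close(ctl);
	if (ackfd >= 0)
		close(ackfd);
}
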
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c26ea82220bd..1bf76864c4f2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -25,6 +25,7 @@
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
+#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
@@ -247,6 +248,7 @@ void evsel__init(struct evsel *evsel,
evsel->bpf_obj = NULL;
evsel->bpf_fd = -1;
INIT_LIST_HEAD(&evsel->config_terms);
+ INIT_LIST_HEAD(&evsel->bpf_counter_list);
perf_evsel__object.init(evsel);
evsel->sample_size = __evsel__sample_size(attr->sample_type);
evsel__calc_id_pos(evsel);
@@ -1012,6 +1014,11 @@ struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evs
return found_term;
}
+void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
+{
+ evsel__set_sample_bit(evsel, WEIGHT);
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1166,12 +1173,14 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
}
if (opts->sample_weight)
- evsel__set_sample_bit(evsel, WEIGHT);
+ arch_evsel__set_sample_weight(evsel);
+
+ attr->task = track;
+ attr->mmap = track;
+ attr->mmap2 = track && !perf_missing_features.mmap2;
+ attr->comm = track;
+ attr->build_id = track && opts->build_id;
- attr->task = track;
- attr->mmap = track;
- attr->mmap2 = track && !perf_missing_features.mmap2;
- attr->comm = track;
/*
* ksymbol is tracked separately with text poke because it needs to be
* system wide and enabled immediately.
@@ -1191,6 +1200,9 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
if (opts->sample_data_page_size)
evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
+ if (opts->sample_code_page_size)
+ evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
+
if (opts->record_switch_events)
attr->context_switch = track;
@@ -1366,6 +1378,7 @@ void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
+ bpf_counter__destroy(evsel);
evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(&evsel->core);
@@ -1735,6 +1748,10 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
}
fallback_missing_features:
+ if (perf_missing_features.weight_struct) {
+ evsel__set_sample_bit(evsel, WEIGHT);
+ evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
+ }
if (perf_missing_features.clockid_wrong)
evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
if (perf_missing_features.clockid) {
@@ -1781,6 +1798,8 @@ retry_open:
FD(evsel, cpu, thread) = fd;
+ bpf_counter__install_pe(evsel, cpu, fd);
+
if (unlikely(test_attr__enabled)) {
test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
fd, group_fd, flags);
@@ -1873,7 +1892,17 @@ try_fallback:
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.data_page_size &&
+ if (!perf_missing_features.weight_struct &&
+ (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
+ perf_missing_features.weight_struct = true;
+ pr_debug2("switching off weight struct support\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.code_page_size &&
+ (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
+ perf_missing_features.code_page_size = true;
+ pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
+ goto out_close;
+ } else if (!perf_missing_features.data_page_size &&
(evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
perf_missing_features.data_page_size = true;
pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
@@ -2076,6 +2105,13 @@ perf_event__check_size(union perf_event *event, unsigned int sample_size)
return 0;
}
+void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
+ const __u64 *array,
+ u64 type __maybe_unused)
+{
+ data->weight = *array;
+}
+
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -2316,9 +2352,9 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
}
}
- if (type & PERF_SAMPLE_WEIGHT) {
+ if (type & PERF_SAMPLE_WEIGHT_TYPE) {
OVERFLOW_CHECK_u64(array);
- data->weight = *array;
+ arch_perf_parse_sample_weight(data, array, type);
array++;
}
@@ -2369,6 +2405,12 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array++;
}
+ data->code_page_size = 0;
+ if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
+ data->code_page_size = *array;
+ array++;
+ }
+
if (type & PERF_SAMPLE_AUX) {
OVERFLOW_CHECK_u64(array);
sz = *array++;
@@ -2678,6 +2720,8 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
"We found oprofile daemon running, please stop it and try again.");
break;
case EINVAL:
+ if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
+ return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
@@ -2689,6 +2733,9 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
if (perf_missing_features.aux_output)
return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
break;
+ case ENODATA:
+ return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
+ "Please add an auxiliary event in front of the load latency event.");
default:
break;
}
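The weak arch_perf_parse_sample_weight() above keeps the old behaviour of copying the raw 64-bit value; an architecture that sets PERF_SAMPLE_WEIGHT_STRUCT is expected to split that value into fields instead. The standalone sketch below shows the split used elsewhere in this series on x86 (low 32 bits weight, next 16 bits instruction latency); the field layout is stated here as an assumption, and the sample value is made up.

#include <stdint.h>
#include <stdio.h>

/*
 * Decode a PERF_SAMPLE_WEIGHT_STRUCT value, assuming little-endian layout:
 * bits 31:0 = weight, bits 47:32 = instruction latency.
 */
static void parse_weight_struct(uint64_t raw, uint32_t *weight, uint16_t *ins_lat)
{
	*weight = raw & 0xffffffffULL;
	*ins_lat = (raw >> 32) & 0xffff;
}

int main(void)
{
	uint64_t raw = (17ULL << 32) | 250;	/* hypothetical sample value */
	uint32_t weight;
	uint16_t ins_lat;

	parse_weight_struct(raw, &weight, &ins_lat);
	printf("weight=%u ins_lat=%u\n", (unsigned)weight, (unsigned)ins_lat);
	return 0;
}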
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index cd1d8dd43199..4e8e49fb7e9d 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -17,6 +17,8 @@ struct cgroup;
struct perf_counts;
struct perf_stat_evsel;
union perf_event;
+struct bpf_counter_ops;
+struct target;
typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
@@ -127,6 +129,8 @@ struct evsel {
* See also evsel__has_callchain().
*/
__u64 synth_sample_type;
+ struct list_head bpf_counter_list;
+ struct bpf_counter_ops *bpf_counter_ops;
};
struct perf_missing_features {
@@ -145,6 +149,8 @@ struct perf_missing_features {
bool branch_hw_idx;
bool cgroup;
bool data_page_size;
+ bool code_page_size;
+ bool weight_struct;
};
extern struct perf_missing_features perf_missing_features;
@@ -239,6 +245,8 @@ void __evsel__reset_sample_bit(struct evsel *evsel, enum perf_event_sample_forma
void evsel__set_sample_id(struct evsel *evsel, bool use_sample_identifier);
+void arch_evsel__set_sample_weight(struct evsel *evsel);
+
int evsel__set_filter(struct evsel *evsel, const char *filter);
int evsel__append_tp_filter(struct evsel *evsel, const char *filter);
int evsel__append_addr_filter(struct evsel *evsel, const char *filter);
@@ -424,4 +432,5 @@ static inline bool evsel__is_dummy_event(struct evsel *evsel)
struct perf_env *evsel__env(struct evsel *evsel);
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
+
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index fb498a723a00..bfedd7b23521 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -100,6 +100,7 @@ out:
return ++printed;
}
+#ifndef PYTHON_PERF
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
unsigned int print_opts, struct callchain_cursor *cursor,
struct strlist *bt_stop_list, FILE *fp)
@@ -239,3 +240,4 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
return printed;
}
+#endif /* PYTHON_PERF */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index c4ed3dc2c8f4..4fe9e2a54346 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3806,7 +3806,7 @@ int perf_session__read_header(struct perf_session *session)
* check for the pipe header regardless of source.
*/
err = perf_header__read_pipe(session);
- if (!err || (err && perf_data__is_pipe(data))) {
+ if (!err || perf_data__is_pipe(data)) {
data->is_pipe = true;
return err;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a08fb9ea411b..c82f5fc26af8 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -208,10 +208,14 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
+ hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
+ hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
if (symbol_conf.nanosecs)
hists__new_col_len(hists, HISTC_TIME, 16);
else
hists__new_col_len(hists, HISTC_TIME, 12);
+ hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -285,12 +289,13 @@ static long hist_time(unsigned long htime)
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
- u64 weight)
+ u64 weight, u64 ins_lat)
{
he_stat->period += period;
he_stat->weight += weight;
he_stat->nr_events += 1;
+ he_stat->ins_lat += ins_lat;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
@@ -302,6 +307,7 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->period_guest_us += src->period_guest_us;
dest->nr_events += src->nr_events;
dest->weight += src->weight;
+ dest->ins_lat += src->ins_lat;
}
static void he_stat__decay(struct he_stat *he_stat)
@@ -590,6 +596,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
int64_t cmp;
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
+ u64 ins_lat = entry->stat.ins_lat;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
@@ -608,11 +615,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period, weight);
+ he_stat__add_period(&he->stat, period, weight, ins_lat);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
- he_stat__add_period(he->stat_acc, period, weight);
+ he_stat__add_period(he->stat_acc, period, weight, ins_lat);
/*
* This mem info was allocated from sample__resolve_mem
@@ -718,10 +725,12 @@ __hists__add_entry(struct hists *hists,
.cpumode = al->cpumode,
.ip = al->addr,
.level = al->level,
+ .code_page_size = sample->code_page_size,
.stat = {
.nr_events = 1,
.period = sample->period,
.weight = sample->weight,
+ .ins_lat = sample->ins_lat,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 14f66330923d..3c537232294b 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -53,6 +53,7 @@ enum hist_column {
HISTC_DSO_TO,
HISTC_LOCAL_WEIGHT,
HISTC_GLOBAL_WEIGHT,
+ HISTC_CODE_PAGE_SIZE,
HISTC_MEM_DADDR_SYMBOL,
HISTC_MEM_DADDR_DSO,
HISTC_MEM_PHYS_DADDR,
@@ -71,6 +72,9 @@ enum hist_column {
HISTC_SYM_SIZE,
HISTC_DSO_SIZE,
HISTC_SYMBOL_IPC,
+ HISTC_MEM_BLOCKED,
+ HISTC_LOCAL_INS_LAT,
+ HISTC_GLOBAL_INS_LAT,
HISTC_NR_COLS, /* Last entry */
};
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 697513f35154..8c59677bee13 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -24,6 +24,13 @@
#include "intel-pt-decoder.h"
#include "intel-pt-log.h"
+#define BITULL(x) (1ULL << (x))
+
+/* IA32_RTIT_CTL MSR bits */
+#define INTEL_PT_CYC_ENABLE BITULL(1)
+#define INTEL_PT_CYC_THRESHOLD (BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
+#define INTEL_PT_CYC_THRESHOLD_SHIFT 19
+
#define INTEL_PT_BLK_SIZE 1024
#define BIT63 (((uint64_t)1 << 63))
@@ -55,6 +62,7 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_TIP_PGD,
INTEL_PT_STATE_FUP,
INTEL_PT_STATE_FUP_NO_TIP,
+ INTEL_PT_STATE_FUP_IN_PSB,
INTEL_PT_STATE_RESAMPLE,
};
@@ -73,6 +81,7 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
case INTEL_PT_STATE_TIP_PGD:
case INTEL_PT_STATE_FUP:
case INTEL_PT_STATE_FUP_NO_TIP:
+ case INTEL_PT_STATE_FUP_IN_PSB:
return false;
default:
return true;
@@ -112,13 +121,14 @@ struct intel_pt_decoder {
bool have_last_ip;
bool in_psb;
bool hop;
- bool hop_psb_fup;
bool leap;
+ bool nr;
+ bool next_nr;
enum intel_pt_param_flags flags;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
- uint64_t cr3;
+ uint64_t pip_payload;
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
@@ -167,6 +177,8 @@ struct intel_pt_decoder {
uint64_t sample_tot_cyc_cnt;
uint64_t base_cyc_cnt;
uint64_t cyc_cnt_timestamp;
+ uint64_t ctl;
+ uint64_t cyc_threshold;
double tsc_to_cyc;
bool continuous_period;
bool overflow;
@@ -189,6 +201,7 @@ struct intel_pt_decoder {
int no_progress;
int stuck_ip_prd;
int stuck_ip_cnt;
+ uint64_t psb_ip;
const unsigned char *next_buf;
size_t next_len;
unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
@@ -204,6 +217,14 @@ static uint64_t intel_pt_lower_power_of_2(uint64_t x)
return x << i;
}
+static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
+{
+ if (!(ctl & INTEL_PT_CYC_ENABLE))
+ return 0;
+
+ return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
+}
+
static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
{
if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
@@ -245,12 +266,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->flags = params->flags;
+ decoder->ctl = params->ctl;
decoder->period = params->period;
decoder->period_type = params->period_type;
decoder->max_non_turbo_ratio = params->max_non_turbo_ratio;
decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
+ decoder->cyc_threshold = intel_pt_cyc_threshold(decoder->ctl);
+
intel_pt_setup_period(decoder);
decoder->mtc_shift = params->mtc_period;
@@ -481,6 +505,28 @@ static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
}
+static inline void intel_pt_update_pip(struct intel_pt_decoder *decoder)
+{
+ decoder->pip_payload = decoder->packet.payload;
+}
+
+static inline void intel_pt_update_nr(struct intel_pt_decoder *decoder)
+{
+ decoder->next_nr = decoder->pip_payload & 1;
+}
+
+static inline void intel_pt_set_nr(struct intel_pt_decoder *decoder)
+{
+ decoder->nr = decoder->pip_payload & 1;
+ decoder->next_nr = decoder->nr;
+}
+
+static inline void intel_pt_set_pip(struct intel_pt_decoder *decoder)
+{
+ intel_pt_update_pip(decoder);
+ intel_pt_set_nr(decoder);
+}
+
static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
{
intel_pt_clear_tx_flags(decoder);
@@ -1218,6 +1264,7 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
decoder->continuous_period = false;
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->state.type |= INTEL_PT_TRACE_END;
+ intel_pt_update_nr(decoder);
return 0;
}
if (err == INTEL_PT_RETURN)
@@ -1225,6 +1272,8 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
if (err)
return err;
+ intel_pt_update_nr(decoder);
+
if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
decoder->pge = false;
@@ -1337,6 +1386,7 @@ static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = decoder->last_ip;
decoder->ip = decoder->last_ip;
+ intel_pt_update_nr(decoder);
return 0;
}
@@ -1461,6 +1511,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
+ intel_pt_set_nr(decoder);
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -1735,18 +1786,14 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PIP:
- decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+ intel_pt_set_pip(decoder);
break;
case INTEL_PT_FUP:
decoder->pge = true;
if (decoder->packet.count) {
intel_pt_set_last_ip(decoder);
- if (decoder->hop) {
- /* Act on FUP at PSBEND */
- decoder->ip = decoder->last_ip;
- decoder->hop_psb_fup = true;
- }
+ decoder->psb_ip = decoder->last_ip;
}
break;
@@ -1761,6 +1808,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_CYC:
+ intel_pt_calc_cyc_timestamp(decoder);
+ break;
+
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
@@ -1835,6 +1885,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
decoder->pge = false;
decoder->continuous_period = false;
decoder->state.type |= INTEL_PT_TRACE_END;
+ intel_pt_update_nr(decoder);
return 0;
case INTEL_PT_TIP_PGE:
@@ -1850,6 +1901,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
}
decoder->state.type |= INTEL_PT_TRACE_BEGIN;
intel_pt_mtc_cyc_cnt_pge(decoder);
+ intel_pt_set_nr(decoder);
return 0;
case INTEL_PT_TIP:
@@ -1860,10 +1912,11 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
intel_pt_set_ip(decoder);
decoder->state.to_ip = decoder->ip;
}
+ intel_pt_update_nr(decoder);
return 0;
case INTEL_PT_PIP:
- decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+ intel_pt_update_pip(decoder);
break;
case INTEL_PT_MTC:
@@ -1922,21 +1975,27 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
return HOP_IGNORE;
case INTEL_PT_TIP_PGD:
- if (!decoder->packet.count)
+ if (!decoder->packet.count) {
+ intel_pt_set_nr(decoder);
return HOP_IGNORE;
+ }
intel_pt_set_ip(decoder);
decoder->state.type |= INTEL_PT_TRACE_END;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
+ intel_pt_update_nr(decoder);
return HOP_RETURN;
case INTEL_PT_TIP:
- if (!decoder->packet.count)
+ if (!decoder->packet.count) {
+ intel_pt_set_nr(decoder);
return HOP_IGNORE;
+ }
intel_pt_set_ip(decoder);
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
+ intel_pt_update_nr(decoder);
return HOP_RETURN;
case INTEL_PT_FUP:
@@ -1959,26 +2018,23 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
return HOP_RETURN;
case INTEL_PT_PSB:
+ decoder->state.psb_offset = decoder->pos;
+ decoder->psb_ip = 0;
decoder->last_ip = 0;
decoder->have_last_ip = true;
- decoder->hop_psb_fup = false;
*err = intel_pt_walk_psbend(decoder);
if (*err == -EAGAIN)
return HOP_AGAIN;
if (*err)
return HOP_RETURN;
- if (decoder->hop_psb_fup) {
- decoder->hop_psb_fup = false;
- decoder->state.type = INTEL_PT_INSTRUCTION;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
- return HOP_RETURN;
+ decoder->state.type = INTEL_PT_PSB_EVT;
+ if (decoder->psb_ip) {
+ decoder->state.type |= INTEL_PT_INSTRUCTION;
+ decoder->ip = decoder->psb_ip;
}
- if (decoder->cbr != decoder->cbr_seen) {
- decoder->state.type = 0;
- return HOP_RETURN;
- }
- return HOP_IGNORE;
+ decoder->state.from_ip = decoder->psb_ip;
+ decoder->state.to_ip = 0;
+ return HOP_RETURN;
case INTEL_PT_BAD:
case INTEL_PT_PAD:
@@ -2012,8 +2068,151 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
}
}
+struct intel_pt_psb_info {
+ struct intel_pt_pkt fup_packet;
+ bool fup;
+ int after_psbend;
+};
+
+/* Lookahead and get the FUP packet from PSB+ */
+static int intel_pt_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_psb_info *data = pkt_info->data;
+
+ switch (pkt_info->packet.type) {
+ case INTEL_PT_PAD:
+ case INTEL_PT_MNT:
+ case INTEL_PT_TSC:
+ case INTEL_PT_TMA:
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MTC:
+ case INTEL_PT_CYC:
+ case INTEL_PT_VMCS:
+ case INTEL_PT_CBR:
+ case INTEL_PT_PIP:
+ if (data->after_psbend) {
+ data->after_psbend -= 1;
+ if (!data->after_psbend)
+ return 1;
+ }
+ break;
+
+ case INTEL_PT_FUP:
+ if (data->after_psbend)
+ return 1;
+ if (data->fup || pkt_info->packet.count == 0)
+ return 1;
+ data->fup_packet = pkt_info->packet;
+ data->fup = true;
+ break;
+
+ case INTEL_PT_PSBEND:
+ if (!data->fup)
+ return 1;
+ /* Keep going to check for a TIP.PGE */
+ data->after_psbend = 6;
+ break;
+
+ case INTEL_PT_TIP_PGE:
+ /* Ignore FUP in PSB+ if followed by TIP.PGE */
+ if (data->after_psbend)
+ data->fup = false;
+ return 1;
+
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BBP:
+ case INTEL_PT_BIP:
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ if (data->after_psbend) {
+ data->after_psbend -= 1;
+ if (!data->after_psbend)
+ return 1;
+ break;
+ }
+ return 1;
+
+ case INTEL_PT_OVF:
+ case INTEL_PT_BAD:
+ case INTEL_PT_TNT:
+ case INTEL_PT_TIP_PGD:
+ case INTEL_PT_TIP:
+ case INTEL_PT_PSB:
+ case INTEL_PT_TRACESTOP:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int intel_pt_psb(struct intel_pt_decoder *decoder)
+{
+ int err;
+
+ decoder->last_ip = 0;
+ decoder->psb_ip = 0;
+ decoder->have_last_ip = true;
+ intel_pt_clear_stack(&decoder->stack);
+ err = intel_pt_walk_psbend(decoder);
+ if (err)
+ return err;
+ decoder->state.type = INTEL_PT_PSB_EVT;
+ decoder->state.from_ip = decoder->psb_ip;
+ decoder->state.to_ip = 0;
+ return 0;
+}
+
+static int intel_pt_fup_in_psb(struct intel_pt_decoder *decoder)
+{
+ int err;
+
+ if (decoder->ip != decoder->last_ip) {
+ err = intel_pt_walk_fup(decoder);
+ if (!err || err != -EAGAIN)
+ return err;
+ }
+
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ err = intel_pt_psb(decoder);
+ if (err) {
+ decoder->pkt_state = INTEL_PT_STATE_ERR3;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
+{
+ struct intel_pt_psb_info data = { .fup = false };
+
+ if (!decoder->branch_enable || !decoder->pge)
+ return false;
+
+ intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
+ if (!data.fup)
+ return false;
+
+ decoder->packet = data.fup_packet;
+ intel_pt_set_last_ip(decoder);
+ decoder->pkt_state = INTEL_PT_STATE_FUP_IN_PSB;
+
+ *err = intel_pt_fup_in_psb(decoder);
+
+ return true;
+}
+
static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
{
+ int last_packet_type = INTEL_PT_PAD;
bool no_tip = false;
int err;
@@ -2022,6 +2221,12 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
if (err)
return err;
next:
+ if (decoder->cyc_threshold) {
+ if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
+ decoder->sample_cyc = false;
+ last_packet_type = decoder->packet.type;
+ }
+
if (decoder->hop) {
switch (intel_pt_hop_trace(decoder, &no_tip, &err)) {
case HOP_IGNORE:
@@ -2055,6 +2260,7 @@ next:
case INTEL_PT_TIP_PGE: {
decoder->pge = true;
intel_pt_mtc_cyc_cnt_pge(decoder);
+ intel_pt_set_nr(decoder);
if (decoder->packet.count == 0) {
intel_pt_log_at("Skipping zero TIP.PGE",
decoder->pos);
@@ -2120,27 +2326,17 @@ next:
break;
case INTEL_PT_PSB:
- decoder->last_ip = 0;
- decoder->have_last_ip = true;
- intel_pt_clear_stack(&decoder->stack);
- err = intel_pt_walk_psbend(decoder);
+ decoder->state.psb_offset = decoder->pos;
+ decoder->psb_ip = 0;
+ if (intel_pt_psb_with_fup(decoder, &err))
+ return err;
+ err = intel_pt_psb(decoder);
if (err == -EAGAIN)
goto next;
- if (err)
- return err;
- /*
- * PSB+ CBR will not have changed but cater for the
- * possibility of another CBR change that gets caught up
- * in the PSB+.
- */
- if (decoder->cbr != decoder->cbr_seen) {
- decoder->state.type = 0;
- return 0;
- }
- break;
+ return err;
case INTEL_PT_PIP:
- decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+ intel_pt_update_pip(decoder);
break;
case INTEL_PT_MTC:
@@ -2351,6 +2547,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
uint64_t current_ip = decoder->ip;
intel_pt_set_ip(decoder);
+ decoder->psb_ip = decoder->ip;
if (current_ip)
intel_pt_log_to("Setting IP",
decoder->ip);
@@ -2378,7 +2575,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PIP:
- decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+ intel_pt_set_pip(decoder);
break;
case INTEL_PT_MODE_EXEC:
@@ -2497,7 +2694,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PIP:
- decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+ intel_pt_set_pip(decoder);
break;
case INTEL_PT_MODE_EXEC:
@@ -2522,18 +2719,18 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PSB:
+ decoder->state.psb_offset = decoder->pos;
+ decoder->psb_ip = 0;
decoder->last_ip = 0;
decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
- if (decoder->ip) {
- /* Do not have a sample */
- decoder->state.type = 0;
- return 0;
- }
- break;
+ decoder->state.type = INTEL_PT_PSB_EVT;
+ decoder->state.from_ip = decoder->psb_ip;
+ decoder->state.to_ip = 0;
+ return 0;
case INTEL_PT_TNT:
case INTEL_PT_PSBEND:
@@ -2577,7 +2774,7 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
- if (err)
+ if (err || ((decoder->state.type & INTEL_PT_PSB_EVT) && !decoder->ip))
return err;
/* In hop mode, resample to get the to_ip as an "instruction" sample */
@@ -2689,10 +2886,10 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
decoder->continuous_period = false;
decoder->have_last_ip = false;
decoder->last_ip = 0;
+ decoder->psb_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
-leap:
err = intel_pt_scan_for_psb(decoder);
if (err)
return err;
@@ -2704,8 +2901,11 @@ leap:
if (err)
return err;
+ decoder->state.type = INTEL_PT_PSB_EVT; /* Only PSB sample */
+ decoder->state.from_ip = decoder->psb_ip;
+ decoder->state.to_ip = 0;
+
if (decoder->ip) {
- decoder->state.type = 0; /* Do not have a sample */
/*
* In hop mode, resample to get the PSB FUP ip as an
* "instruction" sample.
@@ -2714,14 +2914,6 @@ leap:
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- } else if (decoder->leap) {
- /*
- * In leap mode, only PSB+ is decoded, so keeping leaping to the
- * next PSB until there is an ip.
- */
- goto leap;
- } else {
- return intel_pt_sync_ip(decoder);
}
return 0;
@@ -2783,6 +2975,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
if (err == -EAGAIN)
err = intel_pt_walk_trace(decoder);
break;
+ case INTEL_PT_STATE_FUP_IN_PSB:
+ err = intel_pt_fup_in_psb(decoder);
+ break;
case INTEL_PT_STATE_RESAMPLE:
err = intel_pt_resample(decoder);
break;
@@ -2797,6 +2992,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
decoder->state.from_ip = decoder->ip;
intel_pt_update_sample_time(decoder);
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
+ intel_pt_set_nr(decoder);
} else {
decoder->state.err = 0;
if (decoder->cbr != decoder->cbr_seen) {
@@ -2811,14 +3007,30 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
}
if (intel_pt_sample_time(decoder->pkt_state)) {
intel_pt_update_sample_time(decoder);
- if (decoder->sample_cyc)
+ if (decoder->sample_cyc) {
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
+ decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
+ decoder->sample_cyc = false;
+ }
}
+ /*
+ * When using only TSC/MTC to compute cycles, IPC can be
+ * sampled as soon as the cycle count changes.
+ */
+ if (!decoder->have_cyc)
+ decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
}
+ /* Let PSB event always have TSC timestamp */
+ if ((decoder->state.type & INTEL_PT_PSB_EVT) && decoder->tsc_timestamp)
+ decoder->sample_timestamp = decoder->tsc_timestamp;
+
+ decoder->state.from_nr = decoder->nr;
+ decoder->state.to_nr = decoder->next_nr;
+ decoder->nr = decoder->next_nr;
+
decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
- decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
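A standalone restatement of the CYC-threshold handling added above: the CycThresh field (bits 22:19 of IA32_RTIT_CTL) is only honoured when CYC packets are enabled (bit 1), and a non-zero result is what later restricts IPC sampling to CYC packet boundaries. The ctl value in main() is made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define BITULL(x)			(1ULL << (x))
#define INTEL_PT_CYC_ENABLE		BITULL(1)
#define INTEL_PT_CYC_THRESHOLD		(BITULL(22) | BITULL(21) | BITULL(20) | BITULL(19))
#define INTEL_PT_CYC_THRESHOLD_SHIFT	19

static uint64_t cyc_threshold(uint64_t ctl)
{
	if (!(ctl & INTEL_PT_CYC_ENABLE))
		return 0;

	return (ctl & INTEL_PT_CYC_THRESHOLD) >> INTEL_PT_CYC_THRESHOLD_SHIFT;
}

int main(void)
{
	/* Hypothetical IA32_RTIT_CTL value: CYCEn set, CycThresh = 3. */
	uint64_t ctl = INTEL_PT_CYC_ENABLE | (3ULL << INTEL_PT_CYC_THRESHOLD_SHIFT);

	printf("cyc threshold = %llu\n", (unsigned long long)cyc_threshold(ctl));
	return 0;
}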
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 8645fc265481..d9e62a7f6f0e 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -17,6 +17,7 @@
#define INTEL_PT_ABORT_TX (1 << 1)
#define INTEL_PT_ASYNC (1 << 2)
#define INTEL_PT_FUP_IP (1 << 3)
+#define INTEL_PT_SAMPLE_IPC (1 << 4)
enum intel_pt_sample_type {
INTEL_PT_BRANCH = 1 << 0,
@@ -31,6 +32,7 @@ enum intel_pt_sample_type {
INTEL_PT_TRACE_BEGIN = 1 << 9,
INTEL_PT_TRACE_END = 1 << 10,
INTEL_PT_BLK_ITEMS = 1 << 11,
+ INTEL_PT_PSB_EVT = 1 << 12,
};
enum intel_pt_period_type {
@@ -199,10 +201,11 @@ struct intel_pt_blk_items {
struct intel_pt_state {
enum intel_pt_sample_type type;
+ bool from_nr;
+ bool to_nr;
int err;
uint64_t from_ip;
uint64_t to_ip;
- uint64_t cr3;
uint64_t tot_insn_cnt;
uint64_t tot_cyc_cnt;
uint64_t timestamp;
@@ -213,6 +216,7 @@ struct intel_pt_state {
uint64_t pwre_payload;
uint64_t pwrx_payload;
uint64_t cbr_payload;
+ uint64_t psb_offset;
uint32_t cbr;
uint32_t flags;
enum intel_pt_insn_op insn_op;
@@ -243,6 +247,7 @@ struct intel_pt_params {
void *data;
bool return_compression;
bool branch_enable;
+ uint64_t ctl;
uint64_t period;
enum intel_pt_period_type period_type;
unsigned max_non_turbo_ratio;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index fb8a3558d3d5..2f6cc7eea251 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -43,6 +43,17 @@ static void intel_pt_insn_decoder(struct insn *insn,
switch (insn->opcode.bytes[0]) {
case 0xf:
switch (insn->opcode.bytes[1]) {
+ case 0x01:
+ switch (insn->modrm.bytes[0]) {
+ case 0xc2: /* vmlaunch */
+ case 0xc3: /* vmresume */
+ op = INTEL_PT_OP_VMENTRY;
+ branch = INTEL_PT_BR_INDIRECT;
+ break;
+ default:
+ break;
+ }
+ break;
case 0x05: /* syscall */
case 0x34: /* sysenter */
op = INTEL_PT_OP_SYSCALL;
@@ -213,6 +224,7 @@ const char *branch_name[] = {
[INTEL_PT_OP_INT] = "Int",
[INTEL_PT_OP_SYSCALL] = "Syscall",
[INTEL_PT_OP_SYSRET] = "Sysret",
+ [INTEL_PT_OP_VMENTRY] = "VMentry",
};
const char *intel_pt_insn_name(enum intel_pt_insn_op op)
@@ -267,6 +279,9 @@ int intel_pt_insn_type(enum intel_pt_insn_op op)
case INTEL_PT_OP_SYSRET:
return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
PERF_IP_FLAG_SYSCALLRET;
+ case INTEL_PT_OP_VMENTRY:
+ return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_VMENTRY;
default:
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
index 95a1eb0141ff..c2861cfdd768 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
@@ -24,6 +24,7 @@ enum intel_pt_insn_op {
INTEL_PT_OP_INT,
INTEL_PT_OP_SYSCALL,
INTEL_PT_OP_SYSRET,
+ INTEL_PT_OP_VMENTRY,
};
enum intel_pt_insn_branch {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 4ce109993e74..02a3395d6ce3 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -16,8 +16,6 @@
#define BIT63 ((uint64_t)1 << 63)
-#define NR_FLAG BIT63
-
#if __BYTE_ORDER == __BIG_ENDIAN
#define le16_to_cpu bswap_16
#define le32_to_cpu bswap_32
@@ -106,9 +104,7 @@ static int intel_pt_get_pip(const unsigned char *buf, size_t len,
packet->type = INTEL_PT_PIP;
memcpy_le64(&payload, buf + 2, 6);
- packet->payload = payload >> 1;
- if (payload & 1)
- packet->payload |= NR_FLAG;
+ packet->payload = payload;
return 8;
}
@@ -719,10 +715,10 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
name, (unsigned)(payload >> 1) & 1,
(unsigned)payload & 1);
case INTEL_PT_PIP:
- nr = packet->payload & NR_FLAG ? 1 : 0;
- payload &= ~NR_FLAG;
+ nr = packet->payload & INTEL_PT_VMX_NR_FLAG ? 1 : 0;
+ payload &= ~INTEL_PT_VMX_NR_FLAG;
ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
- name, payload, nr);
+ name, payload >> 1, nr);
return ret;
case INTEL_PT_PTWRITE:
return snprintf(buf, buf_len, "%s 0x%llx IP:0", name, payload);
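With the change above the decoder keeps the PIP payload raw: bit 0 carries the VMX non-root (NR) flag and the CR3-derived value sits above it, which is why intel_pt_pkt_desc() now prints payload >> 1. A small standalone decode of a made-up payload:

#include <stdint.h>
#include <stdio.h>

#define INTEL_PT_VMX_NR_FLAG 1

int main(void)
{
	uint64_t payload = 0x12345001ULL;	/* hypothetical raw PIP payload */
	int nr = payload & INTEL_PT_VMX_NR_FLAG ? 1 : 0;
	uint64_t val = (payload & ~(uint64_t)INTEL_PT_VMX_NR_FLAG) >> 1;

	/* Mirrors the "%s 0x%llx (NR=%d)" output of intel_pt_pkt_desc(). */
	printf("PIP 0x%llx (NR=%d)\n", (unsigned long long)val, nr);
	return 0;
}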
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
index 17ca9b56d72f..996090cb84f6 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
@@ -21,6 +21,8 @@
#define INTEL_PT_PKT_MAX_SZ 16
+#define INTEL_PT_VMX_NR_FLAG 1
+
enum intel_pt_pkt_type {
INTEL_PT_BAD,
INTEL_PT_PAD,
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 60214de42f31..f6e28ac231b7 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -108,6 +108,7 @@ struct intel_pt {
u64 exstop_id;
u64 pwrx_id;
u64 cbr_id;
+ u64 psb_id;
bool sample_pebs;
struct evsel *pebs_evsel;
@@ -162,6 +163,9 @@ struct intel_pt_queue {
int switch_state;
pid_t next_tid;
struct thread *thread;
+ struct machine *guest_machine;
+ struct thread *unknown_guest_thread;
+ pid_t guest_machine_pid;
bool exclude_kernel;
bool have_sample;
u64 time;
@@ -549,13 +553,59 @@ static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
auxtrace_cache__remove(dso->auxtrace_cache, offset);
}
-static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
+static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
- return ip >= pt->kernel_start ?
+ /* Assumes 64-bit kernel */
+ return ip & (1ULL << 63);
+}
+
+static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
+{
+ if (nr) {
+ return intel_pt_guest_kernel_ip(ip) ?
+ PERF_RECORD_MISC_GUEST_KERNEL :
+ PERF_RECORD_MISC_GUEST_USER;
+ }
+
+ return ip >= ptq->pt->kernel_start ?
PERF_RECORD_MISC_KERNEL :
PERF_RECORD_MISC_USER;
}
+static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
+{
+ /* No support for non-zero CS base */
+ if (from_ip)
+ return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
+ return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
+}
+
+static int intel_pt_get_guest(struct intel_pt_queue *ptq)
+{
+ struct machines *machines = &ptq->pt->session->machines;
+ struct machine *machine;
+ pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
+
+ if (ptq->guest_machine && pid == ptq->guest_machine_pid)
+ return 0;
+
+ ptq->guest_machine = NULL;
+ thread__zput(ptq->unknown_guest_thread);
+
+ machine = machines__find_guest(machines, pid);
+ if (!machine)
+ return -1;
+
+ ptq->unknown_guest_thread = machine__idle_thread(machine);
+ if (!ptq->unknown_guest_thread)
+ return -1;
+
+ ptq->guest_machine = machine;
+ ptq->guest_machine_pid = pid;
+
+ return 0;
+}
+
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip,
uint64_t to_ip, uint64_t max_insn_cnt,
@@ -572,19 +622,29 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
u64 offset, start_offset, start_ip;
u64 insn_cnt = 0;
bool one_map = true;
+ bool nr;
intel_pt_insn->length = 0;
if (to_ip && *ip == to_ip)
goto out_no_cache;
- cpumode = intel_pt_cpumode(ptq->pt, *ip);
+ nr = ptq->state->to_nr;
+ cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
- thread = ptq->thread;
- if (!thread) {
- if (cpumode != PERF_RECORD_MISC_KERNEL)
+ if (nr) {
+ if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
+ intel_pt_get_guest(ptq))
return -EINVAL;
- thread = ptq->pt->unknown_thread;
+ machine = ptq->guest_machine;
+ thread = ptq->unknown_guest_thread;
+ } else {
+ thread = ptq->thread;
+ if (!thread) {
+ if (cpumode != PERF_RECORD_MISC_KERNEL)
+ return -EINVAL;
+ thread = ptq->pt->unknown_thread;
+ }
}
while (1) {
@@ -732,8 +792,14 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data)
u8 cpumode;
u64 offset;
- if (ip >= ptq->pt->kernel_start)
+ if (ptq->state->to_nr) {
+ if (intel_pt_guest_kernel_ip(ip))
+ return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
+ /* No support for decoding guest user space */
+ return -EINVAL;
+ } else if (ip >= ptq->pt->kernel_start) {
return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
+ }
cpumode = PERF_RECORD_MISC_USER;
@@ -893,6 +959,18 @@ static bool intel_pt_sampling_mode(struct intel_pt *pt)
return false;
}
+static u64 intel_pt_ctl(struct intel_pt *pt)
+{
+ struct evsel *evsel;
+ u64 config;
+
+ evlist__for_each_entry(pt->session->evlist, evsel) {
+ if (intel_pt_get_config(pt, &evsel->core.attr, &config))
+ return config;
+ }
+ return 0;
+}
+
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
u64 quot, rem;
@@ -1026,6 +1104,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
+ params.ctl = intel_pt_ctl(pt);
params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
params.mtc_period = intel_pt_mtc_period(pt);
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
@@ -1087,6 +1166,7 @@ static void intel_pt_free_queue(void *priv)
if (!ptq)
return;
thread__zput(ptq->thread);
+ thread__zput(ptq->unknown_guest_thread);
intel_pt_decoder_free(ptq->decoder);
zfree(&ptq->event_buf);
zfree(&ptq->last_branch);
@@ -1121,13 +1201,16 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
if (ptq->state->flags & INTEL_PT_ABORT_TX) {
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
- if (ptq->state->to_ip)
+ if (!ptq->state->to_ip)
+ ptq->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_TRACE_END;
+ else if (ptq->state->from_nr && !ptq->state->to_nr)
+ ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_VMEXIT;
+ else
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_INTERRUPT;
- else
- ptq->flags = PERF_IP_FLAG_BRANCH |
- PERF_IP_FLAG_TRACE_END;
ptq->insn_len = 0;
} else {
if (ptq->state->from_ip)
@@ -1301,8 +1384,8 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample->ip = ptq->state->from_ip;
- sample->cpumode = intel_pt_cpumode(pt, sample->ip);
sample->addr = ptq->state->to_ip;
+ sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
sample->period = 1;
sample->flags = ptq->flags;
@@ -1381,7 +1464,8 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
- sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
+ if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
+ sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
@@ -1431,7 +1515,8 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
else
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
- sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
+ if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
+ sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
if (sample.cyc_cnt) {
sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
@@ -1533,6 +1618,32 @@ static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
pt->pwr_events_sample_type);
}
+static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
+{
+ struct intel_pt *pt = ptq->pt;
+ union perf_event *event = ptq->event_buf;
+ struct perf_sample sample = { .ip = 0, };
+ struct perf_synth_intel_psb raw;
+
+ if (intel_pt_skip_event(pt))
+ return 0;
+
+ intel_pt_prep_p_sample(pt, ptq, event, &sample);
+
+ sample.id = ptq->pt->psb_id;
+ sample.stream_id = ptq->pt->psb_id;
+ sample.flags = 0;
+
+ raw.reserved = 0;
+ raw.offset = ptq->state->psb_offset;
+
+ sample.raw_size = perf_synth__raw_size(raw);
+ sample.raw_data = perf_synth__raw_data(&raw);
+
+ return intel_pt_deliver_synth_event(pt, event, &sample,
+ pt->pwr_events_sample_type);
+}
+
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
@@ -1791,10 +1902,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
else
sample.ip = ptq->state->from_ip;
- /* No support for guest mode at this time */
- cpumode = sample.ip < ptq->pt->kernel_start ?
- PERF_RECORD_MISC_USER :
- PERF_RECORD_MISC_KERNEL;
+ cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
@@ -1853,13 +1961,30 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
sample.addr = items->mem_access_address;
- if (sample_type & PERF_SAMPLE_WEIGHT) {
+ if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
/*
* Refer kernel's setup_pebs_adaptive_sample_data() and
* intel_hsw_weight().
*/
- if (items->has_mem_access_latency)
- sample.weight = items->mem_access_latency;
+ if (items->has_mem_access_latency) {
+ u64 weight = items->mem_access_latency >> 32;
+
+ /*
+			 * Starting from SPR, the mem access latency field
+ * contains both cache latency [47:32] and instruction
+ * latency [15:0]. The cache latency is the same as the
+ * mem access latency on previous platforms.
+ *
+			 * In practice, no memory access could last more than
+			 * 4G cycles. Use latency >> 32 to distinguish the
+			 * two formats of the mem access latency field.
+ */
+ if (weight > 0) {
+ sample.weight = weight & 0xffff;
+ sample.ins_lat = items->mem_access_latency & 0xffff;
+ } else
+ sample.weight = items->mem_access_latency;
+ }
if (!sample.weight && items->has_tsx_aux_info) {
/* Cycles last block */
sample.weight = (u32)items->tsx_aux_info;
@@ -1966,14 +2091,8 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->have_sample = false;
- if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
- /*
- * Cycle count and instruction count only go together to create
- * a valid IPC ratio when the cycle count changes.
- */
- ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
- ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
- }
+ ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
+ ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
/*
* Do PEBS first to allow for the possibility that the PEBS timestamp
@@ -1986,6 +2105,11 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
}
if (pt->sample_pwr_events) {
+ if (state->type & INTEL_PT_PSB_EVT) {
+ err = intel_pt_synth_psb_sample(ptq);
+ if (err)
+ return err;
+ }
if (ptq->state->cbr != ptq->cbr_seen) {
err = intel_pt_synth_cbr_sample(ptq);
if (err)
@@ -2047,7 +2171,27 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
}
if (pt->sample_branches) {
- err = intel_pt_synth_branch_sample(ptq);
+ if (state->from_nr != state->to_nr &&
+ state->from_ip && state->to_ip) {
+ struct intel_pt_state *st = (struct intel_pt_state *)state;
+ u64 to_ip = st->to_ip;
+ u64 from_ip = st->from_ip;
+
+ /*
+ * perf cannot handle having different machines for ip
+ * and addr, so create 2 branches.
+ */
+ st->to_ip = 0;
+ err = intel_pt_synth_branch_sample(ptq);
+ if (err)
+ return err;
+ st->from_ip = 0;
+ st->to_ip = to_ip;
+ err = intel_pt_synth_branch_sample(ptq);
+ st->from_ip = from_ip;
+ } else {
+ err = intel_pt_synth_branch_sample(ptq);
+ }
if (err)
return err;
}
@@ -3083,6 +3227,14 @@ static int intel_pt_synth_events(struct intel_pt *pt,
pt->cbr_id = id;
intel_pt_set_event_name(evlist, id, "cbr");
id += 1;
+
+ attr.config = PERF_SYNTH_INTEL_PSB;
+ err = intel_pt_synth_event(session, "psb", &attr, id);
+ if (err)
+ return err;
+ pt->psb_id = id;
+ intel_pt_set_event_name(evlist, id, "psb");
+ id += 1;
}
if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
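The PEBS latency handling above can be restated as a standalone decode: if the upper 32 bits of mem_access_latency are non-zero, the field uses the newer packed layout (cache latency in bits 47:32, instruction latency in bits 15:0); otherwise the whole value is the memory access latency as before. The input value below is made up.

#include <stdint.h>
#include <stdio.h>

static void decode_latency(uint64_t lat, uint64_t *weight, uint64_t *ins_lat)
{
	uint64_t cache_lat = lat >> 32;

	if (cache_lat > 0) {
		/* New format: cache latency [47:32], instruction latency [15:0]. */
		*weight = cache_lat & 0xffff;
		*ins_lat = lat & 0xffff;
	} else {
		/* Old format: the whole field is the memory access latency. */
		*weight = lat;
		*ins_lat = 0;
	}
}

int main(void)
{
	uint64_t weight, ins_lat;

	/* Hypothetical new-format value: cache latency 300, instruction latency 17. */
	decode_latency((300ULL << 32) | 17, &weight, &ins_lat);
	printf("weight=%llu ins_lat=%llu\n",
	       (unsigned long long)weight, (unsigned long long)ins_lat);
	return 0;
}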
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
index 84e5304e151a..934092199f89 100644
--- a/tools/perf/util/intlist.c
+++ b/tools/perf/util/intlist.c
@@ -13,7 +13,7 @@
static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused,
const void *entry)
{
- int i = (int)((long)entry);
+ unsigned long i = (unsigned long)entry;
struct rb_node *rc = NULL;
struct int_node *node = malloc(sizeof(*node));
@@ -41,15 +41,20 @@ static void intlist__node_delete(struct rblist *rblist __maybe_unused,
static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
{
- int i = (int)((long)entry);
+ unsigned long i = (unsigned long)entry;
struct int_node *node = container_of(rb_node, struct int_node, rb_node);
- return node->i - i;
+ if (node->i > i)
+ return 1;
+ else if (node->i < i)
+ return -1;
+
+ return 0;
}
-int intlist__add(struct intlist *ilist, int i)
+int intlist__add(struct intlist *ilist, unsigned long i)
{
- return rblist__add_node(&ilist->rblist, (void *)((long)i));
+ return rblist__add_node(&ilist->rblist, (void *)i);
}
void intlist__remove(struct intlist *ilist, struct int_node *node)
@@ -58,7 +63,7 @@ void intlist__remove(struct intlist *ilist, struct int_node *node)
}
static struct int_node *__intlist__findnew(struct intlist *ilist,
- int i, bool create)
+ unsigned long i, bool create)
{
struct int_node *node = NULL;
struct rb_node *rb_node;
@@ -67,9 +72,9 @@ static struct int_node *__intlist__findnew(struct intlist *ilist,
return NULL;
if (create)
- rb_node = rblist__findnew(&ilist->rblist, (void *)((long)i));
+ rb_node = rblist__findnew(&ilist->rblist, (void *)i);
else
- rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+ rb_node = rblist__find(&ilist->rblist, (void *)i);
if (rb_node)
node = container_of(rb_node, struct int_node, rb_node);
@@ -77,12 +82,12 @@ static struct int_node *__intlist__findnew(struct intlist *ilist,
return node;
}
-struct int_node *intlist__find(struct intlist *ilist, int i)
+struct int_node *intlist__find(struct intlist *ilist, unsigned long i)
{
return __intlist__findnew(ilist, i, false);
}
-struct int_node *intlist__findnew(struct intlist *ilist, int i)
+struct int_node *intlist__findnew(struct intlist *ilist, unsigned long i)
{
return __intlist__findnew(ilist, i, true);
}
@@ -93,7 +98,7 @@ static int intlist__parse_list(struct intlist *ilist, const char *s)
int err;
do {
- long value = strtol(s, &sep, 10);
+ unsigned long value = strtol(s, &sep, 10);
err = -EINVAL;
if (*sep != ',' && *sep != '\0')
break;
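The rewritten intlist__node_cmp() above drops the old subtraction on purpose: with unsigned long keys, returning node->i - i truncates the difference to int, so two keys that differ only above bit 31 would compare as equal. A small demonstration (assuming 64-bit unsigned long):

#include <stdio.h>

/* Old-style subtraction compare, truncated to int on return. */
static int cmp_sub(unsigned long a, unsigned long b)
{
	return a - b;	/* typically truncates to 0 for the values below */
}

/* New-style explicit compare, as in intlist__node_cmp() above. */
static int cmp_rel(unsigned long a, unsigned long b)
{
	if (a > b)
		return 1;
	else if (a < b)
		return -1;

	return 0;
}

int main(void)
{
	unsigned long a = 0x100000000UL, b = 0;	/* differ only above bit 31 */

	printf("sub: %d  rel: %d\n", cmp_sub(a, b), cmp_rel(a, b));
	return 0;
}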
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
index 5c19ee001299..e336b174d0c7 100644
--- a/tools/perf/util/intlist.h
+++ b/tools/perf/util/intlist.h
@@ -9,7 +9,7 @@
struct int_node {
struct rb_node rb_node;
- int i;
+ unsigned long i;
void *priv;
};
@@ -21,13 +21,13 @@ struct intlist *intlist__new(const char *slist);
void intlist__delete(struct intlist *ilist);
void intlist__remove(struct intlist *ilist, struct int_node *in);
-int intlist__add(struct intlist *ilist, int i);
+int intlist__add(struct intlist *ilist, unsigned long i);
struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
-struct int_node *intlist__find(struct intlist *ilist, int i);
-struct int_node *intlist__findnew(struct intlist *ilist, int i);
+struct int_node *intlist__find(struct intlist *ilist, unsigned long i);
+struct int_node *intlist__findnew(struct intlist *ilist, unsigned long i);
-static inline bool intlist__has_entry(struct intlist *ilist, int i)
+static inline bool intlist__has_entry(struct intlist *ilist, unsigned long i)
{
return intlist__find(ilist, i) != NULL;
}
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
index 6817ffc2a059..fb810e1b2de7 100644
--- a/tools/perf/util/jit.h
+++ b/tools/perf/util/jit.h
@@ -5,7 +5,7 @@
#include <data.h>
int jit_process(struct perf_session *session, struct perf_data *output,
- struct machine *machine, char *filename, pid_t pid, u64 *nbytes);
+ struct machine *machine, char *filename, pid_t pid, pid_t tid, u64 *nbytes);
int jit_inject_record(const char *filename);
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 055bab7a92b3..9760d8e7b386 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -18,6 +18,7 @@
#include "event.h"
#include "debug.h"
#include "evlist.h"
+#include "namespaces.h"
#include "symbol.h"
#include <elf.h>
@@ -35,6 +36,7 @@ struct jit_buf_desc {
struct perf_data *output;
struct perf_session *session;
struct machine *machine;
+ struct nsinfo *nsi;
union jr_entry *entry;
void *buf;
uint64_t sample_type;
@@ -72,7 +74,8 @@ struct jit_tool {
#define get_jit_tool(t) (container_of(tool, struct jit_tool, tool))
static int
-jit_emit_elf(char *filename,
+jit_emit_elf(struct jit_buf_desc *jd,
+ char *filename,
const char *sym,
uint64_t code_addr,
const void *code,
@@ -83,14 +86,18 @@ jit_emit_elf(char *filename,
uint32_t unwinding_header_size,
uint32_t unwinding_size)
{
- int ret, fd;
+ int ret, fd, saved_errno;
+ struct nscookie nsc;
if (verbose > 0)
fprintf(stderr, "write ELF image %s\n", filename);
+ nsinfo__mountns_enter(jd->nsi, &nsc);
fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
+ saved_errno = errno;
+ nsinfo__mountns_exit(&nsc);
if (fd == -1) {
- pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(errno));
+ pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(saved_errno));
return -1;
}
@@ -99,8 +106,11 @@ jit_emit_elf(char *filename,
close(fd);
- if (ret)
- unlink(filename);
+ if (ret) {
+ nsinfo__mountns_enter(jd->nsi, &nsc);
+ unlink(filename);
+ nsinfo__mountns_exit(&nsc);
+ }
return ret;
}
@@ -134,12 +144,15 @@ static int
jit_open(struct jit_buf_desc *jd, const char *name)
{
struct jitheader header;
+ struct nscookie nsc;
struct jr_prefix *prefix;
ssize_t bs, bsz = 0;
void *n, *buf = NULL;
int ret, retval = -1;
+ nsinfo__mountns_enter(jd->nsi, &nsc);
jd->in = fopen(name, "r");
+ nsinfo__mountns_exit(&nsc);
if (!jd->in)
return -1;
@@ -367,6 +380,20 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
return 0;
}
+static pid_t jr_entry_pid(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ if (jd->nsi && jd->nsi->in_pidns)
+ return jd->nsi->tgid;
+ return jr->load.pid;
+}
+
+static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ if (jd->nsi && jd->nsi->in_pidns)
+ return jd->nsi->pid;
+ return jr->load.tid;
+}
+
static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
{
struct perf_tsc_conversion tc;
@@ -402,14 +429,15 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
const char *sym;
uint64_t count;
int ret, csize, usize;
- pid_t pid, tid;
+ pid_t nspid, pid, tid;
struct {
u32 pid, tid;
u64 time;
} *id;
- pid = jr->load.pid;
- tid = jr->load.tid;
+ nspid = jr->load.pid;
+ pid = jr_entry_pid(jd, jr);
+ tid = jr_entry_tid(jd, jr);
csize = jr->load.code_size;
usize = jd->unwinding_mapped_size;
addr = jr->load.code_addr;
@@ -425,14 +453,14 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
filename = event->mmap2.filename;
size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
- pid,
+ nspid,
count);
size++; /* for \0 */
size = PERF_ALIGN(size, sizeof(u64));
uaddr = (uintptr_t)code;
- ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
+ ret = jit_emit_elf(jd, filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
jd->unwinding_data, jd->eh_frame_hdr_size, jd->unwinding_size);
if (jd->debug_data && jd->nr_debug_entries) {
@@ -451,7 +479,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
free(event);
return -1;
}
- if (stat(filename, &st))
+ if (nsinfo__stat(filename, &st, jd->nsi))
memset(&st, 0, sizeof(st));
event->mmap2.header.type = PERF_RECORD_MMAP2;
@@ -515,14 +543,15 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
int usize;
u16 idr_size;
int ret;
- pid_t pid, tid;
+ pid_t nspid, pid, tid;
struct {
u32 pid, tid;
u64 time;
} *id;
- pid = jr->move.pid;
- tid = jr->move.tid;
+ nspid = jr->load.pid;
+ pid = jr_entry_pid(jd, jr);
+ tid = jr_entry_tid(jd, jr);
usize = jd->unwinding_mapped_size;
idr_size = jd->machine->id_hdr_size;
@@ -536,12 +565,12 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
filename = event->mmap2.filename;
size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
jd->dir,
- pid,
+ nspid,
jr->move.code_index);
size++; /* for \0 */
- if (stat(filename, &st))
+ if (nsinfo__stat(filename, &st, jd->nsi))
memset(&st, 0, sizeof(st));
size = PERF_ALIGN(size, sizeof(u64));
@@ -700,7 +729,7 @@ jit_inject(struct jit_buf_desc *jd, char *path)
* as captured in the RECORD_MMAP record
*/
static int
-jit_detect(char *mmap_name, pid_t pid)
+jit_detect(char *mmap_name, pid_t pid, struct nsinfo *nsi)
{
char *p;
char *end = NULL;
@@ -740,7 +769,7 @@ jit_detect(char *mmap_name, pid_t pid)
* pid does not match mmap pid
* pid==0 in system-wide mode (synthesized)
*/
- if (pid && pid2 != pid)
+ if (pid && pid2 != nsi->nstgid)
return -1;
/*
* validate suffix
@@ -782,16 +811,30 @@ jit_process(struct perf_session *session,
struct machine *machine,
char *filename,
pid_t pid,
+ pid_t tid,
u64 *nbytes)
{
+ struct thread *thread;
+ struct nsinfo *nsi;
struct evsel *first;
struct jit_buf_desc jd;
int ret;
+ thread = machine__findnew_thread(machine, pid, tid);
+ if (thread == NULL) {
+ pr_err("problem processing JIT mmap event, skipping it.\n");
+ return 0;
+ }
+
+ nsi = nsinfo__get(thread->nsinfo);
+ thread__put(thread);
+
/*
* first, detect marker mmap (i.e., the jitdump mmap)
*/
- if (jit_detect(filename, pid)) {
+ if (jit_detect(filename, pid, nsi)) {
+ nsinfo__put(nsi);
+
// Strip //anon* mmaps if we processed a jitdump for this pid
if (jit_has_pid(machine, pid) && (strncmp(filename, "//anon", 6) == 0))
return 1;
@@ -804,6 +847,7 @@ jit_process(struct perf_session *session,
jd.session = session;
jd.output = output;
jd.machine = machine;
+ jd.nsi = nsi;
/*
* track sample_type to compute id_all layout
@@ -821,5 +865,7 @@ jit_process(struct perf_session *session,
ret = 1;
}
+ nsinfo__put(jd.nsi);
+
return ret;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 1e9d3f982b47..b5c2d8be4144 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -369,6 +369,15 @@ out:
return machine;
}
+struct machine *machines__find_guest(struct machines *machines, pid_t pid)
+{
+ struct machine *machine = machines__find(machines, pid);
+
+ if (!machine)
+ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
+ return machine;
+}
+
void machines__process_guests(struct machines *machines,
machine__process_t process, void *data)
{
@@ -589,6 +598,24 @@ struct thread *machine__find_thread(struct machine *machine, pid_t pid,
return th;
}
+/*
+ * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
+ * So here a single thread is created for that, but actually there is a separate
+ * idle task per cpu, so there should be one 'struct thread' per cpu, but there
+ * is only 1. That causes problems for some tools, requiring workarounds. For
+ * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
+ */
+struct thread *machine__idle_thread(struct machine *machine)
+{
+ struct thread *thread = machine__findnew_thread(machine, 0, 0);
+
+ if (!thread || thread__set_comm(thread, "swapper", 0) ||
+ thread__set_namespaces(thread, 0, NULL))
+ pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
+
+ return thread;
+}
+
struct comm *machine__thread_exec_comm(struct machine *machine,
struct thread *thread)
{
@@ -1599,7 +1626,8 @@ static int machine__process_extra_kernel_map(struct machine *machine,
}
static int machine__process_kernel_mmap_event(struct machine *machine,
- struct extra_kernel_map *xm)
+ struct extra_kernel_map *xm,
+ struct build_id *bid)
{
struct map *map;
enum dso_space_type dso_space;
@@ -1624,6 +1652,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
goto out_problem;
map->end = map->start + xm->end - xm->start;
+
+ if (build_id__is_defined(bid))
+ dso__set_build_id(map->dso, bid);
+
} else if (is_kernel_mmap) {
const char *symbol_name = (xm->name + strlen(machine->mmap_name));
/*
@@ -1681,6 +1713,9 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
machine__update_kernel_mmap(machine, xm->start, xm->end);
+ if (build_id__is_defined(bid))
+ dso__set_build_id(kernel, bid);
+
/*
* Avoid using a zero address (kptr_restrict) for the ref reloc
* symbol. Effectively having zero here means that at record
@@ -1718,11 +1753,17 @@ int machine__process_mmap2_event(struct machine *machine,
.ino = event->mmap2.ino,
.ino_generation = event->mmap2.ino_generation,
};
+ struct build_id __bid, *bid = NULL;
int ret = 0;
if (dump_trace)
perf_event__fprintf_mmap2(event, stdout);
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ bid = &__bid;
+ build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
+ }
+
if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
sample->cpumode == PERF_RECORD_MISC_KERNEL) {
struct extra_kernel_map xm = {
@@ -1732,7 +1773,7 @@ int machine__process_mmap2_event(struct machine *machine,
};
strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
- ret = machine__process_kernel_mmap_event(machine, &xm);
+ ret = machine__process_kernel_mmap_event(machine, &xm, bid);
if (ret < 0)
goto out_problem;
return 0;
@@ -1746,7 +1787,7 @@ int machine__process_mmap2_event(struct machine *machine,
map = map__new(machine, event->mmap2.start,
event->mmap2.len, event->mmap2.pgoff,
&dso_id, event->mmap2.prot,
- event->mmap2.flags,
+ event->mmap2.flags, bid,
event->mmap2.filename, thread);
if (map == NULL)
@@ -1789,7 +1830,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
};
strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
- ret = machine__process_kernel_mmap_event(machine, &xm);
+ ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
if (ret < 0)
goto out_problem;
return 0;
@@ -1805,7 +1846,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
map = map__new(machine, event->mmap.start,
event->mmap.len, event->mmap.pgoff,
- NULL, prot, 0, event->mmap.filename, thread);
+ NULL, prot, 0, NULL, event->mmap.filename, thread);
if (map == NULL)
goto out_problem_map;
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 26368d3c1754..7377ed6efdf1 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -106,6 +106,7 @@ u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid);
+struct thread *machine__idle_thread(struct machine *machine);
struct comm *machine__thread_exec_comm(struct machine *machine,
struct thread *thread);
@@ -162,6 +163,7 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
struct machine *machines__find_host(struct machines *machines);
struct machine *machines__find(struct machines *machines, pid_t pid);
struct machine *machines__findnew(struct machines *machines, pid_t pid);
+struct machine *machines__find_guest(struct machines *machines, pid_t pid);
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
void machines__set_comm_exec(struct machines *machines, bool comm_exec);
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index f44ede437dc7..692e56dc832e 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -130,8 +130,8 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
struct map *map__new(struct machine *machine, u64 start, u64 len,
u64 pgoff, struct dso_id *id,
- u32 prot, u32 flags, char *filename,
- struct thread *thread)
+ u32 prot, u32 flags, struct build_id *bid,
+ char *filename, struct thread *thread)
{
struct map *map = malloc(sizeof(*map));
struct nsinfo *nsi = NULL;
@@ -194,6 +194,10 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
dso__set_loaded(dso);
}
dso->nsinfo = nsi;
+
+ if (build_id__is_defined(bid))
+ dso__set_build_id(dso, bid);
+
dso__put(dso);
}
return map;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b1c0686db1b7..9f32825c98d8 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -104,10 +104,11 @@ void map__init(struct map *map,
u64 start, u64 end, u64 pgoff, struct dso *dso);
struct dso_id;
+struct build_id;
struct map *map__new(struct machine *machine, u64 start, u64 len,
u64 pgoff, struct dso_id *id, u32 prot, u32 flags,
- char *filename, struct thread *thread);
+ struct build_id *bid, char *filename, struct thread *thread);
struct map *map__new2(u64 start, struct dso *dso);
void map__delete(struct map *map);
struct map *map__clone(struct map *map);
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 19007e463b8a..f93a852ad838 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -56,6 +56,11 @@ char * __weak perf_mem_events__name(int i)
return (char *)e->name;
}
+__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
+{
+ return false;
+}
+
int perf_mem_events__parse(const char *str)
{
char *tok, *saveptr = NULL;
@@ -332,6 +337,29 @@ int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
return l;
}
+int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ size_t l = 0;
+ u64 mask = PERF_MEM_BLK_NA;
+
+ sz -= 1; /* -1 for null termination */
+ out[0] = '\0';
+
+ if (mem_info)
+ mask = mem_info->data_src.mem_blk;
+
+ if (!mask || (mask & PERF_MEM_BLK_NA)) {
+ l += scnprintf(out + l, sz - l, " N/A");
+ return l;
+ }
+ if (mask & PERF_MEM_BLK_DATA)
+ l += scnprintf(out + l, sz - l, " Data");
+ if (mask & PERF_MEM_BLK_ADDR)
+ l += scnprintf(out + l, sz - l, " Addr");
+
+ return l;
+}
+
int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
int i = 0;
@@ -343,6 +371,8 @@ int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_in
i += perf_mem__tlb_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|LCK ");
i += perf_mem__lck_scnprintf(out + i, sz - i, mem_info);
+ i += scnprintf(out + i, sz - i, "|BLK ");
+ i += perf_mem__blk_scnprintf(out + i, sz - i, mem_info);
return i;
}
@@ -355,6 +385,7 @@ int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
u64 lvl = data_src->mem_lvl;
u64 snoop = data_src->mem_snoop;
u64 lock = data_src->mem_lock;
+ u64 blk = data_src->mem_blk;
/*
* Skylake might report unknown remote level via this
* bit, consider it when evaluating remote HITMs.
@@ -374,6 +405,9 @@ do { \
if (lock & P(LOCK, LOCKED)) stats->locks++;
+ if (blk & P(BLK, DATA)) stats->blk_data++;
+ if (blk & P(BLK, ADDR)) stats->blk_addr++;
+
if (op & P(OP, LOAD)) {
/* load */
stats->load++;
@@ -485,6 +519,8 @@ void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
stats->rmt_hit += add->rmt_hit;
stats->lcl_dram += add->lcl_dram;
stats->rmt_dram += add->rmt_dram;
+ stats->blk_data += add->blk_data;
+ stats->blk_addr += add->blk_addr;
stats->nomap += add->nomap;
stats->noparse += add->noparse;
}
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 5ef178278909..755cef7e0625 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -9,6 +9,7 @@
#include <linux/refcount.h>
#include <linux/perf_event.h>
#include "stat.h"
+#include "evsel.h"
struct perf_mem_event {
bool record;
@@ -39,6 +40,7 @@ int perf_mem_events__init(void);
char *perf_mem_events__name(int i);
struct perf_mem_event *perf_mem_events__ptr(int i);
+bool is_mem_loads_aux_event(struct evsel *leader);
void perf_mem_events__list(void);
@@ -47,6 +49,7 @@ int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info);
@@ -76,6 +79,8 @@ struct c2c_stats {
u32 rmt_hit; /* count of loads with remote hit clean; */
u32 lcl_dram; /* count of loads miss to local DRAM */
u32 rmt_dram; /* count of loads miss to remote DRAM */
+ u32 blk_data; /* count of loads blocked by data */
+ u32 blk_addr; /* count of loads blocked by address conflict */
u32 nomap; /* count of load/stores with no phys adrs */
u32 noparse; /* count of unparsable data sources */
};
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index e6d3452031e5..26c990e32378 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -379,7 +379,7 @@ static int metricgroup__setup_events(struct list_head *groups,
metric_refs[i].metric_expr = ref->metric_expr;
i++;
}
- };
+ }
expr->metric_refs = metric_refs;
expr->metric_expr = m->metric_expr;
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index 285d6f30d912..608b20c72a5c 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -66,6 +66,7 @@ int nsinfo__init(struct nsinfo *nsi)
char spath[PATH_MAX];
char *newns = NULL;
char *statln = NULL;
+ char *nspid;
struct stat old_stat;
struct stat new_stat;
FILE *f = NULL;
@@ -112,8 +113,12 @@ int nsinfo__init(struct nsinfo *nsi)
}
if (strstr(statln, "NStgid:") != NULL) {
- nsi->nstgid = (pid_t)strtol(strrchr(statln, '\t'),
- NULL, 10);
+ nspid = strrchr(statln, '\t');
+ nsi->nstgid = (pid_t)strtol(nspid, NULL, 10);
+ /* If the innermost tgid is not the first one, the process is in a
+ * different PID namespace.
+ */
+ nsi->in_pidns = (statln + sizeof("NStgid:") - 1) != nspid;
break;
}
}
@@ -140,6 +145,7 @@ struct nsinfo *nsinfo__new(pid_t pid)
nsi->tgid = pid;
nsi->nstgid = pid;
nsi->need_setns = false;
+ nsi->in_pidns = false;
/* Init may fail if the process exits while we're trying to look
* at its proc information. In that case, save the pid but
* don't try to enter the namespace.
@@ -166,6 +172,7 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
nnsi->tgid = nsi->tgid;
nnsi->nstgid = nsi->nstgid;
nnsi->need_setns = nsi->need_setns;
+ nnsi->in_pidns = nsi->in_pidns;
if (nsi->mntns_path) {
nnsi->mntns_path = strdup(nsi->mntns_path);
if (!nnsi->mntns_path) {
@@ -280,3 +287,15 @@ char *nsinfo__realpath(const char *path, struct nsinfo *nsi)
return rpath;
}
+
+int nsinfo__stat(const char *filename, struct stat *st, struct nsinfo *nsi)
+{
+ int ret;
+ struct nscookie nsc;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ ret = stat(filename, st);
+ nsinfo__mountns_exit(&nsc);
+
+ return ret;
+}
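
Editor's note: the new nsinfo__stat() helper simply wraps stat() in a mount-namespace enter/exit pair. Below is a minimal usage sketch, not part of the patch; it assumes a caller inside the perf tree (util/namespaces.h on the include path), a valid pid, and the usual nsinfo__put() refcount drop, and the path shown is hypothetical.

    #include <sys/stat.h>
    #include <stdio.h>
    #include "util/namespaces.h"

    /* Stat a file as seen from the mount namespace of 'pid'. */
    static int stat_in_target_ns(pid_t pid, const char *path)
    {
            struct nsinfo *nsi = nsinfo__new(pid);  /* may return NULL on error */
            struct stat st;
            int ret = -1;

            if (nsi) {
                    ret = nsinfo__stat(path, &st, nsi); /* enters/leaves the mount ns */
                    if (!ret)
                            printf("%s: %lld bytes\n", path, (long long)st.st_size);
                    nsinfo__put(nsi);
            }
            return ret;
    }
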
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 4b33f684eddd..ad9775db7b9c 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -8,6 +8,7 @@
#define __PERF_NAMESPACES_H
#include <sys/types.h>
+#include <sys/stat.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/refcount.h>
@@ -33,6 +34,7 @@ struct nsinfo {
pid_t tgid;
pid_t nstgid;
bool need_setns;
+ bool in_pidns;
char *mntns_path;
refcount_t refcnt;
};
@@ -55,6 +57,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi, struct nscookie *nc);
void nsinfo__mountns_exit(struct nscookie *nc);
char *nsinfo__realpath(const char *path, struct nsinfo *nsi);
+int nsinfo__stat(const char *filename, struct stat *st, struct nsinfo *nsi);
static inline void __nsinfo__zput(struct nsinfo **nsip)
{
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 9db5097317f4..0b36285a9435 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -356,6 +356,7 @@ bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUT
cycles-ct |
cycles-t |
mem-loads |
+mem-loads-aux |
mem-stores |
topdown-[a-z-]+ |
tx-capacity-[a-z-]+ |
diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c
index 3840d02f0f7b..829af17a0867 100644
--- a/tools/perf/util/perf_api_probe.c
+++ b/tools/perf/util/perf_api_probe.c
@@ -98,6 +98,11 @@ static void perf_probe_text_poke(struct evsel *evsel)
evsel->core.attr.text_poke = 1;
}
+static void perf_probe_build_id(struct evsel *evsel)
+{
+ evsel->core.attr.build_id = 1;
+}
+
bool perf_can_sample_identifier(void)
{
return perf_probe_api(perf_probe_sample_identifier);
@@ -172,3 +177,8 @@ bool perf_can_aux_sample(void)
return true;
}
+
+bool perf_can_record_build_id(void)
+{
+ return perf_probe_api(perf_probe_build_id);
+}
diff --git a/tools/perf/util/perf_api_probe.h b/tools/perf/util/perf_api_probe.h
index d5506a983a94..f12ca55f509a 100644
--- a/tools/perf/util/perf_api_probe.h
+++ b/tools/perf/util/perf_api_probe.h
@@ -11,5 +11,6 @@ bool perf_can_record_cpu_wide(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_text_poke_events(void);
bool perf_can_sample_identifier(void);
+bool perf_can_record_build_id(void);
#endif // __PERF_API_PROBE_H
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index fb0bb6684438..30481825515b 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -35,7 +35,8 @@ static void __p_sample_type(char *buf, size_t size, u64 value)
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC),
bit_name(WEIGHT), bit_name(PHYS_ADDR), bit_name(AUX),
- bit_name(CGROUP), bit_name(DATA_PAGE_SIZE),
+ bit_name(CGROUP), bit_name(DATA_PAGE_SIZE), bit_name(CODE_PAGE_SIZE),
+ bit_name(WEIGHT_STRUCT),
{ .name = NULL, }
};
#undef bit_name
@@ -134,6 +135,8 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(bpf_event, p_unsigned);
PRINT_ATTRf(aux_output, p_unsigned);
PRINT_ATTRf(cgroup, p_unsigned);
+ PRINT_ATTRf(text_poke, p_unsigned);
+ PRINT_ATTRf(build_id, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index a45499126184..eeac181ebccf 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -33,6 +33,13 @@ extern const struct sample_reg sample_reg_masks[];
int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+static inline const char *perf_reg_name(int id)
+{
+ const char *reg_name = __perf_reg_name(id);
+
+ return reg_name ?: "unknown";
+}
+
#else
#define PERF_REGS_MASK 0
#define PERF_REGS_MAX 0
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 8eae2afff71a..a9cff3a50ddf 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -894,6 +894,16 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct debuginfo *dinfo;
int ntevs, ret = 0;
+ /* Workaround for gcc bug #98776.
+ * Perf fails to add a kretprobe event using the debuginfo of a vmlinux
+ * compiled by gcc with the -fpatchable-function-entry option enabled.
+ * The same issue occurs with kernel modules. The retprobe doesn't need
+ * debuginfo, so this workaround uses the map to query the probe
+ * function address for retprobe events.
+ */
+ if (pev->point.retprobe)
+ return 0;
+
dinfo = open_debuginfo(pev->target, pev->nsi, !need_dwarf);
if (!dinfo) {
if (need_dwarf)
@@ -1074,7 +1084,7 @@ static int __show_line_range(struct line_range *lr, const char *module,
}
intlist__for_each_entry(ln, lr->line_list) {
- for (; ln->i > l; l++) {
+ for (; ln->i > (unsigned long)l; l++) {
ret = show_one_line(fp, l - lr->offset);
if (ret < 0)
goto end;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index bbecb449ea94..52273542e6ef 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -794,6 +794,8 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note,
char *ret = NULL;
int i, args_count, err;
unsigned long long ref_ctr_offset;
+ char *arg;
+ int arg_idx = 0;
if (strbuf_init(&buf, 32) < 0)
return NULL;
@@ -818,11 +820,43 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note,
if (args == NULL)
goto error;
- for (i = 0; i < args_count; ++i) {
- if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) {
+ for (i = 0; i < args_count; ) {
+ /*
+ * FIXUP: Arm64 ELF section '.note.stapsdt' uses string
+ * format "-4@[sp, NUM]" if a probe is to access data in
+ * the stack, e.g. below is an example for the SDT
+ * Arguments:
+ *
+ * Arguments: -4@[sp, 12] -4@[sp, 8] -4@[sp, 4]
+ *
+ * Because the string contains an extra space inside the
+ * square brackets, the argument is split into two items.
+ * To fix this up, when an item contains the substring
+ * "[sp,", concatenate it with the following item.
+ */
+ if (strstr(args[i], "[sp,") && (i+1) < args_count) {
+ err = asprintf(&arg, "%s %s", args[i], args[i+1]);
+ i += 2;
+ } else {
+ err = asprintf(&arg, "%s", args[i]);
+ i += 1;
+ }
+
+ /* Failed to allocate memory */
+ if (err < 0) {
argv_free(args);
goto error;
}
+
+ if (synthesize_sdt_probe_arg(&buf, arg_idx, arg) < 0) {
+ free(arg);
+ argv_free(args);
+ goto error;
+ }
+
+ free(arg);
+ arg_idx++;
}
argv_free(args);
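
Editor's note: a self-contained sketch of the concatenation rule described in the comment above. The token values are made up for illustration and the code is independent of perf's strbuf/argv helpers.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* argv splitting has broken "-4@[sp, 12]" into two tokens. */
            const char *args[] = { "-4@[sp,", "12]", "-8@x0" };
            int count = 3, i = 0;

            while (i < count) {
                    char *arg;
                    int err;

                    /* Re-join a "[sp," token with the item that follows it. */
                    if (strstr(args[i], "[sp,") && i + 1 < count) {
                            err = asprintf(&arg, "%s %s", args[i], args[i + 1]);
                            i += 2;
                    } else {
                            err = asprintf(&arg, "%s", args[i]);
                            i += 1;
                    }
                    if (err < 0)
                            return 1;

                    printf("arg: %s\n", arg);  /* "-4@[sp, 12]" then "-8@x0" */
                    free(arg);
            }
            return 0;
    }
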
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 76dd349aa48d..1b118c9c86a6 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1187,8 +1187,10 @@ static int debuginfo__find_probe_location(struct debuginfo *dbg,
while (!dwarf_nextcu(dbg->dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die);
- if (!diep)
+ if (!diep) {
+ off = noff;
continue;
+ }
/* Check if target file is included. */
if (pp->file)
@@ -1949,8 +1951,10 @@ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr)
/* Get the DIE(Debugging Information Entry) of this CU */
diep = dwarf_offdie(dbg->dbg, off + cuhl, &lf.cu_die);
- if (!diep)
+ if (!diep) {
+ off = noff;
continue;
+ }
/* Check if target file is included. */
if (lr->file)
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index a9d9c142eb7c..71b753523fac 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/python.c
util/cap.c
util/evlist.c
util/evsel.c
+util/evsel_fprintf.c
util/perf_event_attr_fprintf.c
util/cpumap.c
util/memswap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index cc5ade85a33f..278abecb5bdf 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -80,6 +80,27 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
}
/*
+ * XXX: All these evsel destructors need some better mechanism, like a linked
+ * list of destructors registered when the relevant code indeed is used instead
+ * of having more and more calls in perf_evsel__delete(). -- acme
+ *
+ * For now, add some more:
+ *
+ * Not to drag the BPF bandwagon...
+ */
+void bpf_counter__destroy(struct evsel *evsel);
+int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
+
+void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
+{
+}
+
+int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_unused, int fd __maybe_unused)
+{
+ return 0;
+}
+
+/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
*/
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index e70c9dd04567..f99852d54b14 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -15,6 +15,8 @@
#include "record.h"
#include "../perf-sys.h"
#include "topdown.h"
+#include "map_symbol.h"
+#include "mem-events.h"
/*
* evsel__config_leader_sampling() uses special rules for leader sampling.
@@ -25,7 +27,8 @@ static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evl
{
struct evsel *leader = evsel->leader;
- if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
+ if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader) ||
+ is_mem_loads_aux_event(leader)) {
evlist__for_each_entry(evlist, evsel) {
if (evsel->leader == leader && evsel != evsel->leader)
return evsel;
@@ -201,10 +204,10 @@ static int record_opts__config_freq(struct record_opts *opts)
* Default frequency is over current maximum.
*/
if (max_rate < opts->freq) {
- pr_warning("Lowering default frequency rate to %u.\n"
+ pr_warning("Lowering default frequency rate from %u to %u.\n"
"Please consider tweaking "
"/proc/sys/kernel/perf_event_max_sample_rate.\n",
- max_rate);
+ opts->freq, max_rate);
opts->freq = max_rate;
}
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index 694b351dcd27..68f471d9a88b 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -23,6 +23,7 @@ struct record_opts {
bool sample_address;
bool sample_phys_addr;
bool sample_data_page_size;
+ bool sample_code_page_size;
bool sample_weight;
bool sample_time;
bool sample_time_set;
@@ -50,6 +51,7 @@ struct record_opts {
bool no_bpf_event;
bool kcore;
bool text_poke;
+ bool build_id;
unsigned int freq;
unsigned int mmap_pages;
unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 25adbcce0281..859832a82496 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -593,10 +593,13 @@ static void perf_event__mmap2_swap(union perf_event *event,
event->mmap2.start = bswap_64(event->mmap2.start);
event->mmap2.len = bswap_64(event->mmap2.len);
event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
- event->mmap2.maj = bswap_32(event->mmap2.maj);
- event->mmap2.min = bswap_32(event->mmap2.min);
- event->mmap2.ino = bswap_64(event->mmap2.ino);
- event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
+
+ if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
+ event->mmap2.maj = bswap_32(event->mmap2.maj);
+ event->mmap2.min = bswap_32(event->mmap2.min);
+ event->mmap2.ino = bswap_64(event->mmap2.ino);
+ event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
+ }
if (sample_id_all) {
void *data = &event->mmap2.filename;
@@ -1297,8 +1300,12 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_STACK_USER)
stack_user__printf(&sample->user_stack);
- if (sample_type & PERF_SAMPLE_WEIGHT)
- printf("... weight: %" PRIu64 "\n", sample->weight);
+ if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
+ printf("... weight: %" PRIu64 "", sample->weight);
+ if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT)
+ printf(",0x%"PRIx16"", sample->ins_lat);
+ printf("\n");
+ }
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
@@ -1309,6 +1316,9 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
+ if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
+
if (sample_type & PERF_SAMPLE_TRANSACTION)
printf("... transaction: %" PRIx64 "\n", sample->transaction);
@@ -1346,8 +1356,6 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
union perf_event *event,
struct perf_sample *sample)
{
- struct machine *machine;
-
if (perf_guest &&
((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
(sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
@@ -1359,10 +1367,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
else
pid = sample->pid;
- machine = machines__find(machines, pid);
- if (!machine)
- machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
- return machine;
+ return machines__find_guest(machines, pid);
}
return &machines->host;
@@ -1784,32 +1789,13 @@ struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
return machine__findnew_thread(&session->machines.host, -1, pid);
}
-/*
- * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
- * So here a single thread is created for that, but actually there is a separate
- * idle task per cpu, so there should be one 'struct thread' per cpu, but there
- * is only 1. That causes problems for some tools, requiring workarounds. For
- * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
- */
int perf_session__register_idle_thread(struct perf_session *session)
{
- struct thread *thread;
- int err = 0;
-
- thread = machine__findnew_thread(&session->machines.host, 0, 0);
- if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
- pr_err("problem inserting idle task.\n");
- err = -1;
- }
+ struct thread *thread = machine__idle_thread(&session->machines.host);
- if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
- pr_err("problem inserting idle task.\n");
- err = -1;
- }
-
- /* machine__findnew_thread() got the thread, so put it */
+ /* machine__idle_thread() got the thread, so put it */
thread__put(thread);
- return err;
+ return thread ? 0 : -1;
}
static void
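
Editor's note: the byte-swap change above relies on PERF_RECORD_MISC_MMAP_BUILD_ID repurposing the maj/min/ino/ino_generation area of PERF_RECORD_MMAP2 to carry the build ID. A rough reader-side sketch, assuming perf's tools/lib/perf event definitions (union perf_event, perf_record_mmap2) and the misc flag from the uapi perf_event.h; not part of the patch.

    #include <stdio.h>
    #include <linux/perf_event.h>   /* PERF_RECORD_MISC_MMAP_BUILD_ID */
    #include <perf/event.h>         /* union perf_event */

    static void print_mmap2_identity(union perf_event *event)
    {
            if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
                    /* maj/min/ino fields are overlaid by the build ID. */
                    for (unsigned int i = 0; i < event->mmap2.build_id_size; i++)
                            printf("%02x", event->mmap2.build_id[i]);
                    printf(" %s\n", event->mmap2.filename);
            } else {
                    printf("%u:%u ino %llu %s\n", event->mmap2.maj, event->mmap2.min,
                           (unsigned long long)event->mmap2.ino, event->mmap2.filename);
            }
    }
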
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index c5e3e9a68162..483f05004e68 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -43,7 +43,7 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls', '-DPYTHON_PERF' ]
if not cc_is_clang:
cflags += ['-Wno-cast-function-type' ]
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 80907bc32683..0d5ad42812b9 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -36,7 +36,7 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
-const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
+const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
@@ -1365,6 +1365,49 @@ struct sort_entry sort_global_weight = {
.se_width_idx = HISTC_GLOBAL_WEIGHT,
};
+static u64 he_ins_lat(struct hist_entry *he)
+{
+ return he->stat.nr_events ? he->stat.ins_lat / he->stat.nr_events : 0;
+}
+
+static int64_t
+sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return he_ins_lat(left) - he_ins_lat(right);
+}
+
+static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he));
+}
+
+struct sort_entry sort_local_ins_lat = {
+ .se_header = "Local INSTR Latency",
+ .se_cmp = sort__local_ins_lat_cmp,
+ .se_snprintf = hist_entry__local_ins_lat_snprintf,
+ .se_width_idx = HISTC_LOCAL_INS_LAT,
+};
+
+static int64_t
+sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->stat.ins_lat - right->stat.ins_lat;
+}
+
+static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat);
+}
+
+struct sort_entry sort_global_ins_lat = {
+ .se_header = "INSTR Latency",
+ .se_cmp = sort__global_ins_lat_cmp,
+ .se_snprintf = hist_entry__global_ins_lat_snprintf,
+ .se_width_idx = HISTC_GLOBAL_INS_LAT,
+};
+
struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
@@ -1422,6 +1465,41 @@ struct sort_entry sort_mem_dcacheline = {
};
static int64_t
+sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ union perf_mem_data_src data_src_l;
+ union perf_mem_data_src data_src_r;
+
+ if (left->mem_info)
+ data_src_l = left->mem_info->data_src;
+ else
+ data_src_l.mem_blk = PERF_MEM_BLK_NA;
+
+ if (right->mem_info)
+ data_src_r = right->mem_info->data_src;
+ else
+ data_src_r.mem_blk = PERF_MEM_BLK_NA;
+
+ return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
+}
+
+static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ char out[16];
+
+ perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
+ return repsep_snprintf(bf, size, "%.*s", width, out);
+}
+
+struct sort_entry sort_mem_blocked = {
+ .se_header = "Blocked",
+ .se_cmp = sort__blocked_cmp,
+ .se_snprintf = hist_entry__blocked_snprintf,
+ .se_width_idx = HISTC_MEM_BLOCKED,
+};
+
+static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
uint64_t l = 0, r = 0;
@@ -1492,6 +1570,31 @@ struct sort_entry sort_mem_data_page_size = {
};
static int64_t
+sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ uint64_t l = left->code_page_size;
+ uint64_t r = right->code_page_size;
+
+ return (int64_t)(r - l);
+}
+
+static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ char str[PAGE_SIZE_NAME_LEN];
+
+ return repsep_snprintf(bf, size, "%-*s", width,
+ get_page_size_name(he->code_page_size, str));
+}
+
+struct sort_entry sort_code_page_size = {
+ .se_header = "Code Page Size",
+ .se_cmp = sort__code_page_size_cmp,
+ .se_snprintf = hist_entry__code_page_size_snprintf,
+ .se_width_idx = HISTC_CODE_PAGE_SIZE,
+};
+
+static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
if (!left->branch_info || !right->branch_info)
@@ -1735,6 +1838,9 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
DIM(SORT_TIME, "time", sort_time),
+ DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
+ DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
+ DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
};
#undef DIM
@@ -1770,6 +1876,7 @@ static struct sort_dimension memory_sort_dimensions[] = {
DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
+ DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};
#undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index e50f2b695bc4..63f67a3f3630 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -50,6 +50,7 @@ struct he_stat {
u64 period_guest_sys;
u64 period_guest_us;
u64 weight;
+ u64 ins_lat;
u32 nr_events;
};
@@ -106,6 +107,7 @@ struct hist_entry {
u64 transaction;
s32 socket;
s32 cpu;
+ u64 code_page_size;
u8 cpumode;
u8 depth;
@@ -229,6 +231,9 @@ enum sort_type {
SORT_CGROUP_ID,
SORT_SYM_IPC_NULL,
SORT_TIME,
+ SORT_CODE_PAGE_SIZE,
+ SORT_LOCAL_INS_LAT,
+ SORT_GLOBAL_INS_LAT,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -256,6 +261,7 @@ enum sort_type {
SORT_MEM_IADDR_SYMBOL,
SORT_MEM_PHYS_DADDR,
SORT_MEM_DATA_PAGE_SIZE,
+ SORT_MEM_BLOCKED,
};
/*
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 583ae4f09c5d..cce7a76d6473 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -1045,7 +1045,9 @@ static void print_header(struct perf_stat_config *config,
if (!config->csv_output) {
fprintf(output, "\n");
fprintf(output, " Performance counter stats for ");
- if (_target->system_wide)
+ if (_target->bpf_str)
+ fprintf(output, "\'BPF program(s) %s", _target->bpf_str);
+ else if (_target->system_wide)
fprintf(output, "\'system wide");
else if (_target->cpu_list)
fprintf(output, "\'CPU(s) %s", _target->cpu_list);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 12eafd12a693..6ccf21a72f06 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -273,6 +273,18 @@ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
cpu, count, &rsd);
+ else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS))
+ update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS,
+ cpu, count, &rsd);
+ else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT))
+ update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT,
+ cpu, count, &rsd);
+ else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT))
+ update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT,
+ cpu, count, &rsd);
+ else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND))
+ update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND,
+ cpu, count, &rsd);
else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
cpu, count, &rsd);
@@ -1174,6 +1186,86 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
color = PERF_COLOR_RED;
print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
+ } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
+ full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
+ double retiring = td_metric_ratio(cpu,
+ STAT_TOPDOWN_RETIRING, st,
+ &rsd);
+ double heavy_ops = td_metric_ratio(cpu,
+ STAT_TOPDOWN_HEAVY_OPS, st,
+ &rsd);
+ double light_ops = retiring - heavy_ops;
+
+ if (retiring > 0.7 && heavy_ops > 0.1)
+ color = PERF_COLOR_GREEN;
+ print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
+ heavy_ops * 100.);
+ if (retiring > 0.7 && light_ops > 0.6)
+ color = PERF_COLOR_GREEN;
+ else
+ color = NULL;
+ print_metric(config, ctxp, color, "%8.1f%%", "light operations",
+ light_ops * 100.);
+ } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
+ full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
+ double bad_spec = td_metric_ratio(cpu,
+ STAT_TOPDOWN_BAD_SPEC, st,
+ &rsd);
+ double br_mis = td_metric_ratio(cpu,
+ STAT_TOPDOWN_BR_MISPREDICT, st,
+ &rsd);
+ double m_clears = bad_spec - br_mis;
+
+ if (bad_spec > 0.1 && br_mis > 0.05)
+ color = PERF_COLOR_RED;
+ print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
+ br_mis * 100.);
+ if (bad_spec > 0.1 && m_clears > 0.05)
+ color = PERF_COLOR_RED;
+ else
+ color = NULL;
+ print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
+ m_clears * 100.);
+ } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
+ full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
+ double fe_bound = td_metric_ratio(cpu,
+ STAT_TOPDOWN_FE_BOUND, st,
+ &rsd);
+ double fetch_lat = td_metric_ratio(cpu,
+ STAT_TOPDOWN_FETCH_LAT, st,
+ &rsd);
+ double fetch_bw = fe_bound - fetch_lat;
+
+ if (fe_bound > 0.2 && fetch_lat > 0.15)
+ color = PERF_COLOR_RED;
+ print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
+ fetch_lat * 100.);
+ if (fe_bound > 0.2 && fetch_bw > 0.1)
+ color = PERF_COLOR_RED;
+ else
+ color = NULL;
+ print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
+ fetch_bw * 100.);
+ } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
+ full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
+ double be_bound = td_metric_ratio(cpu,
+ STAT_TOPDOWN_BE_BOUND, st,
+ &rsd);
+ double mem_bound = td_metric_ratio(cpu,
+ STAT_TOPDOWN_MEM_BOUND, st,
+ &rsd);
+ double core_bound = be_bound - mem_bound;
+
+ if (be_bound > 0.2 && mem_bound > 0.2)
+ color = PERF_COLOR_RED;
+ print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
+ mem_bound * 100.);
+ if (be_bound > 0.2 && core_bound > 0.1)
+ color = PERF_COLOR_RED;
+ else
+ color = NULL;
+ print_metric(config, ctxp, color, "%8.1f%%", "Core bound",
+ core_bound * 100.);
} else if (evsel->metric_expr) {
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
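
Editor's note: in the level-2 TopDown code above, each new event carries only one half of a split; the other half is derived by subtracting it from the corresponding level-1 ratio. A tiny worked example with made-up ratios, computed the same way as the code:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical ratios as returned by td_metric_ratio(). */
            double retiring = 0.75, heavy_ops = 0.20;
            double bad_spec = 0.12, br_mis   = 0.08;

            printf("light operations: %8.1f%%\n", (retiring - heavy_ops) * 100.); /* 55.0% */
            printf("machine clears:   %8.1f%%\n", (bad_spec - br_mis) * 100.);    /*  4.0% */
            return 0;
    }
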
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 8ce1479c98f0..5d8af29447f4 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -99,6 +99,10 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
ID(TOPDOWN_BE_BOUND, topdown-be-bound),
+ ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
+ ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
+ ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
+ ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
ID(SMI_NUM, msr/smi/),
ID(APERF, msr/aperf/),
};
@@ -527,7 +531,7 @@ int create_perf_stat_counter(struct evsel *evsel,
if (leader->core.nr_members > 1)
attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
- attr->inherit = !config->no_inherit;
+ attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
/*
* Some events get initialized with sample_(period/type) set,
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index b5369730b4a2..d85c292148bb 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -33,6 +33,10 @@ enum perf_stat_evsel_id {
PERF_STAT_EVSEL_ID__TOPDOWN_BAD_SPEC,
PERF_STAT_EVSEL_ID__TOPDOWN_FE_BOUND,
PERF_STAT_EVSEL_ID__TOPDOWN_BE_BOUND,
+ PERF_STAT_EVSEL_ID__TOPDOWN_HEAVY_OPS,
+ PERF_STAT_EVSEL_ID__TOPDOWN_BR_MISPREDICT,
+ PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_LAT,
+ PERF_STAT_EVSEL_ID__TOPDOWN_MEM_BOUND,
PERF_STAT_EVSEL_ID__SMI_NUM,
PERF_STAT_EVSEL_ID__APERF,
PERF_STAT_EVSEL_ID__MAX,
@@ -91,6 +95,10 @@ enum stat_type {
STAT_TOPDOWN_BAD_SPEC,
STAT_TOPDOWN_FE_BOUND,
STAT_TOPDOWN_BE_BOUND,
+ STAT_TOPDOWN_HEAVY_OPS,
+ STAT_TOPDOWN_BR_MISPREDICT,
+ STAT_TOPDOWN_FETCH_LAT,
+ STAT_TOPDOWN_MEM_BOUND,
STAT_SMI_NUM,
STAT_APERF,
STAT_MAX
@@ -148,6 +156,7 @@ struct perf_stat_config {
int ctl_fd_ack;
bool ctl_fd_close;
const char *cgroup_list;
+ unsigned int topdown_level;
};
void perf_stat__set_big_num(int set);
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 52603876c548..f6d90cdd9225 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -293,3 +293,12 @@ char *strdup_esc(const char *str)
return ret;
}
+
+unsigned int hex(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ return c - 'A' + 10;
+}
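
Editor's note: a small usage sketch for the new hex() helper, e.g. decoding one byte of a build-id string. The hex_byte() wrapper is hypothetical; only util/string2.h from the perf tree is assumed, and input is assumed to be valid hex (hex() does no error checking).

    #include <stdio.h>
    #include "util/string2.h"

    /* Decode a two-character hex byte, e.g. "7f" -> 0x7f. */
    static unsigned char hex_byte(const char *s)
    {
            return (unsigned char)((hex(s[0]) << 4) | hex(s[1]));
    }

    int main(void)
    {
            printf("0x%02x\n", hex_byte("7f"));  /* prints 0x7f */
            return 0;
    }
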
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index 73df616ced43..56c30fef9682 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -38,4 +38,6 @@ char *asprintf__tp_filter_pids(size_t npids, pid_t *pids);
char *strpbrk_esc(char *str, const char *stopset);
char *strdup_esc(const char *str);
+unsigned int hex(char c);
+
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index f3577f7d72fe..6dff843fd883 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -12,6 +12,7 @@
#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
+#include "demangle-ocaml.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
@@ -251,8 +252,12 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
return demangled;
demangled = bfd_demangle(NULL, elf_name, demangle_flags);
- if (demangled == NULL)
- demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
+ if (demangled == NULL) {
+ demangled = ocaml_demangle_sym(elf_name);
+ if (demangled == NULL) {
+ demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
+ }
+ }
else if (rust_is_mangled(demangled))
/*
* Input to Rust demangling is the BFD-demangled
@@ -1226,12 +1231,26 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
if (sym.st_shndx == SHN_ABS)
continue;
- sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
+ sec = elf_getscn(syms_ss->elf, sym.st_shndx);
if (!sec)
goto out_elf_end;
gelf_getshdr(sec, &shdr);
+ /*
+ * We have to fall back to runtime when syms' section header has
+ * NOBITS set. NOBITS results in file offset (sh_offset) not
+ * being incremented. So sh_offset used below has different
+ * values for syms (invalid) and runtime (valid).
+ */
+ if (shdr.sh_type == SHT_NOBITS) {
+ sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
+ if (!sec)
+ goto out_elf_end;
+
+ gelf_getshdr(sec, &shdr);
+ }
+
if (is_label && !elf_sec__filter(&shdr, secstrs))
continue;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 64a039cbba1b..77fc46ca07c0 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1561,15 +1561,14 @@ static int bfd2elf_binding(asymbol *symbol)
int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
{
int err = -1;
- long symbols_size, symbols_count;
+ long symbols_size, symbols_count, i;
asection *section;
asymbol **symbols, *sym;
struct symbol *symbol;
bfd *abfd;
- u_int i;
u64 start, len;
- abfd = bfd_openr(dso->long_name, NULL);
+ abfd = bfd_openr(debugfile, NULL);
if (!abfd)
return -1;
@@ -1586,21 +1585,6 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
if (section)
dso->text_offset = section->vma - section->filepos;
- bfd_close(abfd);
-
- abfd = bfd_openr(debugfile, NULL);
- if (!abfd)
- return -1;
-
- if (!bfd_check_format(abfd, bfd_object)) {
- pr_debug2("%s: cannot read %s bfd file.\n", __func__,
- debugfile);
- goto out_close;
- }
-
- if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
- goto out_close;
-
symbols_size = bfd_get_symtab_upper_bound(abfd);
if (symbols_size == 0) {
bfd_close(abfd);
@@ -1867,8 +1851,10 @@ int dso__load(struct dso *dso, struct map *map)
if (nsexit)
nsinfo__mountns_enter(dso->nsinfo, &nsc);
- if (bfdrc == 0)
+ if (bfdrc == 0) {
+ ret = 0;
break;
+ }
if (!is_reg || sirc < 0)
continue;
@@ -2406,6 +2392,49 @@ int setup_intlist(struct intlist **list, const char *list_str,
return 0;
}
+static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
+{
+ struct str_node *pos, *tmp;
+ unsigned long val;
+ char *sep;
+ const char *end;
+ int i = 0, err;
+
+ *addr_list = intlist__new(NULL);
+ if (!*addr_list)
+ return -1;
+
+ strlist__for_each_entry_safe(pos, tmp, sym_list) {
+ errno = 0;
+ val = strtoul(pos->s, &sep, 16);
+ if (errno || (sep == pos->s))
+ continue;
+
+ if (*sep != '\0') {
+ end = pos->s + strlen(pos->s) - 1;
+ while (end >= sep && isspace(*end))
+ end--;
+
+ if (end >= sep)
+ continue;
+ }
+
+ err = intlist__add(*addr_list, val);
+ if (err)
+ break;
+
+ strlist__remove(sym_list, pos);
+ i++;
+ }
+
+ if (i == 0) {
+ intlist__delete(*addr_list);
+ *addr_list = NULL;
+ }
+
+ return 0;
+}
+
static bool symbol__read_kptr_restrict(void)
{
bool value = false;
@@ -2489,6 +2518,10 @@ int symbol__init(struct perf_env *env)
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_tid_list;
+ if (symbol_conf.sym_list &&
+ setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
+ goto out_free_sym_list;
+
if (setup_list(&symbol_conf.bt_stop_list,
symbol_conf.bt_stop_list_str, "symbol") < 0)
goto out_free_sym_list;
@@ -2512,6 +2545,7 @@ int symbol__init(struct perf_env *env)
out_free_sym_list:
strlist__delete(symbol_conf.sym_list);
+ intlist__delete(symbol_conf.addr_list);
out_free_tid_list:
intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
@@ -2533,6 +2567,7 @@ void symbol__exit(void)
strlist__delete(symbol_conf.comm_list);
intlist__delete(symbol_conf.tid_list);
intlist__delete(symbol_conf.pid_list);
+ intlist__delete(symbol_conf.addr_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.bt_stop_list = NULL;
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index b916afb95ec5..a70b3ec09dac 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -42,7 +42,8 @@ struct symbol_conf {
report_block,
report_individual_block,
inline_name,
- disable_add2line_warn;
+ disable_add2line_warn,
+ buildid_mmap2;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
@@ -69,11 +70,13 @@ struct symbol_conf {
*sym_to_list,
*bt_stop_list;
struct intlist *pid_list,
- *tid_list;
+ *tid_list,
+ *addr_list;
const char *symfs;
int res_sample;
int pad_output_len_dso;
int group_sort_idx;
+ int addr_range;
};
extern struct symbol_conf symbol_conf;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 2947e3f3c6d9..b698046ec2db 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -24,7 +24,6 @@
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
@@ -69,19 +68,22 @@ int perf_tool__process_synth_event(struct perf_tool *tool,
* Assumes that the first 4095 bytes of /proc/pid/stat contains
* the comm, tgid and ppid.
*/
-static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
- pid_t *tgid, pid_t *ppid)
+static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
+ pid_t *tgid, pid_t *ppid, bool *kernel)
{
char bf[4096];
int fd;
size_t size = 0;
ssize_t n;
- char *name, *tgids, *ppids;
+ char *name, *tgids, *ppids, *vmpeak, *threads;
*tgid = -1;
*ppid = -1;
- snprintf(bf, sizeof(bf), "/proc/%d/status", pid);
+ if (pid)
+ snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
+ else
+ snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
fd = open(bf, O_RDONLY);
if (fd < 0) {
@@ -93,14 +95,20 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
close(fd);
if (n <= 0) {
pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
- pid);
+ tid);
return -1;
}
bf[n] = '\0';
name = strstr(bf, "Name:");
- tgids = strstr(bf, "Tgid:");
- ppids = strstr(bf, "PPid:");
+ tgids = strstr(name ?: bf, "Tgid:");
+ ppids = strstr(tgids ?: bf, "PPid:");
+ vmpeak = strstr(ppids ?: bf, "VmPeak:");
+
+ if (vmpeak)
+ threads = NULL;
+ else
+ threads = strstr(ppids ?: bf, "Threads:");
if (name) {
char *nl;
@@ -116,29 +124,34 @@ static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
memcpy(comm, name, size);
comm[size] = '\0';
} else {
- pr_debug("Name: string not found for pid %d\n", pid);
+ pr_debug("Name: string not found for pid %d\n", tid);
}
if (tgids) {
tgids += 5; /* strlen("Tgid:") */
*tgid = atoi(tgids);
} else {
- pr_debug("Tgid: string not found for pid %d\n", pid);
+ pr_debug("Tgid: string not found for pid %d\n", tid);
}
if (ppids) {
ppids += 5; /* strlen("PPid:") */
*ppid = atoi(ppids);
} else {
- pr_debug("PPid: string not found for pid %d\n", pid);
+ pr_debug("PPid: string not found for pid %d\n", tid);
}
+ if (!vmpeak && threads)
+ *kernel = true;
+ else
+ *kernel = false;
+
return 0;
}
-static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
+static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
struct machine *machine,
- pid_t *tgid, pid_t *ppid)
+ pid_t *tgid, pid_t *ppid, bool *kernel)
{
size_t size;
@@ -147,9 +160,9 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
memset(&event->comm, 0, sizeof(event->comm));
if (machine__is_host(machine)) {
- if (perf_event__get_comm_ids(pid, event->comm.comm,
+ if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
sizeof(event->comm.comm),
- tgid, ppid) != 0) {
+ tgid, ppid, kernel) != 0) {
return -1;
}
} else {
@@ -168,7 +181,7 @@ static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
machine->id_hdr_size);
- event->comm.tid = pid;
+ event->comm.tid = tid;
return 0;
}
@@ -179,8 +192,10 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool,
struct machine *machine)
{
pid_t tgid, ppid;
+ bool kernel_thread;
- if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
+ if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
+ &kernel_thread) != 0)
return -1;
if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
@@ -347,6 +362,31 @@ static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
}
}
+static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
+ bool is_kernel)
+{
+ struct build_id bid;
+ int rc;
+
+ if (is_kernel)
+ rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
+ else
+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+ if (rc == 0) {
+ memcpy(event->build_id, bid.data, sizeof(bid.data));
+ event->build_id_size = (u8) bid.size;
+ event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
+ event->__reserved_1 = 0;
+ event->__reserved_2 = 0;
+ } else {
+ if (event->filename[0] == '/') {
+ pr_debug2("Failed to read build ID for %s\n",
+ event->filename);
+ }
+ }
+}
+
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
@@ -453,6 +493,9 @@ out:
event->mmap2.pid = tgid;
event->mmap2.tid = pid;
+ if (symbol_conf.buildid_mmap2)
+ perf_record_mmap2__read_build_id(&event->mmap2, false);
+
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
break;
@@ -596,16 +639,17 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
int rc = 0;
struct map *pos;
struct maps *maps = machine__kernel_maps(machine);
- union perf_event *event = zalloc((sizeof(event->mmap) +
- machine->id_hdr_size));
+ union perf_event *event;
+ size_t size = symbol_conf.buildid_mmap2 ?
+ sizeof(event->mmap2) : sizeof(event->mmap);
+
+ event = zalloc(size + machine->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
return -1;
}
- event->header.type = PERF_RECORD_MMAP;
-
/*
* kernel uses 0 for user space maps, see kernel/perf_event.c
* __perf_event_mmap
@@ -616,23 +660,39 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
maps__for_each_entry(maps, pos) {
- size_t size;
-
if (!__map__is_kmodule(pos))
continue;
- size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size));
- memset(event->mmap.filename + size, 0, machine->id_hdr_size);
- event->mmap.header.size += machine->id_hdr_size;
- event->mmap.start = pos->start;
- event->mmap.len = pos->end - pos->start;
- event->mmap.pid = machine->pid;
+ if (symbol_conf.buildid_mmap2) {
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size));
+ memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+ event->mmap2.header.size += machine->id_hdr_size;
+ event->mmap2.start = pos->start;
+ event->mmap2.len = pos->end - pos->start;
+ event->mmap2.pid = machine->pid;
+
+ memcpy(event->mmap2.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+
+ perf_record_mmap2__read_build_id(&event->mmap2, false);
+ } else {
+ size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size));
+ memset(event->mmap.filename + size, 0, machine->id_hdr_size);
+ event->mmap.header.size += machine->id_hdr_size;
+ event->mmap.start = pos->start;
+ event->mmap.len = pos->end - pos->start;
+ event->mmap.pid = machine->pid;
+
+ memcpy(event->mmap.filename, pos->dso->long_name,
+ pos->dso->long_name_len + 1);
+ }
- memcpy(event->mmap.filename, pos->dso->long_name,
- pos->dso->long_name_len + 1);
if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
rc = -1;
break;
@@ -643,6 +703,11 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t
return rc;
}
+static int filter_task(const struct dirent *dirent)
+{
+ return isdigit(dirent->d_name[0]);
+}
+
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
union perf_event *fork_event,
@@ -651,10 +716,10 @@ static int __event__synthesize_thread(union perf_event *comm_event,
struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
char filename[PATH_MAX];
- DIR *tasks;
- struct dirent *dirent;
+ struct dirent **dirent;
pid_t tgid, ppid;
int rc = 0;
+ int i, n;
/* special case: only send one comm event using passed in pid */
if (!full) {
@@ -686,23 +751,22 @@ static int __event__synthesize_thread(union perf_event *comm_event,
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
- tasks = opendir(filename);
- if (tasks == NULL) {
- pr_debug("couldn't open %s\n", filename);
- return 0;
- }
+ n = scandir(filename, &dirent, filter_task, alphasort);
+ if (n < 0)
+ return n;
- while ((dirent = readdir(tasks)) != NULL) {
+ for (i = 0; i < n; i++) {
char *end;
pid_t _pid;
+ bool kernel_thread;
- _pid = strtol(dirent->d_name, &end, 10);
+ _pid = strtol(dirent[i]->d_name, &end, 10);
if (*end)
continue;
rc = -1;
- if (perf_event__prepare_comm(comm_event, _pid, machine,
- &tgid, &ppid) != 0)
+ if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
+ &tgid, &ppid, &kernel_thread) != 0)
break;
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
@@ -720,7 +784,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
break;
rc = 0;
- if (_pid == pid) {
+ if (_pid == pid && !kernel_thread) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
@@ -729,7 +793,10 @@ static int __event__synthesize_thread(union perf_event *comm_event,
}
}
- closedir(tasks);
+ for (i = 0; i < n; i++)
+ zfree(&dirent[i]);
+ free(dirent);
+
return rc;
}
@@ -914,7 +981,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
return 0;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, 0, alphasort);
+ n = scandir(proc_path, &dirent, filter_task, alphasort);
if (n < 0)
return err;
@@ -991,11 +1058,12 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine)
{
- size_t size;
+ union perf_event *event;
+ size_t size = symbol_conf.buildid_mmap2 ?
+ sizeof(event->mmap2) : sizeof(event->mmap);
struct map *map = machine__kernel_map(machine);
struct kmap *kmap;
int err;
- union perf_event *event;
if (map == NULL)
return -1;
@@ -1009,7 +1077,7 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
* available use this, and after it is use this as a fallback for older
* kernels.
*/
- event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
+ event = zalloc(size + machine->id_hdr_size);
if (event == NULL) {
pr_debug("Not enough memory synthesizing mmap event "
"for kernel modules\n");
@@ -1026,16 +1094,31 @@ static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
}
- size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
- "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
- size = PERF_ALIGN(size, sizeof(u64));
- event->mmap.header.type = PERF_RECORD_MMAP;
- event->mmap.header.size = (sizeof(event->mmap) -
- (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
- event->mmap.pgoff = kmap->ref_reloc_sym->addr;
- event->mmap.start = map->start;
- event->mmap.len = map->end - event->mmap.start;
- event->mmap.pid = machine->pid;
+ if (symbol_conf.buildid_mmap2) {
+ size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
+ "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap2.header.type = PERF_RECORD_MMAP2;
+ event->mmap2.header.size = (sizeof(event->mmap2) -
+ (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
+ event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap2.start = map->start;
+ event->mmap2.len = map->end - event->mmap2.start;
+ event->mmap2.pid = machine->pid;
+
+ perf_record_mmap2__read_build_id(&event->mmap2, true);
+ } else {
+ size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
+ "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
+ size = PERF_ALIGN(size, sizeof(u64));
+ event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = (sizeof(event->mmap) -
+ (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
+ event->mmap.start = map->start;
+ event->mmap.len = map->end - event->mmap.start;
+ event->mmap.pid = machine->pid;
+ }
err = perf_tool__process_synth_event(tool, event, machine, process);
free(event);
@@ -1384,7 +1467,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
}
}
- if (type & PERF_SAMPLE_WEIGHT)
+ if (type & PERF_SAMPLE_WEIGHT_TYPE)
result += sizeof(u64);
if (type & PERF_SAMPLE_DATA_SRC)
@@ -1412,6 +1495,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
result += sizeof(u64);
+ if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ result += sizeof(u64);
+
if (type & PERF_SAMPLE_AUX) {
result += sizeof(u64);
result += sample->aux_sample.size;
@@ -1420,6 +1506,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
return result;
}
+void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+ __u64 *array, u64 type __maybe_unused)
+{
+ *array = data->weight;
+}
+
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
@@ -1555,8 +1647,8 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
}
}
- if (type & PERF_SAMPLE_WEIGHT) {
- *array = sample->weight;
+ if (type & PERF_SAMPLE_WEIGHT_TYPE) {
+ arch_perf_synthesize_sample_weight(sample, array, type);
array++;
}
@@ -1596,6 +1688,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
array++;
}
+ if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
+ *array = sample->code_page_size;
+ array++;
+ }
+
if (type & PERF_SAMPLE_AUX) {
sz = sample->aux_sample.size;
*array++ = sz;
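
Editor's note: one subtle technique in this file's perf_event__get_comm_ids() change is that kernel threads are identified by the absence of a "VmPeak:" line (no user memory) combined with the presence of a "Threads:" line in /proc/<pid>/status. A standalone sketch of that check, independent of perf's buffers and error handling; the helper name is illustrative only.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    /* Returns true if /proc/<pid>/status describes a kernel thread. */
    static bool is_kernel_thread(int pid)
    {
            char path[64], buf[4096];
            size_t n;
            FILE *f;

            snprintf(path, sizeof(path), "/proc/%d/status", pid);
            f = fopen(path, "r");
            if (!f)
                    return false;
            n = fread(buf, 1, sizeof(buf) - 1, f);
            fclose(f);
            buf[n] = '\0';

            /* Kernel threads report Threads: but have no VmPeak: entry. */
            return !strstr(buf, "VmPeak:") && strstr(buf, "Threads:");
    }

    int main(void)
    {
            printf("pid 2 kernel thread: %d\n", is_kernel_thread(2)); /* kthreadd on Linux */
            return 0;
    }
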
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index a3db13dea937..0f383418e3df 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -56,6 +56,34 @@ enum target_errno target__validate(struct target *target)
ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
}
+ /* BPF and CPU are mutually exclusive */
+ if (target->bpf_str && target->cpu_list) {
+ target->cpu_list = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__BPF_OVERRIDE_CPU;
+ }
+
+ /* BPF and PID/TID are mutually exclusive */
+ if (target->bpf_str && target->tid) {
+ target->tid = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__BPF_OVERRIDE_PID;
+ }
+
+ /* BPF and UID are mutually exclusive */
+ if (target->bpf_str && target->uid_str) {
+ target->uid_str = NULL;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__BPF_OVERRIDE_UID;
+ }
+
+ /* BPF and THREADS are mutually exclusive */
+ if (target->bpf_str && target->per_thread) {
+ target->per_thread = false;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__BPF_OVERRIDE_THREAD;
+ }
+
/* THREAD and SYSTEM/CPU are mutually exclusive */
if (target->per_thread && (target->system_wide || target->cpu_list)) {
target->per_thread = false;
@@ -109,6 +137,10 @@ static const char *target__error_str[] = {
"PID/TID switch overriding SYSTEM",
"UID switch overriding SYSTEM",
"SYSTEM/CPU switch overriding PER-THREAD",
+ "BPF switch overriding CPU",
+ "BPF switch overriding PID/TID",
+ "BPF switch overriding UID",
+ "BPF switch overriding THREAD",
"Invalid User: %s",
"Problems obtaining information for user %s",
};
@@ -134,7 +166,7 @@ int target__strerror(struct target *target, int errnum,
switch (errnum) {
case TARGET_ERRNO__PID_OVERRIDE_CPU ...
- TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD:
+ TARGET_ERRNO__BPF_OVERRIDE_THREAD:
snprintf(buf, buflen, "%s", msg);
break;
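
With the four new TARGET_ERRNO__BPF_OVERRIDE_* codes folded into the same range handled by target__strerror(), a tool only has to validate once and print whatever soft warning comes back. A sketch of that call pattern, with the surrounding variables (target, errbuf) as placeholders; only target__validate() and target__strerror() are from this file:

    /* sketch: surfacing the new "BPF switch overriding ..." diagnostics */
    char errbuf[BUFSIZ];
    enum target_errno err = target__validate(&target);

    if (err != TARGET_ERRNO__SUCCESS) {
            target__strerror(&target, err, errbuf, sizeof(errbuf));
            pr_warning("%s\n", errbuf);  /* e.g. "BPF switch overriding CPU" */
    }
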
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 6ef01a83b24e..f132c6c2eef8 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -10,6 +10,7 @@ struct target {
const char *tid;
const char *cpu_list;
const char *uid_str;
+ const char *bpf_str;
uid_t uid;
bool system_wide;
bool uses_mmap;
@@ -36,6 +37,10 @@ enum target_errno {
TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
TARGET_ERRNO__SYSTEM_OVERRIDE_THREAD,
+ TARGET_ERRNO__BPF_OVERRIDE_CPU,
+ TARGET_ERRNO__BPF_OVERRIDE_PID,
+ TARGET_ERRNO__BPF_OVERRIDE_UID,
+ TARGET_ERRNO__BPF_OVERRIDE_THREAD,
/* for target__parse_uid() */
TARGET_ERRNO__INVALID_UID,
@@ -59,6 +64,11 @@ static inline bool target__has_cpu(struct target *target)
return target->system_wide || target->cpu_list;
}
+static inline bool target__has_bpf(struct target *target)
+{
+ return target->bpf_str;
+}
+
static inline bool target__none(struct target *target)
{
return !target__has_task(target) && !target__has_cpu(target);
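
target__has_bpf() gives callers a single predicate for "this run profiles BPF programs rather than tasks or CPUs". A sketch of the intended gating on the perf stat side; bpf_counter__load() and create_perf_stat_counter() are named here only for illustration, since their wiring lives in other patches of the series:

    /* sketch: choosing the BPF counter path over the usual perf_event open */
    if (target__has_bpf(&target)) {
            /* attach counters to the BPF programs named by --bpf-prog */
            if (bpf_counter__load(counter, &target))
                    return -1;
    } else {
            /* otherwise fall back to the per-cpu/per-thread open */
            create_perf_stat_counter(counter, &stat_config, &target, cpu);
    }
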
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 0e5c4786f296..a65f65d0857e 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -152,7 +152,7 @@ static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
return false;
}
-#define for_each_event(dir, dent, tps) \
+#define for_each_event_tps(dir, dent, tps) \
while ((dent = readdir(dir))) \
if (dent->d_type == DT_DIR && \
(strcmp(dent->d_name, ".")) && \
@@ -174,7 +174,7 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
return -errno;
}
- for_each_event(dir, dent, tps) {
+ for_each_event_tps(dir, dent, tps) {
if (!name_in_tp_list(dent->d_name, tps))
continue;
@@ -196,7 +196,7 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
}
rewinddir(dir);
- for_each_event(dir, dent, tps) {
+ for_each_event_tps(dir, dent, tps) {
if (!name_in_tp_list(dent->d_name, tps))
continue;
@@ -274,7 +274,7 @@ static int record_event_files(struct tracepoint_path *tps)
goto out;
}
- for_each_event(dir, dent, tps) {
+ for_each_event_tps(dir, dent, tps) {
if (strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
@@ -289,7 +289,7 @@ static int record_event_files(struct tracepoint_path *tps)
}
rewinddir(dir);
- for_each_event(dir, dent, tps) {
+ for_each_event_tps(dir, dent, tps) {
if (strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
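
The trace-event-info.c change is a pure rename: the local iterator becomes for_each_event_tps, presumably to keep the generic for_each_event name free for use elsewhere in the tree. The macro itself is just a readdir() filter over a tracefs events directory; a stand-alone sketch of the same pattern:

    /* sketch: list event systems the way for_each_event_tps() walks them */
    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    static void list_event_systems(const char *events_dir_path)
    {
            DIR *dir = opendir(events_dir_path);
            struct dirent *dent;

            if (!dir)
                    return;

            while ((dent = readdir(dir))) {
                    if (dent->d_type != DT_DIR ||
                        !strcmp(dent->d_name, ".") ||
                        !strcmp(dent->d_name, ".."))
                            continue;
                    printf("%s\n", dent->d_name);
            }

            closedir(dir);
    }
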
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 0ada907c60d4..a74b517f7497 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -60,10 +60,8 @@ static int __report_module(struct addr_location *al, u64 ip,
mod = dwfl_addrmodule(ui->dwfl, ip);
if (mod) {
Dwarf_Addr s;
- void **userdatap;
- dwfl_module_info(mod, &userdatap, &s, NULL, NULL, NULL, NULL, NULL);
- *userdatap = dso;
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
if (s != al->map->start - al->map->pgoff)
mod = 0;
}
@@ -79,6 +77,13 @@ static int __report_module(struct addr_location *al, u64 ip,
al->map->start - al->map->pgoff, false);
}
+ if (mod) {
+ void **userdatap;
+
+ dwfl_module_info(mod, &userdatap, NULL, NULL, NULL, NULL, NULL, NULL);
+ *userdatap = dso;
+ }
+
return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
}
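
The unwind-libdw.c hunks defer writing the dso into the module's userdata slot until after a possible dwfl_report_elf(), so the pointer is attached to the module that will actually be used. The underlying libdw pattern, sketched with placeholder variables (dwfl, ip, dso_name, dso_path, base_addr, my_per_module_data):

    /* sketch: attach per-module userdata only once the module exists */
    Dwfl_Module *mod = dwfl_addrmodule(dwfl, ip);

    if (!mod)
            mod = dwfl_report_elf(dwfl, dso_name, dso_path, -1, base_addr, false);

    if (mod) {
            void **userdatap;

            dwfl_module_info(mod, &userdatap, NULL, NULL, NULL, NULL, NULL, NULL);
            *userdatap = my_per_module_data;  /* perf stores its struct dso here */
    }
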
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c
deleted file mode 100644
index 86889ebc3514..000000000000
--- a/tools/perf/util/xyarray.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "xyarray.h"
-#include <stdlib.h>
-#include <string.h>
-#include <linux/zalloc.h>
-
-struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
-{
- size_t row_size = ylen * entry_size;
- struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
-
- if (xy != NULL) {
- xy->entry_size = entry_size;
- xy->row_size = row_size;
- xy->entries = xlen * ylen;
- xy->max_x = xlen;
- xy->max_y = ylen;
- }
-
- return xy;
-}
-
-void xyarray__reset(struct xyarray *xy)
-{
- size_t n = xy->entries * xy->entry_size;
-
- memset(xy->contents, 0, n);
-}
-
-void xyarray__delete(struct xyarray *xy)
-{
- free(xy);
-}
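
Removing tools/perf/util/xyarray.c drops the perf-local copy of the 2-D array helper, presumably in favour of the identical implementation already carried by libperf (tools/lib/perf); callers keep using the same API. A sketch of that API, with xyarray__entry() assumed to remain in the header as before and cpu_idx/thread_idx as placeholder indices:

    /* sketch: the xyarray usage pattern (e.g. per-cpu, per-thread fds) */
    struct xyarray *fds = xyarray__new(ncpus, nthreads, sizeof(int));

    if (fds) {
            int *fd = xyarray__entry(fds, cpu_idx, thread_idx);

            *fd = -1;                /* not opened yet */
            xyarray__reset(fds);     /* zero all ncpus * nthreads entries */
            xyarray__delete(fds);
    }
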