Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build                                   |   2
-rw-r--r--  tools/perf/util/affinity.c                              |   8
-rw-r--r--  tools/perf/util/arm-spe.c                               |   2
-rw-r--r--  tools/perf/util/bpf_counter_cgroup.c                    |  10
-rw-r--r--  tools/perf/util/bpf_skel/bperf_cgroup.bpf.c             |  13
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c                  |  18
-rw-r--r--  tools/perf/util/cpumap.c                                |  80
-rw-r--r--  tools/perf/util/cpumap.h                                |   4
-rw-r--r--  tools/perf/util/event.h                                 |  25
-rw-r--r--  tools/perf/util/evsel.c                                 |  29
-rw-r--r--  tools/perf/util/genelf.c                                |  34
-rw-r--r--  tools/perf/util/genelf.h                                |   4
-rw-r--r--  tools/perf/util/metricgroup.c                           |   3
-rw-r--r--  tools/perf/util/parse-events-hybrid.c                   |  21
-rw-r--r--  tools/perf/util/parse-events.c                          |  39
-rw-r--r--  tools/perf/util/parse-events.h                          |   1
-rw-r--r--  tools/perf/util/print-events.c                          |  39
-rw-r--r--  tools/perf/util/scripting-engines/Build                 |   2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  |  19
-rw-r--r--  tools/perf/util/session.c                               |  65
-rw-r--r--  tools/perf/util/stat-shadow.c                           |  24
-rw-r--r--  tools/perf/util/symbol-elf.c                            |   7
-rw-r--r--  tools/perf/util/synthetic-events.c                      | 116
-rw-r--r--  tools/perf/util/synthetic-events.h                      |   2
24 files changed, 370 insertions(+), 197 deletions(-)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9dfae1bda9cc..485e1a343165 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -269,7 +269,7 @@ CFLAGS_expr-flex.o += $(flex_flags)
bison_flags := -DYYENABLE_NLS=0
BISON_GE_35 := $(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 35)
ifeq ($(BISON_GE_35),1)
- bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum
+ bison_flags += -Wno-unused-parameter -Wno-nested-externs -Wno-implicit-function-declaration -Wno-switch-enum -Wno-unused-but-set-variable -Wno-unknown-warning-option
else
bison_flags += -w
endif
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index 4d216c0dc425..4ee96b3c755b 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -49,8 +49,14 @@ void affinity__set(struct affinity *a, int cpu)
{
int cpu_set_size = get_cpu_set_size();
- if (cpu == -1)
+ /*
+ * Return:
+ * - if cpu is -1
+ * - restrict out of bound access to sched_cpus
+ */
+ if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
return;
+
a->changed = true;
set_bit(cpu, a->sched_cpus);
/*
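
[The new guard in affinity__set() keeps out-of-range CPU numbers from writing past the sched_cpus bitmap. A minimal sketch of the same check in isolation — the helper name is illustrative, and cpu_set_size is in bytes, so the map holds cpu_set_size * 8 bits:]

#include <stdbool.h>

/* Illustrative helper (not part of the patch): true only when 'cpu'
 * can safely index a bitmap of cpu_set_size bytes. */
static bool cpu_indexable(int cpu, int cpu_set_size)
{
	if (cpu == -1)			/* -1 means "any CPU", not an index */
		return false;
	return cpu < cpu_set_size * 8;	/* 8 bits per byte of mask */
}
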
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 22dcfe07e886..906476a839e1 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -498,7 +498,7 @@ static void arm_spe__synth_data_source_generic(const struct arm_spe_record *reco
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
{
union perf_mem_data_src data_src = { 0 };
- bool is_neoverse = is_midr_in_range(midr, neoverse_spe);
+ bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
if (record->op == ARM_SPE_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 63b9db657442..3c2df7522f6f 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -95,7 +95,7 @@ static int bperf_load_program(struct evlist *evlist)
perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
- FD(cgrp_switch, cpu.cpu));
+ FD(cgrp_switch, i));
if (IS_ERR(link)) {
pr_err("Failed to attach cgroup program\n");
err = PTR_ERR(link);
@@ -115,15 +115,15 @@ static int bperf_load_program(struct evlist *evlist)
evsel->cgrp = NULL;
/* open single copy of the events w/o cgroup */
- err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
+ err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
if (err) {
pr_err("Failed to open first cgroup events\n");
goto out;
}
map_fd = bpf_map__fd(skel->maps.events);
- perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
- int fd = FD(evsel, cpu.cpu);
+ perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
+ int fd = FD(evsel, j);
__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
err = bpf_map_update_elem(map_fd, &idx, &fd,
@@ -269,7 +269,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
goto out;
}
- perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+ perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
counts = perf_counts(evsel->counts, i, 0);
counts->val = values[cpu.cpu].counter;
counts->ena = values[cpu.cpu].enabled;
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 292c430768b5..9aa8cdd93de4 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -48,6 +48,7 @@ const volatile __u32 num_cpus = 1;
int enabled = 0;
int use_cgroup_v2 = 0;
+int perf_subsys_id = -1;
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
@@ -58,7 +59,15 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
int level;
int cnt;
- cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(p, cgroups, subsys[perf_subsys_id], cgroup);
level = BPF_CORE_READ(cgrp, level);
for (cnt = 0; i < MAX_LEVELS; i++) {
@@ -176,7 +185,7 @@ static int bperf_cgroup_count(void)
}
// This will be attached to cgroup-switches event for each cpu
-SEC("perf_events")
+SEC("perf_event")
int BPF_PROG(on_cgrp_switch)
{
return bperf_cgroup_count();
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c4ba2bcf179f..38e3b287dbb2 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -94,6 +94,8 @@ const volatile bool has_prev_state = false;
const volatile bool needs_cgroup = false;
const volatile bool uses_cgroup_v1 = false;
+int perf_subsys_id = -1;
+
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
@@ -119,11 +121,19 @@ static inline __u64 get_cgroup_id(struct task_struct *t)
{
struct cgroup *cgrp;
- if (uses_cgroup_v1)
- cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
- else
- cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+ if (!uses_cgroup_v1)
+ return BPF_CORE_READ(t, cgroups, dfl_cgrp, kn, id);
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+ cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_subsys_id], cgroup);
return BPF_CORE_READ(cgrp, kn, id);
}
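
[Both BPF skeletons now resolve the perf cgroup subsystem id the same way: once, lazily, preferring a CO-RE enum-value relocation so the id matches the running kernel rather than whatever vmlinux.h held at build time. A condensed sketch of that shared pattern, assuming vmlinux.h and bpf_core_read.h are in scope:]

static int resolve_perf_subsys_id(void)
{
#if __has_builtin(__builtin_preserve_enum_value)
	/* Relocated at load time to the running kernel's enum value */
	return bpf_core_enum_value(enum cgroup_subsys_id, perf_event_cgrp_id);
#else
	/* Fallback: the value vmlinux.h had when the skeleton was built */
	return perf_event_cgrp_id;
#endif
}
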
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 12b2243222b0..ae43fb88f444 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -22,54 +22,102 @@ static int max_node_num;
*/
static int *cpunode_map;
-static struct perf_cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
+bool perf_record_cpu_map_data__test_bit(int i,
+ const struct perf_record_cpu_map_data *data)
+{
+ int bit_word32 = i / 32;
+ __u32 bit_mask32 = 1U << (i & 31);
+ int bit_word64 = i / 64;
+ __u64 bit_mask64 = ((__u64)1) << (i & 63);
+
+ return (data->mask32_data.long_size == 4)
+ ? (bit_word32 < data->mask32_data.nr) &&
+ (data->mask32_data.mask[bit_word32] & bit_mask32) != 0
+ : (bit_word64 < data->mask64_data.nr) &&
+ (data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
+}
+
+/* Read ith mask value from data into the given 64-bit sized bitmap */
+static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
+ int i, unsigned long *bitmap)
+{
+#if __SIZEOF_LONG__ == 8
+ if (data->mask32_data.long_size == 4)
+ bitmap[0] = data->mask32_data.mask[i];
+ else
+ bitmap[0] = data->mask64_data.mask[i];
+#else
+ if (data->mask32_data.long_size == 4) {
+ bitmap[0] = data->mask32_data.mask[i];
+ bitmap[1] = 0;
+ } else {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+ bitmap[1] = (unsigned long)data->mask64_data.mask[i];
+#else
+ bitmap[0] = (unsigned long)data->mask64_data.mask[i];
+ bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
+#endif
+ }
+#endif
+}
+static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
struct perf_cpu_map *map;
- map = perf_cpu_map__empty_new(cpus->nr);
+ map = perf_cpu_map__empty_new(data->cpus_data.nr);
if (map) {
unsigned i;
- for (i = 0; i < cpus->nr; i++) {
+ for (i = 0; i < data->cpus_data.nr; i++) {
/*
* Special treatment for -1, which is not real cpu number,
* and we need to use (int) -1 to initialize map[i],
* otherwise it would become 65535.
*/
- if (cpus->cpu[i] == (u16) -1)
+ if (data->cpus_data.cpu[i] == (u16) -1)
map->map[i].cpu = -1;
else
- map->map[i].cpu = (int) cpus->cpu[i];
+ map->map[i].cpu = (int) data->cpus_data.cpu[i];
}
}
return map;
}
-static struct perf_cpu_map *cpu_map__from_mask(struct perf_record_record_cpu_map *mask)
+static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
+ DECLARE_BITMAP(local_copy, 64);
+ int weight = 0, mask_nr = data->mask32_data.nr;
struct perf_cpu_map *map;
- int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
- nr = bitmap_weight(mask->mask, nbits);
+ for (int i = 0; i < mask_nr; i++) {
+ perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+ weight += bitmap_weight(local_copy, 64);
+ }
+
+ map = perf_cpu_map__empty_new(weight);
+ if (!map)
+ return NULL;
- map = perf_cpu_map__empty_new(nr);
- if (map) {
- int cpu, i = 0;
+ for (int i = 0, j = 0; i < mask_nr; i++) {
+ int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
+ int cpu;
- for_each_set_bit(cpu, mask->mask, nbits)
- map->map[i++].cpu = cpu;
+ perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
+ for_each_set_bit(cpu, local_copy, 64)
+ map->map[j++].cpu = cpu + cpus_per_i;
}
return map;
}
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data)
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
if (data->type == PERF_CPU_MAP__CPUS)
- return cpu_map__from_entries((struct cpu_map_entries *)data->data);
+ return cpu_map__from_entries(data);
else
- return cpu_map__from_mask((struct perf_record_record_cpu_map *)data->data);
+ return cpu_map__from_mask(data);
}
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
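
[perf_record_cpu_map_data__test_bit() hides the 32-bit and 64-bit mask layouts behind a single accessor. A caller-side sketch (not from the patch), assuming the event's type is PERF_CPU_MAP__MASK and an upper bound nr_cpus is known:]

#include <stdio.h>

static void dump_mask_cpus(const struct perf_record_cpu_map_data *data,
			   int nr_cpus)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		if (perf_record_cpu_map_data__test_bit(cpu, data))
			printf("cpu %d is in the map\n", cpu);
	}
}
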
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 703ae6d3386e..fa8a5acdcae1 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -37,9 +37,11 @@ struct cpu_aggr_map {
struct perf_record_cpu_map_data;
+bool perf_record_cpu_map_data__test_bit(int i, const struct perf_record_cpu_map_data *data);
+
struct perf_cpu_map *perf_cpu_map__empty_new(int nr);
-struct perf_cpu_map *cpu_map__new_data(struct perf_record_cpu_map_data *data);
+struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data);
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size);
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size);
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a7b0931d5137..12eae6917022 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -65,7 +65,8 @@ struct stack_dump {
struct sample_read_value {
u64 value;
- u64 id;
+ u64 id; /* only if PERF_FORMAT_ID */
+ u64 lost; /* only if PERF_FORMAT_LOST */
};
struct sample_read {
@@ -80,6 +81,24 @@ struct sample_read {
};
};
+static inline size_t sample_read_value_size(u64 read_format)
+{
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_LOST)
+ return sizeof(struct sample_read_value);
+ else
+ return offsetof(struct sample_read_value, lost);
+}
+
+static inline struct sample_read_value *
+next_sample_read_value(struct sample_read_value *v, u64 read_format)
+{
+ return (void *)v + sample_read_value_size(read_format);
+}
+
+#define sample_read_group__for_each(v, nr, rf) \
+ for (int __i = 0; __i < (int)nr; v = next_sample_read_value(v, rf), __i++)
+
struct ip_callchain {
u64 nr;
u64 ips[];
@@ -463,10 +482,6 @@ size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FIL
int kallsyms__get_function_start(const char *kallsyms_filename,
const char *symbol_name, u64 *addr);
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max);
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max);
-
void event_attr_init(struct perf_event_attr *attr);
int perf_event_paranoid(void);
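
[Because PERF_FORMAT_LOST makes sample_read_value variable-sized, fixed v[i] indexing no longer works; sample_read_group__for_each() advances by sample_read_value_size() instead. A usage sketch, assuming a parsed group sample and its evsel's read_format:]

#include <stdio.h>

static void print_group(struct sample_read *read, u64 read_format)
{
	struct sample_read_value *v = read->group.values;

	sample_read_group__for_each(v, read->group.nr, read_format) {
		/* 'lost' is only present when PERF_FORMAT_LOST is set */
		u64 lost = (read_format & PERF_FORMAT_LOST) ? v->lost : 0;

		printf("id %llu value %llu lost %llu\n",
		       (unsigned long long)v->id,
		       (unsigned long long)v->value,
		       (unsigned long long)lost);
	}
}
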
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4852089e1d79..18c3eb864d55 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1541,7 +1541,7 @@ static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
}
static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
- u64 val, u64 ena, u64 run)
+ u64 val, u64 ena, u64 run, u64 lost)
{
struct perf_counts_values *count;
@@ -1550,6 +1550,7 @@ static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
count->val = val;
count->ena = ena;
count->run = run;
+ count->lost = lost;
perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}
@@ -1558,7 +1559,7 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
{
u64 read_format = leader->core.attr.read_format;
struct sample_read_value *v;
- u64 nr, ena = 0, run = 0, i;
+ u64 nr, ena = 0, run = 0, lost = 0;
nr = *data++;
@@ -1571,18 +1572,18 @@ static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
run = *data++;
- v = (struct sample_read_value *) data;
-
- evsel__set_count(leader, cpu_map_idx, thread, v[0].value, ena, run);
-
- for (i = 1; i < nr; i++) {
+ v = (void *)data;
+ sample_read_group__for_each(v, nr, read_format) {
struct evsel *counter;
- counter = evlist__id2evsel(leader->evlist, v[i].id);
+ counter = evlist__id2evsel(leader->evlist, v->id);
if (!counter)
return -EINVAL;
- evsel__set_count(counter, cpu_map_idx, thread, v[i].value, ena, run);
+ if (read_format & PERF_FORMAT_LOST)
+ lost = v->lost;
+
+ evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
}
return 0;
@@ -2475,8 +2476,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (data->read.group.nr > max_group_nr)
return -EFAULT;
- sz = data->read.group.nr *
- sizeof(struct sample_read_value);
+
+ sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
data->read.group.values =
(struct sample_read_value *)array;
@@ -2485,6 +2486,12 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
OVERFLOW_CHECK_u64(array);
data->read.one.id = *array;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.lost = *array;
+ array++;
+ }
}
}
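
[With PERF_FORMAT_LOST, the non-group read block gains one trailing u64, which is what the new OVERFLOW_CHECK_u64 branch above consumes. For reference, the layout the parser walks, with bracketed fields conditional on read_format bits:]

/*
 * struct read_format {           // non-group case
 *	u64 value;
 *	[ u64 time_enabled; ]      // PERF_FORMAT_TOTAL_TIME_ENABLED
 *	[ u64 time_running; ]      // PERF_FORMAT_TOTAL_TIME_RUNNING
 *	[ u64 id;           ]      // PERF_FORMAT_ID
 *	[ u64 lost;         ]      // PERF_FORMAT_LOST
 * };
 */
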
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index 953338b9e887..d81b54563e96 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -30,10 +30,6 @@
#define BUILD_ID_URANDOM /* different uuid for each run */
-// FIXME, remove this and fix the deprecation warnings before its removed and
-// We'll break for good here...
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
#ifdef HAVE_LIBCRYPTO_SUPPORT
#define BUILD_ID_MD5
@@ -45,6 +41,7 @@
#endif
#ifdef BUILD_ID_MD5
+#include <openssl/evp.h>
#include <openssl/md5.h>
#endif
#endif
@@ -142,15 +139,20 @@ gen_build_id(struct buildid_note *note,
static void
gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
{
- MD5_CTX context;
+ EVP_MD_CTX *mdctx;
if (sizeof(note->build_id) < 16)
errx(1, "build_id too small for MD5");
- MD5_Init(&context);
- MD5_Update(&context, &load_addr, sizeof(load_addr));
- MD5_Update(&context, code, csize);
- MD5_Final((unsigned char *)note->build_id, &context);
+ mdctx = EVP_MD_CTX_new();
+ if (!mdctx)
+ errx(2, "failed to create EVP_MD_CTX");
+
+ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+ EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
+ EVP_DigestUpdate(mdctx, code, csize);
+ EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
+ EVP_MD_CTX_free(mdctx);
}
#endif
@@ -251,6 +253,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
Elf_Data *d;
Elf_Scn *scn;
Elf_Ehdr *ehdr;
+ Elf_Phdr *phdr;
Elf_Shdr *shdr;
uint64_t eh_frame_base_offset;
char *strsym = NULL;
@@ -286,6 +289,19 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
/*
+ * setup program header
+ */
+ phdr = elf_newphdr(e, 1);
+ phdr[0].p_type = PT_LOAD;
+ phdr[0].p_offset = 0;
+ phdr[0].p_vaddr = 0;
+ phdr[0].p_paddr = 0;
+ phdr[0].p_filesz = csize;
+ phdr[0].p_memsz = csize;
+ phdr[0].p_flags = PF_X | PF_R;
+ phdr[0].p_align = 8;
+
+ /*
* setup text section
*/
scn = elf_newscn(e);
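
[The MD5_* one-shot API is deprecated since OpenSSL 3.0, which is why the pragma silencing -Wdeprecated-declarations can also be dropped. A self-contained sketch of the replacement EVP flow used above (illustrative helper, not from the patch):]

#include <openssl/evp.h>

/* Returns 0 on success; 'out' receives the 16-byte MD5 of buf. */
static int md5_digest(const void *buf, size_t len, unsigned char out[16])
{
	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
	unsigned int out_len = 0;

	if (!ctx)
		return -1;
	EVP_DigestInit_ex(ctx, EVP_md5(), NULL);
	EVP_DigestUpdate(ctx, buf, len);
	EVP_DigestFinal_ex(ctx, out, &out_len);
	EVP_MD_CTX_free(ctx);
	return out_len == 16 ? 0 : -1;
}
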
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index ae138afe6c56..b5c909546e3f 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -53,8 +53,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#if GEN_ELF_CLASS == ELFCLASS64
#define elf_newehdr elf64_newehdr
+#define elf_newphdr elf64_newphdr
#define elf_getshdr elf64_getshdr
#define Elf_Ehdr Elf64_Ehdr
+#define Elf_Phdr Elf64_Phdr
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
@@ -62,8 +64,10 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#define ELF_ST_VIS(a) ELF64_ST_VISIBILITY(a)
#else
#define elf_newehdr elf32_newehdr
+#define elf_newphdr elf32_newphdr
#define elf_getshdr elf32_getshdr
#define Elf_Ehdr Elf32_Ehdr
+#define Elf_Phdr Elf32_Phdr
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 464475fd6b9a..c93bcaf6d55d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1655,6 +1655,9 @@ int metricgroup__parse_groups(const struct option *opt,
struct evlist *perf_evlist = *(struct evlist **)opt->value;
const struct pmu_events_table *table = pmu_events_table__find();
+ if (!table)
+ return -EINVAL;
+
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, table);
}
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
index 284f8eabd3b9..7c9f9150bad5 100644
--- a/tools/perf/util/parse-events-hybrid.c
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -33,7 +33,8 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
* If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
*/
attr->type = type;
- attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+ attr->config = (attr->config & PERF_HW_EVENT_MASK) |
+ ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
}
static int create_event_hybrid(__u32 config_type, int *idx,
@@ -48,13 +49,25 @@ static int create_event_hybrid(__u32 config_type, int *idx,
__u64 config = attr->config;
config_hybrid_attr(attr, config_type, pmu->type);
+
+ /*
+ * Some hybrid hardware cache events are only available on one CPU
+ * PMU. For example, the 'L1-dcache-load-misses' is only available
+ * on cpu_core, while the 'L1-icache-loads' is only available on
+ * cpu_atom. We need to remove "not supported" hybrid cache events.
+ */
+ if (attr->type == PERF_TYPE_HW_CACHE
+ && !is_event_supported(attr->type, attr->config))
+ return 0;
+
evsel = parse_events__add_event_hybrid(list, idx, attr, name, metric_id,
pmu, config_terms);
- if (evsel)
+ if (evsel) {
evsel->pmu_name = strdup(pmu->name);
- else
+ if (!evsel->pmu_name)
+ return -ENOMEM;
+ } else
return -ENOMEM;
-
attr->type = type;
attr->config = config;
return 0;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f05e15acd33f..f3b2c2a87456 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -28,6 +28,7 @@
#include "util/parse-events-hybrid.h"
#include "util/pmu-hybrid.h"
#include "tracepoint.h"
+#include "thread_map.h"
#define MAX_NAME_LEN 100
@@ -157,6 +158,44 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
+bool is_event_supported(u8 type, u64 config)
+{
+ bool ret = true;
+ int open_return;
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ };
+ struct perf_thread_map *tmap = thread_map__new_by_tid(0);
+
+ if (tmap == NULL)
+ return false;
+
+ evsel = evsel__new(&attr);
+ if (evsel) {
+ open_return = evsel__open(evsel, NULL, tmap);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
+ evsel__delete(evsel);
+ }
+
+ perf_thread_map__put(tmap);
+ return ret;
+}
+
const char *event_type(int type)
{
switch (type) {
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e6a601d9cd0..07df7bb7b042 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -19,6 +19,7 @@ struct option;
struct perf_pmu;
bool have_tracepoints(struct list_head *evlist);
+bool is_event_supported(u8 type, u64 config);
const char *event_type(int type);
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index ba1ab5134685..c4d5d87fae2f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -22,7 +22,6 @@
#include "probe-file.h"
#include "string2.h"
#include "strlist.h"
-#include "thread_map.h"
#include "tracepoint.h"
#include "pfm.h"
#include "pmu-hybrid.h"
@@ -239,44 +238,6 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
strlist__delete(sdtlist);
}
-static bool is_event_supported(u8 type, unsigned int config)
-{
- bool ret = true;
- int open_return;
- struct evsel *evsel;
- struct perf_event_attr attr = {
- .type = type,
- .config = config,
- .disabled = 1,
- };
- struct perf_thread_map *tmap = thread_map__new_by_tid(0);
-
- if (tmap == NULL)
- return false;
-
- evsel = evsel__new(&attr);
- if (evsel) {
- open_return = evsel__open(evsel, NULL, tmap);
- ret = open_return >= 0;
-
- if (open_return == -EACCES) {
- /*
- * This happens if the paranoid value
- * /proc/sys/kernel/perf_event_paranoid is set to 2
- * Re-run with exclude_kernel set; we don't do that
- * by default as some ARM machines do not support it.
- *
- */
- evsel->core.attr.exclude_kernel = 1;
- ret = evsel__open(evsel, NULL, tmap) >= 0;
- }
- evsel__delete(evsel);
- }
-
- perf_thread_map__put(tmap);
- return ret;
-}
-
int print_hwcache_events(const char *event_glob, bool name_only)
{
unsigned int type, op, i, evt_i = 0, evt_num = 0, npmus = 0;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index c92326c2233a..0f5ba28339cf 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -3,4 +3,4 @@ perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
-CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-error=deprecated-declarations
+CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-deprecated-declarations
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9ef2406e0ede..1f2040f36d4e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -642,15 +642,19 @@ exit:
return pylist;
}
-static PyObject *get_sample_value_as_tuple(struct sample_read_value *value)
+static PyObject *get_sample_value_as_tuple(struct sample_read_value *value,
+ u64 read_format)
{
PyObject *t;
- t = PyTuple_New(2);
+ t = PyTuple_New(3);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, 0, PyLong_FromUnsignedLongLong(value->id));
PyTuple_SetItem(t, 1, PyLong_FromUnsignedLongLong(value->value));
+ if (read_format & PERF_FORMAT_LOST)
+ PyTuple_SetItem(t, 2, PyLong_FromUnsignedLongLong(value->lost));
+
return t;
}
@@ -681,12 +685,17 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
Py_FatalError("couldn't create Python list");
if (read_format & PERF_FORMAT_GROUP) {
- for (i = 0; i < sample->read.group.nr; i++) {
- PyObject *t = get_sample_value_as_tuple(&sample->read.group.values[i]);
+ struct sample_read_value *v = sample->read.group.values;
+
+ i = 0;
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ PyObject *t = get_sample_value_as_tuple(v, read_format);
PyList_SET_ITEM(values, i, t);
+ i++;
}
} else {
- PyObject *t = get_sample_value_as_tuple(&sample->read.one);
+ PyObject *t = get_sample_value_as_tuple(&sample->read.one,
+ read_format);
PyList_SET_ITEM(values, 0, t);
}
pydict_set_item_string_decref(dict_sample, "values", values);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 98e16659a149..192c9274f7ad 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -916,30 +916,30 @@ static void perf_event__cpu_map_swap(union perf_event *event,
bool sample_id_all __maybe_unused)
{
struct perf_record_cpu_map_data *data = &event->cpu_map.data;
- struct cpu_map_entries *cpus;
- struct perf_record_record_cpu_map *mask;
- unsigned i;
data->type = bswap_16(data->type);
switch (data->type) {
case PERF_CPU_MAP__CPUS:
- cpus = (struct cpu_map_entries *)data->data;
-
- cpus->nr = bswap_16(cpus->nr);
+ data->cpus_data.nr = bswap_16(data->cpus_data.nr);
- for (i = 0; i < cpus->nr; i++)
- cpus->cpu[i] = bswap_16(cpus->cpu[i]);
+ for (unsigned i = 0; i < data->cpus_data.nr; i++)
+ data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
break;
case PERF_CPU_MAP__MASK:
- mask = (struct perf_record_record_cpu_map *)data->data;
-
- mask->nr = bswap_16(mask->nr);
- mask->long_size = bswap_16(mask->long_size);
+ data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
- switch (mask->long_size) {
- case 4: mem_bswap_32(&mask->mask, mask->nr); break;
- case 8: mem_bswap_64(&mask->mask, mask->nr); break;
+ switch (data->mask32_data.long_size) {
+ case 4:
+ data->mask32_data.nr = bswap_16(data->mask32_data.nr);
+ for (unsigned i = 0; i < data->mask32_data.nr; i++)
+ data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
+ break;
+ case 8:
+ data->mask64_data.nr = bswap_16(data->mask64_data.nr);
+ for (unsigned i = 0; i < data->mask64_data.nr; i++)
+ data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
+ break;
default:
pr_err("cpu_map swap: unsupported long size\n");
}
@@ -1283,21 +1283,25 @@ static void sample_read__printf(struct perf_sample *sample, u64 read_format)
sample->read.time_running);
if (read_format & PERF_FORMAT_GROUP) {
- u64 i;
+ struct sample_read_value *value = sample->read.group.values;
printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
- for (i = 0; i < sample->read.group.nr; i++) {
- struct sample_read_value *value;
-
- value = &sample->read.group.values[i];
+ sample_read_group__for_each(value, sample->read.group.nr, read_format) {
printf("..... id %016" PRIx64
- ", value %016" PRIx64 "\n",
+ ", value %016" PRIx64,
value->id, value->value);
+ if (read_format & PERF_FORMAT_LOST)
+ printf(", lost %" PRIu64, value->lost);
+ printf("\n");
}
- } else
- printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+ } else {
+ printf("..... id %016" PRIx64 ", value %016" PRIx64,
sample->read.one.id, sample->read.one.value);
+ if (read_format & PERF_FORMAT_LOST)
+ printf(", lost %" PRIu64, sample->read.one.lost);
+ printf("\n");
+ }
}
static void dump_event(struct evlist *evlist, union perf_event *event,
@@ -1411,6 +1415,9 @@ static void dump_read(struct evsel *evsel, union perf_event *event)
if (read_format & PERF_FORMAT_ID)
printf("... id : %" PRI_lu64 "\n", read_event->id);
+
+ if (read_format & PERF_FORMAT_LOST)
+ printf("... lost : %" PRI_lu64 "\n", read_event->lost);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
@@ -1479,14 +1486,14 @@ static int deliver_sample_group(struct evlist *evlist,
struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
- struct machine *machine)
+ struct machine *machine,
+ u64 read_format)
{
int ret = -EINVAL;
- u64 i;
+ struct sample_read_value *v = sample->read.group.values;
- for (i = 0; i < sample->read.group.nr; i++) {
- ret = deliver_sample_value(evlist, tool, event, sample,
- &sample->read.group.values[i],
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ ret = deliver_sample_value(evlist, tool, event, sample, v,
machine);
if (ret)
break;
@@ -1510,7 +1517,7 @@ static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
/* For PERF_SAMPLE_READ we have either single or group mode. */
if (read_format & PERF_FORMAT_GROUP)
return deliver_sample_group(evlist, tool, event, sample,
- machine);
+ machine, read_format);
else
return deliver_sample_value(evlist, tool, event, sample,
&sample->read.one, machine);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 979c8cb918f7..788ce5e46470 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -1193,7 +1193,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
- print_metric(config, ctxp, color, "%8.1f%%", "retiring",
+ print_metric(config, ctxp, color, "%8.1f%%", "Retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1202,7 +1202,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Frontend Bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1211,7 +1211,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Backend Bound",
be_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
full_td(cpu_map_idx, st, &rsd)) {
@@ -1220,7 +1220,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
&rsd);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
+ print_metric(config, ctxp, color, "%8.1f%%", "Bad Speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1234,13 +1234,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (retiring > 0.7 && heavy_ops > 0.1)
color = PERF_COLOR_GREEN;
- print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
+ print_metric(config, ctxp, color, "%8.1f%%", "Heavy Operations",
heavy_ops * 100.);
if (retiring > 0.7 && light_ops > 0.6)
color = PERF_COLOR_GREEN;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "light operations",
+ print_metric(config, ctxp, color, "%8.1f%%", "Light Operations",
light_ops * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1254,13 +1254,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (bad_spec > 0.1 && br_mis > 0.05)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
+ print_metric(config, ctxp, color, "%8.1f%%", "Branch Mispredict",
br_mis * 100.);
if (bad_spec > 0.1 && m_clears > 0.05)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
+ print_metric(config, ctxp, color, "%8.1f%%", "Machine Clears",
m_clears * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1274,13 +1274,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (fe_bound > 0.2 && fetch_lat > 0.15)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
+ print_metric(config, ctxp, color, "%8.1f%%", "Fetch Latency",
fetch_lat * 100.);
if (fe_bound > 0.2 && fetch_bw > 0.1)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
+ print_metric(config, ctxp, color, "%8.1f%%", "Fetch Bandwidth",
fetch_bw * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
full_td(cpu_map_idx, st, &rsd) && (config->topdown_level > 1)) {
@@ -1294,13 +1294,13 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (be_bound > 0.2 && mem_bound > 0.2)
color = PERF_COLOR_RED;
- print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Memory Bound",
mem_bound * 100.);
if (be_bound > 0.2 && core_bound > 0.1)
color = PERF_COLOR_RED;
else
color = NULL;
- print_metric(config, ctxp, color, "%8.1f%%", "Core bound",
+ print_metric(config, ctxp, color, "%8.1f%%", "Core Bound",
core_bound * 100.);
} else if (evsel->metric_expr) {
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 75bec32d4f57..647b7dff8ef3 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -2102,8 +2102,8 @@ static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
* unusual. One significant peculiarity is that the mapping (start -> pgoff)
* is not the same for the kernel map and the modules map. That happens because
* the data is copied adjacently whereas the original kcore has gaps. Finally,
- * kallsyms and modules files are compared with their copies to check that
- * modules have not been loaded or unloaded while the copies were taking place.
+ * kallsyms file is compared with its copy to check that modules have not been
+ * loaded or unloaded while the copies were taking place.
*
* Return: %0 on success, %-1 on failure.
*/
@@ -2166,9 +2166,6 @@ int kcore_copy(const char *from_dir, const char *to_dir)
goto out_extract_close;
}
- if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
- goto out_extract_close;
-
if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
goto out_extract_close;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 2ae59c03ae77..538790758e24 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -367,13 +367,24 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
bool is_kernel)
{
struct build_id bid;
+ struct nsinfo *nsi;
+ struct nscookie nc;
int rc;
- if (is_kernel)
+ if (is_kernel) {
rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
- else
- rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+ goto out;
+ }
+
+ nsi = nsinfo__new(event->pid);
+ nsinfo__mountns_enter(nsi, &nc);
+ rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
+
+ nsinfo__mountns_exit(&nc);
+ nsinfo__put(nsi);
+
+out:
if (rc == 0) {
memcpy(event->build_id, bid.data, sizeof(bid.data));
event->build_id_size = (u8) bid.size;
@@ -1184,52 +1195,48 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
return err;
}
-static void synthesize_cpus(struct cpu_map_entries *cpus,
- struct perf_cpu_map *map)
+static void synthesize_cpus(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map)
{
int i, map_nr = perf_cpu_map__nr(map);
- cpus->nr = map_nr;
+ data->cpus_data.nr = map_nr;
for (i = 0; i < map_nr; i++)
- cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
+ data->cpus_data.cpu[i] = perf_cpu_map__cpu(map, i).cpu;
}
-static void synthesize_mask(struct perf_record_record_cpu_map *mask,
- struct perf_cpu_map *map, int max)
+static void synthesize_mask(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map, int max)
{
- int i;
+ int idx;
+ struct perf_cpu cpu;
+
+ /* Due to padding, the 4bytes per entry mask variant is always smaller. */
+ data->mask32_data.nr = BITS_TO_U32(max);
+ data->mask32_data.long_size = 4;
- mask->nr = BITS_TO_LONGS(max);
- mask->long_size = sizeof(long);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ int bit_word = cpu.cpu / 32;
+ __u32 bit_mask = 1U << (cpu.cpu & 31);
- for (i = 0; i < perf_cpu_map__nr(map); i++)
- set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
+ data->mask32_data.mask[bit_word] |= bit_mask;
+ }
}
-static size_t cpus_size(struct perf_cpu_map *map)
+static size_t cpus_size(const struct perf_cpu_map *map)
{
return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
}
-static size_t mask_size(struct perf_cpu_map *map, int *max)
+static size_t mask_size(const struct perf_cpu_map *map, int *max)
{
- int i;
-
- *max = 0;
-
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- /* bit position of the cpu is + 1 */
- int bit = perf_cpu_map__cpu(map, i).cpu + 1;
-
- if (bit > *max)
- *max = bit;
- }
-
- return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
+ *max = perf_cpu_map__max(map).cpu;
+ return sizeof(struct perf_record_mask_cpu_map32) + BITS_TO_U32(*max) * sizeof(__u32);
}
-void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
+static void *cpu_map_data__alloc(const struct perf_cpu_map *map, size_t *size,
+ u16 *type, int *max)
{
size_t size_cpus, size_mask;
bool is_dummy = perf_cpu_map__empty(map);
@@ -1258,30 +1265,31 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
*type = PERF_CPU_MAP__MASK;
}
- *size += sizeof(struct perf_record_cpu_map_data);
+ *size += sizeof(__u16); /* For perf_record_cpu_map_data.type. */
*size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
-void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
- u16 type, int max)
+static void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data,
+ const struct perf_cpu_map *map,
+ u16 type, int max)
{
data->type = type;
switch (type) {
case PERF_CPU_MAP__CPUS:
- synthesize_cpus((struct cpu_map_entries *) data->data, map);
+ synthesize_cpus(data, map);
break;
case PERF_CPU_MAP__MASK:
- synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
+ synthesize_mask(data, map, max);
default:
break;
}
}
-static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
+static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
- size_t size = sizeof(struct perf_record_cpu_map);
+ size_t size = sizeof(struct perf_event_header);
struct perf_record_cpu_map *event;
int max;
u16 type;
@@ -1299,7 +1307,7 @@ static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
}
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
- struct perf_cpu_map *map,
+ const struct perf_cpu_map *map,
perf_event__handler_t process,
struct machine *machine)
{
@@ -1432,11 +1440,12 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
result += sizeof(u64);
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- result += sz;
+ sz = sample_read_value_size(read_format);
+ result += sz * sample->read.group.nr;
} else {
result += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ result += sizeof(u64);
}
}
@@ -1521,6 +1530,20 @@ void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
*array = data->weight;
}
+static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
+ const struct perf_sample *sample)
+{
+ size_t sz = sample_read_value_size(read_format);
+ struct sample_read_value *v = sample->read.group.values;
+
+ sample_read_group__for_each(v, sample->read.group.nr, read_format) {
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ memcpy(array, v, sz);
+ array = (void *)array + sz;
+ }
+ return array;
+}
+
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
const struct perf_sample *sample)
{
@@ -1602,13 +1625,16 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_fo
/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
if (read_format & PERF_FORMAT_GROUP) {
- sz = sample->read.group.nr *
- sizeof(struct sample_read_value);
- memcpy(array, sample->read.group.values, sz);
- array = (void *)array + sz;
+ array = copy_read_group_values(array, read_format,
+ sample);
} else {
*array = sample->read.one.id;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ *array = sample->read.one.lost;
+ array++;
+ }
}
}
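
[synthesize_mask() now always emits the 32-bit mask variant, placing CPU n in word n / 32, bit n % 32. A sketch of that encoding in isolation, assuming a perf_record_mask_cpu_map32 with enough words allocated:]

static void mask32_set_cpu(struct perf_record_mask_cpu_map32 *m, int cpu)
{
	int word = cpu / 32;		/* which __u32 holds this CPU */
	__u32 bit = 1U << (cpu & 31);	/* bit within that word */

	m->mask[word] |= bit;
}
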
diff --git a/tools/perf/util/synthetic-events.h b/tools/perf/util/synthetic-events.h
index 81cb3d6af0b9..53737d1619a4 100644
--- a/tools/perf/util/synthetic-events.h
+++ b/tools/perf/util/synthetic-events.h
@@ -46,7 +46,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *e
int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process);
int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process);
int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine);
-int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
+int perf_event__synthesize_cpu_map(struct perf_tool *tool, const struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine);
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process);