author     Linus Torvalds <torvalds@linux-foundation.org>   2015-07-04 08:17:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-07-04 08:17:29 -0700
commit     c1776a18e3b5a3559f3dff5df0ecce570abd3a9f (patch)
tree       5b97defe199f434f5d9829bf7d8df6124e6ce5f1
parent     91cca0f0ffa3e485ad9d5b44744f23318c8f99ab (diff)
parent     b9df84fd7c05cc300d6d14f022b8a00773ebcf8c (diff)
download   linux-c1776a18e3b5a3559f3dff5df0ecce570abd3a9f.tar.bz2
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"This tree includes an x86 PMU scheduling fix, but most changes are
late breaking tooling fixes and updates:
User visible fixes:
- Create config.detected in the OUTPUT directory, fixing parallel
builds sharing the same source directory (Aaro Koskinen)
- Allow specifying a custom linker command, fixing some MIPS64 builds
(Aaro Koskinen)
- Fix to show proper convergence stats in 'perf bench numa' (Srikar
Dronamraju)
User visible changes:
- Validate the syscall list passed via the -e argument to 'perf trace'
(Arnaldo Carvalho de Melo)
- Introduce 'perf stat --per-thread'; a short sketch of the per-thread
aggregation it performs is included just before the diff below (Jiri Olsa)
- Check access permission for --kallsyms and --vmlinux (Li Zhang)
- Move the event toggling logic out of 'perf top' and into the hists
browser, allowing freeze/unfreeze with event lists that have more than
one entry (Namhyung Kim)
- Add missing newlines when dumping PERF_RECORD_FINISHED_ROUND and
showing the Aggregated stats in 'perf report -D' (Adrian Hunter)
Infrastructure fixes:
- Add missing break for PERF_RECORD_ITRACE_START, which caused those
event samples to be parsed as well as PERF_RECORD_LOST_SAMPLES.
ITRACE_START only appears when Intel PT or BTS are present, so..
(Jiri Olsa)
- Call the perf_session destructor when bailing out in the inject,
kmem, report, kvm and mem tools (Taeung Song)
Infrastructure changes:
- Move stuff out of 'perf stat' and into the lib for further use
(Jiri Olsa)
- Reference count the cpu_map and thread_map classes, as sketched right
after this message (Jiri Olsa)
- Set evsel->{cpus,threads} from the evlist, if not set, allowing the
generalization of some 'perf stat' functions that previously were
accessing a private static evlist variable (Jiri Olsa)
- Delete an unnecessary check before the call to free_event_desc()
(Markus Elfring)
- Allow auxtrace data alignment (Adrian Hunter)
- Allow event names containing a dot (Andi Kleen)
- Fix failure to 'perf probe' events on arm (He Kuang)
- Add testing for Makefile.perf (Jiri Olsa)
- Add test for make install with prefix (Jiri Olsa)
- Fix single target build dependency check (Jiri Olsa)
- Access thread_map entries via accessors, prep patch to hold more
info per entry, for ongoing 'perf stat --per-thread' work (Jiri
Olsa)
- Use __weak definition from compiler.h (Sukadev Bhattiprolu)
- Split perf_pmu__new_alias() (Sukadev Bhattiprolu)"
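Two of the infrastructure items above revolve around map ownership: the cpu_map
and thread_map classes become reference counted, and each evsel now takes its
own reference to the maps it shares with the evlist, so either side can drop
them independently. Below is a minimal sketch of that get/put pattern,
simplified from the cpu_map changes in the diff further down; the refcnt is a
plain int here for brevity, while the real code uses atomic_t with
atomic_inc()/atomic_dec_and_test(), and only one of several constructors is shown.

#include <stdlib.h>
#include <string.h>

struct cpu_map {
	int refcnt;	/* tools/perf uses atomic_t; a plain int keeps the sketch short */
	int nr;
	int map[];
};

static struct cpu_map *cpu_map__trim_new(int nr, const int *cpus)
{
	struct cpu_map *map = malloc(sizeof(*map) + nr * sizeof(int));

	if (map) {
		map->nr = nr;
		memcpy(map->map, cpus, nr * sizeof(int));
		map->refcnt = 1;	/* the creator holds the first reference */
	}
	return map;
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
	if (map)
		map->refcnt++;		/* every additional holder takes a reference */
	return map;
}

void cpu_map__put(struct cpu_map *map)
{
	if (map && --map->refcnt == 0)	/* dropping the last reference frees the map */
		free(map);
}

With this in place, perf_evlist__propagate_maps() can hand evlist->cpus and
evlist->threads to each evsel via the get helpers, and perf_evsel__exit() and
perf_evlist__delete() can both call put without risking a double free.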
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (54 commits)
perf tools: Allow to specify custom linker command
perf tools: Create config.detected into OUTPUT directory
perf mem: Fill in the missing session freeing after an error occurs
perf kvm: Fill in the missing session freeing after an error occurs
perf report: Fill in the missing session freeing after an error occurs
perf kmem: Fill in the missing session freeing after an error occurs
perf inject: Fill in the missing session freeing after an error occurs
perf tools: Add missing break for PERF_RECORD_ITRACE_START
perf/x86: Fix 'active_events' imbalance
perf symbols: Check access permission when reading symbol files
perf stat: Introduce --per-thread option
perf stat: Introduce print_counters function
perf stat: Using init_stats instead of memset
perf stat: Rename print_interval to process_interval
perf stat: Remove perf_evsel__read_cb function
perf stat: Move perf_stat initialization counter process code
perf stat: Move zero_per_pkg into counter process code
perf stat: Separate counters reading and processing
perf stat: Introduce read_counters function
perf stat: Introduce perf_evsel__read function
...
54 files changed, 885 insertions, 422 deletions
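Before the patch body, it may help to see what the new AGGR_THREAD mode in the
builtin-stat.c changes actually computes: for every monitored thread it sums
one counter's per-CPU values and prints a single line. The sketch below is a
condensed, hypothetical rendering of that loop; the flat counts_at() helper and
the sample values in main() are illustrative only, since the real code reaches
the (cpu, thread) matrix through perf_counts() and prints the thread's comm-pid
rather than an index.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint64_t u64;

struct perf_counts_values {
	u64 val, ena, run;	/* count, time enabled, time running */
};

/* hypothetical flat layout for the (cpu, thread) counter matrix */
static struct perf_counts_values *
counts_at(struct perf_counts_values *counts, int ncpus, int cpu, int thread)
{
	return &counts[thread * ncpus + cpu];
}

/* AGGR_THREAD: aggregate one counter across all CPUs, once per thread */
static void print_aggr_thread(struct perf_counts_values *counts,
			      int ncpus, int nthreads)
{
	for (int thread = 0; thread < nthreads; thread++) {
		u64 val = 0, ena = 0, run = 0;

		for (int cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *c =
				counts_at(counts, ncpus, cpu, thread);

			val += c->val;
			ena += c->ena;
			run += c->run;
		}
		/* the real code prints "comm-pid  count  event" per thread */
		printf("thread %d: %" PRIu64 " (enabled %" PRIu64 ", running %" PRIu64 ")\n",
		       thread, val, ena, run);
	}
}

int main(void)
{
	/* illustrative values only: 2 CPUs x 2 threads */
	struct perf_counts_values counts[4] = {
		{ 100, 10, 10 }, { 200, 10, 10 },	/* thread 0 on cpu 0 and 1 */
		{  50, 10, 10 }, {  75, 10, 10 },	/* thread 1 on cpu 0 and 1 */
	};

	print_aggr_thread(counts, 2, 2);
	return 0;
}

On the command line this corresponds to something like 'perf stat --per-thread
-p <pid>'; as the cmd_stat() hunk below shows, the option is rejected unless a
task target (-p or -t) is given.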
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5801a14f7524..3658de47900f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -357,34 +357,24 @@ void x86_release_hardware(void) */ int x86_add_exclusive(unsigned int what) { - int ret = -EBUSY, i; - - if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) - return 0; + int i; - mutex_lock(&pmc_reserve_mutex); - for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { - if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) - goto out; + if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { + mutex_lock(&pmc_reserve_mutex); + for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { + if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) + goto fail_unlock; + } + atomic_inc(&x86_pmu.lbr_exclusive[what]); + mutex_unlock(&pmc_reserve_mutex); } - atomic_inc(&x86_pmu.lbr_exclusive[what]); - ret = 0; + atomic_inc(&active_events); + return 0; -out: +fail_unlock: mutex_unlock(&pmc_reserve_mutex); - - /* - * Assuming that all exclusive events will share the PMI handler - * (which checks active_events for whether there is work to do), - * we can bump active_events counter right here, except for - * x86_lbr_exclusive_lbr events that go through x86_pmu_event_init() - * path, which already bumps active_events for them. - */ - if (!ret && what != x86_lbr_exclusive_lbr) - atomic_inc(&active_events); - - return ret; + return -EBUSY; } void x86_del_exclusive(unsigned int what) diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build index a51244a8022f..faca2bf6a430 100644 --- a/tools/build/Makefile.build +++ b/tools/build/Makefile.build @@ -25,7 +25,7 @@ build-dir := $(srctree)/tools/build include $(build-dir)/Build.include # do not force detected configuration --include .config-detected +-include $(OUTPUT).config-detected # Init all relevant variables used in build files so # 1) they have correct type diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 04e150d83e7d..47469abdcc1c 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -144,6 +144,10 @@ is a useful mode to detect imbalance between physical cores. To enable this mod use --per-core in addition to -a. (system-wide). The output includes the core number and the number of online logical processors on that physical processor. +--per-thread:: +Aggregate counts per monitored threads, when monitoring threads (-t option) +or processes (-p option). + -D msecs:: --delay msecs:: After starting the program, wait msecs before measuring. 
This is useful to diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d31a7bbd7cee..480546d5f13b 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -83,8 +83,8 @@ build-test: # # All other targets get passed through: # -%: +%: FORCE $(print_msg) $(make) -.PHONY: tags TAGS +.PHONY: tags TAGS FORCE Makefile diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 1af0cfeb7a57..7a4b549214e3 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -110,7 +110,7 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD $(Q)touch $(OUTPUT)PERF-VERSION-FILE CC = $(CROSS_COMPILE)gcc -LD = $(CROSS_COMPILE)ld +LD ?= $(CROSS_COMPILE)ld AR = $(CROSS_COMPILE)ar PKG_CONFIG = $(CROSS_COMPILE)pkg-config @@ -545,7 +545,7 @@ config-clean: clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean config-clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS) $(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete - $(Q)$(RM) .config-detected + $(Q)$(RM) $(OUTPUT).config-detected $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 52ec66b23607..01b06492bd6a 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -630,12 +630,13 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) if (inject.session == NULL) return -1; - if (symbol__init(&inject.session->header.env) < 0) - return -1; + ret = symbol__init(&inject.session->header.env); + if (ret < 0) + goto out_delete; ret = __cmd_inject(&inject); +out_delete: perf_session__delete(inject.session); - return ret; } diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 950f296dfcf7..23b1faaaa4cc 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -1916,7 +1916,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) if (!perf_evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) { pr_err(errmsg, "slab", "slab"); - return -1; + goto out_delete; } } @@ -1927,7 +1927,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) "kmem:mm_page_alloc"); if (evsel == NULL) { pr_err(errmsg, "page", "page"); - return -1; + goto out_delete; } kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent); diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 74878cd75078..fc1cffb1b7a2 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -1061,8 +1061,10 @@ static int read_events(struct perf_kvm_stat *kvm) symbol__init(&kvm->session->header.env); - if (!perf_session__has_traces(kvm->session, "kvm record")) - return -EINVAL; + if (!perf_session__has_traces(kvm->session, "kvm record")) { + ret = -EINVAL; + goto out_delete; + } /* * Do not use 'isa' recorded in kvm_exit tracepoint since it is not @@ -1070,9 +1072,13 @@ static int read_events(struct perf_kvm_stat *kvm) */ ret = cpu_isa_config(kvm); if (ret < 0) - return ret; + goto out_delete; - return perf_session__process_events(kvm->session); + ret = perf_session__process_events(kvm->session); + +out_delete: + perf_session__delete(kvm->session); + return ret; 
} static int parse_target_str(struct perf_kvm_stat *kvm) diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index da2ec06f0742..80170aace5d4 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -124,7 +124,6 @@ static int report_raw_events(struct perf_mem *mem) .mode = PERF_DATA_MODE_READ, .force = mem->force, }; - int err = -EINVAL; int ret; struct perf_session *session = perf_session__new(&file, false, &mem->tool); @@ -135,24 +134,21 @@ static int report_raw_events(struct perf_mem *mem) if (mem->cpu_list) { ret = perf_session__cpu_bitmap(session, mem->cpu_list, mem->cpu_bitmap); - if (ret) + if (ret < 0) goto out_delete; } - if (symbol__init(&session->header.env) < 0) - return -1; + ret = symbol__init(&session->header.env); + if (ret < 0) + goto out_delete; printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n"); - err = perf_session__process_events(session); - if (err) - return err; - - return 0; + ret = perf_session__process_events(session); out_delete: perf_session__delete(session); - return err; + return ret; } static int report_events(int argc, const char **argv, struct perf_mem *mem) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 32626ea3e227..95a47719aec3 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -742,6 +742,17 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) argc = parse_options(argc, argv, options, report_usage, 0); + if (symbol_conf.vmlinux_name && + access(symbol_conf.vmlinux_name, R_OK)) { + pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); + return -EINVAL; + } + if (symbol_conf.kallsyms_name && + access(symbol_conf.kallsyms_name, R_OK)) { + pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); + return -EINVAL; + } + if (report.use_stdio) use_browser = 0; else if (report.use_tui) @@ -828,8 +839,10 @@ repeat: if (report.header || report.header_only) { perf_session__fprintf_info(session, stdout, report.show_full_info); - if (report.header_only) - return 0; + if (report.header_only) { + ret = 0; + goto error; + } } else if (use_browser == 0) { fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n", stdout); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index fcf99bdeb19e..37e301a32f43 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -67,10 +67,7 @@ #define CNTR_NOT_SUPPORTED "<not supported>" #define CNTR_NOT_COUNTED "<not counted>" -static void print_stat(int argc, const char **argv); -static void print_counter_aggr(struct perf_evsel *counter, char *prefix); -static void print_counter(struct perf_evsel *counter, char *prefix); -static void print_aggr(char *prefix); +static void print_counters(struct timespec *ts, int argc, const char **argv); /* Default events used for perf stat -T */ static const char *transaction_attrs = { @@ -141,96 +138,9 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a, } } -static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) +static void perf_stat__reset_stats(void) { - return (evsel->cpus && !target.cpu_list) ? 
evsel->cpus : evsel_list->cpus; -} - -static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) -{ - return perf_evsel__cpus(evsel)->nr; -} - -static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) -{ - int i; - struct perf_stat *ps = evsel->priv; - - for (i = 0; i < 3; i++) - init_stats(&ps->res_stats[i]); - - perf_stat_evsel_id_init(evsel); -} - -static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) -{ - evsel->priv = zalloc(sizeof(struct perf_stat)); - if (evsel->priv == NULL) - return -ENOMEM; - perf_evsel__reset_stat_priv(evsel); - return 0; -} - -static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) -{ - zfree(&evsel->priv); -} - -static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel) -{ - struct perf_counts *counts; - - counts = perf_counts__new(perf_evsel__nr_cpus(evsel)); - if (counts) - evsel->prev_raw_counts = counts; - - return counts ? 0 : -ENOMEM; -} - -static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) -{ - perf_counts__delete(evsel->prev_raw_counts); - evsel->prev_raw_counts = NULL; -} - -static void perf_evlist__free_stats(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel; - - evlist__for_each(evlist, evsel) { - perf_evsel__free_stat_priv(evsel); - perf_evsel__free_counts(evsel); - perf_evsel__free_prev_raw_counts(evsel); - } -} - -static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw) -{ - struct perf_evsel *evsel; - - evlist__for_each(evlist, evsel) { - if (perf_evsel__alloc_stat_priv(evsel) < 0 || - perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 || - (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0)) - goto out_free; - } - - return 0; - -out_free: - perf_evlist__free_stats(evlist); - return -1; -} - -static void perf_stat__reset_stats(struct perf_evlist *evlist) -{ - struct perf_evsel *evsel; - - evlist__for_each(evlist, evsel) { - perf_evsel__reset_stat_priv(evsel); - perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel)); - } - + perf_evlist__reset_stats(evsel_list); perf_stat__reset_shadow_stats(); } @@ -304,8 +214,9 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) return 0; } -static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused, - struct perf_counts_values *count) +static int +process_counter_values(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count) { struct perf_counts_values *aggr = &evsel->counts->aggr; static struct perf_counts_values zero; @@ -320,13 +231,13 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused, count = &zero; switch (aggr_mode) { + case AGGR_THREAD: case AGGR_CORE: case AGGR_SOCKET: case AGGR_NONE: if (!evsel->snapshot) - perf_evsel__compute_deltas(evsel, cpu, count); + perf_evsel__compute_deltas(evsel, cpu, thread, count); perf_counts_values__scale(count, scale, NULL); - evsel->counts->cpu[cpu] = *count; if (aggr_mode == AGGR_NONE) perf_stat__update_shadow_stats(evsel, count->values, cpu); break; @@ -343,26 +254,48 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused, return 0; } -static int read_counter(struct perf_evsel *counter); +static int process_counter_maps(struct perf_evsel *counter) +{ + int nthreads = thread_map__nr(counter->threads); + int ncpus = perf_evsel__nr_cpus(counter); + int cpu, thread; -/* - * Read out the results of a single counter: - * aggregate counts across CPUs in system-wide mode - */ -static int read_counter_aggr(struct perf_evsel 
*counter) + if (counter->system_wide) + nthreads = 1; + + for (thread = 0; thread < nthreads; thread++) { + for (cpu = 0; cpu < ncpus; cpu++) { + if (process_counter_values(counter, cpu, thread, + perf_counts(counter->counts, cpu, thread))) + return -1; + } + } + + return 0; +} + +static int process_counter(struct perf_evsel *counter) { struct perf_counts_values *aggr = &counter->counts->aggr; struct perf_stat *ps = counter->priv; u64 *count = counter->counts->aggr.values; - int i; + int i, ret; aggr->val = aggr->ena = aggr->run = 0; + init_stats(ps->res_stats); - if (read_counter(counter)) - return -1; + if (counter->per_pkg) + zero_per_pkg(counter); + + ret = process_counter_maps(counter); + if (ret) + return ret; + + if (aggr_mode != AGGR_GLOBAL) + return 0; if (!counter->snapshot) - perf_evsel__compute_deltas(counter, -1, aggr); + perf_evsel__compute_deltas(counter, -1, -1, aggr); perf_counts_values__scale(aggr, scale, &counter->counts->scaled); for (i = 0; i < 3; i++) @@ -397,12 +330,12 @@ static int read_counter(struct perf_evsel *counter) if (counter->system_wide) nthreads = 1; - if (counter->per_pkg) - zero_per_pkg(counter); - for (thread = 0; thread < nthreads; thread++) { for (cpu = 0; cpu < ncpus; cpu++) { - if (perf_evsel__read_cb(counter, cpu, thread, read_cb)) + struct perf_counts_values *count; + + count = perf_counts(counter->counts, cpu, thread); + if (perf_evsel__read(counter, cpu, thread, count)) return -1; } } @@ -410,68 +343,34 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static void print_interval(void) +static void read_counters(bool close) { - static int num_print_interval; struct perf_evsel *counter; - struct perf_stat *ps; - struct timespec ts, rs; - char prefix[64]; - if (aggr_mode == AGGR_GLOBAL) { - evlist__for_each(evsel_list, counter) { - ps = counter->priv; - memset(ps->res_stats, 0, sizeof(ps->res_stats)); - read_counter_aggr(counter); - } - } else { - evlist__for_each(evsel_list, counter) { - ps = counter->priv; - memset(ps->res_stats, 0, sizeof(ps->res_stats)); - read_counter(counter); - } - } + evlist__for_each(evsel_list, counter) { + if (read_counter(counter)) + pr_warning("failed to read counter %s\n", counter->name); - clock_gettime(CLOCK_MONOTONIC, &ts); - diff_timespec(&rs, &ts, &ref_time); - sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep); + if (process_counter(counter)) + pr_warning("failed to process counter %s\n", counter->name); - if (num_print_interval == 0 && !csv_output) { - switch (aggr_mode) { - case AGGR_SOCKET: - fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit"); - break; - case AGGR_CORE: - fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit"); - break; - case AGGR_NONE: - fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit"); - break; - case AGGR_GLOBAL: - default: - fprintf(output, "# time counts %*s events\n", unit_width, "unit"); + if (close) { + perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), + thread_map__nr(evsel_list->threads)); } } +} - if (++num_print_interval == 25) - num_print_interval = 0; +static void process_interval(void) +{ + struct timespec ts, rs; - switch (aggr_mode) { - case AGGR_CORE: - case AGGR_SOCKET: - print_aggr(prefix); - break; - case AGGR_NONE: - evlist__for_each(evsel_list, counter) - print_counter(counter, prefix); - break; - case AGGR_GLOBAL: - default: - evlist__for_each(evsel_list, counter) - print_counter_aggr(counter, prefix); - } + read_counters(false); - fflush(output); + 
clock_gettime(CLOCK_MONOTONIC, &ts); + diff_timespec(&rs, &ts, &ref_time); + + print_counters(&rs, 0, NULL); } static void handle_initial_delay(void) @@ -586,7 +485,7 @@ static int __run_perf_stat(int argc, const char **argv) if (interval) { while (!waitpid(child_pid, &status, WNOHANG)) { nanosleep(&ts, NULL); - print_interval(); + process_interval(); } } wait(&status); @@ -604,7 +503,7 @@ static int __run_perf_stat(int argc, const char **argv) while (!done) { nanosleep(&ts, NULL); if (interval) - print_interval(); + process_interval(); } } @@ -612,18 +511,7 @@ static int __run_perf_stat(int argc, const char **argv) update_stats(&walltime_nsecs_stats, t1 - t0); - if (aggr_mode == AGGR_GLOBAL) { - evlist__for_each(evsel_list, counter) { - read_counter_aggr(counter); - perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), - thread_map__nr(evsel_list->threads)); - } - } else { - evlist__for_each(evsel_list, counter) { - read_counter(counter); - perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); - } - } + read_counters(true); return WEXITSTATUS(status); } @@ -715,6 +603,14 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr) csv_output ? 0 : -4, perf_evsel__cpus(evsel)->map[id], csv_sep); break; + case AGGR_THREAD: + fprintf(output, "%*s-%*d%s", + csv_output ? 0 : 16, + thread_map__comm(evsel->threads, id), + csv_output ? 0 : -8, + thread_map__pid(evsel->threads, id), + csv_sep); + break; case AGGR_GLOBAL: default: break; @@ -815,9 +711,9 @@ static void print_aggr(char *prefix) s2 = aggr_get_id(evsel_list->cpus, cpu2); if (s2 != id) continue; - val += counter->counts->cpu[cpu].val; - ena += counter->counts->cpu[cpu].ena; - run += counter->counts->cpu[cpu].run; + val += perf_counts(counter->counts, cpu, 0)->val; + ena += perf_counts(counter->counts, cpu, 0)->ena; + run += perf_counts(counter->counts, cpu, 0)->run; nr++; } if (prefix) @@ -863,6 +759,40 @@ static void print_aggr(char *prefix) } } +static void print_aggr_thread(struct perf_evsel *counter, char *prefix) +{ + int nthreads = thread_map__nr(counter->threads); + int ncpus = cpu_map__nr(counter->cpus); + int cpu, thread; + double uval; + + for (thread = 0; thread < nthreads; thread++) { + u64 ena = 0, run = 0, val = 0; + + for (cpu = 0; cpu < ncpus; cpu++) { + val += perf_counts(counter->counts, cpu, thread)->val; + ena += perf_counts(counter->counts, cpu, thread)->ena; + run += perf_counts(counter->counts, cpu, thread)->run; + } + + if (prefix) + fprintf(output, "%s", prefix); + + uval = val * counter->scale; + + if (nsec_counter(counter)) + nsec_printout(thread, 0, counter, uval); + else + abs_printout(thread, 0, counter, uval); + + if (!csv_output) + print_noise(counter, 1.0); + + print_running(run, ena); + fputc('\n', output); + } +} + /* * Print out the results of a single counter: * aggregated counts in system-wide mode @@ -925,9 +855,9 @@ static void print_counter(struct perf_evsel *counter, char *prefix) int cpu; for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { - val = counter->counts->cpu[cpu].val; - ena = counter->counts->cpu[cpu].ena; - run = counter->counts->cpu[cpu].run; + val = perf_counts(counter->counts, cpu, 0)->val; + ena = perf_counts(counter->counts, cpu, 0)->ena; + run = perf_counts(counter->counts, cpu, 0)->run; if (prefix) fprintf(output, "%s", prefix); @@ -972,9 +902,38 @@ static void print_counter(struct perf_evsel *counter, char *prefix) } } -static void print_stat(int argc, const char **argv) +static void print_interval(char *prefix, struct timespec *ts) +{ + 
static int num_print_interval; + + sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep); + + if (num_print_interval == 0 && !csv_output) { + switch (aggr_mode) { + case AGGR_SOCKET: + fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit"); + break; + case AGGR_CORE: + fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit"); + break; + case AGGR_NONE: + fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit"); + break; + case AGGR_THREAD: + fprintf(output, "# time comm-pid counts %*s events\n", unit_width, "unit"); + break; + case AGGR_GLOBAL: + default: + fprintf(output, "# time counts %*s events\n", unit_width, "unit"); + } + } + + if (++num_print_interval == 25) + num_print_interval = 0; +} + +static void print_header(int argc, const char **argv) { - struct perf_evsel *counter; int i; fflush(stdout); @@ -1000,36 +959,57 @@ static void print_stat(int argc, const char **argv) fprintf(output, " (%d runs)", run_count); fprintf(output, ":\n\n"); } +} + +static void print_footer(void) +{ + if (!null_run) + fprintf(output, "\n"); + fprintf(output, " %17.9f seconds time elapsed", + avg_stats(&walltime_nsecs_stats)/1e9); + if (run_count > 1) { + fprintf(output, " "); + print_noise_pct(stddev_stats(&walltime_nsecs_stats), + avg_stats(&walltime_nsecs_stats)); + } + fprintf(output, "\n\n"); +} + +static void print_counters(struct timespec *ts, int argc, const char **argv) +{ + struct perf_evsel *counter; + char buf[64], *prefix = NULL; + + if (interval) + print_interval(prefix = buf, ts); + else + print_header(argc, argv); switch (aggr_mode) { case AGGR_CORE: case AGGR_SOCKET: - print_aggr(NULL); + print_aggr(prefix); + break; + case AGGR_THREAD: + evlist__for_each(evsel_list, counter) + print_aggr_thread(counter, prefix); break; case AGGR_GLOBAL: evlist__for_each(evsel_list, counter) - print_counter_aggr(counter, NULL); + print_counter_aggr(counter, prefix); break; case AGGR_NONE: evlist__for_each(evsel_list, counter) - print_counter(counter, NULL); + print_counter(counter, prefix); break; default: break; } - if (!csv_output) { - if (!null_run) - fprintf(output, "\n"); - fprintf(output, " %17.9f seconds time elapsed", - avg_stats(&walltime_nsecs_stats)/1e9); - if (run_count > 1) { - fprintf(output, " "); - print_noise_pct(stddev_stats(&walltime_nsecs_stats), - avg_stats(&walltime_nsecs_stats)); - } - fprintf(output, "\n\n"); - } + if (!interval && !csv_output) + print_footer(); + + fflush(output); } static volatile int signr = -1; @@ -1101,6 +1081,7 @@ static int perf_stat_init_aggr_mode(void) break; case AGGR_NONE: case AGGR_GLOBAL: + case AGGR_THREAD: default: break; } @@ -1325,6 +1306,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) "aggregate counts per processor socket", AGGR_SOCKET), OPT_SET_UINT(0, "per-core", &aggr_mode, "aggregate counts per physical processor core", AGGR_CORE), + OPT_SET_UINT(0, "per-thread", &aggr_mode, + "aggregate counts per thread", AGGR_THREAD), OPT_UINTEGER('D', "delay", &initial_delay, "ms to wait before starting measurement after program start"), OPT_END() @@ -1416,8 +1399,19 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) run_count = 1; } - /* no_aggr, cgroup are for system-wide only */ - if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) && + if ((aggr_mode == AGGR_THREAD) && !target__has_task(&target)) { + fprintf(stderr, "The --per-thread option is only available " + "when monitoring via -p -t options.\n"); + parse_options_usage(NULL, 
options, "p", 1); + parse_options_usage(NULL, options, "t", 1); + goto out; + } + + /* + * no_aggr, cgroup are for system-wide only + * --per-thread is aggregated per thread, we dont mix it with cpu mode + */ + if (((aggr_mode != AGGR_GLOBAL && aggr_mode != AGGR_THREAD) || nr_cgroups) && !target__has_cpu(&target)) { fprintf(stderr, "both cgroup and no-aggregation " "modes only available in system-wide mode\n"); @@ -1445,6 +1439,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) } goto out; } + + /* + * Initialize thread_map with comm names, + * so we could print it out on output. + */ + if (aggr_mode == AGGR_THREAD) + thread_map__read_comms(evsel_list->threads); + if (interval && interval < 100) { pr_err("print interval must be >= 100ms\n"); parse_options_usage(stat_usage, options, "I", 1); @@ -1478,13 +1480,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) status = run_perf_stat(argc, argv); if (forever && status != -1) { - print_stat(argc, argv); - perf_stat__reset_stats(evsel_list); + print_counters(NULL, argc, argv); + perf_stat__reset_stats(); } } if (!forever && status != -1 && !interval) - print_stat(argc, argv); + print_counters(NULL, argc, argv); perf_evlist__free_stats(evsel_list); out: diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 619a8696fda7..ecf319728f25 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -586,27 +586,9 @@ static void *display_thread_tui(void *arg) hists->uid_filter_str = top->record_opts.target.uid_str; } - while (true) { - int key = perf_evlist__tui_browse_hists(top->evlist, help, &hbt, - top->min_percent, - &top->session->header.env); - - if (key != 'f') - break; - - perf_evlist__toggle_enable(top->evlist); - /* - * No need to refresh, resort/decay histogram entries - * if we are not collecting samples: - */ - if (top->evlist->enabled) { - hbt.refresh = top->delay_secs; - help = "Press 'f' to disable the events or 'h' to see other hotkeys"; - } else { - help = "Press 'f' again to re-enable the events"; - hbt.refresh = 0; - } - } + perf_evlist__tui_browse_hists(top->evlist, help, &hbt, + top->min_percent, + &top->session->header.env); done = 1; return NULL; diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index de5d277d1ad7..39ad4d0ca884 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1617,6 +1617,34 @@ static int trace__read_syscall_info(struct trace *trace, int id) return syscall__set_arg_fmts(sc); } +static int trace__validate_ev_qualifier(struct trace *trace) +{ + int err = 0; + struct str_node *pos; + + strlist__for_each(pos, trace->ev_qualifier) { + const char *sc = pos->s; + + if (audit_name_to_syscall(sc, trace->audit.machine) < 0) { + if (err == 0) { + fputs("Error:\tInvalid syscall ", trace->output); + err = -EINVAL; + } else { + fputs(", ", trace->output); + } + + fputs(sc, trace->output); + } + } + + if (err < 0) { + fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'" + "\nHint:\tand: 'man syscalls'\n", trace->output); + } + + return err; +} + /* * args is to be interpreted as a series of longs but we need to handle * 8-byte unaligned accesses. 
args points to raw_data within the event @@ -2325,7 +2353,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) */ if (trace->filter_pids.nr > 0) err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries); - else if (evlist->threads->map[0] == -1) + else if (thread_map__pid(evlist->threads, 0) == -1) err = perf_evlist__set_filter_pid(evlist, getpid()); if (err < 0) { @@ -2343,7 +2371,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) if (forks) perf_evlist__start_workload(evlist); - trace->multiple_threads = evlist->threads->map[0] == -1 || + trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 || evlist->threads->nr > 1 || perf_evlist__first(evlist)->attr.inherit; again: @@ -2862,6 +2890,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) err = -ENOMEM; goto out_close; } + + err = trace__validate_ev_qualifier(&trace); + if (err) + goto out_close; } err = target__validate(&trace.opts.target); diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 317001c94660..094ddaee104c 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -11,9 +11,9 @@ ifneq ($(obj-perf),) obj-perf := $(abspath $(obj-perf))/ endif -$(shell echo -n > .config-detected) -detected = $(shell echo "$(1)=y" >> .config-detected) -detected_var = $(shell echo "$(1)=$($(1))" >> .config-detected) +$(shell echo -n > $(OUTPUT).config-detected) +detected = $(shell echo "$(1)=y" >> $(OUTPUT).config-detected) +detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected) CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS) diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index ee41e705b2eb..d20d6e6ab65b 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -31,6 +31,7 @@ perf-y += code-reading.o perf-y += sample-parsing.o perf-y += parse-no-sample-id-all.o perf-y += kmod-path.o +perf-y += thread-map.o perf-$(CONFIG_X86) += perf-time-to-tsc.o diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 87b9961646e4..c1dde733c3a6 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -171,6 +171,10 @@ static struct test { .func = test__kmod_path__parse, }, { + .desc = "Test thread map", + .func = test__thread_map, + }, + { .func = NULL, }, }; diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 22f8a00446e1..39c784a100a9 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -545,8 +545,8 @@ out_err: if (evlist) { perf_evlist__delete(evlist); } else { - cpu_map__delete(cpus); - thread_map__delete(threads); + cpu_map__put(cpus); + thread_map__put(threads); } machines__destroy_kernel_maps(&machines); machine__delete_threads(machine); diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c index 5b171d1e338b..4d4b9837b630 100644 --- a/tools/perf/tests/keep-tracking.c +++ b/tools/perf/tests/keep-tracking.c @@ -144,8 +144,8 @@ out_err: perf_evlist__disable(evlist); perf_evlist__delete(evlist); } else { - cpu_map__delete(cpus); - thread_map__delete(threads); + cpu_map__put(cpus); + thread_map__put(threads); } return err; diff --git a/tools/perf/tests/make b/tools/perf/tests/make index 65280d28662e..729112f4cfaa 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -1,5 +1,16 @@ +ifndef MK +ifeq ($(MAKECMDGOALS),) +# no target specified, trigger the whole suite +all: + @echo "Testing Makefile"; 
$(MAKE) -sf tests/make MK=Makefile + @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf +else +# run only specific test over 'Makefile' +%: + @echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile $@ +endif +else PERF := . -MK := Makefile include config/Makefile.arch @@ -47,6 +58,7 @@ make_install_man := install-man make_install_html := install-html make_install_info := install-info make_install_pdf := install-pdf +make_install_prefix := install prefix=/tmp/krava make_static := LDFLAGS=-static # all the NO_* variable combined @@ -57,7 +69,12 @@ make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 # $(run) contains all available tests run := make_pure +# Targets 'clean all' can be run together only through top level +# Makefile because we detect clean target in Makefile.perf and +# disable features detection +ifeq ($(MK),Makefile) run += make_clean_all +endif run += make_python_perf_so run += make_debug run += make_no_libperl @@ -83,6 +100,7 @@ run += make_util_map_o run += make_util_pmu_bison_o run += make_install run += make_install_bin +run += make_install_prefix # FIXME 'install-*' commented out till they're fixed # run += make_install_doc # run += make_install_man @@ -157,6 +175,12 @@ test_make_install_O := $(call test_dest_files,$(installed_files_all)) test_make_install_bin := $(call test_dest_files,$(installed_files_bin)) test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin)) +# We prefix all installed files for make_install_prefix +# with '/tmp/krava' to match installed/prefix-ed files. +installed_files_all_prefix := $(addprefix /tmp/krava/,$(installed_files_all)) +test_make_install_prefix := $(call test_dest_files,$(installed_files_all_prefix)) +test_make_install_prefix_O := $(call test_dest_files,$(installed_files_all_prefix)) + # FIXME nothing gets installed test_make_install_man := test -f $$TMP_DEST/share/man/man1/perf.1 test_make_install_man_O := $(test_make_install_man) @@ -226,13 +250,13 @@ tarpkg: ( eval $$cmd ) >> $@ 2>&1 make_kernelsrc: - @echo " - make -C <kernelsrc> tools/perf" + @echo "- make -C <kernelsrc> tools/perf" $(call clean); \ (make -C ../.. 
tools/perf) > $@ 2>&1 && \ test -x perf && rm -f $@ || (cat $@ ; false) make_kernelsrc_tools: - @echo " - make -C <kernelsrc>/tools perf" + @echo "- make -C <kernelsrc>/tools perf" $(call clean); \ (make -C ../../tools perf) > $@ 2>&1 && \ test -x perf && rm -f $@ || (cat $@ ; false) @@ -244,3 +268,4 @@ out: $(run_O) @echo OK .PHONY: all $(run) $(run_O) tarpkg clean +endif # ifndef MK diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 5855cf471210..666b67a4df9d 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -140,8 +140,8 @@ out_delete_evlist: cpus = NULL; threads = NULL; out_free_cpus: - cpu_map__delete(cpus); + cpu_map__put(cpus); out_free_threads: - thread_map__delete(threads); + thread_map__put(threads); return err; } diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c index 7f48efa7e295..145050e2e544 100644 --- a/tools/perf/tests/mmap-thread-lookup.c +++ b/tools/perf/tests/mmap-thread-lookup.c @@ -143,7 +143,7 @@ static int synth_process(struct machine *machine) perf_event__process, machine, 0, 500); - thread_map__delete(map); + thread_map__put(map); return err; } diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c index 9a7a116e09b8..a572f87e9c8d 100644 --- a/tools/perf/tests/openat-syscall-all-cpus.c +++ b/tools/perf/tests/openat-syscall-all-cpus.c @@ -78,7 +78,7 @@ int test__openat_syscall_event_on_all_cpus(void) * we use the auto allocation it will allocate just for 1 cpu, * as we start by cpu 0. */ - if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) { + if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) { pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr); goto out_close_fd; } @@ -98,9 +98,9 @@ int test__openat_syscall_event_on_all_cpus(void) } expected = nr_openat_calls + cpu; - if (evsel->counts->cpu[cpu].val != expected) { + if (perf_counts(evsel->counts, cpu, 0)->val != expected) { pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n", - expected, cpus->map[cpu], evsel->counts->cpu[cpu].val); + expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val); err = -1; } } @@ -111,6 +111,6 @@ out_close_fd: out_evsel_delete: perf_evsel__delete(evsel); out_thread_map_delete: - thread_map__delete(threads); + thread_map__put(threads); return err; } diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c index 6245221479d7..01a19626c846 100644 --- a/tools/perf/tests/openat-syscall-tp-fields.c +++ b/tools/perf/tests/openat-syscall-tp-fields.c @@ -45,7 +45,7 @@ int test__syscall_openat_tp_fields(void) perf_evsel__config(evsel, &opts); - evlist->threads->map[0] = getpid(); + thread_map__set_pid(evlist->threads, 0, getpid()); err = perf_evlist__open(evlist); if (err < 0) { diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c index 9f9491bb8e48..c9a37bc6b33a 100644 --- a/tools/perf/tests/openat-syscall.c +++ b/tools/perf/tests/openat-syscall.c @@ -44,9 +44,9 @@ int test__openat_syscall_event(void) goto out_close_fd; } - if (evsel->counts->cpu[0].val != nr_openat_calls) { + if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) { pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n", - nr_openat_calls, evsel->counts->cpu[0].val); + nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val); goto out_close_fd; } @@ -56,6 +56,6 @@ out_close_fd: out_evsel_delete: 
perf_evsel__delete(evsel); out_thread_map_delete: - thread_map__delete(threads); + thread_map__put(threads); return err; } diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c index 0d31403ea593..e698742d4fec 100644 --- a/tools/perf/tests/switch-tracking.c +++ b/tools/perf/tests/switch-tracking.c @@ -560,8 +560,8 @@ out: perf_evlist__disable(evlist); perf_evlist__delete(evlist); } else { - cpu_map__delete(cpus); - thread_map__delete(threads); + cpu_map__put(cpus); + thread_map__put(threads); } return err; diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 8e5038b48ba8..ebb47d96bc0b 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -61,6 +61,7 @@ int test__switch_tracking(void); int test__fdarray__filter(void); int test__fdarray__add(void); int test__kmod_path__parse(void); +int test__thread_map(void); #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__) #ifdef HAVE_DWARF_UNWIND_SUPPORT diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c new file mode 100644 index 000000000000..5acf000939ea --- /dev/null +++ b/tools/perf/tests/thread-map.c @@ -0,0 +1,38 @@ +#include <sys/types.h> +#include <unistd.h> +#include "tests.h" +#include "thread_map.h" +#include "debug.h" + +int test__thread_map(void) +{ + struct thread_map *map; + + /* test map on current pid */ + map = thread_map__new_by_pid(getpid()); + TEST_ASSERT_VAL("failed to alloc map", map); + + thread_map__read_comms(map); + + TEST_ASSERT_VAL("wrong nr", map->nr == 1); + TEST_ASSERT_VAL("wrong pid", + thread_map__pid(map, 0) == getpid()); + TEST_ASSERT_VAL("wrong comm", + thread_map__comm(map, 0) && + !strcmp(thread_map__comm(map, 0), "perf")); + thread_map__put(map); + + /* test dummy pid */ + map = thread_map__new_dummy(); + TEST_ASSERT_VAL("failed to alloc map", map); + + thread_map__read_comms(map); + + TEST_ASSERT_VAL("wrong nr", map->nr == 1); + TEST_ASSERT_VAL("wrong pid", thread_map__pid(map, 0) == -1); + TEST_ASSERT_VAL("wrong comm", + thread_map__comm(map, 0) && + !strcmp(thread_map__comm(map, 0), "dummy")); + thread_map__put(map); + return 0; +} diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index c42adb600091..7629bef2fd79 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1902,8 +1902,23 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, case CTRL('c'): goto out_free_stack; case 'f': - if (!is_report_browser(hbt)) - goto out_free_stack; + if (!is_report_browser(hbt)) { + struct perf_top *top = hbt->arg; + + perf_evlist__toggle_enable(top->evlist); + /* + * No need to refresh, resort/decay histogram + * entries if we are not collecting samples: + */ + if (top->evlist->enabled) { + helpline = "Press 'f' to disable the events or 'h' to see other hotkeys"; + hbt->refresh = delay_secs; + } else { + helpline = "Press 'f' again to re-enable the events"; + hbt->refresh = 0; + } + continue; + } /* Fall thru */ default: helpline = "Press '?' 
for help on key bindings"; diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index df66966cfde7..7e7405c9b936 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -119,12 +119,12 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, if (per_cpu) { mp->cpu = evlist->cpus->map[idx]; if (evlist->threads) - mp->tid = evlist->threads->map[0]; + mp->tid = thread_map__pid(evlist->threads, 0); else mp->tid = -1; } else { mp->cpu = -1; - mp->tid = evlist->threads->map[idx]; + mp->tid = thread_map__pid(evlist->threads, idx); } } @@ -1182,6 +1182,13 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm, data2 = NULL; } + if (itr->alignment) { + unsigned int unwanted = len1 % itr->alignment; + + len1 -= unwanted; + size -= unwanted; + } + /* padding must be written by fn() e.g. record__process_auxtrace() */ padding = size & 7; if (padding) diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index a171abbe7301..471aecbc4d68 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -303,6 +303,7 @@ struct auxtrace_record { const char *str); u64 (*reference)(struct auxtrace_record *itr); int (*read_finish)(struct auxtrace_record *itr, int idx); + unsigned int alignment; }; #ifdef HAVE_AUXTRACE_SUPPORT diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index 85b523885f9d..2babddaa2481 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c @@ -7,11 +7,15 @@ static unsigned long flag = PERF_FLAG_FD_CLOEXEC; +#ifdef __GLIBC_PREREQ +#if !__GLIBC_PREREQ(2, 6) int __weak sched_getcpu(void) { errno = ENOSYS; return -1; } +#endif +#endif static int perf_flag_probe(void) { diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index c4e55b71010c..3667e2123e5b 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -5,6 +5,7 @@ #include <assert.h> #include <stdio.h> #include <stdlib.h> +#include "asm/bug.h" static struct cpu_map *cpu_map__default_new(void) { @@ -22,6 +23,7 @@ static struct cpu_map *cpu_map__default_new(void) cpus->map[i] = i; cpus->nr = nr_cpus; + atomic_set(&cpus->refcnt, 1); } return cpus; @@ -35,6 +37,7 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) if (cpus != NULL) { cpus->nr = nr_cpus; memcpy(cpus->map, tmp_cpus, payload_size); + atomic_set(&cpus->refcnt, 1); } return cpus; @@ -194,14 +197,32 @@ struct cpu_map *cpu_map__dummy_new(void) if (cpus != NULL) { cpus->nr = 1; cpus->map[0] = -1; + atomic_set(&cpus->refcnt, 1); } return cpus; } -void cpu_map__delete(struct cpu_map *map) +static void cpu_map__delete(struct cpu_map *map) { - free(map); + if (map) { + WARN_ONCE(atomic_read(&map->refcnt) != 0, + "cpu_map refcnt unbalanced\n"); + free(map); + } +} + +struct cpu_map *cpu_map__get(struct cpu_map *map) +{ + if (map) + atomic_inc(&map->refcnt); + return map; +} + +void cpu_map__put(struct cpu_map *map) +{ + if (map && atomic_dec_and_test(&map->refcnt)) + cpu_map__delete(map); } int cpu_map__get_socket(struct cpu_map *map, int idx) @@ -263,6 +284,7 @@ static int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res, /* ensure we process id in increasing order */ qsort(c->map, c->nr, sizeof(int), cmp_ids); + atomic_set(&cpus->refcnt, 1); *res = c; return 0; } diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h index 61a654849002..0af9cecb4c51 100644 --- a/tools/perf/util/cpumap.h +++ b/tools/perf/util/cpumap.h @@ -3,18 +3,19 @@ #include <stdio.h> #include <stdbool.h> +#include <linux/atomic.h> 
#include "perf.h" #include "util/debug.h" struct cpu_map { + atomic_t refcnt; int nr; int map[]; }; struct cpu_map *cpu_map__new(const char *cpu_list); struct cpu_map *cpu_map__dummy_new(void); -void cpu_map__delete(struct cpu_map *map); struct cpu_map *cpu_map__read(FILE *file); size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); int cpu_map__get_socket(struct cpu_map *map, int idx); @@ -22,6 +23,9 @@ int cpu_map__get_core(struct cpu_map *map, int idx); int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp); int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep); +struct cpu_map *cpu_map__get(struct cpu_map *map); +void cpu_map__put(struct cpu_map *map); + static inline int cpu_map__socket(struct cpu_map *sock, int s) { if (!sock || s > sock->nr || s < 0) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index d7d986d8f23e..67a977e5d0ab 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -504,7 +504,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, for (thread = 0; thread < threads->nr; ++thread) { if (__event__synthesize_thread(comm_event, mmap_event, fork_event, - threads->map[thread], 0, + thread_map__pid(threads, thread), 0, process, tool, machine, mmap_data, proc_map_timeout)) { err = -1; @@ -515,12 +515,12 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, * comm.pid is set to thread group id by * perf_event__synthesize_comm */ - if ((int) comm_event->comm.pid != threads->map[thread]) { + if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) { bool need_leader = true; /* is thread group leader in thread_map? */ for (j = 0; j < threads->nr; ++j) { - if ((int) comm_event->comm.pid == threads->map[j]) { + if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) { need_leader = false; break; } diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 8366511b45f8..6cfdee68e763 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -114,8 +114,8 @@ void perf_evlist__delete(struct perf_evlist *evlist) { perf_evlist__munmap(evlist); perf_evlist__close(evlist); - cpu_map__delete(evlist->cpus); - thread_map__delete(evlist->threads); + cpu_map__put(evlist->cpus); + thread_map__put(evlist->threads); evlist->cpus = NULL; evlist->threads = NULL; perf_evlist__purge(evlist); @@ -548,7 +548,7 @@ static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, else sid->cpu = -1; if (!evsel->system_wide && evlist->threads && thread >= 0) - sid->tid = evlist->threads->map[thread]; + sid->tid = thread_map__pid(evlist->threads, thread); else sid->tid = -1; } @@ -1101,6 +1101,31 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); } +static int perf_evlist__propagate_maps(struct perf_evlist *evlist, + struct target *target) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + /* + * We already have cpus for evsel (via PMU sysfs) so + * keep it, if there's no target cpu list defined. 
+ */ + if (evsel->cpus && target->cpu_list) + cpu_map__put(evsel->cpus); + + if (!evsel->cpus || target->cpu_list) + evsel->cpus = cpu_map__get(evlist->cpus); + + evsel->threads = thread_map__get(evlist->threads); + + if (!evsel->cpus || !evsel->threads) + return -ENOMEM; + } + + return 0; +} + int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) { evlist->threads = thread_map__new_str(target->pid, target->tid, @@ -1117,10 +1142,10 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) if (evlist->cpus == NULL) goto out_delete_threads; - return 0; + return perf_evlist__propagate_maps(evlist, target); out_delete_threads: - thread_map__delete(evlist->threads); + thread_map__put(evlist->threads); evlist->threads = NULL; return -1; } @@ -1353,7 +1378,7 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) out: return err; out_free_cpus: - cpu_map__delete(evlist->cpus); + cpu_map__put(evlist->cpus); evlist->cpus = NULL; goto out; } @@ -1475,7 +1500,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar __func__, __LINE__); goto out_close_pipes; } - evlist->threads->map[0] = evlist->workload.pid; + thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); } close(child_ready_pipe[1]); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index a8489b9d2812..037633c1da9d 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -289,5 +289,4 @@ void perf_evlist__to_front(struct perf_evlist *evlist, void perf_evlist__set_tracking_event(struct perf_evlist *evlist, struct perf_evsel *tracking_evsel); - #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 33449decf7bd..2936b3080722 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -885,6 +885,8 @@ void perf_evsel__exit(struct perf_evsel *evsel) perf_evsel__free_fd(evsel); perf_evsel__free_id(evsel); close_cgroup(evsel->cgrp); + cpu_map__put(evsel->cpus); + thread_map__put(evsel->threads); zfree(&evsel->group_name); zfree(&evsel->name); perf_evsel__object.fini(evsel); @@ -896,7 +898,7 @@ void perf_evsel__delete(struct perf_evsel *evsel) free(evsel); } -void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count) { struct perf_counts_values tmp; @@ -908,8 +910,8 @@ void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, tmp = evsel->prev_raw_counts->aggr; evsel->prev_raw_counts->aggr = *count; } else { - tmp = evsel->prev_raw_counts->cpu[cpu]; - evsel->prev_raw_counts->cpu[cpu] = *count; + tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); + *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; } count->val = count->val - tmp.val; @@ -937,20 +939,18 @@ void perf_counts_values__scale(struct perf_counts_values *count, *pscaled = scaled; } -int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, - perf_evsel__read_cb_t cb) +int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count) { - struct perf_counts_values count; - - memset(&count, 0, sizeof(count)); + memset(count, 0, sizeof(*count)); if (FD(evsel, cpu, thread) < 0) return -EINVAL; - if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0) + if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0) return -errno; - return cb(evsel, cpu, thread, &count); + return 0; } int 
__perf_evsel__read_on_cpu(struct perf_evsel *evsel, @@ -962,15 +962,15 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, if (FD(evsel, cpu, thread) < 0) return -EINVAL; - if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0) + if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) return -ENOMEM; if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0) return -errno; - perf_evsel__compute_deltas(evsel, cpu, &count); + perf_evsel__compute_deltas(evsel, cpu, thread, &count); perf_counts_values__scale(&count, scale, NULL); - evsel->counts->cpu[cpu] = count; + *perf_counts(evsel->counts, cpu, thread) = count; return 0; } @@ -1167,7 +1167,7 @@ retry_sample_id: int group_fd; if (!evsel->cgrp && !evsel->system_wide) - pid = threads->map[thread]; + pid = thread_map__pid(threads, thread); group_fd = get_group_fd(evsel, cpu, thread); retry_open: diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index bb0579e8a10a..4a7ed5656cf0 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -8,23 +8,8 @@ #include <linux/types.h> #include "xyarray.h" #include "symbol.h" - -struct perf_counts_values { - union { - struct { - u64 val; - u64 ena; - u64 run; - }; - u64 values[3]; - }; -}; - -struct perf_counts { - s8 scaled; - struct perf_counts_values aggr; - struct perf_counts_values cpu[]; -}; +#include "cpumap.h" +#include "stat.h" struct perf_evsel; @@ -82,6 +67,7 @@ struct perf_evsel { struct cgroup_sel *cgrp; void *handler; struct cpu_map *cpus; + struct thread_map *threads; unsigned int sample_size; int id_pos; int is_pos; @@ -113,10 +99,20 @@ struct thread_map; struct perf_evlist; struct record_opts; +static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) +{ + return evsel->cpus; +} + +static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) +{ + return perf_evsel__cpus(evsel)->nr; +} + void perf_counts_values__scale(struct perf_counts_values *count, bool scale, s8 *pscaled); -void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, +void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread, struct perf_counts_values *count); int perf_evsel__object_config(size_t object_size, @@ -233,12 +229,8 @@ static inline bool perf_evsel__match2(struct perf_evsel *e1, (a)->attr.type == (b)->attr.type && \ (a)->attr.config == (b)->attr.config) -typedef int (perf_evsel__read_cb_t)(struct perf_evsel *evsel, - int cpu, int thread, - struct perf_counts_values *count); - -int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread, - perf_evsel__read_cb_t cb); +int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread, + struct perf_counts_values *count); int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 21a77e7a171e..03ace57a800c 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1063,8 +1063,7 @@ out: free(buf); return events; error: - if (events) - free_event_desc(events); + free_event_desc(events); events = NULL; goto out; } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 4744673aff1b..7ff682770fdb 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1448,10 +1448,9 @@ int machine__process_event(struct machine *machine, union perf_event *event, case PERF_RECORD_AUX: ret = machine__process_aux_event(machine, event); break; case PERF_RECORD_ITRACE_START: - ret = 
machine__process_itrace_start_event(machine, event); + ret = machine__process_itrace_start_event(machine, event); break; case PERF_RECORD_LOST_SAMPLES: ret = machine__process_lost_samples_event(machine, event, sample); break; - break; default: ret = -1; break; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 2a4d1ec02846..09f8d2357108 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -17,6 +17,7 @@ #include "parse-events-flex.h" #include "pmu.h" #include "thread_map.h" +#include "cpumap.h" #include "asm/bug.h" #define MAX_NAME_LEN 100 @@ -285,7 +286,9 @@ __add_event(struct list_head *list, int *idx, if (!evsel) return NULL; - evsel->cpus = cpus; + if (cpus) + evsel->cpus = cpu_map__get(cpus); + if (name) evsel->name = strdup(name); list_add_tail(&evsel->node, list); diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 09e738fe9ea2..13cef3c65565 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -119,8 +119,8 @@ event [^,{}/]+ num_dec [0-9]+ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ -name [a-zA-Z_*?][a-zA-Z0-9_*?]* -name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]* +name [a-zA-Z_*?][a-zA-Z0-9_*?.]* +name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.]* /* If you add a modifier you need to update check_modifier() */ modifier_event [ukhpGHSDI]+ modifier_bp [rwx]{1,3} @@ -165,7 +165,6 @@ modifier_bp [rwx]{1,3} return PE_EVENT_NAME; } -. | <<EOF>> { BEGIN(INITIAL); REWIND(0); diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 0fcc624eb767..7bcb8c315615 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1,4 +1,5 @@ #include <linux/list.h> +#include <linux/compiler.h> #include <sys/types.h> #include <unistd.h> #include <stdio.h> @@ -205,17 +206,12 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, return 0; } -static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) +static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name, + char *desc __maybe_unused, char *val) { struct perf_pmu_alias *alias; - char buf[256]; int ret; - ret = fread(buf, 1, sizeof(buf), file); - if (ret == 0) - return -EINVAL; - buf[ret] = 0; - alias = malloc(sizeof(*alias)); if (!alias) return -ENOMEM; @@ -225,26 +221,43 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI alias->unit[0] = '\0'; alias->per_pkg = false; - ret = parse_events_terms(&alias->terms, buf); + ret = parse_events_terms(&alias->terms, val); if (ret) { + pr_err("Cannot parse alias %s: %d\n", val, ret); free(alias); return ret; } alias->name = strdup(name); - /* - * load unit name and scale if available - */ - perf_pmu__parse_unit(alias, dir, name); - perf_pmu__parse_scale(alias, dir, name); - perf_pmu__parse_per_pkg(alias, dir, name); - perf_pmu__parse_snapshot(alias, dir, name); + if (dir) { + /* + * load unit name and scale if available + */ + perf_pmu__parse_unit(alias, dir, name); + perf_pmu__parse_scale(alias, dir, name); + perf_pmu__parse_per_pkg(alias, dir, name); + perf_pmu__parse_snapshot(alias, dir, name); + } list_add_tail(&alias->list, list); return 0; } +static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FILE *file) +{ + char buf[256]; + int ret; + + ret = fread(buf, 1, sizeof(buf), file); + if (ret == 0) + return -EINVAL; + + buf[ret] = 0; + + return __perf_pmu__new_alias(list, dir, name, NULL, buf); +} + static inline bool pmu_alias_info_file(char *name) { size_t len; @@ 
-436,7 +449,7 @@ static struct cpu_map *pmu_cpumask(const char *name) return cpus; } -struct perf_event_attr *__attribute__((weak)) +struct perf_event_attr * __weak perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) { return NULL; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 076527b639bd..381f23a443c7 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -249,8 +249,12 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs) static bool kprobe_blacklist__listed(unsigned long address); static bool kprobe_warn_out_range(const char *symbol, unsigned long address) { + u64 etext_addr; + /* Get the address of _etext for checking non-probable text symbol */ - if (kernel_get_symbol_address_by_name("_etext", false) < address) + etext_addr = kernel_get_symbol_address_by_name("_etext", false); + + if (etext_addr != 0 && etext_addr < address) pr_warning("%s is out of .text, skip it.\n", symbol); else if (kprobe_blacklist__listed(address)) pr_warning("%s is blacklisted function, skip it.\n", symbol); diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 5925fec90562..e23ded40c79e 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -20,3 +20,4 @@ util/stat.c util/strlist.c util/trace-event.c ../../lib/rbtree.c +util/string.c diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index d906d0ad5d40..626422eda727 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -384,7 +384,7 @@ static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus, static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus) { - cpu_map__delete(pcpus->cpus); + cpu_map__put(pcpus->cpus); pcpus->ob_type->tp_free((PyObject*)pcpus); } @@ -453,7 +453,7 @@ static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads) { - thread_map__delete(pthreads->threads); + thread_map__put(pthreads->threads); pthreads->ob_type->tp_free((PyObject*)pthreads); } diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index d457c523a33d..1f7becbe5e18 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn) if (!cpus) return false; cpu = cpus->map[0]; - cpu_map__delete(cpus); + cpu_map__put(cpus); do { ret = perf_do_probe_api(fn, cpu, try[i++]); @@ -226,7 +226,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str) struct cpu_map *cpus = cpu_map__new(NULL); cpu = cpus ? cpus->map[0] : 0; - cpu_map__delete(cpus); + cpu_map__put(cpus); } else { cpu = evlist->cpus->map[0]; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index aa482c10469d..ed9dc2555ec7 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -686,6 +686,8 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, struct ordered_events *oe) { + if (dump_trace) + fprintf(stdout, "\n"); return ordered_events__flush(oe, OE_FLUSH__ROUND); } @@ -1726,7 +1728,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) msg = " (excludes AUX area (e.g. 
instruction trace) decoded / synthesized events)"; - ret = fprintf(fp, "Aggregated stats:%s\n", msg); + ret = fprintf(fp, "\nAggregated stats:%s\n", msg); ret += events_stats__fprintf(&session->evlist->stats, fp); return ret; @@ -1893,7 +1895,7 @@ int perf_session__cpu_bitmap(struct perf_session *session, err = 0; out_delete_map: - cpu_map__delete(map); + cpu_map__put(map); return err; } diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 4014b709f956..f2a0d1521e26 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -1,6 +1,8 @@ #include <math.h> #include "stat.h" +#include "evlist.h" #include "evsel.h" +#include "thread_map.h" void update_stats(struct stats *stats, u64 val) { @@ -95,33 +97,46 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel) } } -struct perf_counts *perf_counts__new(int ncpus) +struct perf_counts *perf_counts__new(int ncpus, int nthreads) { - int size = sizeof(struct perf_counts) + - ncpus * sizeof(struct perf_counts_values); + struct perf_counts *counts = zalloc(sizeof(*counts)); - return zalloc(size); + if (counts) { + struct xyarray *values; + + values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values)); + if (!values) { + free(counts); + return NULL; + } + + counts->values = values; + } + + return counts; } void perf_counts__delete(struct perf_counts *counts) { - free(counts); + if (counts) { + xyarray__delete(counts->values); + free(counts); + } } -static void perf_counts__reset(struct perf_counts *counts, int ncpus) +static void perf_counts__reset(struct perf_counts *counts) { - memset(counts, 0, (sizeof(*counts) + - (ncpus * sizeof(struct perf_counts_values)))); + xyarray__reset(counts->values); } -void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus) +void perf_evsel__reset_counts(struct perf_evsel *evsel) { - perf_counts__reset(evsel->counts, ncpus); + perf_counts__reset(evsel->counts); } -int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus) +int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads) { - evsel->counts = perf_counts__new(ncpus); + evsel->counts = perf_counts__new(ncpus, nthreads); return evsel->counts != NULL ? 0 : -ENOMEM; } @@ -130,3 +145,96 @@ void perf_evsel__free_counts(struct perf_evsel *evsel) perf_counts__delete(evsel->counts); evsel->counts = NULL; } + +void perf_evsel__reset_stat_priv(struct perf_evsel *evsel) +{ + int i; + struct perf_stat *ps = evsel->priv; + + for (i = 0; i < 3; i++) + init_stats(&ps->res_stats[i]); + + perf_stat_evsel_id_init(evsel); +} + +int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel) +{ + evsel->priv = zalloc(sizeof(struct perf_stat)); + if (evsel->priv == NULL) + return -ENOMEM; + perf_evsel__reset_stat_priv(evsel); + return 0; +} + +void perf_evsel__free_stat_priv(struct perf_evsel *evsel) +{ + zfree(&evsel->priv); +} + +int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel, + int ncpus, int nthreads) +{ + struct perf_counts *counts; + + counts = perf_counts__new(ncpus, nthreads); + if (counts) + evsel->prev_raw_counts = counts; + + return counts ? 
0 : -ENOMEM; +} + +void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel) +{ + perf_counts__delete(evsel->prev_raw_counts); + evsel->prev_raw_counts = NULL; +} + +int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw) +{ + int ncpus = perf_evsel__nr_cpus(evsel); + int nthreads = thread_map__nr(evsel->threads); + + if (perf_evsel__alloc_stat_priv(evsel) < 0 || + perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 || + (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0)) + return -ENOMEM; + + return 0; +} + +int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + if (perf_evsel__alloc_stats(evsel, alloc_raw)) + goto out_free; + } + + return 0; + +out_free: + perf_evlist__free_stats(evlist); + return -1; +} + +void perf_evlist__free_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__free_stat_priv(evsel); + perf_evsel__free_counts(evsel); + perf_evsel__free_prev_raw_counts(evsel); + } +} + +void perf_evlist__reset_stats(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel; + + evlist__for_each(evlist, evsel) { + perf_evsel__reset_stat_priv(evsel); + perf_evsel__reset_counts(evsel); + } +} diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 093dc3cb28dd..1cfbe0a980ac 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <stdio.h> +#include "xyarray.h" struct stats { @@ -29,8 +30,32 @@ enum aggr_mode { AGGR_GLOBAL, AGGR_SOCKET, AGGR_CORE, + AGGR_THREAD, }; +struct perf_counts_values { + union { + struct { + u64 val; + u64 ena; + u64 run; + }; + u64 values[3]; + }; +}; + +struct perf_counts { + s8 scaled; + struct perf_counts_values aggr; + struct xyarray *values; +}; + +static inline struct perf_counts_values* +perf_counts(struct perf_counts *counts, int cpu, int thread) +{ + return xyarray__entry(counts->values, cpu, thread); +} + void update_stats(struct stats *stats, u64 val); double avg_stats(struct stats *stats); double stddev_stats(struct stats *stats); @@ -46,6 +71,8 @@ static inline void init_stats(struct stats *stats) } struct perf_evsel; +struct perf_evlist; + bool __perf_evsel_stat__is(struct perf_evsel *evsel, enum perf_stat_evsel_id id); @@ -62,10 +89,24 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count, void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel, double avg, int cpu, enum aggr_mode aggr); -struct perf_counts *perf_counts__new(int ncpus); +struct perf_counts *perf_counts__new(int ncpus, int nthreads); void perf_counts__delete(struct perf_counts *counts); -void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus); -int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus); +void perf_evsel__reset_counts(struct perf_evsel *evsel); +int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads); void perf_evsel__free_counts(struct perf_evsel *evsel); + +void perf_evsel__reset_stat_priv(struct perf_evsel *evsel); +int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel); +void perf_evsel__free_stat_priv(struct perf_evsel *evsel); + +int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel, + int ncpus, int nthreads); +void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel); + +int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw); + +int perf_evlist__alloc_stats(struct perf_evlist 
*evlist, bool alloc_raw); +void perf_evlist__free_stats(struct perf_evlist *evlist); +void perf_evlist__reset_stats(struct perf_evlist *evlist); #endif diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 283d3e73e2f2..eec6c1149f44 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -748,7 +748,7 @@ static int str_to_bitmap(char *s, cpumask_t *b) set_bit(c, cpumask_bits(b)); } - cpu_map__delete(m); + cpu_map__put(m); return ret; } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 504f2d73b7ee..48b588c6951a 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1132,8 +1132,11 @@ static int dso__load_kcore(struct dso *dso, struct map *map, INIT_LIST_HEAD(&md.maps); fd = open(kcore_filename, O_RDONLY); - if (fd < 0) + if (fd < 0) { + pr_err("%s requires CAP_SYS_RAWIO capability to access.\n", + kcore_filename); return -EINVAL; + } /* Read new maps into temporary lists */ err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md, diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index f4822bd03709..da7646d767fe 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -8,8 +8,11 @@ #include <unistd.h> #include "strlist.h" #include <string.h> +#include <api/fs/fs.h> +#include "asm/bug.h" #include "thread_map.h" #include "util.h" +#include "debug.h" /* Skip "." and ".." directories */ static int filter(const struct dirent *dir) @@ -20,11 +23,26 @@ static int filter(const struct dirent *dir) return 1; } +static void thread_map__reset(struct thread_map *map, int start, int nr) +{ + size_t size = (nr - start) * sizeof(map->map[0]); + + memset(&map->map[start], 0, size); +} + static struct thread_map *thread_map__realloc(struct thread_map *map, int nr) { - size_t size = sizeof(*map) + sizeof(pid_t) * nr; + size_t size = sizeof(*map) + sizeof(map->map[0]) * nr; + int start = map ? map->nr : 0; + + map = realloc(map, size); + /* + * We only realloc to add more items, let's reset new items. 
+ */ + if (map) + thread_map__reset(map, start, nr); - return realloc(map, size); + return map; } #define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr) @@ -45,8 +63,9 @@ struct thread_map *thread_map__new_by_pid(pid_t pid) threads = thread_map__alloc(items); if (threads != NULL) { for (i = 0; i < items; i++) - threads->map[i] = atoi(namelist[i]->d_name); + thread_map__set_pid(threads, i, atoi(namelist[i]->d_name)); threads->nr = items; + atomic_set(&threads->refcnt, 1); } for (i=0; i<items; i++) @@ -61,8 +80,9 @@ struct thread_map *thread_map__new_by_tid(pid_t tid) struct thread_map *threads = thread_map__alloc(1); if (threads != NULL) { - threads->map[0] = tid; - threads->nr = 1; + thread_map__set_pid(threads, 0, tid); + threads->nr = 1; + atomic_set(&threads->refcnt, 1); } return threads; @@ -84,6 +104,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) goto out_free_threads; threads->nr = 0; + atomic_set(&threads->refcnt, 1); while (!readdir_r(proc, &dirent, &next) && next) { char *end; @@ -123,8 +144,10 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) threads = tmp; } - for (i = 0; i < items; i++) - threads->map[threads->nr + i] = atoi(namelist[i]->d_name); + for (i = 0; i < items; i++) { + thread_map__set_pid(threads, threads->nr + i, + atoi(namelist[i]->d_name)); + } for (i = 0; i < items; i++) zfree(&namelist[i]); @@ -201,7 +224,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) threads = nt; for (i = 0; i < items; i++) { - threads->map[j++] = atoi(namelist[i]->d_name); + thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name)); zfree(&namelist[i]); } threads->nr = total_tasks; @@ -210,6 +233,8 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) out: strlist__delete(slist); + if (threads) + atomic_set(&threads->refcnt, 1); return threads; out_free_namelist: @@ -227,8 +252,9 @@ struct thread_map *thread_map__new_dummy(void) struct thread_map *threads = thread_map__alloc(1); if (threads != NULL) { - threads->map[0] = -1; - threads->nr = 1; + thread_map__set_pid(threads, 0, -1); + threads->nr = 1; + atomic_set(&threads->refcnt, 1); } return threads; } @@ -267,10 +293,12 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str) goto out_free_threads; threads = nt; - threads->map[ntasks - 1] = tid; - threads->nr = ntasks; + thread_map__set_pid(threads, ntasks - 1, tid); + threads->nr = ntasks; } out: + if (threads) + atomic_set(&threads->refcnt, 1); return threads; out_free_threads: @@ -290,9 +318,30 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid, return thread_map__new_by_tid_str(tid); } -void thread_map__delete(struct thread_map *threads) +static void thread_map__delete(struct thread_map *threads) { - free(threads); + if (threads) { + int i; + + WARN_ONCE(atomic_read(&threads->refcnt) != 0, + "thread map refcnt unbalanced\n"); + for (i = 0; i < threads->nr; i++) + free(thread_map__comm(threads, i)); + free(threads); + } +} + +struct thread_map *thread_map__get(struct thread_map *map) +{ + if (map) + atomic_inc(&map->refcnt); + return map; +} + +void thread_map__put(struct thread_map *map) +{ + if (map && atomic_dec_and_test(&map->refcnt)) + thread_map__delete(map); } size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) @@ -301,7 +350,60 @@ size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) size_t printed = fprintf(fp, "%d thread%s: ", threads->nr, threads->nr > 1 ? 
"s" : ""); for (i = 0; i < threads->nr; ++i) - printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]); + printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i)); return printed + fprintf(fp, "\n"); } + +static int get_comm(char **comm, pid_t pid) +{ + char *path; + size_t size; + int err; + + if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1) + return -ENOMEM; + + err = filename__read_str(path, comm, &size); + if (!err) { + /* + * We're reading 16 bytes, while filename__read_str + * allocates data per BUFSIZ bytes, so we can safely + * mark the end of the string. + */ + (*comm)[size] = 0; + rtrim(*comm); + } + + free(path); + return err; +} + +static void comm_init(struct thread_map *map, int i) +{ + pid_t pid = thread_map__pid(map, i); + char *comm = NULL; + + /* dummy pid comm initialization */ + if (pid == -1) { + map->map[i].comm = strdup("dummy"); + return; + } + + /* + * The comm name is like extra bonus ;-), + * so just warn if we fail for any reason. + */ + if (get_comm(&comm, pid)) + pr_warning("Couldn't resolve comm name for pid %d\n", pid); + + map->map[i].comm = comm; +} + +void thread_map__read_comms(struct thread_map *threads) +{ + int i; + + for (i = 0; i < threads->nr; ++i) + comm_init(threads, i); +} diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index 95313f43cc0f..af679d8a50f8 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -3,10 +3,17 @@ #include <sys/types.h> #include <stdio.h> +#include <linux/atomic.h> + +struct thread_map_data { + pid_t pid; + char *comm; +}; struct thread_map { + atomic_t refcnt; int nr; - pid_t map[]; + struct thread_map_data map[]; }; struct thread_map *thread_map__new_dummy(void); @@ -15,11 +22,12 @@ struct thread_map *thread_map__new_by_tid(pid_t tid); struct thread_map *thread_map__new_by_uid(uid_t uid); struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); +struct thread_map *thread_map__get(struct thread_map *map); +void thread_map__put(struct thread_map *map); + struct thread_map *thread_map__new_str(const char *pid, const char *tid, uid_t uid); -void thread_map__delete(struct thread_map *threads); - size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); static inline int thread_map__nr(struct thread_map *threads) @@ -27,4 +35,21 @@ static inline int thread_map__nr(struct thread_map *threads) return threads ? threads->nr : 1; } +static inline pid_t thread_map__pid(struct thread_map *map, int thread) +{ + return map->map[thread].pid; +} + +static inline void +thread_map__set_pid(struct thread_map *map, int thread, pid_t pid) +{ + map->map[thread].pid = pid; +} + +static inline char *thread_map__comm(struct thread_map *map, int thread) +{ + return map->map[thread].comm; +} + +void thread_map__read_comms(struct thread_map *threads); #endif /* __PERF_THREAD_MAP_H */ |