author		Zhengjun Xing <zhengjun.xing@linux.intel.com>	2022-04-22 14:56:35 +0800
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2022-04-24 07:50:18 -0300
commit		d7e3c397087fffde68389e7530093dbc2b70c48a (patch)
tree		3c61b4931c55a4dc8688ee4ec9ad42f420f7e397 /tools/perf
parent		2c8e64514aa2ea414c8ada6c77405680267d0ab3 (diff)
download	linux-d7e3c397087fffde68389e7530093dbc2b70c48a.tar.bz2
perf stat: Support hybrid --topdown option
cpu_core and cpu_atom have different topdown event groups.

For cpu_core, --topdown is equivalent to:

  "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,
    cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,
    cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,
    cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"

For cpu_atom, --topdown is equivalent to:

  "{cpu_atom/topdown-retiring/,cpu_atom/topdown-bad-spec/,
    cpu_atom/topdown-fe-bound/,cpu_atom/topdown-be-bound/}"

To simplify the implementation, on hybrid systems --topdown is used
together with --cputype. If --cputype is not specified, the cpu_core
topdown events are used by default.
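
As an illustration of the expansion above, the following standalone C
sketch composes a "{pmu/event/,...}" group string for a given hybrid
PMU name. This is not the perf implementation; build_topdown_group()
and the event list are made up for this example:

/*
 * Hypothetical sketch: build a "{pmu/event/,...}" group string for a
 * hybrid PMU, mirroring the cpu_atom expansion quoted above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_topdown_group(const char *pmu_name, const char **events)
{
	size_t len = 2;			/* '{' plus trailing '\0' */
	char *str, *s;
	int i;

	/* each event contributes "pmu/event/" plus a ',' (or the final '}') */
	for (i = 0; events[i]; i++)
		len += strlen(pmu_name) + strlen(events[i]) + 3;

	str = malloc(len);
	if (!str)
		return NULL;

	s = str;
	*s++ = '{';
	for (i = 0; events[i]; i++) {
		sprintf(s, "%s/%s/,", pmu_name, events[i]);
		s += strlen(s);
	}
	s[-1] = '}';			/* replace the trailing ',' */
	*s = '\0';
	return str;
}

int main(void)
{
	const char *atom_events[] = {
		"topdown-retiring", "topdown-bad-spec",
		"topdown-fe-bound", "topdown-be-bound", NULL,
	};
	char *group = build_topdown_group("cpu_atom", atom_events);

	if (group) {
		/* prints: {cpu_atom/topdown-retiring/,...,cpu_atom/topdown-be-bound/} */
		printf("%s\n", group);
		free(group);
	}
	return 0;
}

The perf stat examples below show the resulting behavior on a hybrid
system: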
# ./perf stat --topdown -a sleep 1
WARNING: default to use cpu_core topdown events
Performance counter stats for 'system wide':
retiring bad speculation frontend bound backend bound heavy operations light operations branch mispredict machine clears fetch latency fetch bandwidth memory bound Core bound
4.1% 0.0% 5.1% 90.8% 2.3% 1.8% 0.0% 0.0% 4.2% 0.9% 9.9% 81.0%
1.002624229 seconds time elapsed
# ./perf stat --topdown -a --cputype atom sleep 1
Performance counter stats for 'system wide':
retiring bad speculation frontend bound backend bound
13.5% 0.1% 31.2% 55.2%
1.002366987 seconds time elapsed
Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220422065635.767648-3-zhengjun.xing@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
-rw-r--r--	tools/perf/builtin-stat.c	21
-rw-r--r--	tools/perf/util/stat.c		4
-rw-r--r--	tools/perf/util/topdown.c	17
-rw-r--r--	tools/perf/util/topdown.h	3
4 files changed, 36 insertions, 9 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5958bfd9a112..1b96636df01e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1867,11 +1867,23 @@ static int add_default_attributes(void)
 		unsigned int max_level = 1;
 		char *str = NULL;
 		bool warn = false;
+		const char *pmu_name = "cpu";
 
 		if (!force_metric_only)
 			stat_config.metric_only = true;
 
-		if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) {
+		if (perf_pmu__has_hybrid()) {
+			if (!evsel_list->hybrid_pmu_name) {
+				pr_warning("WARNING: default to use cpu_core topdown events\n");
+				evsel_list->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
+			}
+
+			pmu_name = evsel_list->hybrid_pmu_name;
+			if (!pmu_name)
+				return -1;
+		}
+
+		if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
 			metric_attrs = topdown_metric_L2_attrs;
 			max_level = 2;
 		}
@@ -1882,10 +1894,11 @@ static int add_default_attributes(void)
 		} else if (!stat_config.topdown_level)
 			stat_config.topdown_level = max_level;
 
-		if (topdown_filter_events(metric_attrs, &str, 1) < 0) {
+		if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) {
 			pr_err("Out of memory\n");
 			return -1;
 		}
+
 		if (metric_attrs[0] && str) {
 			if (!stat_config.interval && !stat_config.metric_only) {
 				fprintf(stat_config.output,
@@ -1909,10 +1922,12 @@ static int add_default_attributes(void)
 		}
 
 		if (topdown_filter_events(topdown_attrs, &str,
-					  arch_topdown_check_group(&warn)) < 0) {
+					  arch_topdown_check_group(&warn),
+					  pmu_name) < 0) {
 			pr_err("Out of memory\n");
 			return -1;
 		}
+
 		if (topdown_attrs[0] && str) {
 			struct parse_events_error errinfo;
 			if (warn)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ee6f03481215..924183df3da2 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -116,7 +116,9 @@ static void perf_stat_evsel_id_init(struct evsel *evsel)
 
 	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
 	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
-		if (!strcmp(evsel__name(evsel), id_str[i])) {
+		if (!strcmp(evsel__name(evsel), id_str[i]) ||
+		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
+		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
 			ps->id = i;
 			break;
 		}
diff --git a/tools/perf/util/topdown.c b/tools/perf/util/topdown.c
index 1081b20f9891..a369f84ceb6a 100644
--- a/tools/perf/util/topdown.c
+++ b/tools/perf/util/topdown.c
@@ -1,18 +1,24 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <stdio.h>
 #include "pmu.h"
+#include "pmu-hybrid.h"
 #include "topdown.h"
 
-int topdown_filter_events(const char **attr, char **str, bool use_group)
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+			  const char *pmu_name)
 {
 	int off = 0;
 	int i;
 	int len = 0;
 	char *s;
+	bool is_hybrid = perf_pmu__is_hybrid(pmu_name);
 
 	for (i = 0; attr[i]; i++) {
-		if (pmu_have_event("cpu", attr[i])) {
-			len += strlen(attr[i]) + 1;
+		if (pmu_have_event(pmu_name, attr[i])) {
+			if (is_hybrid)
+				len += strlen(attr[i]) + strlen(pmu_name) + 3;
+			else
+				len += strlen(attr[i]) + 1;
 			attr[i - off] = attr[i];
 		} else
 			off++;
@@ -30,7 +36,10 @@ int topdown_filter_events(const char **attr, char **str, bool use_group)
 	if (use_group)
 		*s++ = '{';
 	for (i = 0; attr[i]; i++) {
-		strcpy(s, attr[i]);
+		if (!is_hybrid)
+			strcpy(s, attr[i]);
+		else
+			sprintf(s, "%s/%s/", pmu_name, attr[i]);
 		s += strlen(s);
 		*s++ = ',';
 	}
diff --git a/tools/perf/util/topdown.h b/tools/perf/util/topdown.h
index 2f0d0b887639..118e75281f93 100644
--- a/tools/perf/util/topdown.h
+++ b/tools/perf/util/topdown.h
@@ -7,6 +7,7 @@ bool arch_topdown_check_group(bool *warn);
 void arch_topdown_group_warn(void);
 bool arch_topdown_sample_read(struct evsel *leader);
 
-int topdown_filter_events(const char **attr, char **str, bool use_group);
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+			  const char *pmu_name);
 
 #endif
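
The util/stat.c change can be illustrated outside of perf with a minimal
sketch: a hybrid event name such as "cpu_core/topdown-retiring/" no longer
compares equal to a bare id string like "topdown-retiring", so the lookup
falls back to substring checks on both the id string and the PMU name. The
helper id_matches() and the strings below are illustrative, not perf code:

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for the matching condition in
 * perf_stat_evsel_id_init(): exact match for plain event names,
 * substring match for "pmu/event/" style hybrid names.
 */
static int id_matches(const char *evsel_name, const char *pmu_name,
		      const char *id)
{
	if (!strcmp(evsel_name, id))		/* non-hybrid: exact match */
		return 1;
	return strstr(evsel_name, id) && pmu_name &&
	       strstr(evsel_name, pmu_name) != NULL;	/* hybrid form */
}

int main(void)
{
	/* 1: plain name matches exactly */
	printf("%d\n", id_matches("topdown-retiring", NULL, "topdown-retiring"));
	/* 1: hybrid name matches via id and PMU substrings */
	printf("%d\n", id_matches("cpu_core/topdown-retiring/", "cpu_core",
				  "topdown-retiring"));
	/* 0: a different id string does not match */
	printf("%d\n", id_matches("cpu_atom/topdown-retiring/", "cpu_atom",
				  "topdown-bad-spec"));
	return 0;
}

The extra PMU-name check keeps the relaxed matching limited to event names
that actually carry a hybrid PMU prefix.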