author      Linus Torvalds <torvalds@linux-foundation.org>   2021-07-02 12:24:31 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>   2021-07-02 12:24:31 -0700
commit      406254918b232db198ed60f5bf1f8b84d96bca00 (patch)
tree        98ac344a31aa65577eea707e330c951fb2cc66f3
parent      71bd9341011f626d692aabe024f099820f02c497 (diff)
parent      cf96b8e45a9bf74d2a6f1e1f88a41b10e9357c6b (diff)
download    linux-406254918b232db198ed60f5bf1f8b84d96bca00.tar.bz2
Merge tag 'perf-tools-for-v5.14-2021-07-01' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tool updates from Arnaldo Carvalho de Melo:

 "Tools:

   - Add cgroup support for 'perf top' (-G).

   - Add support for KVM MSRs in 'perf kvm stat'

   - Support probes on init functions in 'perf probe', to support the
     bootconfig format.

   - Improve error reporting in 'perf probe'.

   - No need to synthesize BUILD_ID records in 'perf inject' if the
     MMAP2 records have build ids already.

   - Allow toggling source code ('s' hotkey) in 'perf annotate' in all
     lines.

   - Add itrace options support to 'perf annotate'.

   - Support to custom DSO filters for 'perf script'.

  Hardware enablement:

   - Support the HYBRID_TOPOLOGY and HYBRID_CPU_PMU_CAPS features in
     the perf.data file header.

   - Support PMU prefix for mem-load and mem-store events, to support
     hybrid (BIG little) CPUs such as Intel's Alderlake.

   - Support hybrid CPUs in 'perf mem' and 'perf c2c'.

  Hardware tracing:

   - Intel PT now supports tracing KVM guests.

   - Timestamp improvements for ARM's Coresight.

  Build:

   - Add 'make -C tools/perf build-test' entries for
     libopencsd/CORESIGHT=1 and libbpf/LIBBPF_DYNAMIC=1.

   - Use bison's --file-prefix-map option to avoid storing full paths
     when using O= in the perf build.

  Tests:

   - Improve the 'perf test' entries for libpfm4 and BPF counters.

  Misc:

   - Sync msr-index.h, mount.h, kvm headers with the kernel originals.

   - Add vendor events and metrics for Intel's Icelake Server & Client"

* tag 'perf-tools-for-v5.14-2021-07-01' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (123 commits)
  perf session: Add missing evlist__delete when deleting a session
  perf annotate: Allow 's' on source code lines
  perf dlfilter: Add object_code() to perf_dlfilter_fns
  perf dlfilter: Add attr() to perf_dlfilter_fns
  perf dlfilter: Add srcline() to perf_dlfilter_fns
  perf dlfilter: Add insn() to perf_dlfilter_fns
  perf dlfilter: Add resolve_address() to perf_dlfilter_fns
  perf build: Install perf_dlfilter.h
  perf script: Add option to pass arguments to dlfilters
  perf script: Add option to list dlfilters
  perf script: Add dlfilter__filter_event_early()
  perf script: Add API for filtering via dynamically loaded shared object
  perf llvm: Return -ENOMEM when asprintf() fails
  perf cs-etm: Delay decode of non-timeless data until cs_etm__flush_events()
  tools headers UAPI: Synch KVM's svm.h header with the kernel
  tools kvm headers arm64: Update KVM headers from the kernel sources
  tools headers UAPI: Sync linux/kvm.h with the kernel sources
  tools headers cpufeatures: Sync with the kernel sources
  tools include UAPI: Update linux/mount.h copy
  tools arch x86: Sync the msr-index.h copy with the kernel sources
  ...
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 11
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 4
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h | 13
-rw-r--r--  tools/arch/x86/include/uapi/asm/svm.h | 3
-rw-r--r--  tools/include/linux/bitmap.h | 11
-rw-r--r--  tools/include/uapi/linux/kvm.h | 105
-rw-r--r--  tools/include/uapi/linux/mount.h | 1
-rw-r--r--  tools/lib/bitmap.c | 14
-rw-r--r--  tools/perf/Documentation/itrace.txt | 1
-rw-r--r--  tools/perf/Documentation/perf-annotate.txt | 7
-rw-r--r--  tools/perf/Documentation/perf-dlfilter.txt | 251
-rw-r--r--  tools/perf/Documentation/perf-inject.txt | 10
-rw-r--r--  tools/perf/Documentation/perf-intel-pt.txt | 119
-rw-r--r--  tools/perf/Documentation/perf-probe.txt | 19
-rw-r--r--  tools/perf/Documentation/perf-script-python.txt | 46
-rw-r--r--  tools/perf/Documentation/perf-script.txt | 15
-rw-r--r--  tools/perf/Documentation/perf-top.txt | 12
-rw-r--r--  tools/perf/Documentation/perf.data-file-format.txt | 33
-rw-r--r--  tools/perf/Makefile.config | 14
-rw-r--r--  tools/perf/Makefile.perf | 4
-rw-r--r--  tools/perf/arch/arm/include/arch-tests.h | 5
-rw-r--r--  tools/perf/arch/arm/util/cs-etm.c | 133
-rw-r--r--  tools/perf/arch/arm64/include/arch-tests.h | 5
-rw-r--r--  tools/perf/arch/arm64/util/arm-spe.c | 45
-rw-r--r--  tools/perf/arch/arm64/util/mem-events.c | 2
-rw-r--r--  tools/perf/arch/powerpc/include/arch-tests.h | 7
-rw-r--r--  tools/perf/arch/powerpc/tests/dwarf-unwind.c | 1
-rw-r--r--  tools/perf/arch/powerpc/util/mem-events.c | 2
-rw-r--r--  tools/perf/arch/x86/include/arch-tests.h | 12
-rw-r--r--  tools/perf/arch/x86/tests/dwarf-unwind.c | 1
-rw-r--r--  tools/perf/arch/x86/util/kvm-stat.c | 46
-rw-r--r--  tools/perf/arch/x86/util/mem-events.c | 54
-rw-r--r--  tools/perf/builtin-annotate.c | 11
-rw-r--r--  tools/perf/builtin-c2c.c | 40
-rw-r--r--  tools/perf/builtin-inject.c | 98
-rw-r--r--  tools/perf/builtin-mem.c | 51
-rw-r--r--  tools/perf/builtin-probe.c | 12
-rw-r--r--  tools/perf/builtin-record.c | 27
-rw-r--r--  tools/perf/builtin-report.c | 2
-rw-r--r--  tools/perf/builtin-script.c | 235
-rw-r--r--  tools/perf/builtin-top.c | 8
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/cache.json | 724
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/floating-point.json | 101
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/frontend.json | 610
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json | 273
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/memory.json | 654
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/other.json | 1089
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/pipeline.json | 1169
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json | 251
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/cache.json | 706
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/floating-point.json | 95
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/frontend.json | 469
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/memory.json | 291
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/other.json | 181
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/pipeline.json | 972
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json | 333
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json | 2476
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json | 245
-rw-r--r--  tools/perf/pmu-events/arch/x86/mapfile.csv | 2
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/Context.c | 168
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-record | 4
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-report | 4
-rwxr-xr-x  tools/perf/scripts/python/exported-sql-viewer.py | 89
-rw-r--r--  tools/perf/scripts/python/intel-pt-events.py | 283
-rw-r--r--  tools/perf/scripts/python/libxed.py | 107
-rw-r--r--  tools/perf/tests/builtin-test.c | 43
-rw-r--r--  tools/perf/tests/dwarf-unwind.c | 4
-rw-r--r--  tools/perf/tests/make | 7
-rw-r--r--  tools/perf/tests/pfm.c | 10
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 16
-rw-r--r--  tools/perf/tests/tests.h | 2
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 32
-rw-r--r--  tools/perf/util/Build | 7
-rw-r--r--  tools/perf/util/arm-spe.c | 73
-rw-r--r--  tools/perf/util/auxtrace.c | 18
-rw-r--r--  tools/perf/util/auxtrace.h | 53
-rw-r--r--  tools/perf/util/bpf_counter.c | 52
-rw-r--r--  tools/perf/util/bpf_counter.h | 52
-rw-r--r--  tools/perf/util/cgroup.c | 44
-rw-r--r--  tools/perf/util/cgroup.h | 12
-rw-r--r--  tools/perf/util/cputopo.c | 80
-rw-r--r--  tools/perf/util/cputopo.h | 13
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 61
-rw-r--r--  tools/perf/util/cs-etm.c | 83
-rw-r--r--  tools/perf/util/cs-etm.h | 4
-rw-r--r--  tools/perf/util/data.c | 3
-rw-r--r--  tools/perf/util/data.h | 1
-rw-r--r--  tools/perf/util/db-export.c | 12
-rw-r--r--  tools/perf/util/db-export.h | 2
-rw-r--r--  tools/perf/util/dlfilter.c | 615
-rw-r--r--  tools/perf/util/dlfilter.h | 97
-rw-r--r--  tools/perf/util/env.c | 12
-rw-r--r--  tools/perf/util/env.h | 16
-rw-r--r--  tools/perf/util/evlist.c | 25
-rw-r--r--  tools/perf/util/evlist.h | 1
-rw-r--r--  tools/perf/util/evsel.c | 25
-rw-r--r--  tools/perf/util/header.c | 254
-rw-r--r--  tools/perf/util/header.h | 2
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 723
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.h | 20
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-log.h | 5
-rw-r--r--  tools/perf/util/intel-pt.c | 224
-rw-r--r--  tools/perf/util/llvm-utils.c | 2
-rw-r--r--  tools/perf/util/mem-events.c | 103
-rw-r--r--  tools/perf/util/mem-events.h | 4
-rw-r--r--  tools/perf/util/perf_dlfilter.h | 150
-rw-r--r--  tools/perf/util/pmu-hybrid.h | 11
-rw-r--r--  tools/perf/util/probe-event.c | 203
-rw-r--r--  tools/perf/util/probe-event.h | 2
-rw-r--r--  tools/perf/util/probe-file.c | 95
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 13
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 359
-rw-r--r--  tools/perf/util/session.c | 11
-rw-r--r--  tools/perf/util/srccode.c | 3
-rw-r--r--  tools/perf/util/stat-display.c | 8
-rw-r--r--  tools/perf/util/stat.c | 12
-rw-r--r--  tools/perf/util/trace-event-scripting.c | 32
-rw-r--r--  tools/perf/util/trace-event.h | 29
120 files changed, 14120 insertions, 2419 deletions
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 24223adae150..b3edde68bc3e 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index ac37830ae941..d0ce5cfd3ac1 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -378,6 +378,7 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 211ba3375ee9..a7c413432b33 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -772,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 0662f644aad9..a6c327f8ad9e 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -159,6 +159,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
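For illustration only (not part of the patch): struct kvm_sregs2 pairs with the KVM_GET_SREGS2/KVM_SET_SREGS2 ioctls added to tools/include/uapi/linux/kvm.h later in this merge. A minimal userspace sketch of reading it from a vCPU fd might look like the following, assuming kernel headers new enough to carry these definitions; the helper name is made up and error handling is trimmed.

----
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: dump CR3 and the PDPTRs of an existing vCPU fd. */
static int print_cr3_and_pdptrs(int vcpu_fd)
{
	struct kvm_sregs2 sregs2;
	int i;

	if (ioctl(vcpu_fd, KVM_GET_SREGS2, &sregs2) < 0)
		return -1;

	printf("cr3 = 0x%llx\n", (unsigned long long)sregs2.cr3);

	/* pdptrs[] is only meaningful when the flag says so. */
	if (sregs2.flags & KVM_SREGS2_FLAGS_PDPTRS_VALID)
		for (i = 0; i < 4; i++)
			printf("pdptr[%d] = 0x%llx\n", i,
			       (unsigned long long)sregs2.pdptrs[i]);
	return 0;
}
----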
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index 554f75fe013c..efa969325ede 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -110,6 +110,9 @@
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 330dbf7509cc..9d959bc24859 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -18,6 +18,8 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
+int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -170,4 +172,13 @@ static inline int bitmap_equal(const unsigned long *src1,
return __bitmap_equal(src1, src2, nbits);
}
+static inline int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
#endif /* _PERF_BITOPS_H */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 79d9c44d1ad7..d9e4aabcb31a 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -280,6 +280,9 @@ struct kvm_xen_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -383,6 +386,25 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ } emulation_failure;
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -1083,6 +1105,13 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_SGX_ATTRIBUTE 196
#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1428,6 +1457,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1621,6 +1651,9 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
+
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
@@ -1899,4 +1932,76 @@ struct kvm_dirty_gfn {
#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains statistics
+ * name string including trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
+ * by vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
+ * pointed by vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed by vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from stats fd before any other
+ * readings. It is used by userspace to discover all the information about the
+ * vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_PEAK
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
+
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistics.
+ * @flags: Annotations of the stats, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stats.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stats to the start of stat structure in
+ * structure kvm or kvm_vcpu.
+ * @unused: Unused field for future usage. Always 0 for now.
+ * @name: The name string for the stats. Its size is indicated by the
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 unused;
+ char name[];
+};
+
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+
#endif /* __LINUX_KVM_H */
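To complement the kernel-doc comments above, here is a minimal sketch (not part of the patch) of how userspace might walk the new binary stats interface: obtain a stats fd with KVM_GET_STATS_FD, read struct kvm_stats_header from offset 0, then read the descriptors, each of which is immediately followed by a name of header.name_size bytes. The helper name is made up and error handling is trimmed.

----
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: list the statistics a VM (or vCPU) fd exposes. */
static int list_kvm_stats(int vm_fd)
{
	struct kvm_stats_header hdr;
	struct kvm_stats_desc *desc;
	size_t desc_sz;
	unsigned int i;
	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return -1;

	/* The header is read from the start of the stats fd. */
	if (pread(stats_fd, &hdr, sizeof(hdr), 0) != sizeof(hdr))
		goto out_err;

	/* Each descriptor is followed by its name of hdr.name_size bytes. */
	desc_sz = sizeof(*desc) + hdr.name_size;
	desc = malloc(desc_sz);
	if (!desc)
		goto out_err;

	for (i = 0; i < hdr.num_desc; i++) {
		if (pread(stats_fd, desc, desc_sz,
			  hdr.desc_offset + (off_t)i * desc_sz) != (ssize_t)desc_sz)
			break;
		printf("%s: %u x __u64, flags 0x%x\n",
		       desc->name, desc->size, desc->flags);
	}

	free(desc);
	close(stats_fd);
	return 0;
out_err:
	close(stats_fd);
	return -1;
}
----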
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index e6524ead2b7b..dd7a166fdf9c 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -120,6 +120,7 @@ enum fsconfig_command {
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
/*
* mount_setattr()
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index f4e914712b6f..db466ef7be9d 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -86,3 +86,17 @@ int __bitmap_equal(const unsigned long *bitmap1,
return 1;
}
+
+int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & bitmap2[k])
+ return 1;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return 1;
+ return 0;
+}
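As a quick illustration (not from the patch) of the new helper, a caller with two multi-word bitmaps can test for any common bit as sketched below. It assumes a 64-bit unsigned long and that the tools/include headers are on the include path, so treat it as illustration rather than a standalone program.

----
#include <stdio.h>
#include <linux/bitmap.h>	/* the tools/include copy updated above */

int main(void)
{
	/* Two 128-bit maps: bit 70 is set in both, nothing else in common. */
	unsigned long a[2] = { 0x1UL, 1UL << 6 };
	unsigned long b[2] = { 0x0UL, 1UL << 6 };

	/* nbits > BITS_PER_LONG, so this goes through __bitmap_intersects(). */
	printf("intersects: %d\n", bitmap_intersects(a, b, 128));
	return 0;
}
----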
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 0f1005209a2b..2d586fe5e4c5 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -20,6 +20,7 @@
L synthesize last branch entries on existing event records
s skip initial number of events
q quicker (less detailed) decoding
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
The default is all events i.e. the same as --itrace=ibxwpe,
except for perf script where it is --itrace=ce
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 80c1be5d566c..33c2521cba4a 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -58,6 +58,13 @@ OPTIONS
--ignore-vmlinux::
Ignore vmlinux files.
+--itrace::
+ Options for decoding instruction tracing data. The options are:
+
+include::itrace.txt[]
+
+ To disable decoding entirely, use --no-itrace.
+
-m::
--modules::
Load module symbols. WARNING: use only with -k and LIVE kernel.
diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
new file mode 100644
index 000000000000..02842cb4cf90
--- /dev/null
+++ b/tools/perf/Documentation/perf-dlfilter.txt
@@ -0,0 +1,251 @@
+perf-dlfilter(1)
+================
+
+NAME
+----
+perf-dlfilter - Filter sample events using a dynamically loaded shared
+object file
+
+SYNOPSIS
+--------
+[verse]
+'perf script' [--dlfilter file.so ] [ --dlarg arg ]...
+
+DESCRIPTION
+-----------
+
+This option is used to process data through a custom filter provided by a
+dynamically loaded shared object file. Arguments can be passed using --dlarg
+and retrieved using perf_dlfilter_fns.args().
+
+If 'file.so' does not contain "/", then it will be found either in the current
+directory, or perf tools exec path which is ~/libexec/perf-core/dlfilters for
+a local build and install (refer perf --exec-path), or the dynamic linker
+paths.
+
+API
+---
+
+The API for filtering consists of the following:
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+
+const struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int start(void **data, void *ctx);
+int stop(void *data, void *ctx);
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+const char *filter_description(const char **long_description);
+----
+
+If implemented, 'start' will be called at the beginning, before any
+calls to 'filter_event' or 'filter_event_early'. Return 0 to indicate success,
+or return a negative error code. '*data' can be assigned for use by other
+functions. 'ctx' is needed for calls to perf_dlfilter_fns, but most
+perf_dlfilter_fns are not valid when called from 'start'.
+
+If implemented, 'stop' will be called at the end, after any calls to
+'filter_event' or 'filter_event_early'. Return 0 to indicate success, or
+return a negative error code. 'data' is set by 'start'. 'ctx' is needed
+for calls to perf_dlfilter_fns, but most perf_dlfilter_fns are not valid
+when called from 'stop'.
+
+If implemented, 'filter_event' will be called for each sample event.
+Return 0 to keep the sample event, 1 to filter it out, or return a negative
+error code. 'data' is set by 'start'. 'ctx' is needed for calls to
+'perf_dlfilter_fns'.
+
+'filter_event_early' is the same as 'filter_event' except it is called before
+internal filtering.
+
+If implemented, 'filter_description' should return a one-line description
+of the filter, and optionally a longer description.
+
+The perf_dlfilter_sample structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+'filter_event' and 'filter_event_early' are passed a perf_dlfilter_sample
+structure, which contains the following fields:
+[source,c]
+----
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+};
+----
+
+The perf_dlfilter_fns structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_fns' structure is populated with function pointers when the
+file is loaded. The functions can be called by 'filter_event' or
+'filter_event_early'.
+
+[source,c]
+----
+struct perf_dlfilter_fns {
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ char **(*args)(void *ctx, int *dlargc);
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ struct perf_event_attr *(*attr)(void *ctx);
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ void *(*reserved[120])(void *);
+};
+----
+
+'resolve_ip' returns information about ip.
+
+'resolve_addr' returns information about addr (if addr_correlates_sym).
+
+'args' returns arguments from --dlarg options.
+
+'resolve_address' provides information about 'address'. al->size must be set
+before calling. Returns 0 on success, -1 otherwise.
+
+'insn' returns instruction bytes and length.
+
+'srcline' returns the source file name and line number.
+
+'attr' returns perf_event_attr, refer <linux/perf_event.h>.
+
+'object_code' reads object code and returns the number of bytes read.
+
+The perf_dlfilter_al structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_al' structure contains information about an address.
+
+[source,c]
+----
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* true if this sample event will be filtered out */
+ const char *comm;
+};
+----
+
+perf_dlfilter_sample flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'flags' member of 'perf_dlfilter_sample' corresponds with the flags field
+of perf script. The bits of the flags are as follows:
+
+[source,c]
+----
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+----
+
+EXAMPLE
+-------
+
+Filter out everything except branches from "foo" to "bar":
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+
+const struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+ const struct perf_dlfilter_al *addr_al;
+
+ if (!sample->ip || !sample->addr_correlates_sym)
+ return 1;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al || !al->sym || strcmp(al->sym, "foo"))
+ return 1;
+
+ addr_al = perf_dlfilter_fns.resolve_addr(ctx);
+ if (!addr_al || !addr_al->sym || strcmp(addr_al->sym, "bar"))
+ return 1;
+
+ return 0;
+}
+----
+
+To build the shared object, assuming perf has been installed for the local user
+i.e. perf_dlfilter.h is in ~/include/perf :
+
+ gcc -c -I ~/include -fpic dlfilter-example.c
+ gcc -shared -o dlfilter-example.so dlfilter-example.o
+
+To use the filter with perf script:
+
+ perf script --dlfilter dlfilter-example.so
+
+SEE ALSO
+--------
+linkperf:perf-script[1]
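For completeness, a second, hypothetical filter sketch (not from the patch) that exercises 'args' and 'filter_description': it keeps only samples whose TID matches the first --dlarg argument. It assumes 'args' may be called from 'start'; the variable and file names are made up.

----
#include <perf/perf_dlfilter.h>
#include <stdlib.h>

const struct perf_dlfilter_fns perf_dlfilter_fns;

static __s32 keep_tid = -1;

/* Parse the first --dlarg as the TID to keep. */
int start(void **data, void *ctx)
{
	int dlargc = 0;
	char **dlargv = perf_dlfilter_fns.args(ctx, &dlargc);

	if (dlargv && dlargc >= 1)
		keep_tid = atoi(dlargv[0]);
	return 0;
}

/* Keep matching samples (return 0), filter out the rest (return 1). */
int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
	if (keep_tid < 0)
		return 0;	/* no --dlarg given: keep everything */
	return sample->tid == keep_tid ? 0 : 1;
}

const char *filter_description(const char **long_description)
{
	*long_description = "Keep samples for the TID given by the first --dlarg";
	return "TID filter example";
}
----

Built the same way as the example above, it could be invoked as, for instance, perf script --dlfilter tid-filter.so --dlarg 1234 (the .so name is arbitrary).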
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index a8eccff21281..91108fe3ad5f 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -68,6 +68,16 @@ include::itrace.txt[]
--force::
Don't complain, do it.
+--vm-time-correlation[=OPTIONS]::
+ Some architectures may capture AUX area data which contains timestamps
+ affected by virtualization. This option will update those timestamps
+ in place, to correlate with host timestamps. The in-place update means
+ that an output file is not specified, and instead the input file is
+ modified. The options are architecture specific, except that they may
+ start with "dry-run" which will cause the file to be processed but
+ without updating it. Currently this option is supported only by
+ Intel PT, refer linkperf:perf-intel-pt[1]
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1],
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index bcf3eca5afbe..184ba62420f0 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -174,7 +174,11 @@ Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,
and to script exported-sql-viewer.py for an example of using the database.
There is also script intel-pt-events.py which provides an example of how to
-unpack the raw data for power events and PTWRITE.
+unpack the raw data for power events and PTWRITE. The script also displays
+branches, and supports 2 additional modes selected by option:
+
+ --insn-trace - instruction trace
+ --src-trace - source trace
As mentioned above, it is easy to capture too much data. One way to limit the
data captured is to use 'snapshot' mode which is explained further below.
@@ -869,6 +873,7 @@ The letters are:
L synthesize last branch entries on existing event records
s skip initial number of events
q quicker (less detailed) decoding
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
"Instructions" events look like they were recorded by "perf record -e
instructions".
@@ -1062,6 +1067,10 @@ What *will* be decoded with the qq option:
- instruction pointer associated with PSB packets
+The Z option is equivalent to having recorded a trace without TSC
+(i.e. config term tsc=0). It can be useful to avoid timestamp issues when
+decoding a trace of a virtual machine.
+
dump option
~~~~~~~~~~~
@@ -1150,8 +1159,9 @@ include::build-xed.txt[]
Tracing Virtual Machines
------------------------
-Currently, only kernel tracing is supported and only with "timeless" decoding
-i.e. no TSC timestamps
+Currently, only kernel tracing is supported and only with either "timeless" decoding
+(i.e. no TSC timestamps) or VM Time Correlation. VM Time Correlation is an extra step
+using 'perf inject' and requires unchanging VMX TSC Offset and no VMX TSC Scaling.
Other limitations and caveats
@@ -1162,7 +1172,7 @@ Other limitations and caveats
Guest VCPU is unknown but may be able to be inferred from the host thread
Callchains are not supported
-Example
+Example using "timeless" decoding
Start VM
@@ -1226,6 +1236,107 @@ perf script can be used to provide an instruction trace
:1440 1440 ffffffffbb74603c clockevents_program_event+0x4c ([guest.kernel.kallsyms]) popq %rbx
:1440 1440 ffffffffbb74603d clockevents_program_event+0x4d ([guest.kernel.kallsyms]) popq %r12
+Example using VM Time Correlation
+
+Start VM
+
+ $ sudo virsh start kubuntu20.04
+ Domain kubuntu20.04 started
+
+Mount the guest file system. Note sshfs needs -o direct_io to enable reading of proc files. root access is needed to read /proc/kcore.
+
+ $ mkdir -p vm0
+ $ sshfs -o direct_io root@vm0:/ vm0
+
+Copy the guest /proc/kallsyms, /proc/modules and /proc/kcore
+
+ $ perf buildid-cache -v --kcore vm0/proc/kcore
+ same kcore found in /home/user/.debug/[kernel.kcore]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777
+ $ KALLSYMS=/home/user/.debug/\[kernel.kcore\]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777/kallsyms
+
+Find the VM process
+
+ $ ps -eLl | grep 'KVM\|PID'
+ F S UID PID PPID LWP C PRI NI ADDR SZ WCHAN TTY TIME CMD
+ 3 S 64055 16998 1 17005 13 80 0 - 1818189 - ? 00:00:16 CPU 0/KVM
+ 3 S 64055 16998 1 17006 4 80 0 - 1818189 - ? 00:00:05 CPU 1/KVM
+ 3 S 64055 16998 1 17007 3 80 0 - 1818189 - ? 00:00:04 CPU 2/KVM
+ 3 S 64055 16998 1 17008 4 80 0 - 1818189 - ? 00:00:05 CPU 3/KVM
+
+Start an open-ended perf record, tracing the VM process, do something on the VM, and then ctrl-C to stop.
+IPC can be determined, hence cyc=1 can be added.
+Only kernel decoding is supported, so 'k' must be specified.
+Intel PT traces both the host and the guest so --guest and --host need to be specified.
+
+ $ sudo perf kvm --guest --host --guestkallsyms $KALLSYMS record --kcore -e intel_pt/cyc=1/k -p 16998
+ ^C[ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 9.041 MB perf.data.kvm ]
+
+Now 'perf inject' can be used to determine the VMX TSC Offset. Note, Intel PT TSC packets are
+only 7-bytes, so the TSC Offset might differ from the actual value in the 8th byte. That will
+have no effect i.e. the resulting timestamps will be correct anyway.
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=dry-run
+ ERROR: Unknown TSC Offset for VMCS 0x1bff6a
+ VMCS: 0x1bff6a TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbc08
+ VMCS: 0x1cbc08 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1c3ce8
+ VMCS: 0x1c3ce8 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbce9
+ VMCS: 0x1cbce9 TSC Offset 0xffffe42722c64c41
+
+Each virtual CPU has a different Virtual Machine Control Structure (VMCS)
+shown above with the calculated TSC Offset. For an unchanging TSC Offset
+they should all be the same for the same virtual machine.
+
+Now that the TSC Offset is known, it can be provided to 'perf inject'
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation="dry-run 0xffffe42722c64c41"
+
+Note the options for 'perf inject' --vm-time-correlation are:
+
+ [ dry-run ] [ <TSC Offset> [ : <VMCS> [ , <VMCS> ]... ] ]...
+
+So it is possible to specify different TSC Offsets for different VMCS.
+The option "dry-run" will cause the file to be processed but without updating it.
+Note it is also possible to get an intel_pt.log file by adding the option --itrace=d
+
+There were no errors, so do it for real
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=0xffffe42722c64c41 --force
+
+'perf script' can be used to see if there are any decoder errors
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --itrace=e-o
+
+There were none.
+
+'perf script' can be used to provide an instruction trace showing timestamps
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce9 __vmx_vcpu_run+0x49 ([kernel.kallsyms]) movq 0x60(%rax), %r12
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ced __vmx_vcpu_run+0x4d ([kernel.kallsyms]) movq 0x68(%rax), %r13
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf1 __vmx_vcpu_run+0x51 ([kernel.kallsyms]) movq 0x70(%rax), %r14
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf5 __vmx_vcpu_run+0x55 ([kernel.kallsyms]) movq 0x78(%rax), %r15
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf9 __vmx_vcpu_run+0x59 ([kernel.kallsyms]) movq (%rax), %rax
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cfc __vmx_vcpu_run+0x5c ([kernel.kallsyms]) callq 0xffffffff82133c40
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133c40 vmx_vmenter+0x0 ([kernel.kallsyms]) jz 0xffffffff82133c46
+ CPU 1/KVM 17006 [001] 11500.262866075: ffffffff82133c42 vmx_vmenter+0x2 ([kernel.kallsyms]) vmresume IPC: 0.05 (40/769)
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb0 asm_sysvec_apic_timer_interrupt+0x0 ([guest.kernel.kallsyms]) clac
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb3 asm_sysvec_apic_timer_interrupt+0x3 ([guest.kernel.kallsyms]) pushq $0xffffffffffffffff
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb5 asm_sysvec_apic_timer_interrupt+0x5 ([guest.kernel.kallsyms]) callq 0xffffffff82201160
+ :17006 17006 [001] 11500.262869216: ffffffff82201160 error_entry+0x0 ([guest.kernel.kallsyms]) cld
+ :17006 17006 [001] 11500.262869216: ffffffff82201161 error_entry+0x1 ([guest.kernel.kallsyms]) pushq %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201162 error_entry+0x2 ([guest.kernel.kallsyms]) movq 0x8(%rsp), %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201167 error_entry+0x7 ([guest.kernel.kallsyms]) movq %rdi, 0x8(%rsp)
+ :17006 17006 [001] 11500.262869216: ffffffff8220116c error_entry+0xc ([guest.kernel.kallsyms]) pushq %rdx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116d error_entry+0xd ([guest.kernel.kallsyms]) pushq %rcx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116e error_entry+0xe ([guest.kernel.kallsyms]) pushq %rax
+
SEE ALSO
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index ed3ecfa422e1..080981d38d7b 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -226,7 +226,7 @@ So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And
LAZY MATCHING
-------------
- The lazy line matching is similar to glob matching but ignoring spaces in both of pattern and target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
+The lazy line matching is similar to glob matching but ignoring spaces in both of pattern and target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
e.g.
'a=*' can matches 'a=b', 'a = b', 'a == b' and so on.
@@ -235,8 +235,8 @@ This provides some sort of flexibility and robustness to probe point definitions
FILTER PATTERN
--------------
- The filter pattern is a glob matching pattern(s) to filter variables.
- In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
+The filter pattern is a glob matching pattern(s) to filter variables.
+In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
e.g.
With --filter "foo* | bar*", perf probe -V shows variables which start with "foo" or "bar".
@@ -295,6 +295,19 @@ Add a probe in a source file using special characters by backslash escape
./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+PERMISSIONS AND SYSCTL
+----------------------
+Since perf probe depends on ftrace (tracefs) and kallsyms (/proc/kallsyms), you have to take care of permissions and some sysctl knobs.
+
+ - Since tracefs and kallsyms require root or a privileged user for access, the following perf probe commands also require it: --add, --del, --list (except for the --cache option)
+
+ - The system admin can remount the tracefs with 755 (`sudo mount -o remount,mode=755 /sys/kernel/tracing/`) to allow an unprivileged user to run the perf probe --list command.
+
+ - /proc/sys/kernel/kptr_restrict = 2 (restrict all users) also prevents perf probe from retrieving the important information from kallsyms. You also need to set it to 1 (restrict non CAP_SYSLOG users) for the above commands. Since user-space probes don't need to access kallsyms, this only applies to probing kernel functions (kprobes).
+
+ - Since the perf probe commands read vmlinux (for the kernel) and/or the debuginfo file (including for user-space applications), you need to ensure that you can read those files.
+
+
SEE ALSO
--------
linkperf:perf-trace[1], linkperf:perf-record[1], linkperf:perf-buildid-cache[1]
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 0fb9eda3cbca..5e43cfa5ea1e 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -550,6 +550,27 @@ def trace_unhandled(event_name, context, event_fields_dict):
pass
----
+*process_event*, if defined, is called for any non-tracepoint event
+
+----
+def process_event(param_dict):
+ pass
+----
+
+*context_switch*, if defined, is called for any context switch
+
+----
+def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
+ pass
+----
+
+*auxtrace_error*, if defined, is called for any AUX area tracing error
+
+----
+def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ pass
+----
+
The remaining sections provide descriptions of each of the available
built-in perf script Python modules and their associated functions.
@@ -592,12 +613,18 @@ common, but need to be made accessible to user scripts nonetheless.
perf_trace_context defines a set of functions that can be used to
access this data in the context of the current event. Each of these
functions expects a context variable, which is the same as the
-context variable passed into every event handler as the second
-argument.
+context variable passed into every tracepoint event handler as the second
+argument. For non-tracepoint events, the context variable is also present
+as perf_trace_context.perf_script_context .
common_pc(context) - returns common_preempt count for the current event
common_flags(context) - returns common_flags for the current event
common_lock_depth(context) - returns common_lock_depth for the current event
+ perf_sample_insn(context) - returns the machine code instruction
+ perf_set_itrace_options(context, itrace_options) - set --itrace options if they have not been set already
+ perf_sample_srcline(context) - returns source_file_name, line_number
+ perf_sample_srccode(context) - returns source_file_name, line_number, source_line
+
Util.py Module
~~~~~~~~~~~~~~
@@ -616,9 +643,20 @@ SUPPORTED FIELDS
Currently supported fields:
ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr,
-symbol, dso, time_enabled, time_running, values, callchain,
+symbol, symoff, dso, time_enabled, time_running, values, callchain,
brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs,
-weight, transaction, raw_buf, attr.
+weight, transaction, raw_buf, attr, cpumode.
+
+Fields that may also be present:
+
+ flags - sample flags
+ flags_disp - sample flags display
+ insn_cnt - instruction count for determining instructions-per-cycle (IPC)
+ cyc_cnt - cycle count for determining IPC
+ addr_correlates_sym - addr can correlate to a symbol
+ addr_dso - addr dso
+ addr_symbol - addr symbol
+ addr_symoff - addr symbol offset
Some fields have sub items:
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 48a5f5b26dd4..aa3a0b2c29a2 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -98,6 +98,18 @@ OPTIONS
Generate perf-script.[ext] starter script for given language,
using current perf.data.
+--dlfilter=<file>::
+ Filter sample events using the given shared object file.
+ Refer linkperf:perf-dlfilter[1]
+
+--dlarg=<arg>::
+ Pass 'arg' as an argument to the dlfilter. --dlarg may be repeated
+ to add more arguments.
+
+--list-dlfilters=::
+ Display a list of available dlfilters. Use with option -v (must come
+ before option --list-dlfilters) to show long descriptions.
+
-a::
Force system-wide collection. Scripts run without a <command>
normally use -a by default, while scripts run with a <command>
@@ -483,4 +495,5 @@ include::itrace.txt[]
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
-linkperf:perf-script-python[1], linkperf:perf-intel-pt[1]
+linkperf:perf-script-python[1], linkperf:perf-intel-pt[1],
+linkperf:perf-dlfilter[1]
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index bba5ffb05463..9898a32b8d9c 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -277,6 +277,18 @@ Default is to monitor all CPUS.
Record events of type PERF_RECORD_NAMESPACES and display it with the
'cgroup_id' sort key.
+-G name::
+--cgroup name::
+monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., first cgroup
+to first event, second cgroup to second event and so on. It is possible to provide
+an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
+corresponding events, i.e., they always refer to events defined earlier on the command
+line. If the user wants to track multiple events for a specific cgroup, the user can
+use '-e e1 -e e2 -G foo,foo' or just use '-e e1 -e e2 -G foo'.
+
--all-cgroups::
Record events of type PERF_RECORD_CGROUP and display it with the
'cgroup' sort key.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 9ee96640744e..e6ff8c898ada 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -402,6 +402,39 @@ struct {
u64 clockid_time_ns;
};
+ HEADER_HYBRID_TOPOLOGY = 30,
+
+Indicate the hybrid CPUs. The format of data is as below.
+
+struct {
+ u32 nr;
+ struct {
+ char pmu_name[];
+ char cpus[];
+ } [nr]; /* Variable length records */
+};
+
+Example:
+ hybrid cpu system:
+ cpu_core cpu list : 0-15
+ cpu_atom cpu list : 16-23
+
+ HEADER_HYBRID_CPU_PMU_CAPS = 31,
+
+ A list of hybrid CPU PMU capabilities.
+
+struct {
+ u32 nr_pmu;
+ struct {
+ u32 nr_cpu_pmu_caps;
+ {
+ char name[];
+ char value[];
+ } [nr_cpu_pmu_caps];
+ char pmu_name[];
+ } [nr_pmu];
+};
+
other bits are reserved and should ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 73df23dd664c..eb8e487ef90b 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -201,6 +201,12 @@ ifeq ($(call get-executable,$(BISON)),)
dummy := $(error Error: $(BISON) is missing on this system, please install it)
endif
+ifneq ($(OUTPUT),)
+ ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
+ BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
+ endif
+endif
+
# Treat warnings as errors unless directed not to
ifneq ($(WERROR),0)
CORE_CFLAGS += -Werror
@@ -634,7 +640,7 @@ endif
ifdef BUILD_BPF_SKEL
$(call feature_check,clang-bpf-co-re)
ifeq ($(feature-clang-bpf-co-re), 0)
- dummy := $(error Error: clang too old. Please install recent clang)
+ dummy := $(error Error: clang too old/not installed. Please install recent clang to build with BUILD_BPF_SKEL)
endif
$(call detected,CONFIG_PERF_BPF_SKEL)
CFLAGS += -DHAVE_BPF_SKEL
@@ -1111,6 +1117,8 @@ prefix ?= $(HOME)
endif
bindir_relative = bin
bindir = $(abspath $(prefix)/$(bindir_relative))
+includedir_relative = include
+includedir = $(abspath $(prefix)/$(includedir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
@@ -1143,6 +1151,7 @@ ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
STRACE_GROUPS_DIR_SQ = $(subst ','\'',$(STRACE_GROUPS_DIR))
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
+includedir_SQ = $(subst ','\'',$(includedir))
mandir_SQ = $(subst ','\'',$(mandir))
infodir_SQ = $(subst ','\'',$(infodir))
perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
@@ -1227,6 +1236,9 @@ $(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)
$(call detected_var,PYTHON_EMBED_CCOPTS)
+ifneq ($(BISON_FILE_PREFIX_MAP),)
+$(call detected_var,BISON_FILE_PREFIX_MAP)
+endif
# re-generate FEATURE-DUMP as we may have called feature_check, found out
# extra libraries to add to LDFLAGS of some other test and then redo those
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e47f04e5b51e..c9e0de5b00c1 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -923,7 +923,9 @@ install-tools: all install-gtk
$(call QUIET_INSTALL, binaries) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
- $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'
+ $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(includedir_SQ)/perf'; \
+ $(INSTALL) util/perf_dlfilter.h -t '$(DESTDIR_SQ)$(includedir_SQ)/perf'
ifndef NO_PERF_READ_VDSO32
$(call QUIET_INSTALL, perf-read-vdso32) \
$(INSTALL) $(OUTPUT)perf-read-vdso32 '$(DESTDIR_SQ)$(bindir_SQ)';
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h
index 90ec4c8cb880..c62538052404 100644
--- a/tools/perf/arch/arm/include/arch-tests.h
+++ b/tools/perf/arch/arm/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index d942f118d32c..85168d87b2d7 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -38,8 +38,6 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct evlist *evlist;
- int wrapped_cnt;
- bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
@@ -734,135 +732,6 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return 0;
}
-static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
-{
- bool *wrapped;
- int cnt = ptr->wrapped_cnt;
-
- /* Make @ptr->wrapped as big as @idx */
- while (cnt <= idx)
- cnt++;
-
- /*
- * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
- * cross compilation problems where the host's system supports
- * reallocarray() but not the target.
- */
- wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
- if (!wrapped)
- return -ENOMEM;
-
- wrapped[cnt - 1] = false;
- ptr->wrapped_cnt = cnt;
- ptr->wrapped = wrapped;
-
- return 0;
-}
-
-static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
- size_t buffer_size, u64 head)
-{
- u64 i, watermark;
- u64 *buf = (u64 *)buffer;
- size_t buf_size = buffer_size;
-
- /*
- * We want to look the very last 512 byte (chosen arbitrarily) in
- * the ring buffer.
- */
- watermark = buf_size - 512;
-
- /*
- * @head is continuously increasing - if its value is equal or greater
- * than the size of the ring buffer, it has wrapped around.
- */
- if (head >= buffer_size)
- return true;
-
- /*
- * The value of @head is somewhere within the size of the ring buffer.
- * This can be that there hasn't been enough data to fill the ring
- * buffer yet or the trace time was so long that @head has numerically
- * wrapped around. To find we need to check if we have data at the very
- * end of the ring buffer. We can reliably do this because mmap'ed
- * pages are zeroed out and there is a fresh mapping with every new
- * session.
- */
-
- /* @head is less than 512 byte from the end of the ring buffer */
- if (head > watermark)
- watermark = head;
-
- /*
- * Speed things up by using 64 bit transactions (see "u64 *buf" above)
- */
- watermark >>= 3;
- buf_size >>= 3;
-
- /*
- * If we find trace data at the end of the ring buffer, @head has
- * been there and has numerically wrapped around at least once.
- */
- for (i = watermark; i < buf_size; i++)
- if (buf[i])
- return true;
-
- return false;
-}
-
-static int cs_etm_find_snapshot(struct auxtrace_record *itr,
- int idx, struct auxtrace_mmap *mm,
- unsigned char *data,
- u64 *head, u64 *old)
-{
- int err;
- bool wrapped;
- struct cs_etm_recording *ptr =
- container_of(itr, struct cs_etm_recording, itr);
-
- /*
- * Allocate memory to keep track of wrapping if this is the first
- * time we deal with this *mm.
- */
- if (idx >= ptr->wrapped_cnt) {
- err = cs_etm_alloc_wrapped_array(ptr, idx);
- if (err)
- return err;
- }
-
- /*
- * Check to see if *head has wrapped around. If it hasn't only the
- * amount of data between *head and *old is snapshot'ed to avoid
- * bloating the perf.data file with zeros. But as soon as *head has
- * wrapped around the entire size of the AUX ring buffer it taken.
- */
- wrapped = ptr->wrapped[idx];
- if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
- wrapped = true;
- ptr->wrapped[idx] = true;
- }
-
- pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
- __func__, idx, (size_t)*old, (size_t)*head, mm->len);
-
- /* No wrap has occurred, we can just use *head and *old. */
- if (!wrapped)
- return 0;
-
- /*
- * *head has wrapped around - adjust *head and *old to pickup the
- * entire content of the AUX buffer.
- */
- if (*head >= mm->len) {
- *old = *head - mm->len;
- } else {
- *head += mm->len;
- *old = *head - mm->len;
- }
-
- return 0;
-}
-
static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
struct cs_etm_recording *ptr =
@@ -900,7 +769,6 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
- zfree(&ptr->wrapped);
free(ptr);
}
@@ -928,7 +796,6 @@ struct auxtrace_record *cs_etm_record_init(int *err)
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
ptr->itr.info_fill = cs_etm_info_fill;
- ptr->itr.find_snapshot = cs_etm_find_snapshot;
ptr->itr.snapshot_start = cs_etm_snapshot_start;
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
index 90ec4c8cb880..c62538052404 100644
--- a/tools/perf/arch/arm64/include/arch-tests.h
+++ b/tools/perf/arch/arm64/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 414c8a5584b1..a4420d4df503 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -14,6 +14,7 @@
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
@@ -32,6 +33,29 @@ struct arm_spe_recording {
struct evlist *evlist;
};
+static void arm_spe_set_timestamp(struct auxtrace_record *itr,
+ struct evsel *evsel)
+{
+ struct arm_spe_recording *ptr;
+ struct perf_pmu *arm_spe_pmu;
+ struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);
+ u64 user_bits = 0, bit;
+
+ ptr = container_of(itr, struct arm_spe_recording, itr);
+ arm_spe_pmu = ptr->arm_spe_pmu;
+
+ if (term)
+ user_bits = term->val.cfg_chg;
+
+ bit = perf_pmu__format_bits(&arm_spe_pmu->format, "ts_enable");
+
+ /* Skip if user has set it */
+ if (bit & user_bits)
+ return;
+
+ evsel->core.attr.config |= bit;
+}
+
static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused)
@@ -68,6 +92,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
struct evsel *evsel, *arm_spe_evsel = NULL;
+ struct perf_cpu_map *cpus = evlist->core.cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
@@ -91,7 +116,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
return 0;
/* We are in full trace mode but '-m,xyz' wasn't specified */
- if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
@@ -120,9 +145,14 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
*/
evlist__to_front(evlist, arm_spe_evsel);
- evsel__set_sample_bit(arm_spe_evsel, CPU);
- evsel__set_sample_bit(arm_spe_evsel, TIME);
- evsel__set_sample_bit(arm_spe_evsel, TID);
+ /*
+ * In the case of per-cpu mmaps, sample the CPU for the AUX event;
+ * also enable timestamp tracing so that samples can be correlated.
+ */
+ if (!perf_cpu_map__empty(cpus)) {
+ evsel__set_sample_bit(arm_spe_evsel, CPU);
+ arm_spe_set_timestamp(itr, arm_spe_evsel);
+ }
/* Add dummy event to keep tracking */
err = parse_events(evlist, "dummy:u", NULL);
@@ -134,9 +164,10 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
- evsel__set_sample_bit(tracking_evsel, TIME);
- evsel__set_sample_bit(tracking_evsel, CPU);
- evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ /* In the per-cpu case, we always need the time of mmap events etc. */
+ if (!perf_cpu_map__empty(cpus))
+ evsel__set_sample_bit(tracking_evsel, TIME);
return 0;
}
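A minimal standalone sketch (not taken from the patch) of the ts_enable handling in arm_spe_set_timestamp() above: timestamps are enabled by default, but only when the user has not already set the bit through a config term. The bit position below is an assumption for illustration; the real value comes from perf_pmu__format_bits(&arm_spe_pmu->format, "ts_enable").

#include <stdio.h>

static unsigned long long apply_ts_default(unsigned long long config,
					   unsigned long long user_bits,
					   unsigned long long ts_bit)
{
	/* Skip if the user has set it already (same check as in the patch). */
	if (ts_bit & user_bits)
		return config;
	return config | ts_bit;
}

int main(void)
{
	/* Assumed bit position; the real bit is looked up per PMU format. */
	unsigned long long ts_bit = 1ULL << 0;

	printf("no user setting:  config=%#llx\n", apply_ts_default(0, 0, ts_bit));
	printf("user already set: config=%#llx\n", apply_ts_default(0, ts_bit, ts_bit));
	return 0;
}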
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
index 2a2497372671..be41721b9aa1 100644
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -20,7 +20,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
diff --git a/tools/perf/arch/powerpc/include/arch-tests.h b/tools/perf/arch/powerpc/include/arch-tests.h
index 1c7be75cbc78..c62538052404 100644
--- a/tools/perf/arch/powerpc/include/arch-tests.h
+++ b/tools/perf/arch/powerpc/include/arch-tests.h
@@ -2,13 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 8efd9ed9e9db..c9cb4b059392 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
index 07fb5e049488..4120fafe0be4 100644
--- a/tools/perf/arch/powerpc/util/mem-events.c
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -3,7 +3,7 @@
#include "mem-events.h"
/* PowerPC does not support 'ldlat' parameter. */
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
if (i == PERF_MEM_EVENTS__LOAD)
return (char *) "cpu/mem-loads/";
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index 0e20f3dc69f3..9599e7a3f1af 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,23 +2,15 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#include <linux/compiler.h>
struct test;
/* Tests */
-int test__rdpmc(struct test *test __maybe_unused, int subtest);
-int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__rdpmc(struct test *test, int subtest);
+int test__insn_x86(struct test *test, int subtest);
int test__intel_pt_pkt_decoder(struct test *test, int subtest);
int test__bp_modify(struct test *test, int subtest);
int test__x86_sample_parsing(struct test *test, int subtest);
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
extern struct test arch_tests[];
#endif
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index 478078fb0f22..a54dea7c112f 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 072920475b65..c5dd54f6ef5e 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -133,11 +133,56 @@ static struct kvm_events_ops ioport_events = {
.name = "IO Port Access"
};
+ /* The time of an emulated MSR access is measured from kvm_msr to kvm_entry. */
+static void msr_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "ecx");
+ key->info = evsel__intval(evsel, sample, "write");
+}
+
+static bool msr_event_begin(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (!strcmp(evsel->name, "kvm:kvm_msr")) {
+ msr_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool msr_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, decode_str_len, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info ? "W" : "R");
+}
+
+static struct kvm_events_ops msr_events = {
+ .is_begin_event = msr_event_begin,
+ .is_end_event = msr_event_end,
+ .decode_key = msr_event_decode_key,
+ .name = "MSR Access"
+};
+
const char *kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
"kvm:kvm_mmio",
"kvm:kvm_pio",
+ "kvm:kvm_msr",
NULL,
};
@@ -145,6 +190,7 @@ struct kvm_reg_events_ops kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ .name = "mmio", .ops = &mmio_events },
{ .name = "ioport", .ops = &ioport_events },
+ { .name = "msr", .ops = &msr_events },
{ NULL, NULL },
};
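To illustrate how the new MSR ops render a key, here is a small standalone sketch that mirrors msr_event_decode_key() above: the key is the MSR index taken from the tracepoint's "ecx" field and the info flag records whether it was a write. The buffer size and the example MSR index are illustrative choices, not values from the patch.

#include <stdio.h>

struct event_key {
	unsigned long long key;	/* MSR index from the "ecx" tracepoint field */
	int info;		/* 1 for a write, 0 for a read */
};

/* Mirrors the formatting done by msr_event_decode_key() above. */
static void msr_decode(const struct event_key *k, char *buf, size_t sz)
{
	snprintf(buf, sz, "%#llx:%s", k->key, k->info ? "W" : "R");
}

int main(void)
{
	char buf[32];					/* illustrative size */
	struct event_key k = { .key = 0x6e0, .info = 1 };	/* e.g. a TSC deadline write */

	msr_decode(&k, buf, sizeof(buf));
	printf("MSR Access %s\n", buf);			/* prints: MSR Access 0x6e0:W */
	return 0;
}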
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
index 588110fd8904..5214370ca4e4 100644
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -5,19 +5,41 @@
static char mem_loads_name[100];
static bool mem_loads_name__init;
+static char mem_stores_name[100];
#define MEM_LOADS_AUX 0x8203
-#define MEM_LOADS_AUX_NAME "{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=%u/pp}:S"
+#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P"
+
+#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
+
+static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
+ E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"),
+ E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"),
+ E(NULL, NULL, NULL),
+};
+
+struct perf_mem_event *perf_mem_events__ptr(int i)
+{
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+ return &perf_mem_events[i];
+}
bool is_mem_loads_aux_event(struct evsel *leader)
{
- if (!pmu_have_event("cpu", "mem-loads-aux"))
- return false;
+ if (perf_pmu__find("cpu")) {
+ if (!pmu_have_event("cpu", "mem-loads-aux"))
+ return false;
+ } else if (perf_pmu__find("cpu_core")) {
+ if (!pmu_have_event("cpu_core", "mem-loads-aux"))
+ return false;
+ }
return leader->core.attr.config == MEM_LOADS_AUX;
}
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -25,20 +47,34 @@ char *perf_mem_events__name(int i)
return NULL;
if (i == PERF_MEM_EVENTS__LOAD) {
- if (mem_loads_name__init)
+ if (mem_loads_name__init && !pmu_name)
return mem_loads_name;
- mem_loads_name__init = true;
+ if (!pmu_name) {
+ mem_loads_name__init = true;
+ pmu_name = (char *)"cpu";
+ }
- if (pmu_have_event("cpu", "mem-loads-aux")) {
+ if (pmu_have_event(pmu_name, "mem-loads-aux")) {
scnprintf(mem_loads_name, sizeof(mem_loads_name),
- MEM_LOADS_AUX_NAME, perf_mem_events__loads_ldlat);
+ MEM_LOADS_AUX_NAME, pmu_name, pmu_name,
+ perf_mem_events__loads_ldlat);
} else {
scnprintf(mem_loads_name, sizeof(mem_loads_name),
- e->name, perf_mem_events__loads_ldlat);
+ e->name, pmu_name,
+ perf_mem_events__loads_ldlat);
}
return mem_loads_name;
}
+ if (i == PERF_MEM_EVENTS__STORE) {
+ if (!pmu_name)
+ pmu_name = (char *)"cpu";
+
+ scnprintf(mem_stores_name, sizeof(mem_stores_name),
+ e->name, pmu_name);
+ return mem_stores_name;
+ }
+
return (char *)e->name;
}
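The reworked perf_mem_events__name() now substitutes a PMU name into the event templates, which is what makes hybrid CPUs work: instead of hard-coding "cpu", callers can pass e.g. "cpu_core" or "cpu_atom". A standalone sketch of that expansion, using the MEM_LOADS_AUX_NAME template from the hunk above; the ldlat value of 30 is an assumed threshold for this sketch (the real one comes from perf_mem_events__loads_ldlat).

#include <stdio.h>

#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P"

int main(void)
{
	char loads[100], stores[100];
	unsigned int ldlat = 30;	/* assumed latency threshold */
	const char *pmu = "cpu_core";	/* hybrid big-core PMU name */

	/* Same expansion the patch performs when the PMU has a mem-loads-aux event. */
	snprintf(loads, sizeof(loads), MEM_LOADS_AUX_NAME, pmu, pmu, ldlat);
	/* Store events only need the PMU name substituted. */
	snprintf(stores, sizeof(stores), "%s/mem-stores/P", pmu);

	printf("%s\n%s\n", loads, stores);
	/* {cpu_core/mem-loads-aux/,cpu_core/mem-loads,ldlat=30/}:P
	 * cpu_core/mem-stores/P */
	return 0;
}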
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 49627a7bed7c..cebb861be3e3 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -474,6 +474,9 @@ int cmd_annotate(int argc, const char **argv)
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.tracing_data = perf_event__process_tracing_data,
+ .id_index = perf_event__process_id_index,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
@@ -483,6 +486,9 @@ int cmd_annotate(int argc, const char **argv)
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
+ struct itrace_synth_opts itrace_synth_opts = {
+ .set = 0,
+ };
struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -547,6 +553,9 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
+ "Instruction Tracing options\n" ITRACE_HELP,
+ itrace_parse_synth_opts),
OPT_END()
};
@@ -591,6 +600,8 @@ int cmd_annotate(int argc, const char **argv)
if (IS_ERR(annotate.session))
return PTR_ERR(annotate.session);
+ annotate.session->itrace_synth_opts = &itrace_synth_opts;
+
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index e3b9d63077ef..6dea37f141b2 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -42,6 +42,8 @@
#include "ui/ui.h"
#include "ui/progress.h"
#include "../perf.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
struct c2c_hists {
struct hists hists;
@@ -2907,8 +2909,9 @@ static const char * const *record_mem_usage = __usage_record;
static int perf_c2c__record(int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, rec_tmp_nr = 0;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
bool event_set = false;
@@ -2932,11 +2935,21 @@ static int perf_c2c__record(int argc, const char **argv)
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 11; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 11; /* max number of arguments */
+ else
+ rec_argc = argc + 11 * perf_pmu__hybrid_pmu_num();
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
if (!event_set) {
@@ -2964,21 +2977,9 @@ static int perf_c2c__record(int argc, const char **argv)
rec_argv[i++] = "--phys-data";
rec_argv[i++] = "--sample-cpu";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- e = perf_mem_events__ptr(j);
- if (!e->record)
- continue;
-
- if (!e->supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events__name(j));
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- }
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &rec_tmp_nr);
+ if (ret)
+ goto out;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -3002,6 +3003,11 @@ static int perf_c2c__record(int argc, const char **argv)
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < rec_tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index ddccc0eb7390..5d6f583e2cd3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -31,6 +31,7 @@
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/list.h>
+#include <linux/string.h>
#include <errno.h>
#include <signal.h>
@@ -43,6 +44,8 @@ struct perf_inject {
bool have_auxtrace;
bool strip;
bool jit_mode;
+ bool in_place_update;
+ bool in_place_update_dry_run;
const char *input_name;
struct perf_data output;
u64 bytes_written;
@@ -380,8 +383,8 @@ static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
if (dso && !dso->hit) {
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
- dso__put(dso);
}
+ dso__put(dso);
return perf_event__repipe(tool, event, sample, machine);
}
@@ -396,6 +399,18 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
err = perf_event__process_mmap2(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ struct dso *dso;
+
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it so that no build-id is injected for it */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ }
+
return err;
}
@@ -437,6 +452,18 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
};
struct dso *dso;
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ /* cannot use dso_id since it'd have invalid info */
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it so that no build-id is injected for it */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ return 0;
+ }
+
dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
event->mmap2.filename, &dso_id, machine);
@@ -444,8 +471,8 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
dso->hit = 1;
dso__inject_build_id(dso, tool, machine, sample->cpumode,
event->mmap2.flags);
- dso__put(dso);
}
+ dso__put(dso);
perf_event__repipe(tool, event, sample, machine);
@@ -696,12 +723,42 @@ static void strip_init(struct perf_inject *inject)
evsel->handler = drop_sample;
}
+static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
+{
+ struct perf_inject *inject = opt->value;
+ const char *args;
+ char *dry_run;
+
+ if (unset)
+ return 0;
+
+ inject->itrace_synth_opts.set = true;
+ inject->itrace_synth_opts.vm_time_correlation = true;
+ inject->in_place_update = true;
+
+ if (!str)
+ return 0;
+
+ dry_run = skip_spaces(str);
+ if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
+ inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
+ inject->in_place_update_dry_run = true;
+ args = dry_run + strlen("dry-run");
+ } else {
+ args = str;
+ }
+
+ inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
+
+ return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
+}
+
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct perf_session *session = inject->session;
struct perf_data *data_out = &inject->output;
- int fd = perf_data__fd(data_out);
+ int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
u64 output_data_offset;
signal(SIGINT, sig_handler);
@@ -737,6 +794,15 @@ static int __cmd_inject(struct perf_inject *inject)
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler = perf_inject__sched_stat;
}
+ } else if (inject->itrace_synth_opts.vm_time_correlation) {
+ session->itrace_synth_opts = &inject->itrace_synth_opts;
+ memset(&inject->tool, 0, sizeof(inject->tool));
+ inject->tool.id_index = perf_event__process_id_index;
+ inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
+ inject->tool.auxtrace = perf_event__process_auxtrace;
+ inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
+ inject->tool.ordered_events = true;
+ inject->tool.ordering_requires_timestamps = true;
} else if (inject->itrace_synth_opts.set) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
inject->itrace_synth_opts.inject = true;
@@ -759,14 +825,14 @@ static int __cmd_inject(struct perf_inject *inject)
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
- if (!data_out->is_pipe)
+ if (!data_out->is_pipe && !inject->in_place_update)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (ret)
return ret;
- if (!data_out->is_pipe) {
+ if (!data_out->is_pipe && !inject->in_place_update) {
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
@@ -878,6 +944,9 @@ int cmd_inject(int argc, const char **argv)
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "strip", &inject.strip,
"strip non-synthesized events (use with --itrace)"),
+ OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
+ "correlate time between VM guests and the host",
+ parse_vm_time_correlation),
OPT_END()
};
const char * const inject_usage[] = {
@@ -900,7 +969,23 @@ int cmd_inject(int argc, const char **argv)
return -1;
}
- if (perf_data__open(&inject.output)) {
+ if (inject.in_place_update) {
+ if (!strcmp(inject.input_name, "-")) {
+ pr_err("Input file name required for in-place updating\n");
+ return -1;
+ }
+ if (strcmp(inject.output.path, "-")) {
+ pr_err("Output file name must not be specified for in-place updating\n");
+ return -1;
+ }
+ if (!data.force && !inject.in_place_update_dry_run) {
+ pr_err("The input file would be updated in place, "
+ "the --force option is required.\n");
+ return -1;
+ }
+ if (!inject.in_place_update_dry_run)
+ data.in_place_update = true;
+ } else if (perf_data__open(&inject.output)) {
perror("failed to create output file");
return -1;
}
@@ -950,5 +1035,6 @@ int cmd_inject(int argc, const char **argv)
out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
+ free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
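A small standalone sketch of the option parsing added by parse_vm_time_correlation() above: an optional leading "dry-run" token switches to dry-run mode and everything after it is kept as the correlation arguments. skip_spaces() is tools/kernel infrastructure, so a local stand-in is used here, and the argument string is a made-up placeholder.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *skip_spaces_local(const char *s)	/* stand-in for skip_spaces() */
{
	while (isspace((unsigned char)*s))
		s++;
	return s;
}

int main(void)
{
	const char *str = " dry-run extra-args";	/* hypothetical option value */
	const char *p = skip_spaces_local(str);
	const char *args;
	int dry_run = 0;

	if (!strncmp(p, "dry-run", strlen("dry-run"))) {
		dry_run = 1;
		args = p + strlen("dry-run");		/* remainder passed through */
	} else {
		args = str;
	}

	printf("dry_run=%d args=\"%s\"\n", dry_run, args);	/* dry_run=1 args=" extra-args" */
	return 0;
}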
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index cdd2b9f643f6..0fd2a74dbaca 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -18,6 +18,8 @@
#include "util/dso.h"
#include "util/map.h"
#include "util/symbol.h"
+#include "util/pmu.h"
+#include "util/pmu-hybrid.h"
#include <linux/err.h>
#define MEM_OPERATION_LOAD 0x1
@@ -62,8 +64,10 @@ static const char * const *record_mem_usage = __usage;
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, tmp_nr = 0;
+ int start, end;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
struct perf_mem_event *e;
@@ -87,11 +91,24 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 9; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 9; /* max number of arguments */
+ else
+ rec_argc = argc + 9 * perf_pmu__hybrid_pmu_num();
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ /*
+ * Save the allocated event name strings.
+ */
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
@@ -128,21 +145,11 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (mem->data_page_size)
rec_argv[i++] = "--data-page-size";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- e = perf_mem_events__ptr(j);
- if (!e->record)
- continue;
-
- if (!e->supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events__name(j));
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- }
+ start = i;
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &tmp_nr);
+ if (ret)
+ goto out;
+ end = i;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -156,14 +163,18 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (verbose > 0) {
pr_debug("calling: record ");
- while (rec_argv[j]) {
+ for (j = start; j < end; j++)
pr_debug("%s ", rec_argv[j]);
- j++;
- }
+
pr_debug("\n");
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 6b1507566770..2bfd41df621c 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -347,7 +347,10 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
goto out_cleanup;
if (params.command == 'D') { /* it shows definition */
- ret = show_probe_trace_events(pevs, npevs);
+ if (probe_conf.bootconfig)
+ ret = show_bootconfig_events(pevs, npevs);
+ else
+ ret = show_probe_trace_events(pevs, npevs);
goto out_cleanup;
}
@@ -581,6 +584,8 @@ __cmd_probe(int argc, const char **argv)
"Look for files with symbols relative to this directory"),
OPT_CALLBACK(0, "target-ns", NULL, "pid",
"target pid for namespace contexts", opt_set_target_ns),
+ OPT_BOOLEAN(0, "bootconfig", &probe_conf.bootconfig,
+ "Output probe definition with bootconfig format"),
OPT_END()
};
int ret;
@@ -692,6 +697,11 @@ __cmd_probe(int argc, const char **argv)
}
break;
case 'D':
+ if (probe_conf.bootconfig && params.uprobes) {
+ pr_err(" Error: --bootconfig doesn't support uprobes.\n");
+ return -EINVAL;
+ }
+ __fallthrough;
case 'a':
/* Ensure the last given target is used */
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 84803abeb942..71efe6573ee7 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -969,6 +969,15 @@ out:
return rc;
}
+static void set_timestamp_boundary(struct record *rec, u64 sample_time)
+{
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample_time;
+
+ if (sample_time)
+ rec->evlist->last_sample_time = sample_time;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -977,10 +986,7 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- if (rec->evlist->first_sample_time == 0)
- rec->evlist->first_sample_time = sample->time;
-
- rec->evlist->last_sample_time = sample->time;
+ set_timestamp_boundary(rec, sample->time);
if (rec->buildid_all)
return 0;
@@ -2402,6 +2408,17 @@ static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *eve
return perf_event__process_mmap2(tool, event, sample, machine);
}
+static int process_timestamp_boundary(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ struct record *rec = container_of(tool, struct record, tool);
+
+ set_timestamp_boundary(rec, sample->time);
+ return 0;
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -2436,6 +2453,8 @@ static struct record record = {
.namespaces = perf_event__process_namespaces,
.mmap = build_id__process_mmap,
.mmap2 = build_id__process_mmap2,
+ .itrace_start = process_timestamp_boundary,
+ .aux = process_timestamp_boundary,
.ordered_events = true,
},
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 36f9ccfeb38a..bc5c393021dc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -934,6 +934,8 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ evlist__check_mem_load_aux(session->evlist);
+
if (rep->stats_mode)
return stats_print(rep);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 1280cbfad4db..2030936cc891 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -55,6 +55,7 @@
#include <subcmd/pager.h>
#include <perf/evlist.h>
#include <linux/err.h>
+#include "util/dlfilter.h"
#include "util/record.h"
#include "util/util.h"
#include "perf.h"
@@ -79,6 +80,9 @@ static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;
+static struct dlfilter *dlfilter;
+static int dlargc;
+static char **dlargv;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
@@ -1337,17 +1341,18 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
u64 *ip)
{
- struct addr_location addr_al;
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = NULL;
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
if (sample_addr_correlates_sym(attr)) {
- thread__resolve(thread, &addr_al, sample);
- if (addr_al.sym)
- name = addr_al.sym->name;
+ if (!addr_al->thread)
+ thread__resolve(thread, addr_al, sample);
+ if (addr_al->sym)
+ name = addr_al->sym->name;
else
*ip = sample->addr;
} else {
@@ -1365,7 +1370,9 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al, FILE *fp)
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
size_t depth = thread_stack__depth(thread, sample->cpu);
@@ -1382,7 +1389,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
depth += 1;
- name = resolve_branch_sym(sample, evsel, thread, al, &ip);
+ name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
dlen += fprintf(fp, "(");
@@ -1417,6 +1424,13 @@ __weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
{
}
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine)
+{
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1424,8 +1438,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
- if (sample->insn_len == 0 && native_arch)
- arch_fetch_insn(sample, thread, machine);
+ script_fetch_insn(sample, thread, machine);
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
@@ -1460,6 +1473,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
@@ -1468,7 +1482,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
int printed = 0;
if (PRINT_FIELD(CALLINDENT))
- printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
+ printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
/* print branch_from information */
if (PRINT_FIELD(IP)) {
@@ -1553,41 +1567,49 @@ static const char *sample_flags_to_name(u32 flags)
return NULL;
}
-static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
{
const char *chars = PERF_IP_FLAG_CHARS;
- const int n = strlen(PERF_IP_FLAG_CHARS);
+ const size_t n = strlen(PERF_IP_FLAG_CHARS);
bool in_tx = flags & PERF_IP_FLAG_IN_TX;
const char *name = NULL;
- char str[33];
- int i, pos = 0;
+ size_t i, pos = 0;
name = sample_flags_to_name(flags & ~PERF_IP_FLAG_IN_TX);
if (name)
- return fprintf(fp, " %-15s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "%-15s%4s", name, in_tx ? "(x)" : "");
if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_BEGIN));
if (name)
- return fprintf(fp, " tr strt %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr strt %-7s%4s", name, in_tx ? "(x)" : "");
}
if (flags & PERF_IP_FLAG_TRACE_END) {
name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_END));
if (name)
- return fprintf(fp, " tr end %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr end %-7s%4s", name, in_tx ? "(x)" : "");
}
for (i = 0; i < n; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = chars[i];
}
for (; i < 32; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = '?';
}
- str[pos] = 0;
+ if (pos < sz)
+ str[pos] = 0;
+
+ return pos;
+}
+static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+{
+ char str[SAMPLE_FLAGS_BUF_SIZE];
+
+ perf_sample__sprintf_flags(flags, str, sizeof(str));
return fprintf(fp, " %-19s ", str);
}
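perf_sample__fprintf_flags() now delegates to perf_sample__sprintf_flags(), which writes into a caller-provided buffer and guards every store with pos < sz, so an undersized buffer is truncated rather than overrun. A standalone sketch of that bounded accumulation loop; the flag characters and the example value are placeholders, not the real PERF_IP_FLAG_CHARS set.

#include <stdio.h>
#include <string.h>

/* Bounded flag-to-string accumulation, in the style of
 * perf_sample__sprintf_flags() above. */
static size_t sprintf_flags(unsigned int flags, char *str, size_t sz)
{
	const char *chars = "bcrosyiABEx";	/* placeholder flag characters */
	const size_t n = strlen(chars);
	size_t i, pos = 0;

	for (i = 0; i < n; i++, flags >>= 1) {
		if ((flags & 1) && pos < sz)	/* never write past the buffer */
			str[pos++] = chars[i];
	}
	if (pos < sz)
		str[pos] = 0;
	return pos;
}

int main(void)
{
	char buf[8];
	size_t len = sprintf_flags(0x5, buf, sizeof(buf));	/* bits 0 and 2 set */

	printf("%zu \"%s\"\n", len, buf);	/* prints: 2 "br" */
	return 0;
}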
@@ -1917,7 +1939,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
static bool show_event(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
int depth = thread_stack__depth(thread, sample->cpu);
@@ -1933,7 +1956,7 @@ static bool show_event(struct perf_sample *sample,
} else {
const char *s = symbol_conf.graph_function;
u64 ip;
- const char *name = resolve_branch_sym(sample, evsel, thread, al,
+ const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
&ip);
unsigned nlen;
@@ -1958,6 +1981,7 @@ static bool show_event(struct perf_sample *sample,
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct evsel *evsel,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine)
{
struct thread *thread = al->thread;
@@ -1970,12 +1994,6 @@ static void process_event(struct perf_script *script,
if (output[type].fields == 0)
return;
- if (!show_event(sample, evsel, thread, al))
- return;
-
- if (evswitch__discard(&script->evswitch, evsel))
- return;
-
++es->samples;
perf_sample__fprintf_start(script, sample, thread, evsel,
@@ -1997,7 +2015,7 @@ static void process_event(struct perf_script *script,
perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) {
- perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
+ perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
return;
}
@@ -2160,10 +2178,23 @@ static int process_sample_event(struct perf_tool *tool,
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
+ struct addr_location addr_al;
+ int ret = 0;
+
+ /* Set thread to NULL to indicate addr_al and al are not initialized */
+ addr_al.thread = NULL;
+ al.thread = NULL;
+
+ ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
sample->time)) {
- return 0;
+ goto out_put;
}
if (debug_mode) {
@@ -2174,29 +2205,53 @@ static int process_sample_event(struct perf_tool *tool,
nr_unordered++;
}
last_timestamp = sample->time;
- return 0;
+ goto out_put;
}
+ if (filter_cpu(sample))
+ goto out_put;
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
- return -1;
+ ret = -1;
+ goto out_put;
}
if (al.filtered)
goto out_put;
- if (filter_cpu(sample))
+ if (!show_event(sample, evsel, al.thread, &al, &addr_al))
goto out_put;
- if (scripting_ops)
- scripting_ops->process_event(event, sample, evsel, &al);
- else
- process_event(scr, sample, evsel, &al, machine);
+ if (evswitch__discard(&scr->evswitch, evsel))
+ goto out_put;
+
+ ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
+
+ if (scripting_ops) {
+ struct addr_location *addr_al_ptr = NULL;
+
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
+ sample_addr_correlates_sym(&evsel->core.attr)) {
+ if (!addr_al.thread)
+ thread__resolve(al.thread, &addr_al, sample);
+ addr_al_ptr = &addr_al;
+ }
+ scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
+ } else {
+ process_event(scr, sample, evsel, &al, &addr_al, machine);
+ }
out_put:
- addr_location__put(&al);
- return 0;
+ if (al.thread)
+ addr_location__put(&al);
+ return ret;
}
static int process_attr(struct perf_tool *tool, union perf_event *event,
@@ -2415,6 +2470,17 @@ static int process_switch_event(struct perf_tool *tool,
sample->tid);
}
+static int process_auxtrace_error(struct perf_session *session,
+ union perf_event *event)
+{
+ if (scripting_ops && scripting_ops->process_auxtrace_error) {
+ scripting_ops->process_auxtrace_error(session, event);
+ return 0;
+ }
+
+ return perf_event__process_auxtrace_error(session, event);
+}
+
static int
process_lost_event(struct perf_tool *tool,
union perf_event *event,
@@ -2554,6 +2620,8 @@ static int __cmd_script(struct perf_script *script)
}
if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
script->tool.context_switch = process_switch_event;
+ if (scripting_ops && scripting_ops->process_auxtrace_error)
+ script->tool.auxtrace_error = process_auxtrace_error;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
if (script->show_cgroup_events)
@@ -2665,6 +2733,37 @@ static void list_available_languages(void)
fprintf(stderr, "\n");
}
+/* Find script file relative to current directory or exec path */
+static char *find_script(const char *script)
+{
+ char path[PATH_MAX];
+
+ if (!scripting_ops) {
+ const char *ext = strrchr(script, '.');
+
+ if (!ext)
+ return NULL;
+
+ scripting_ops = script_spec__lookup(++ext);
+ if (!scripting_ops)
+ return NULL;
+ }
+
+ if (access(script, R_OK)) {
+ char *exec_path = get_argv_exec_path();
+
+ if (!exec_path)
+ return NULL;
+ snprintf(path, sizeof(path), "%s/scripts/%s/%s",
+ exec_path, scripting_ops->dirname, script);
+ free(exec_path);
+ script = path;
+ if (access(script, R_OK))
+ return NULL;
+ }
+ return strdup(script);
+}
+
static int parse_scriptname(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
@@ -2706,7 +2805,9 @@ static int parse_scriptname(const struct option *opt __maybe_unused,
}
}
- script_name = strdup(script);
+ script_name = find_script(script);
+ if (!script_name)
+ script_name = strdup(script);
return 0;
}
@@ -3076,6 +3177,34 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
exit(0);
}
+static int add_dlarg(const struct option *opt __maybe_unused,
+ const char *s, int unset __maybe_unused)
+{
+ char *arg = strdup(s);
+ void *a;
+
+ if (!arg)
+ return -1;
+
+ a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
+ if (!a) {
+ free(arg);
+ return -1;
+ }
+
+ dlargv = a;
+ dlargv[dlargc++] = arg;
+
+ return 0;
+}
+
+static void free_dlarg(void)
+{
+ while (dlargc--)
+ free(dlargv[dlargc]);
+ free(dlargv);
+}
+
/*
* Some scripts specify the required events in their "xxx-record" file,
* this function will check if the events in perf.data match those
@@ -3489,6 +3618,7 @@ int cmd_script(int argc, const char **argv)
};
struct utsname uts;
char *script_path = NULL;
+ const char *dlfilter_file = NULL;
const char **__argv;
int i, j, err = 0;
struct perf_script script = {
@@ -3531,11 +3661,16 @@ int cmd_script(int argc, const char **argv)
"show latency attributes (irqs/preemption disabled, etc)"),
OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
list_available_scripts),
+ OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
+ list_available_dlfilters),
OPT_CALLBACK('s', "script", NULL, "name",
"script file name (lang:script name, script name, or *)",
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-script.xx script in specified language"),
+ OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
+ OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
+ add_dlarg),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('d', "debug-mode", &debug_mode,
"do various checks like samples ordering and lost events"),
@@ -3718,6 +3853,12 @@ int cmd_script(int argc, const char **argv)
rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
if (!rec_script_path && !rep_script_path) {
+ script_name = find_script(argv[0]);
+ if (script_name) {
+ argc -= 1;
+ argv += 1;
+ goto script_found;
+ }
usage_with_options_msg(script_usage, options,
"Couldn't find script `%s'\n\n See perf"
" script -l for available scripts.\n", argv[0]);
@@ -3810,7 +3951,7 @@ int cmd_script(int argc, const char **argv)
free(__argv);
exit(-1);
}
-
+script_found:
if (rec_script_path)
script_path = rec_script_path;
if (rep_script_path)
@@ -3848,6 +3989,12 @@ int cmd_script(int argc, const char **argv)
exit(-1);
}
+ if (dlfilter_file) {
+ dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
+ if (!dlfilter)
+ return -1;
+ }
+
if (!script_name) {
setup_pager();
use_browser = 0;
@@ -3947,8 +4094,12 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
}
+ err = dlfilter__start(dlfilter, session);
+ if (err)
+ goto out_delete;
+
if (script_name) {
- err = scripting_ops->start_script(script_name, argc, argv);
+ err = scripting_ops->start_script(script_name, argc, argv, session);
if (err)
goto out_delete;
pr_debug("perf script started with script %s\n\n", script_name);
@@ -3996,6 +4147,8 @@ out_delete:
if (script_started)
cleanup_scripting();
+ dlfilter__cleanup(dlfilter);
+ free_dlarg();
out:
return err;
}
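The new --dlarg option above accumulates its values into an argv-style array that is later handed to the dlfilter: each string is duplicated, the array grows via realloc(), and free_dlarg() releases everything in reverse. A standalone sketch of that pattern; the filter name and argument values in the comment are placeholders, and dlargc/dlargv are kept local to this sketch (in the patch they are file-scope statics).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int dlargc;
static char **dlargv;

static int add_dlarg(const char *s)		/* mirrors the option callback above */
{
	char *arg = strdup(s);
	void *a;

	if (!arg)
		return -1;
	a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
	if (!a) {
		free(arg);
		return -1;
	}
	dlargv = a;
	dlargv[dlargc++] = arg;
	return 0;
}

static void free_dlarg(void)
{
	while (dlargc--)
		free(dlargv[dlargc]);
	free(dlargv);
}

int main(void)
{
	/* e.g. perf script --dlfilter my.so --dlarg foo --dlarg bar=1 */
	add_dlarg("foo");
	add_dlarg("bar=1");
	for (int i = 0; i < dlargc; i++)
		printf("dlarg[%d]=%s\n", i, dlargv[i]);
	free_dlarg();
	return 0;
}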
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 69cb3635f5ef..2d570bfe7a56 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,6 +22,7 @@
#include "util/annotate.h"
#include "util/bpf-event.h"
+#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/dso.h"
@@ -1558,6 +1559,8 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
"number of thread to run event synthesize"),
+ OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
+ "monitor event in cgroup name only", parse_cgroups),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
@@ -1646,6 +1649,11 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
+ if (nr_cgroups > 0 && opts->record_cgroup) {
+ pr_err("--cgroup and --all-cgroups cannot be used together\n");
+ goto out_delete_evlist;
+ }
+
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 3529fc338c17..49fe78fb6538 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -1,552 +1,664 @@
[
{
+ "BriefDescription": "L2 code requests",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects"
+ "Speculative": "1",
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x22",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache"
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x24",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions"
+ "Speculative": "1",
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests that miss L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x27",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache"
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "RFO requests that hit L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x28",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that miss L2 cache."
+ "Speculative": "1",
+ "UMask": "0xc2"
},
{
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache"
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc2",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache"
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 cache lines filling L2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc4",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads."
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1f"
},
{
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xc8",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that hit L2 cache."
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache"
+ "Speculative": "1",
+ "UMask": "0x21"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of L2 code requests.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe4",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests"
+ "Speculative": "1",
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests to L2 cache.",
- "EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe7",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache"
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of L1D misses that are outstanding"
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All retired load instructions.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1"
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability."
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Demand Data Read requests",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand Data Read transactions pending for off-core. Highly correlated.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.L2_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources."
+ "PublicDescription": "Counts the number of off-core outstanding Demand Data Read transactions every cycle. A transaction is considered to be in the Off-core outstanding state between L2 cache miss and data-return to the core.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of cache lines replaced in L1 data cache."
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Cycles the superQ cannot take any more entries.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore"
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests sent to uncore",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM"
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads"
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ."
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that true miss the STLB.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x11",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions with locked access.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that true miss the STLB.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x12",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with locked access.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "Data_LA": "1"
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe7"
},
{
- "PEBS": "1",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "Data_LA": "1"
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Demand and prefetch data reads",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x42",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x81",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "Data_LA": "1"
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x28"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
- "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of L1D misses that are outstanding",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired store instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "PEBS": "1",
+ "BriefDescription": "RFO requests to L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
- "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
"EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1"
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc8"
},
{
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
- "Data_LA": "1"
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x22"
},
{
- "PEBS": "1",
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
- "EventCode": "0xd2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
"PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Any memory transaction that reached the SQ.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PEBScounters": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2"
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
- "EventCode": "0xF4",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
"PEBScounters": "0,1,2,3",
- "EventName": "SQ_MISC.SQ_FULL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles the thread is active and superQ cannot take any more entries."
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
}
] \ No newline at end of file
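Each entry above is consumed by perf's pmu-events infrastructure: EventCode and UMask (plus modifiers such as CounterMask and EdgeDetect) are what get programmed into the core PMU, and the EventName becomes a symbolic alias usable as, e.g., 'perf stat -e mem_load_retired.l1_hit'. As a minimal sketch (not part of this patch), the same event listed above, MEM_LOAD_RETIRED.L1_HIT (EventCode 0xd1, UMask 0x1), can also be opened directly as a raw event; the umask<<8 | event_select raw-config layout used here is the conventional Intel encoding and is an assumption of the sketch, not something taken from the JSON:

	/* Sketch only: count MEM_LOAD_RETIRED.L1_HIT for the calling thread. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_RAW;
		/* UMask 0x1 in bits 15:8, EventCode 0xd1 in bits 7:0 */
		attr.config = (0x1ULL << 8) | 0xd1;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		fd = syscall(SYS_perf_event_open, &attr, 0 /* this thread */, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("MEM_LOAD_RETIRED.L1_HIT: %llu\n",
			       (unsigned long long)count);
		close(fd);
		return 0;
	}

Reading the fd after the workload yields the same retired-load count that perf would report for the JSON-derived alias.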
diff --git a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
index 594c5551f610..5391c4f6eca3 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
@@ -1,102 +1,95 @@
[
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all microcode Floating Point assists.",
- "EventCode": "0xC1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ASSISTS.FP",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all microcode FP assists.",
- "CounterMask": "1"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts all microcode FP assists.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
}
] \ No newline at end of file
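The FP_ARITH_INST_RETIRED.* descriptions above state how many floating-point operations one count represents: 1 for the scalar events and one per packed element otherwise (2/4/8 for packed double at 128/256/512 bits, 4/8/16 for packed single), with FMA instructions already counted twice by the events themselves. A minimal sketch of folding those weights into a FLOP total, assuming the eight counter values have already been read out (the function and parameter names are illustrative, not from perf):

	#include <stdint.h>

	/* Sketch only: total FP operations from FP_ARITH_INST_RETIRED.* counts,
	 * using the per-count weights given in the event descriptions above. */
	static uint64_t total_flops(uint64_t scalar_single, uint64_t scalar_double,
				    uint64_t p128_single, uint64_t p128_double,
				    uint64_t p256_single, uint64_t p256_double,
				    uint64_t p512_single, uint64_t p512_double)
	{
		return  1 * (scalar_single + scalar_double) +
			4 * p128_single +  2 * p128_double +
			8 * p256_single +  4 * p256_double +
		       16 * p512_single +  8 * p512_double;
	}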
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index 9c3cfbfcec0f..4fa2a4186ee3 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -1,424 +1,482 @@
[
{
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path"
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "CounterMask": "5",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventName": "IDQ.MITE_CYCLES_OK",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB or MITE to the MS",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to IDQ while MS is busy"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
"PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9c",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
- "CounterMask": "5"
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
- "CounterMask": "1"
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CollectPEBSRecord": "2",
- "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
- "EventCode": "0xAB",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "DSB-to-MITE switch true penalty cycles."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x11",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced DSB miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x12",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x13",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x14",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x15",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500206",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500406",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x500806",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x501006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x502006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x504006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x508006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x510006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x520006",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x100206",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
- "MSRIndex": "0x3F7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
}
] \ No newline at end of file
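The event objects above are consumed by perf's pmu-events build, which turns each EventName into an alias whose programming details (EventCode, UMask, and, for the FRONTEND_RETIRED.LATENCY_GE_* events, the MSR 0x3F7 filter value) are applied automatically. Below is a minimal standalone sketch, assuming a kernel source tree as the working directory and not reflecting perf's own implementation, that lists the latency thresholds defined in this file:

#!/usr/bin/env python3
# Illustrative sketch (not part of this patch or of perf itself):
# list the FRONTEND_RETIRED.LATENCY_GE_* aliases defined above
# together with the MSR 0x3F7 filter values they program.
# Assumes the working directory is the root of a kernel source tree.
import json

path = "tools/perf/pmu-events/arch/x86/icelake/frontend.json"
with open(path) as f:
    events = json.load(f)

for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("FRONTEND_RETIRED.LATENCY_GE_"):
        print(f'{name:<40} MSRIndex={ev["MSRIndex"]} MSRValue={ev["MSRValue"]}')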
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
new file mode 100644
index 000000000000..432e45ac6814
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -0,0 +1,273 @@
+[
+ {
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricGroup": "Summary",
+ "MetricName": "IPC"
+ },
+ {
+ "MetricExpr": "UOPS_RETIRED.SLOTS / INST_RETIRED.ANY",
+ "BriefDescription": "Uops Per Instruction",
+ "MetricGroup": "Pipeline;Retire",
+ "MetricName": "UPI"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Instruction per taken branch",
+ "MetricGroup": "Branches;FetchBW;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CPI"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CLKS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricGroup": "SMT;TmaL1",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricGroup": "Flops",
+ "MetricName": "FLOPc"
+ },
+ {
+ "MetricExpr": "UOPS_EXECUTED.THREAD / ( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 )",
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricGroup": "Pipeline;PortsUtil",
+ "MetricName": "ILP"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricGroup": "BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricGroup": "InsType",
+ "MetricName": "IpLoad"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricGroup": "InsType",
+ "MetricName": "IpStore"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricGroup": "Branches;InsType",
+ "MetricName": "IpBranch"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricGroup": "Branches",
+ "MetricName": "IpCall"
+ },
+ {
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricGroup": "Branches;PGO",
+ "MetricName": "BpTkBranch"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )",
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricGroup": "Flops;FpArith;InsType",
+ "MetricName": "IpFLOP"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricGroup": "Summary;TmaL1",
+ "MetricName": "Instructions"
+ },
+ {
+ "MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricGroup": "LSD",
+ "MetricName": "LSD_Coverage"
+ },
+ {
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricGroup": "DSB;FetchBW",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
+ "MetricGroup": "MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricGroup": "MemoryBound;MemoryBW",
+ "MetricName": "MLP"
+ },
+ {
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricGroup": "MemoryTLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricGroup": "MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "MetricExpr": "1000 * ( ( OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD ) + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS ) / INST_RETIRED.ANY",
+ "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
+ "MetricGroup": "CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricGroup": "CacheMisses",
+ "MetricName": "L3MPKI"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricGroup": "Summary;Power",
+ "MetricName": "Average_Frequency"
+ },
+ {
+ "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricGroup": "Flops;HPC",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricGroup": "SMT",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricGroup": "HPC;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C3 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C6 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C7 residency percent per core",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C2 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C3 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C6 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C7 residency percent per package",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
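Each entry in this new metrics file pairs a MetricName with a MetricExpr written in terms of the event names defined in the surrounding JSON files; when a metric is requested, perf substitutes the measured counts into the expression. The following is a minimal sketch of that substitution for the simple IPC entry, assuming a kernel source tree as the working directory and using made-up counter values purely for illustration; it is not perf's metric engine:

#!/usr/bin/env python3
# Illustrative sketch only (not perf's metric implementation):
# evaluate the IPC MetricExpr from icl-metrics.json with made-up
# counter values. Assumes a kernel source tree as the working directory.
import json

counts = {  # hypothetical measured counts, for illustration only
    "INST_RETIRED.ANY": 1_200_000_000,
    "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,
}

path = "tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json"
with open(path) as f:
    metrics = json.load(f)

ipc = next(m for m in metrics if m["MetricName"] == "IPC")
expr = ipc["MetricExpr"]  # "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD"
for event, value in counts.items():
    expr = expr.replace(event, str(value))

print(ipc["BriefDescription"], "=", eval(expr))  # -> ... = 1.2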
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index f158366b9dd6..3701bd93a462 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -1,410 +1,574 @@
[
{
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
+ "EventCode": "0x54",
"EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address"
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Speculatively counts the number Transactional Synchronization Extensions (TSX) Aborts due to a data capacity limitation for transactional writes.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculatively counts the number TSX Aborts due to a data capacity limitation for transactional writes."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times HLE abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer"
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer"
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
+ "EventCode": "0x54",
"EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer."
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
"Counter": "0,1,2,3",
- "UMask": "0x40",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
"PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero."
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "EventCode": "0x5d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region"
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
- "EventCode": "0x5d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded"
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles where data return is pending for a Demand Data Read request who miss L3 cache.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2"
+ "PublicDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x6",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00002",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6"
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
"PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
- "EventCode": "0xc3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears due to memory ordering conflicts."
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times an HLE execution successfully committed",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
- "EventCode": "0xC8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution started."
+ "PublicDescription": "Counts the number of times HLE commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE commit succeeded.",
- "EventCode": "0xC8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE abort was triggered.",
- "EventCode": "0xc8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one)."
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts)."
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution started."
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC08000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM commit succeeded.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution successfully committed"
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted.",
- "Data_LA": "1"
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)"
+ "Counter": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions"
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times an HLE execution started.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "EventCode": "0xC9",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)"
+ "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x8",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "50021",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x10",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "20011",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "TakenAlone": "1"
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x20",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "TakenAlone": "1"
+ "TakenAlone": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x40",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "2",
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x80",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an RTM execution started.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x100",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x200",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "TakenAlone": "1"
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
}
] \ No newline at end of file
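The hunks above and below replace the generated Icelake event lists wholesale, so the quickest sanity check on the new entries is to load the JSON directly. Below is a minimal sketch (not part of this patch or of the perf sources) that dumps the encoding fields used in these files; it assumes an unpacked kernel tree and relies only on field names that appear in the entries above (EventName, EventCode, UMask, SampleAfterValue).

#!/usr/bin/env python3
# Hypothetical helper, not shipped with the kernel: print the raw encodings
# from one of the pmu-events JSON files touched by this patch.
import json

# Path as it appears in the diff header below; adjust to your tree.
PATH = "tools/perf/pmu-events/arch/x86/icelake/other.json"

with open(PATH) as f:
    events = json.load(f)

for ev in events:
    name = ev["EventName"]
    # Fixed-counter events such as TOPDOWN.SLOTS carry no EventCode.
    code = ev.get("EventCode", "fixed")
    umask = ev.get("UMask", "0x0")
    period = ev.get("SampleAfterValue", "-")
    print(f"{name:45} event={code:12} umask={umask:6} period={period}")
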
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index f8dfdb847224..a806b00f8616 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -1,121 +1,1090 @@
[
{
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the Top-down Microarchitecture Analysis method. This event is counted on a designated fixed counter (Fixed Counter 3) and is an architectural event.",
- "Counter": "35",
- "UMask": "0x4",
- "PEBScounters": "35",
- "EventName": "TOPDOWN.SLOTS",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184008000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184008000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
"SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x7",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
- "SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x18",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000800",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule."
+ "Speculative": "1",
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
- "EventCode": "0x28",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
- "SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHNTA instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT0 instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "EventCode": "0x32",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C8000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "35",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
"EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0001",
+ "Offcore": "1",
"PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHW instructions executed."
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
- "EventCode": "0xa4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
"SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
- "SampleAfterValue": "10000003",
- "BriefDescription": "Issue slots where no uops were being issued due to lack of back end resources."
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0010",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0800",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000001",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C2380",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "EventCode": "0xc1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01003C0400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0184000002",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04003C0020",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02003C0004",
+ "Offcore": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware."
+ "Speculative": "1",
+ "UMask": "0x1"
}
] \ No newline at end of file
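
For context on the OCR.* entries added above: they all share the same event select and umask (EventCode 0xB7/0xBB, UMask 0x1) and differ only in the MSRValue bit mask that is programmed into the offcore-response MSRs named by MSRIndex (0x1a6/0x1a7). The following is a minimal Python sketch, not part of the patch, that parses one of these pmu-events JSON files and prints that MSR programming per event; the file path is an assumption and may need adjusting to the actual tree layout.

  #!/usr/bin/env python3
  # Minimal sketch: dump the MSR programming of the OCR.* (offcore-response)
  # events from an icelake pmu-events JSON file. The path below is an
  # assumption; point it at the matching file in your perf source tree.
  import json

  PATH = "tools/perf/pmu-events/arch/x86/icelake/other.json"  # assumed path

  with open(PATH) as f:
      events = json.load(f)

  for ev in events:
      if not ev.get("EventName", "").startswith("OCR."):
          continue
      # EventCode/UMask select the offcore-response event itself; MSRValue is
      # the request/response/snoop bit mask written to the MSR in MSRIndex.
      print(f'{ev["EventName"]}: event={ev["EventCode"]} umask={ev["UMask"]} '
            f'msr={ev["MSRIndex"]} msr_value={ev["MSRValue"]}')

Since perf generates its event aliases from these same JSON fields at build time, the event names listed in the diff should be usable directly with perf stat/record on a matching CPU.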
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index 6d8311e634aa..4f4ce309c2f8 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -1,892 +1,1035 @@
[
{
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "3",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of uops executed on port 4 and 9",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "33",
- "UMask": "0x2",
- "PEBScounters": "33",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "Counter": "34",
- "UMask": "0x3",
- "PEBScounters": "34",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state."
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Not taken branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when: a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations, c. preceding lock RMW operations are not forwarded, d. store has the no-forward bit set (uncacheable/page-split/masked stores), e. all-blocking stores are used (mostly, fences and port I/O), and others. The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
"SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address."
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
- "EventCode": "0x0D",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of uops executed on port 2 and 3",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "EventCode": "0x0D",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Taken branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
- "EventCode": "0x0d",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path."
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
- "EventCode": "0x0E",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that RAT issues to RS"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of uops executed on port 6",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "EventCode": "0x14",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state"
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted."
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
- "EventCode": "0x3C",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
"SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted."
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "EventCode": "0x4c",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
"PEBScounters": "0,1,2,3",
- "EventName": "LOAD_HIT_PREFETCH.SWPF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch."
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread"
+ "Counter": "0,1,2,3",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "EventCode": "0x5E",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
- "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBScounters": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction."
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles without actually retired uops.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 0"
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Far branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 1"
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 2 and 3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_4_9",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 4 and 9"
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 5"
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 6"
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
- "EventCode": "0xa1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts the number of x87 uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 7 and 8"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "EventCode": "0xa2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
"EventName": "RESOURCE_STALLS.SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync)."
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
"PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1"
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4"
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Return instructions retired.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x9"
},
{
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles without actually retired instructions.",
"CollectPEBSRecord": "2",
- "EventCode": "0xA3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x14",
+ "CounterMask": "1",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20"
+ "PublicDescription": "This event counts cycles without actually retired instructions.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Taken conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty."
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
- "EventCode": "0xA6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
- "CounterMask": "2"
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LSD.UOPS",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Total execution stalls.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1"
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
- "EventCode": "0xa8",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"PEBScounters": "0,1,2,3",
- "EventName": "LSD.CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
- "CounterMask": "5"
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0xc"
},
{
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
"CollectPEBSRecord": "2",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle."
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1"
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops that RAT issues to RS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xb1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2"
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x5"
},
{
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3"
+ "Speculative": "1",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xb1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4"
+ "Speculative": "1",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops executed from any thread.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2"
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3"
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4"
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of x87 uops executed.",
- "EventCode": "0xB1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched."
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "EventCode": "0xC0",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10"
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retirement slots used each cycle.",
- "EventCode": "0xc2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used."
+ "Speculative": "1"
},
{
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Number of uops executed on port 0",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected."
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Conditional branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
- "BriefDescription": "All branch instructions retired."
+ "UMask": "0x11"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retirement slots used.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
- "EventCode": "0xc4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken conditional branch instructions retired."
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired."
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts return instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired."
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts not taken branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired."
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts conditional branch instructions retired.",
- "EventCode": "0xc4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired."
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired."
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired.",
- "EventCode": "0xC4",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired."
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch)."
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted branch instructions retired.",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
- "EventCode": "0xc5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xc5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "Data_LA": "1"
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "Data_LA": "1"
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
+ "BriefDescription": "TMA slots where uops got dropped",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xC5",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "Data_LA": "1"
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xcc",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MISC_RETIRED.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array."
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x14"
},
{
- "PublicDescription": "Counts number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted).",
- "EventCode": "0xcc",
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "EventName": "MISC_RETIRED.PAUSE_INST",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of retired PAUSE instructions."
+ "Speculative": "1",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end."
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All mispredicted branch instructions retired.",
"CollectPEBSRecord": "2",
- "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
- "EventCode": "0xec",
"Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core."
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
}
] \ No newline at end of file
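
The regenerated pipeline.json above keeps the same EventCode/UMask/CounterMask/Invert/EdgeDetect semantics as before; the change is largely a reordering of fields plus new "Speculative" annotations. As an illustration only (not part of this patch, and not how perf's jevents build actually consumes these files), the Python sketch below maps each entry to the equivalent raw perf event syntax; the hard-coded path is an assumption.

# Illustration: turn pmu-events JSON entries into perf event specs.
# Assumes the field semantics shown in the hunks above:
# EventCode/UMask/CounterMask/Invert/EdgeDetect map to event/umask/cmask/inv/edge.
import json

def to_perf_spec(ev):
    """Build a cpu/.../ event string; fixed-counter events (no EventCode) are skipped."""
    if "EventCode" not in ev:
        return None
    parts = [f"event={ev['EventCode']}", f"umask={ev.get('UMask', '0x0')}"]
    if "CounterMask" in ev:
        parts.append(f"cmask={ev['CounterMask']}")
    if ev.get("Invert") == "1":
        parts.append("inv")
    if ev.get("EdgeDetect") == "1":
        parts.append("edge")
    return "cpu/" + ",".join(parts) + "/  # " + ev["EventName"]

with open("tools/perf/pmu-events/arch/x86/icelake/pipeline.json") as f:
    for ev in json.load(f):
        spec = to_perf_spec(ev)
        if spec:
            print(spec)

For example, UOPS_RETIRED.SLOTS above (EventCode 0xc2, UMask 0x2) comes out as cpu/event=0xc2,umask=0x2/; once the vendor JSON is built into perf, the symbolic name can be passed to 'perf stat -e' directly.
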
diff --git a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
index 7180a900c175..f485f4664ea6 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
@@ -1,236 +1,245 @@
[
{
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 4K page."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page."
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "STLB flush attempts",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle."
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
- "EventCode": "0x08",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB."
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 4K page."
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle."
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
- "EventCode": "0x49",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Stores that miss the DTLB and hit the STLB."
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)"
+ "Speculative": "1",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts code misses in all ITLB (Instruction TLB) levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)"
+ "Speculative": "1",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle."
+ "Speculative": "1",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
- "CounterMask": "1"
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
- "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
"SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB."
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PEBScounters": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages."
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
- "EventCode": "0xBD",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "TLB_FLUSH.DTLB_THREAD",
- "SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries"
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
- "EventCode": "0xBD",
"Counter": "0,1,2,3",
- "UMask": "0x20",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"PEBScounters": "0,1,2,3",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts"
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
}
] \ No newline at end of file
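
As with pipeline.json, the rewritten virtual-memory.json carries the same events; what changes is the layout: within each event object the keys are now emitted in alphabetical order, which is what makes future vendor syncs diff cleanly. A minimal sketch of that normalization follows; it is an assumption that this matches the upstream converter's exact output (event order is left untouched, a 4-space indent is assumed, and the files are written without a trailing newline, as the "\ No newline at end of file" markers show).

# Sketch only: re-emit a pmu-events JSON file with alphabetically sorted keys,
# mirroring the layout visible in the hunks above.
import json
import sys

def normalize(path):
    with open(path) as f:
        events = json.load(f)
    # Keep the event order; only sort the keys inside each event object.
    text = json.dumps(events, indent=4, sort_keys=True)
    with open(path, "w") as f:
        f.write(text)  # no trailing newline, matching the diff

if __name__ == "__main__":
    for p in sys.argv[1:]:
        normalize(p)

Run twice over the same file, this should be a no-op the second time, which is the property that keeps these generated files stable between vendor updates.
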
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
new file mode 100644
index 000000000000..624762008aaa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -0,0 +1,706 @@
+[
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding Demand RFO request, increments by 1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle where the core is waiting on at least 1 outstanding demand RFO request, increments by 1. RFOs are initiated by a core as part of a data store operation. Demand RFO requests include RFOs, locks, and ItoM transactions. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding data read requests the core is waiting on.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle, increments by the number of outstanding data read requests the core is waiting on. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding demand data read request, increments by 1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "For every cycle where the core is waiting on at least 1 outstanding data read request, increments by 1. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts memory transactions sent to the uncore.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that true miss the STLB.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "All retired load instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "All retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired demand load instructions which missed L3 but serviced from local IXP memory as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired demand load instructions which missed L3 but serviced from remote IXP memory as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions which data source was serviced from L4",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of lines that are evicted by the L2 cache due to L2 cache fills. Evicted lines are delivered to the L3, which may or may not cache them, according to system load and priorities.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ }
+] \ No newline at end of file
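The OFFCORE_REQUESTS.* and OFFCORE_REQUESTS_OUTSTANDING.* entries above are meant to be used as a pair: the outstanding event accumulates, every cycle, how many requests of that type are still in flight past the L2, so dividing it by the matching request count approximates the average latency in core cycles, as the DEMAND_DATA_RD description suggests. A minimal sketch of that ratio, assuming both counts were collected over the same interval; the function and argument names are illustrative and not part of the event files:

def avg_outstanding_cycles(outstanding_all_data_rd, all_data_rd):
    # OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD (0x60/0x8) accumulates the number of
    # data reads still outstanding each cycle; OFFCORE_REQUESTS.ALL_DATA_RD (0xB0/0x8)
    # counts how many data reads were issued. Their ratio approximates the average
    # number of cycles a data read stays outstanding beyond the L2.
    if all_data_rd == 0:
        return 0.0
    return outstanding_all_data_rd / all_data_rd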
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
new file mode 100644
index 000000000000..bcedcd985e84
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
@@ -0,0 +1,95 @@
+[
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ }
+] \ No newline at end of file
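Each FP_ARITH_INST_RETIRED.* entry above states how many computation operations a single count represents (1 for scalar, up to 16 for 512-bit packed single precision), with FMA-class instructions already counted twice by the events themselves. A small sketch that only encodes those stated weights to turn raw counts into retired FLOPs; the dictionary and function names are illustrative:

FLOP_WEIGHTS = {
    "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
    "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
    "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
    "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
}

def retired_flops(counts):
    # counts maps event name -> raw count for one measurement interval.
    # No extra FMA factor is applied because the events already count FMA twice.
    return sum(FLOP_WEIGHTS.get(name, 0) * value for name, value in counts.items())

Dividing the result by the elapsed time of the interval then gives an estimate of FLOP/s.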
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
new file mode 100644
index 000000000000..cc59cee1cd57
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -0,0 +1,469 @@
+[
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
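The FRONTEND_RETIRED.LATENCY_GE_* entries above are cumulative thresholds (at least 1, 2, 4, ... 512 cycles of front-end starvation), so adjacent counts can be differenced into a histogram of starvation lengths; note they are marked TakenAlone, so in practice each is collected in its own group. A sketch under those assumptions, with illustrative names only:

THRESHOLDS = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

def starvation_histogram(counts):
    # counts maps threshold N -> value of FRONTEND_RETIRED.LATENCY_GE_N.
    # Because each event means "starved for at least N cycles", the number of
    # retired instructions whose starvation fell in [N, next N) is the difference
    # of adjacent counts; the last bucket is open-ended.
    hist = {}
    for lo, hi in zip(THRESHOLDS, THRESHOLDS[1:]):
        hist[(lo, hi)] = counts[lo] - counts[hi]
    hist[(THRESHOLDS[-1], None)] = counts[THRESHOLDS[-1]]
    return hist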
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
new file mode 100644
index 000000000000..d319d448e2aa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -0,0 +1,291 @@
+[
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
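Every entry above carries its raw programming fields (EventCode, UMask, and for the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* events an MSRIndex/MSRValue pair whose value matches the cycle threshold in the event name). When no symbolic alias is available, an EventCode/UMask pair can be expressed in perf's raw cpu/event=...,umask=.../ syntax; the helper below is only a sketch of that mapping and deliberately ignores the latency-threshold MSR, which perf programs through its own load-latency support:

def to_perf_raw(event):
    # event is one decoded JSON object from these files, e.g.
    # {"EventCode": "0xcd", "UMask": "0x1", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", ...}
    code = int(event["EventCode"], 16)
    umask = int(event["UMask"], 16)
    return f"cpu/event={code:#x},umask={umask:#x}/"

# Example: to_perf_raw(entry) -> "cpu/event=0xcd,umask=0x1/"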
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
new file mode 100644
index 000000000000..ef50d3a3392e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -0,0 +1,181 @@
+[
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "35",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writes that generate a demand reads for ownership (RFO) request and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "Offcore": "1",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+] \ No newline at end of file
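The OCR.* entries above are offcore-response events: the MSRIndex field names the MSR pair through which they are programmed (0x1a6/0x1a7) and MSRValue carries the request/response attribute bits. perf's build normally turns these JSON files into C tables under tools/perf/pmu-events/, but for a quick look they can also be read directly. A minimal sketch, assuming a kernel checkout with this patch applied so the path below exists:

import json

# Path added by this patch; adjust if reading from a different tree.
path = "tools/perf/pmu-events/arch/x86/icelakex/other.json"
with open(path) as f:
    events = json.load(f)

# List each offcore-response event with the MSR pair and attribute value it programs.
for ev in events:
    if ev.get("Offcore") == "1":
        print(f'{ev["EventName"]}: MSRs {ev["MSRIndex"]} value {ev["MSRValue"]}')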
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
new file mode 100644
index 000000000000..3cc71244e699
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -0,0 +1,972 @@
+[
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "CollectPEBSRecord": "2",
+ "Counter": "32",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "33",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "34",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "False dependencies due to partial compare on address.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 6",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x14"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "Speculative": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ }
+] \ No newline at end of file
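The two fixed-counter events at the top of pipeline.json, INST_RETIRED.ANY and CPU_CLK_UNHALTED.THREAD (the "Counter": "32" and "33" values denote fixed counters rather than general-purpose ones, consistent with the descriptions above), are the usual ingredients for instructions-per-cycle. A minimal sketch with made-up sample counts; on a real system the values would come from perf or rdpmc:

# Sample counter values for illustration only.
inst_retired_any = 2_400_000_000          # INST_RETIRED.ANY (fixed counter)
cpu_clk_unhalted_thread = 1_600_000_000   # CPU_CLK_UNHALTED.THREAD (fixed counter)

# Instructions per cycle over the measurement window.
ipc = inst_retired_any / cpu_clk_unhalted_thread
print(f"IPC = {ipc:.2f}")  # 1.50 with the sample values above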
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
new file mode 100644
index 000000000000..5f0d2c462940
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
@@ -0,0 +1,333 @@
+[
+ {
+ "BriefDescription": "2LM Tag Check : Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, no data in this line",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, existing data may be evicted to Far Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Read Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Write Hit in Near Memory Cache",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_WR_HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x0f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Half clockticks for IMC",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : All Activates",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0B",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1C",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Reads - RPQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Underfill reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "iMC"
+ }
+]
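The iMC CAS-count events above (UNC_M_CAS_COUNT.RD/WR/ALL) are the usual building blocks for DRAM bandwidth estimates, since each CAS command transfers one 64-byte cache line. A minimal sketch with made-up sample counts and a hypothetical one-second measurement window:

CACHE_LINE_BYTES = 64  # bytes transferred per CAS command

# Sample counter values for illustration only.
unc_m_cas_count_rd = 50_000_000   # UNC_M_CAS_COUNT.RD over the window
unc_m_cas_count_wr = 20_000_000   # UNC_M_CAS_COUNT.WR over the window
window_seconds = 1.0

# Convert CAS counts to approximate bandwidth in GB/s.
read_bw = unc_m_cas_count_rd * CACHE_LINE_BYTES / window_seconds / 1e9
write_bw = unc_m_cas_count_wr * CACHE_LINE_BYTES / window_seconds / 1e9
print(f"DRAM read ~{read_bw:.1f} GB/s, write ~{write_bw:.1f} GB/s")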
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
new file mode 100644
index 000000000000..52f2301582bb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-other.json
@@ -0,0 +1,2476 @@
+[
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote INVITOE requests (exclusive ownership of a cache line without receiving data) sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the uncore caching &amp;amp; home agent (CHA)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : All Lines Victimized",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0c",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFD01",
+ "UMaskExt": "0xC80FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FD01",
+ "UMaskExt": "0xC817FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FD01",
+ "UMaskExt": "0xCCC7FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FD01",
+ "UMaskExt": "0xC807FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FE01",
+ "UMaskExt": "0xC817FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FE01",
+ "UMaskExt": "0xCCC7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0xC001FF01",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD01",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFE01",
+ "UMaskExt": "0xC80FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FE01",
+ "UMaskExt": "0xC817FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FE01",
+ "UMaskExt": "0xC807FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0xC001FF04",
+ "UMaskExt": "0xC001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0xC001FD04",
+ "UMaskExt": "0xC001FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0xC001FE04",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FE04",
+ "UMaskExt": "0xCC43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFD01",
+ "UMaskExt": "0xC88FFD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FD01",
+ "UMaskExt": "0xC897FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FD01",
+ "UMaskExt": "0xC887FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC88FFE01",
+ "UMaskExt": "0xC88FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FE01",
+ "UMaskExt": "0xC897FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FE01",
+ "UMaskExt": "0xC887FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FD04",
+ "UMaskExt": "0xCC43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC43FF04",
+ "UMaskExt": "0xCC43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC887FF01",
+ "UMaskExt": "0xC887FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0xCCC7FF01",
+ "UMaskExt": "0xCCC7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0xC897FF01",
+ "UMaskExt": "0xC897FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0xC807FF01",
+ "UMaskExt": "0xC807FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "UMask": "0xC817FF01",
+ "UMaskExt": "0xC817FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0xC80FFF01",
+ "UMaskExt": "0xC80FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC816FE01",
+ "UMaskExt": "0xC816FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8177E01",
+ "UMaskExt": "0xC8177E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC816FE01",
+ "UMaskExt": "0xC816FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8177E01",
+ "UMaskExt": "0xC8177E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC896FE01",
+ "UMaskExt": "0xC896FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8977E01",
+ "UMaskExt": "0xC8977E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC806FE01",
+ "UMaskExt": "0xC806FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8077E01",
+ "UMaskExt": "0xC8077E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0xC886FE01",
+ "UMaskExt": "0xC886FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xC8877E01",
+ "UMaskExt": "0xC8877E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0xC8C7FF01",
+ "UMaskExt": "0xC8C7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0xCC57FF01",
+ "UMaskExt": "0xCC57FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FF04",
+ "UMaskExt": "0xCD43FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FD04",
+ "UMaskExt": "0xCD43FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0xCD43FE04",
+ "UMaskExt": "0xCD43FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8178A01",
+ "UMaskExt": "0xC8178A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8168A01",
+ "UMaskExt": "0xC8168A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8170A01",
+ "UMaskExt": "0xC8170A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc867fe01",
+ "UMaskExt": "0xc867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "UMask": "0xc86ffe01",
+ "UMaskExt": "0xc86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0xC8178A01",
+ "UMaskExt": "0xC8178A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FE01",
+ "UMaskExt": "0xCCD7FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FE04",
+ "UMaskExt": "0xC8F3FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xc8f3fe04",
+ "UMaskExt": "0xc8f3fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8178601",
+ "UMaskExt": "0xC81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8168601",
+ "UMaskExt": "0xC81686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8170601",
+ "UMaskExt": "0xC81706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0xC8178601",
+ "UMaskExt": "0xC81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FD04",
+ "UMaskExt": "0xC8F3FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0xCCD7FF01",
+ "UMaskExt": "0xCCD7FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0xC8F3FF04",
+ "UMaskExt": "0xC8F3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x1BC1FF",
+ "UMaskExt": "0x1BC1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "CounterType": "FREERUN",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "UMask": "0x03",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x0720",
+ "UMaskExt": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1C80",
+ "UMaskExt": "0x1C",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x97",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Number of kfclks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI LL"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
new file mode 100644
index 000000000000..2d1368958762
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "Clockticks of the power control unit (PCU)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
new file mode 100644
index 000000000000..1b9d03039c53
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
@@ -0,0 +1,245 @@
+[
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "Speculative": "1",
+ "UMask": "0x20"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 0a6a8c7f937f..5f5df6560202 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -38,6 +38,8 @@ GenuineIntel-6-7D,v1,icelake,core
GenuineIntel-6-7E,v1,icelake,core
GenuineIntel-6-8[CD],v1,icelake,core
GenuineIntel-6-A7,v1,icelake,core
+GenuineIntel-6-6A,v1,icelakex,core
+GenuineIntel-6-6C,v1,icelakex,core
GenuineIntel-6-86,v1,tremontx,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
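The two CPUID rows added above route additional Icelake server CPU models to the new icelakex event tables. A rough illustration of the matching, assuming perf's usual behaviour of treating the first mapfile column as an extended regex that must match the whole CPUID string (the real matching is done in perf's C code; this sketch is illustrative only):

# Illustrative sketch (not part of the patch) of mapfile.csv pattern matching.
import re

rows = [
    ("GenuineIntel-6-6A", "icelakex"),
    ("GenuineIntel-6-6C", "icelakex"),
]

cpuid = "GenuineIntel-6-6A"
for pattern, table in rows:
    if re.fullmatch(pattern, cpuid):
        print("%s -> %s event tables" % (cpuid, table))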
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
index 0b7096847991..895f5fc23965 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
@@ -5,68 +5,178 @@
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
*/
+/*
+ * Use Py_ssize_t for '#' formats to avoid DeprecationWarning: PY_SSIZE_T_CLEAN
+ * will be required for '#' formats.
+ */
+#define PY_SSIZE_T_CLEAN
+
#include <Python.h>
#include "../../../util/trace-event.h"
+#include "../../../util/event.h"
+#include "../../../util/symbol.h"
+#include "../../../util/thread.h"
+#include "../../../util/map.h"
+#include "../../../util/maps.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/session.h"
+#include "../../../util/srcline.h"
+#include "../../../util/srccode.h"
#if PY_MAJOR_VERSION < 3
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCObject_AsVoidPtr(arg1)
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+ PyString_FromStringAndSize((arg1), (arg2))
+#define _PyUnicode_AsUTF8(arg) \
+ PyString_AsString(arg)
PyMODINIT_FUNC initperf_trace_context(void);
#else
#define _PyCapsule_GetPointer(arg1, arg2) \
PyCapsule_GetPointer((arg1), (arg2))
+#define _PyBytes_FromStringAndSize(arg1, arg2) \
+ PyBytes_FromStringAndSize((arg1), (arg2))
+#define _PyUnicode_AsUTF8(arg) \
+ PyUnicode_AsUTF8(arg)
PyMODINIT_FUNC PyInit_perf_trace_context(void);
#endif
-static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+static struct scripting_context *get_args(PyObject *args, const char *name, PyObject **arg2)
{
- static struct scripting_context *scripting_context;
+ int cnt = 1 + !!arg2;
PyObject *context;
- int retval;
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!PyArg_UnpackTuple(args, name, 1, cnt, &context, arg2))
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_pc(scripting_context);
+ return _PyCapsule_GetPointer(context, NULL);
+}
- return Py_BuildValue("i", retval);
+static struct scripting_context *get_scripting_context(PyObject *args)
+{
+ return get_args(args, "context", NULL);
+}
+
+static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c = get_scripting_context(args);
+
+ if (!c)
+ return NULL;
+
+ return Py_BuildValue("i", common_pc(c));
}
static PyObject *perf_trace_context_common_flags(PyObject *obj,
PyObject *args)
{
- static struct scripting_context *scripting_context;
- PyObject *context;
- int retval;
+ struct scripting_context *c = get_scripting_context(args);
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!c)
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_flags(scripting_context);
-
- return Py_BuildValue("i", retval);
+ return Py_BuildValue("i", common_flags(c));
}
static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
PyObject *args)
{
- static struct scripting_context *scripting_context;
- PyObject *context;
- int retval;
+ struct scripting_context *c = get_scripting_context(args);
- if (!PyArg_ParseTuple(args, "O", &context))
+ if (!c)
return NULL;
- scripting_context = _PyCapsule_GetPointer(context, NULL);
- retval = common_lock_depth(scripting_context);
+ return Py_BuildValue("i", common_lock_depth(c));
+}
+static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c = get_scripting_context(args);
+
+ if (!c)
+ return NULL;
+
+ if (c->sample->ip && !c->sample->insn_len &&
+ c->al->thread->maps && c->al->thread->maps->machine)
+ script_fetch_insn(c->sample, c->al->thread, c->al->thread->maps->machine);
+
+ if (!c->sample->insn_len)
+ Py_RETURN_NONE; /* N.B. This is a return statement */
+
+ return _PyBytes_FromStringAndSize(c->sample->insn, c->sample->insn_len);
+}
+
+static PyObject *perf_set_itrace_options(PyObject *obj, PyObject *args)
+{
+ struct scripting_context *c;
+ const char *itrace_options;
+ int retval = -1;
+ PyObject *str;
+
+ c = get_args(args, "itrace_options", &str);
+ if (!c)
+ return NULL;
+
+ if (!c->session || !c->session->itrace_synth_opts)
+ goto out;
+
+ if (c->session->itrace_synth_opts->set) {
+ retval = 1;
+ goto out;
+ }
+
+ itrace_options = _PyUnicode_AsUTF8(str);
+
+ retval = itrace_do_parse_synth_opts(c->session->itrace_synth_opts, itrace_options, 0);
+out:
return Py_BuildValue("i", retval);
}
+static PyObject *perf_sample_src(PyObject *obj, PyObject *args, bool get_srccode)
+{
+ struct scripting_context *c = get_scripting_context(args);
+ unsigned int line = 0;
+ char *srcfile = NULL;
+ char *srccode = NULL;
+ PyObject *result;
+ struct map *map;
+ int len = 0;
+ u64 addr;
+
+ if (!c)
+ return NULL;
+
+ map = c->al->map;
+ addr = c->al->addr;
+
+ if (map && map->dso)
+ srcfile = get_srcline_split(map->dso, map__rip_2objdump(map, addr), &line);
+
+ if (get_srccode) {
+ if (srcfile)
+ srccode = find_sourceline(srcfile, line, &len);
+ result = Py_BuildValue("(sIs#)", srcfile, line, srccode, (Py_ssize_t)len);
+ } else {
+ result = Py_BuildValue("(sI)", srcfile, line);
+ }
+
+ free(srcfile);
+
+ return result;
+}
+
+static PyObject *perf_sample_srcline(PyObject *obj, PyObject *args)
+{
+ return perf_sample_src(obj, args, false);
+}
+
+static PyObject *perf_sample_srccode(PyObject *obj, PyObject *args)
+{
+ return perf_sample_src(obj, args, true);
+}
+
static PyMethodDef ContextMethods[] = {
{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
"Get the common preempt count event field value."},
@@ -74,6 +184,14 @@ static PyMethodDef ContextMethods[] = {
"Get the common flags event field value."},
{ "common_lock_depth", perf_trace_context_common_lock_depth,
METH_VARARGS, "Get the common lock depth event field value."},
+ { "perf_sample_insn", perf_sample_insn,
+ METH_VARARGS, "Get the machine code instruction."},
+ { "perf_set_itrace_options", perf_set_itrace_options,
+ METH_VARARGS, "Set --itrace options."},
+ { "perf_sample_srcline", perf_sample_srcline,
+ METH_VARARGS, "Get source file name and line number."},
+ { "perf_sample_srccode", perf_sample_srccode,
+ METH_VARARGS, "Get source file name, line number and line."},
{ NULL, NULL, 0, NULL}
};
@@ -96,6 +214,12 @@ PyMODINIT_FUNC PyInit_perf_trace_context(void)
NULL, /* m_clear */
NULL, /* m_free */
};
- return PyModule_Create(&moduledef);
+ PyObject *mod;
+
+ mod = PyModule_Create(&moduledef);
+ /* Add perf_script_context to the module so it can be imported */
+ PyObject_SetAttrString(mod, "perf_script_context", Py_None);
+
+ return mod;
}
#endif
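A minimal, hypothetical perf script sketching how the helpers exported above are meant to be called from Python handlers; the usage mirrors intel-pt-events.py later in this patch. perf injects the perf_script_context global and invokes trace_begin() and process_event(); the script name and output format here are illustrative only:

# myscript.py: run with something like "perf script -s myscript.py" on an
# Intel PT recording. Not part of the patch, only a usage sketch.
from perf_trace_context import perf_set_itrace_options, perf_sample_insn, \
        perf_sample_srcline

def trace_begin():
    # Ask perf to synthesize branches, ptwrite and power events, unless the
    # user already passed --itrace on the command line.
    perf_set_itrace_options(perf_script_context, "bepwx")

def process_event(param_dict):
    sample = param_dict["sample"]
    insn = perf_sample_insn(perf_script_context)       # machine code bytes, or None
    srcfile, line = perf_sample_srcline(perf_script_context)
    print("%x: %u insn bytes, %s:%u" % (sample["ip"],
                                        len(insn) if insn else 0,
                                        srcfile, line))

def trace_end():
    pass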
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
index 10fe2b6977d4..6b9877cfe23e 100644
--- a/tools/perf/scripts/python/bin/intel-pt-events-record
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -1,8 +1,8 @@
#!/bin/bash
#
-# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
-# to be specified with appropriate config terms.
+# print Intel PT Events including Power Events and PTWRITE. The intel_pt PMU
+# event needs to be specified with appropriate config terms.
#
if ! echo "$@" | grep -q intel_pt ; then
echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
index 9a9c92fcd026..beeac3fde9db 100644
--- a/tools/perf/scripts/python/bin/intel-pt-events-report
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -1,3 +1,3 @@
#!/bin/bash
-# description: print Intel PT Power Events and PTWRITE
-perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py \ No newline at end of file
+# description: print Intel PT Events including Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 711d4f9f5645..13f2d8a81610 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -113,6 +113,7 @@ import os
import random
import copy
import math
+from libxed import LibXED
pyside_version_1 = True
if not "--pyside-version-1" in sys.argv:
@@ -4747,94 +4748,6 @@ class MainWindow(QMainWindow):
dialog = AboutDialog(self.glb, self)
dialog.exec_()
-# XED Disassembler
-
-class xed_state_t(Structure):
-
- _fields_ = [
- ("mode", c_int),
- ("width", c_int)
- ]
-
-class XEDInstruction():
-
- def __init__(self, libxed):
- # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
- xedd_t = c_byte * 512
- self.xedd = xedd_t()
- self.xedp = addressof(self.xedd)
- libxed.xed_decoded_inst_zero(self.xedp)
- self.state = xed_state_t()
- self.statep = addressof(self.state)
- # Buffer for disassembled instruction text
- self.buffer = create_string_buffer(256)
- self.bufferp = addressof(self.buffer)
-
-class LibXED():
-
- def __init__(self):
- try:
- self.libxed = CDLL("libxed.so")
- except:
- self.libxed = None
- if not self.libxed:
- self.libxed = CDLL("/usr/local/lib/libxed.so")
-
- self.xed_tables_init = self.libxed.xed_tables_init
- self.xed_tables_init.restype = None
- self.xed_tables_init.argtypes = []
-
- self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
- self.xed_decoded_inst_zero.restype = None
- self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
-
- self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
- self.xed_operand_values_set_mode.restype = None
- self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
-
- self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
- self.xed_decoded_inst_zero_keep_mode.restype = None
- self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
-
- self.xed_decode = self.libxed.xed_decode
- self.xed_decode.restype = c_int
- self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
-
- self.xed_format_context = self.libxed.xed_format_context
- self.xed_format_context.restype = c_uint
- self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
-
- self.xed_tables_init()
-
- def Instruction(self):
- return XEDInstruction(self)
-
- def SetMode(self, inst, mode):
- if mode:
- inst.state.mode = 4 # 32-bit
- inst.state.width = 4 # 4 bytes
- else:
- inst.state.mode = 1 # 64-bit
- inst.state.width = 8 # 8 bytes
- self.xed_operand_values_set_mode(inst.xedp, inst.statep)
-
- def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
- self.xed_decoded_inst_zero_keep_mode(inst.xedp)
- err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
- if err:
- return 0, ""
- # Use AT&T mode (2), alternative is Intel (3)
- ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
- if not ok:
- return 0, ""
- if sys.version_info[0] == 2:
- result = inst.buffer.value
- else:
- result = inst.buffer.value.decode()
- # Return instruction length and the disassembled instruction text
- # For now, assume the length is in byte 166
- return inst.xedd[166], result
-
def TryOpen(file_name):
try:
return open(file_name, "rb")
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index a73847c8f548..1d3a189a9a54 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -1,5 +1,6 @@
-# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
-# Copyright (c) 2017, Intel Corporation.
+# SPDX-License-Identifier: GPL-2.0
+# intel-pt-events.py: Print Intel PT Events including Power Events and PTWRITE
+# Copyright (c) 2017-2021, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -15,16 +16,82 @@ from __future__ import print_function
import os
import sys
import struct
+import argparse
+
+from libxed import LibXED
+from ctypes import create_string_buffer, addressof
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
-# These perf imports are not used at present
-#from perf_trace_context import *
-#from Core import *
+from perf_trace_context import perf_set_itrace_options, \
+ perf_sample_insn, perf_sample_srccode
+
+try:
+ broken_pipe_exception = BrokenPipeError
+except:
+ broken_pipe_exception = IOError
+
+glb_switch_str = None
+glb_switch_printed = True
+glb_insn = False
+glb_disassembler = None
+glb_src = False
+glb_source_file_name = None
+glb_line_number = None
+glb_dso = None
+
+def get_optional_null(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return ""
+
+def get_optional_zero(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return 0
+
+def get_optional_bytes(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return bytes()
+
+def get_optional(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return "[unknown]"
+
+def get_offset(perf_dict, field):
+ if field in perf_dict:
+ return "+%#x" % perf_dict[field]
+ return ""
def trace_begin():
- print("Intel PT Power Events and PTWRITE")
+ ap = argparse.ArgumentParser(usage = "", add_help = False)
+ ap.add_argument("--insn-trace", action='store_true')
+ ap.add_argument("--src-trace", action='store_true')
+ global glb_args
+ global glb_insn
+ global glb_src
+ glb_args = ap.parse_args()
+ if glb_args.insn_trace:
+ print("Intel PT Instruction Trace")
+ itrace = "i0nsepwx"
+ glb_insn = True
+ elif glb_args.src_trace:
+ print("Intel PT Source Trace")
+ itrace = "i0nsepwx"
+ glb_insn = True
+ glb_src = True
+ else:
+ print("Intel PT Branch Trace, Power Events and PTWRITE")
+ itrace = "bepwx"
+ global glb_disassembler
+ try:
+ glb_disassembler = LibXED()
+ except:
+ glb_disassembler = None
+ perf_set_itrace_options(perf_script_context, itrace)
def trace_end():
print("End")
@@ -77,58 +144,212 @@ def print_pwrx(raw_buf):
print("deepest cstate: %u last cstate: %u wake reason: %#x" %
(deepest_cstate, last_cstate, wake_reason), end=' ')
-def print_common_start(comm, sample, name):
+def print_psb(raw_buf):
+ data = struct.unpack_from("<IQ", raw_buf)
+ offset = data[1]
+ print("offset: %#x" % (offset), end=' ')
+
+def common_start_str(comm, sample):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
- print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" %
- (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
- end=' ')
+ return "%16s %5u/%-5u [%03u] %9u.%09u " % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000)
+
+def print_common_start(comm, sample, name):
+ flags_disp = get_optional_null(sample, "flags_disp")
+ # Unused fields:
+ # period = sample["period"]
+ # phys_addr = sample["phys_addr"]
+ # weight = sample["weight"]
+ # transaction = sample["transaction"]
+ # cpumode = get_optional_zero(sample, "cpumode")
+ print(common_start_str(comm, sample) + "%7s %19s" % (name, flags_disp), end=' ')
+
+def print_instructions_start(comm, sample):
+ if "x" in get_optional_null(sample, "flags"):
+ print(common_start_str(comm, sample) + "x", end=' ')
+ else:
+ print(common_start_str(comm, sample), end=' ')
+
+def disassem(insn, ip):
+ inst = glb_disassembler.Instruction()
+ glb_disassembler.SetMode(inst, 0) # Assume 64-bit
+ buf = create_string_buffer(64)
+ buf.value = insn
+ return glb_disassembler.DisassembleOne(inst, addressof(buf), len(insn), ip)
+
+def print_common_ip(param_dict, sample, symbol, dso):
+ ip = sample["ip"]
+ offs = get_offset(param_dict, "symoff")
+ if "cyc_cnt" in sample:
+ cyc_cnt = sample["cyc_cnt"]
+ insn_cnt = get_optional_zero(sample, "insn_cnt")
+ ipc_str = " IPC: %#.2f (%u/%u)" % (insn_cnt / cyc_cnt, insn_cnt, cyc_cnt)
+ else:
+ ipc_str = ""
+ if glb_insn and glb_disassembler is not None:
+ insn = perf_sample_insn(perf_script_context)
+ if insn and len(insn):
+ cnt, text = disassem(insn, ip)
+ byte_str = ("%x" % ip).rjust(16)
+ if sys.version_info.major >= 3:
+ for k in range(cnt):
+ byte_str += " %02x" % insn[k]
+ else:
+ for k in xrange(cnt):
+ byte_str += " %02x" % ord(insn[k])
+ print("%-40s %-30s" % (byte_str, text), end=' ')
+ print("%s%s (%s)" % (symbol, offs, dso), end=' ')
+ else:
+ print("%16x %s%s (%s)" % (ip, symbol, offs, dso), end=' ')
+ if "addr_correlates_sym" in sample:
+ addr = sample["addr"]
+ dso = get_optional(sample, "addr_dso")
+ symbol = get_optional(sample, "addr_symbol")
+ offs = get_offset(sample, "addr_symoff")
+ print("=> %x %s%s (%s)%s" % (addr, symbol, offs, dso, ipc_str))
+ else:
+ print(ipc_str)
-def print_common_ip(sample, symbol, dso):
+def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
ip = sample["ip"]
- print("%16x %s (%s)" % (ip, symbol, dso))
+ if symbol == "[unknown]":
+ start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40)
+ else:
+ offs = get_offset(param_dict, "symoff")
+ start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40)
-def process_event(param_dict):
+ if with_insn and glb_insn and glb_disassembler is not None:
+ insn = perf_sample_insn(perf_script_context)
+ if insn and len(insn):
+ cnt, text = disassem(insn, ip)
+ start_str += text.ljust(30)
+
+ global glb_source_file_name
+ global glb_line_number
+ global glb_dso
+
+ source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context)
+ if source_file_name:
+ if glb_line_number == line_number and glb_source_file_name == source_file_name:
+ src_str = ""
+ else:
+ if len(source_file_name) > 40:
+ src_file = ("..." + source_file_name[-37:]) + " "
+ else:
+ src_file = source_file_name.ljust(41)
+ if source_line is None:
+ src_str = src_file + str(line_number).rjust(4) + " <source not found>"
+ else:
+ src_str = src_file + str(line_number).rjust(4) + " " + source_line
+ glb_dso = None
+ elif dso == glb_dso:
+ src_str = ""
+ else:
+ src_str = dso
+ glb_dso = dso
+
+ glb_line_number = line_number
+ glb_source_file_name = source_file_name
+
+ print(start_str, src_str)
+
+def do_process_event(param_dict):
+ global glb_switch_printed
+ if not glb_switch_printed:
+ print(glb_switch_str)
+ glb_switch_printed = True
event_attr = param_dict["attr"]
- sample = param_dict["sample"]
- raw_buf = param_dict["raw_buf"]
+ sample = param_dict["sample"]
+ raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
+ # Unused fields:
+ # callchain = param_dict["callchain"]
+ # brstack = param_dict["brstack"]
+ # brstacksym = param_dict["brstacksym"]
# Symbol and dso info are not always resolved
- if "dso" in param_dict:
- dso = param_dict["dso"]
- else:
- dso = "[unknown]"
-
- if "symbol" in param_dict:
- symbol = param_dict["symbol"]
- else:
- symbol = "[unknown]"
+ dso = get_optional(param_dict, "dso")
+ symbol = get_optional(param_dict, "symbol")
- if name == "ptwrite":
+ if name[0:12] == "instructions":
+ if glb_src:
+ print_srccode(comm, param_dict, sample, symbol, dso, True)
+ else:
+ print_instructions_start(comm, sample)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name[0:8] == "branches":
+ if glb_src:
+ print_srccode(comm, param_dict, sample, symbol, dso, False)
+ else:
+ print_common_start(comm, sample, name)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name == "ptwrite":
print_common_start(comm, sample, name)
print_ptwrite(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "cbr":
print_common_start(comm, sample, name)
print_cbr(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "mwait":
print_common_start(comm, sample, name)
print_mwait(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwre":
print_common_start(comm, sample, name)
print_pwre(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "exstop":
print_common_start(comm, sample, name)
print_exstop(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
elif name == "pwrx":
print_common_start(comm, sample, name)
print_pwrx(raw_buf)
- print_common_ip(sample, symbol, dso)
+ print_common_ip(param_dict, sample, symbol, dso)
+ elif name == "psb":
+ print_common_start(comm, sample, name)
+ print_psb(raw_buf)
+ print_common_ip(param_dict, sample, symbol, dso)
+ else:
+ print_common_start(comm, sample, name)
+ print_common_ip(param_dict, sample, symbol, dso)
+
+def process_event(param_dict):
+ try:
+ do_process_event(param_dict)
+ except broken_pipe_exception:
+ # Stop python printing broken pipe errors and traceback
+ sys.stdout = open(os.devnull, 'w')
+ sys.exit(1)
+
+def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ try:
+ print("%16s %5u/%-5u [%03u] %9u.%09u error type %u code %u: %s ip 0x%16x" %
+ ("Trace error", pid, tid, cpu, ts / 1000000000, ts %1000000000, typ, code, msg, ip))
+ except broken_pipe_exception:
+ # Stop python printing broken pipe errors and traceback
+ sys.stdout = open(os.devnull, 'w')
+ sys.exit(1)
+
+def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
+ global glb_switch_printed
+ global glb_switch_str
+ if out:
+ out_str = "Switch out "
+ else:
+ out_str = "Switch In "
+ if out_preempt:
+ preempt_str = "preempt"
+ else:
+ preempt_str = ""
+ if machine_pid == -1:
+ machine_str = ""
+ else:
+ machine_str = "machine PID %d" % machine_pid
+ glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
+ (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
+ glb_switch_printed = False
diff --git a/tools/perf/scripts/python/libxed.py b/tools/perf/scripts/python/libxed.py
new file mode 100644
index 000000000000..2c70a5a7eb9c
--- /dev/null
+++ b/tools/perf/scripts/python/libxed.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# libxed.py: Python wrapper for libxed.so
+# Copyright (c) 2014-2021, Intel Corporation.
+
+# To use Intel XED, libxed.so must be present. To build and install
+# libxed.so:
+# git clone https://github.com/intelxed/mbuild.git mbuild
+# git clone https://github.com/intelxed/xed
+# cd xed
+# ./mfile.py --share
+# sudo ./mfile.py --prefix=/usr/local install
+# sudo ldconfig
+#
+
+import sys
+
+from ctypes import CDLL, Structure, create_string_buffer, addressof, sizeof, \
+ c_void_p, c_bool, c_byte, c_char, c_int, c_uint, c_longlong, c_ulonglong
+
+# XED Disassembler
+
+class xed_state_t(Structure):
+
+ _fields_ = [
+ ("mode", c_int),
+ ("width", c_int)
+ ]
+
+class XEDInstruction():
+
+ def __init__(self, libxed):
+ # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
+ xedd_t = c_byte * 512
+ self.xedd = xedd_t()
+ self.xedp = addressof(self.xedd)
+ libxed.xed_decoded_inst_zero(self.xedp)
+ self.state = xed_state_t()
+ self.statep = addressof(self.state)
+ # Buffer for disassembled instruction text
+ self.buffer = create_string_buffer(256)
+ self.bufferp = addressof(self.buffer)
+
+class LibXED():
+
+ def __init__(self):
+ try:
+ self.libxed = CDLL("libxed.so")
+ except:
+ self.libxed = None
+ if not self.libxed:
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
+
+ self.xed_tables_init = self.libxed.xed_tables_init
+ self.xed_tables_init.restype = None
+ self.xed_tables_init.argtypes = []
+
+ self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
+ self.xed_decoded_inst_zero.restype = None
+ self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
+
+ self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
+ self.xed_operand_values_set_mode.restype = None
+ self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
+
+ self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
+ self.xed_decoded_inst_zero_keep_mode.restype = None
+ self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
+
+ self.xed_decode = self.libxed.xed_decode
+ self.xed_decode.restype = c_int
+ self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
+
+ self.xed_format_context = self.libxed.xed_format_context
+ self.xed_format_context.restype = c_uint
+ self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
+
+ self.xed_tables_init()
+
+ def Instruction(self):
+ return XEDInstruction(self)
+
+ def SetMode(self, inst, mode):
+ if mode:
+ inst.state.mode = 4 # 32-bit
+ inst.state.width = 4 # 4 bytes
+ else:
+ inst.state.mode = 1 # 64-bit
+ inst.state.width = 8 # 8 bytes
+ self.xed_operand_values_set_mode(inst.xedp, inst.statep)
+
+ def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
+ self.xed_decoded_inst_zero_keep_mode(inst.xedp)
+ err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
+ if err:
+ return 0, ""
+ # Use AT&T mode (2), alternative is Intel (3)
+ ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
+ if not ok:
+ return 0, ""
+ if sys.version_info[0] == 2:
+ result = inst.buffer.value
+ else:
+ result = inst.buffer.value.decode()
+ # Return instruction length and the disassembled instruction text
+ # For now, assume the length is in byte 166
+ return inst.xedd[166], result
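A minimal usage sketch for the LibXED wrapper above, mirroring disassem() in intel-pt-events.py. It assumes libxed.so is installed as described in the header comment; the single-byte NOP and the 0x1000 address are illustrative values only:

# Sketch only, not part of the patch.
from ctypes import create_string_buffer, addressof
from libxed import LibXED

xed = LibXED()
inst = xed.Instruction()
xed.SetMode(inst, 0)                 # 0 selects 64-bit mode, non-zero 32-bit

insn = b"\x90"                       # one-byte NOP
buf = create_string_buffer(64)
buf.value = insn
length, text = xed.DisassembleOne(inst, addressof(buf), len(insn), 0x1000)
print(length, text)                  # (0, "") is returned if decoding fails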
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index c4b888f18e9c..41e3cf6bb66c 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -510,8 +510,8 @@ static const char *shell_test__description(char *description, size_t size,
return description ? strim(description + 1) : NULL;
}
-#define for_each_shell_test(dir, base, ent) \
- while ((ent = readdir(dir)) != NULL) \
+#define for_each_shell_test(entlist, nr, base, ent) \
+ for (int __i = 0; __i < nr && (ent = entlist[__i]); __i++) \
if (!is_directory(base, ent) && ent->d_name[0] != '.')
static const char *shell_tests__dir(char *path, size_t size)
@@ -538,8 +538,9 @@ static const char *shell_tests__dir(char *path, size_t size)
static int shell_tests__max_desc_width(void)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs;
char path_dir[PATH_MAX];
const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
int width = 0;
@@ -547,11 +548,11 @@ static int shell_tests__max_desc_width(void)
if (path == NULL)
return -1;
- dir = opendir(path);
- if (!dir)
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
return -1;
- for_each_shell_test(dir, path, ent) {
+ for_each_shell_test(entlist, n_dirs, path, ent) {
char bf[256];
const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
@@ -563,7 +564,8 @@ static int shell_tests__max_desc_width(void)
}
}
- closedir(dir);
+ free(entlist);
+
return width;
}
@@ -578,7 +580,10 @@ static int shell_test__run(struct test *test, int subdir __maybe_unused)
char script[PATH_MAX];
struct shell_test *st = test->priv;
- path__join(script, sizeof(script), st->dir, st->file);
+ path__join(script, sizeof(script) - 3, st->dir, st->file);
+
+ if (verbose)
+ strncat(script, " -v", sizeof(script) - strlen(script) - 1);
err = system(script);
if (!err)
@@ -589,8 +594,9 @@ static int shell_test__run(struct test *test, int subdir __maybe_unused)
static int run_shell_tests(int argc, const char *argv[], int i, int width)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs;
char path_dir[PATH_MAX];
struct shell_test st = {
.dir = shell_tests__dir(path_dir, sizeof(path_dir)),
@@ -599,14 +605,14 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
if (st.dir == NULL)
return -1;
- dir = opendir(st.dir);
- if (!dir) {
+ n_dirs = scandir(st.dir, &entlist, NULL, alphasort);
+ if (n_dirs == -1) {
pr_err("failed to open shell test directory: %s\n",
st.dir);
return -1;
}
- for_each_shell_test(dir, st.dir, ent) {
+ for_each_shell_test(entlist, n_dirs, st.dir, ent) {
int curr = i++;
char desc[256];
struct test test = {
@@ -623,7 +629,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
test_and_print(&test, false, -1);
}
- closedir(dir);
+ free(entlist);
return 0;
}
@@ -722,19 +728,20 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
static int perf_test__list_shell(int argc, const char **argv, int i)
{
- DIR *dir;
+ struct dirent **entlist;
struct dirent *ent;
+ int n_dirs;
char path_dir[PATH_MAX];
const char *path = shell_tests__dir(path_dir, sizeof(path_dir));
if (path == NULL)
return -1;
- dir = opendir(path);
- if (!dir)
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
return -1;
- for_each_shell_test(dir, path, ent) {
+ for_each_shell_test(entlist, n_dirs, path, ent) {
int curr = i++;
char bf[256];
struct test t = {
@@ -747,7 +754,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
pr_info("%2d: %s\n", i, t.desc);
}
- closedir(dir);
+ free(entlist);
return 0;
}
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 83638097c3bc..a288035eb362 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -17,10 +17,6 @@
#include "callchain.h"
#include "util/synthetic-events.h"
-#if defined (__x86_64__) || defined (__i386__) || defined (__powerpc__)
-#include "arch-tests.h"
-#endif
-
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 94bd5d215d94..da013e90a945 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -84,9 +84,11 @@ make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
+make_libbpf_dynamic := LIBBPF_DYNAMIC=1
make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
make_no_libcrypto := NO_LIBCRYPTO=1
make_with_babeltrace:= LIBBABELTRACE=1
+make_with_coresight := CORESIGHT=1
make_no_sdt := NO_SDT=1
make_no_syscall_tbl := NO_SYSCALL_TABLE=1
make_with_clangllvm := LIBCLANGLLVM=1
@@ -148,11 +150,13 @@ run += make_no_libaudit
run += make_no_libbionic
run += make_no_auxtrace
run += make_no_libbpf
+run += make_libbpf_dynamic
run += make_no_libbpf_DEBUG
run += make_no_libcrypto
run += make_no_sdt
run += make_no_syscall_tbl
run += make_with_babeltrace
+run += make_with_coresight
run += make_with_clangllvm
run += make_with_libpfm4
run += make_help
@@ -266,6 +270,9 @@ test_make_install_info_O := $(test_ok)
test_make_install_pdf := $(test_ok)
test_make_install_pdf_O := $(test_ok)
+test_make_libbpf_dynamic := ldd $(PERF_O)/perf | grep -q libbpf
+test_make_libbpf_dynamic_O := ldd $$TMP_O/perf | grep -q libbpf
+
test_make_python_perf_so_O := test -f $$TMP_O/python/perf.so
test_make_perf_o_O := test -f $$TMP_O/perf.o
test_make_util_map_o_O := test -f $$TMP_O/util/map.o
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
index d4b0ef74defc..acd50944f6af 100644
--- a/tools/perf/tests/pfm.c
+++ b/tools/perf/tests/pfm.c
@@ -155,6 +155,16 @@ static int test__pfm_group(void)
.nr_events = 3,
.nr_groups = 1,
},
+ {
+ .events = "instructions}",
+ .nr_events = 1,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{{instructions}}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
};
for (i = 0; i < ARRAY_SIZE(table); i++) {
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index 2f9948b3d943..2aed20dc2262 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -22,10 +22,24 @@ compare_number()
}
# skip if --bpf-counters is not supported
-perf stat --bpf-counters true > /dev/null 2>&1 || exit 2
+if ! perf stat --bpf-counters true > /dev/null 2>&1; then
+ if [ "$1" == "-v" ]; then
+ echo "Skipping: --bpf-counters not supported"
+ perf --no-pager stat --bpf-counters true || true
+ fi
+ exit 2
+fi
base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$base_cycles" == "<not" ]; then
+ echo "Skipping: cycles event not counted"
+ exit 2
+fi
bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$bpf_cycles" == "<not" ]; then
+ echo "Failed: cycles not counted with --bpf-counters"
+ exit 1
+fi
compare_number $base_cycles $bpf_cycles
exit 0
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b85f005308a3..1100dd55b657 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -133,14 +133,12 @@ bool test__bp_account_is_supported(void);
bool test__wp_is_supported(void);
bool test__tsc_is_supported(void);
-#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
struct perf_sample;
int test__arch_unwind_sample(struct perf_sample *sample,
struct thread *thread);
#endif
-#endif
#if defined(__arm__)
int test__vectors_page(struct test *test, int subtest);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index ad0a70f0edaf..f5509a958e38 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -343,6 +343,29 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
browser->curr_hot = rb_last(&browser->entries);
}
+static struct annotation_line *annotate_browser__find_next_asm_line(
+ struct annotate_browser *browser,
+ struct annotation_line *al)
+{
+ struct annotation_line *it = al;
+
+ /* find next asm line */
+ list_for_each_entry_continue(it, browser->b.top, node) {
+ if (it->idx_asm >= 0)
+ return it;
+ }
+
+ /* no asm line found forwards, try backwards */
+ it = al;
+ list_for_each_entry_continue_reverse(it, browser->b.top, node) {
+ if (it->idx_asm >= 0)
+ return it;
+ }
+
+ /* There are no asm lines */
+ return NULL;
+}
+
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
struct annotation *notes = browser__annotation(&browser->b);
@@ -363,9 +386,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser)
browser->b.index = al->idx;
} else {
if (al->idx_asm < 0) {
- ui_helpline__puts("Only available for assembly lines.");
- browser->b.seek(&browser->b, -offset, SEEK_CUR);
- return false;
+ /* move cursor to next asm line */
+ al = annotate_browser__find_next_asm_line(browser, al);
+ if (!al) {
+ browser->b.seek(&browser->b, -offset, SEEK_CUR);
+ return false;
+ }
}
if (al->idx_asm < offset)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index b64bdc1a7026..1a909b53dc15 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -126,6 +126,7 @@ perf-y += parse-regs-options.o
perf-y += parse-sublevel-options.o
perf-y += term.o
perf-y += help-unknown-cmd.o
+perf-y += dlfilter.o
perf-y += mem-events.o
perf-y += vsprintf.o
perf-y += units.o
@@ -216,7 +217,7 @@ $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-flex.h: util/parse-
$(OUTPUT)util/parse-events-bison.c $(OUTPUT)util/parse-events-bison.h: util/parse-events.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/parse-events-bison.c -p parse_events_
$(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/expr-bison.c
@@ -226,7 +227,7 @@ $(OUTPUT)util/expr-flex.c $(OUTPUT)util/expr-flex.h: util/expr.l $(OUTPUT)util/e
$(OUTPUT)util/expr-bison.c $(OUTPUT)util/expr-bison.h: util/expr.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/expr-bison.c -p expr_
$(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-flex.h: util/pmu.l $(OUTPUT)util/pmu-bison.c
@@ -236,7 +237,7 @@ $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-flex.h: util/pmu.l $(OUTPUT)util/pmu-
$(OUTPUT)util/pmu-bison.c $(OUTPUT)util/pmu-bison.h: util/pmu.y
$(call rule_mkdir)
- $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) \
+ $(Q)$(call echo-cmd,bison)$(BISON) -v $< -d $(PARSER_DEBUG_BISON) $(BISON_FILE_PREFIX_MAP) \
-o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
FLEX_GE_26 := $(shell expr $(shell $(FLEX) --version | sed -e 's/flex \([0-9]\+\).\([0-9]\+\)/\1\2/g') \>\= 26)
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 2539d4baec44..58b7069c5a5f 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -26,6 +26,7 @@
#include "symbol.h"
#include "thread.h"
#include "thread-stack.h"
+#include "tsc.h"
#include "tool.h"
#include "util/synthetic-events.h"
@@ -45,6 +46,8 @@ struct arm_spe {
struct machine *machine;
u32 pmu_type;
+ struct perf_tsc_conversion tc;
+
u8 timeless_decoding;
u8 data_queued;
@@ -231,7 +234,7 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
struct arm_spe_record *record = &speq->decoder->record;
if (!spe->timeless_decoding)
- sample->time = speq->timestamp;
+ sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
sample->ip = record->from_ip;
sample->cpumode = arm_spe_cpumode(spe, sample->ip);
@@ -431,12 +434,36 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
{
struct arm_spe *spe = speq->spe;
+ struct arm_spe_record *record;
int ret;
if (!spe->kernel_start)
spe->kernel_start = machine__kernel_start(spe->machine);
while (1) {
+ /*
+ * The usual logic is to decode the packets first and then use the
+ * resulting record to synthesize a sample; here the flow is
+ * reversed: arm_spe_sample() is called to synthesize a sample
+ * before arm_spe_decode().
+ *
+ * There are two reasons for this:
+ * 1. When the queue is set up in arm_spe__setup_queue(), trace
+ * data has already been decoded and a record generated, but no
+ * sample has been synthesized for it yet; the leftover record
+ * gets its sample synthesized here.
+ * 2. After decoding trace data, the record timestamp must be
+ * compared with the timestamp of the coming perf event. If the
+ * record is later, bail out and push the record onto the auxtrace
+ * heap, so that its sample is deferred until this point is reached
+ * again. This keeps samples from Arm SPE trace data correlated
+ * with other perf events in the correct time order.
+ */
+ ret = arm_spe_sample(speq);
+ if (ret)
+ return ret;
+
ret = arm_spe_decode(speq->decoder);
if (!ret) {
pr_debug("No data or all data has been processed.\n");
@@ -450,10 +477,17 @@ static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
if (ret < 0)
continue;
- ret = arm_spe_sample(speq);
- if (ret)
- return ret;
+ record = &speq->decoder->record;
+ /* Update timestamp for the last record */
+ if (record->timestamp > speq->timestamp)
+ speq->timestamp = record->timestamp;
+
+ /*
+ * If the timestamp of the queue is later than the timestamp of the
+ * coming perf event, bail out so that the perf event can be
+ * processed first.
+ */
if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
*timestamp = speq->timestamp;
return 0;
@@ -666,7 +700,7 @@ static int arm_spe_process_event(struct perf_session *session,
}
if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
+ timestamp = perf_time_to_tsc(sample->time, &spe->tc);
else
timestamp = 0;
@@ -683,11 +717,7 @@ static int arm_spe_process_event(struct perf_session *session,
sample->time);
}
} else if (timestamp) {
- if (event->header.type == PERF_RECORD_EXIT) {
- err = arm_spe_process_queues(spe, timestamp);
- if (err)
- return err;
- }
+ err = arm_spe_process_queues(spe, timestamp);
}
return err;
@@ -1006,6 +1036,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
+ struct perf_record_time_conv *tc = &session->time_conv;
struct arm_spe *spe;
int err;
@@ -1027,6 +1058,28 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
+
+ /*
+ * The synthesized event PERF_RECORD_TIME_CONV has already been
+ * handled and the hardware clock parameters are stored in the
+ * session context. Copy these parameters into the struct
+ * perf_tsc_conversion in "spe->tc", which is used later to convert
+ * between the clock counter and timestamps.
+ *
+ * For backward compatibility, copy the fields starting from
+ * "time_cycles" only if they are contained in the event.
+ */
+ spe->tc.time_shift = tc->time_shift;
+ spe->tc.time_mult = tc->time_mult;
+ spe->tc.time_zero = tc->time_zero;
+
+ if (event_contains(*tc, time_cycles)) {
+ spe->tc.time_cycles = tc->time_cycles;
+ spe->tc.time_mask = tc->time_mask;
+ spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
+ spe->tc.cap_user_time_short = tc->cap_user_time_short;
+ }
+
spe->auxtrace.process_event = arm_spe_process_event;
spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
spe->auxtrace.flush_events = arm_spe_flush;
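For reference, the conversion state wired up above can be used in both directions. A minimal sketch, assuming "spe->tc" has been filled from PERF_RECORD_TIME_CONV as in the hunk above; the two helper names are hypothetical:

static u64 spe_counter_to_perf_time(struct arm_spe *spe, u64 counter)
{
	/* Hardware counter value -> kernel (perf) timestamp, as used for sample->time */
	return tsc_to_perf_time(counter, &spe->tc);
}

static u64 spe_perf_time_to_counter(struct arm_spe *spe, u64 sample_time)
{
	/* Kernel timestamp -> hardware counter value, as used to order the auxtrace heap */
	return sample_time ? perf_time_to_tsc(sample_time, &spe->tc) : 0;
}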
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 1b4091a3b508..9350eeb3a3fc 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1120,8 +1120,9 @@ int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
auxtrace_queue_data_cb, &qd);
}
-void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
+ int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
size_t adj = buffer->data_offset & (page_size - 1);
size_t size = buffer->size + adj;
off_t file_offset = buffer->data_offset - adj;
@@ -1130,7 +1131,7 @@ void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
if (buffer->data)
return buffer->data;
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
+ addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
if (addr == MAP_FAILED)
return NULL;
@@ -1404,10 +1405,9 @@ static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *m
* about the options parsed here, which is introduced after this cset,
* when support in 'perf script' for these options is introduced.
*/
-int itrace_parse_synth_opts(const struct option *opt, const char *str,
- int unset)
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ const char *str, int unset)
{
- struct itrace_synth_opts *synth_opts = opt->value;
const char *p;
char *endptr;
bool period_type_set = false;
@@ -1569,6 +1569,9 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
case 'q':
synth_opts->quick += 1;
break;
+ case 'Z':
+ synth_opts->timeless_decoding = true;
+ break;
case ' ':
case ',':
break;
@@ -1592,6 +1595,11 @@ out_err:
return -EINVAL;
}
+int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
+{
+ return itrace_do_parse_synth_opts(opt->value, str, unset);
+}
+
static const char * const auxtrace_error_type_name[] = {
[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};
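A rough sketch of how a caller could use the new itrace_do_parse_synth_opts() entry point directly, without going through an option callback; the option string "Z" is just an example and the helper name is hypothetical:

static int parse_timeless_itrace(struct itrace_synth_opts *opts)
{
	int err = itrace_do_parse_synth_opts(opts, "Z", /*unset=*/0);

	if (err)
		return err;

	/* 'Z' requests timeless decoding, i.e. timestamps are ignored */
	return opts->timeless_decoding ? 0 : -1;
}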
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index a4fbb33b7245..cc1c1b9cec9c 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -89,6 +89,10 @@ enum itrace_period_type {
* @tlb: whether to synthesize TLB events
* @remote_access: whether to synthesize remote access events
* @mem: whether to synthesize memory events
+ * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
+ * @vm_time_correlation: perform VM Time Correlation
+ * @vm_tm_corr_dry_run: VM Time Correlation dry-run
+ * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
* @callchain_sz: maximum callchain size
* @last_branch_sz: branch context size
* @period: 'instructions' events period
@@ -128,6 +132,10 @@ struct itrace_synth_opts {
bool tlb;
bool remote_access;
bool mem;
+ bool timeless_decoding;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ char *vm_tm_corr_args;
unsigned int callchain_sz;
unsigned int last_branch_sz;
unsigned long long period;
@@ -444,7 +452,7 @@ static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
u64 head = READ_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
- rmb();
+ smp_rmb();
return head;
}
@@ -458,7 +466,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
#endif
/* Ensure all reads are done after we read the head */
- rmb();
+ smp_rmb();
return head;
}
@@ -470,7 +478,7 @@ static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
#endif
/* Ensure all reads are done before we write the tail out */
- mb();
+ smp_mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
pc->aux_tail = tail;
#else
@@ -525,7 +533,11 @@ int auxtrace_queue_data(struct perf_session *session, bool samples,
bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
struct auxtrace_buffer *buffer);
-void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
+void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
+static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+{
+ return auxtrace_buffer__get_data_rw(buffer, fd, false);
+}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
@@ -595,6 +607,8 @@ s64 perf_event__process_auxtrace(struct perf_session *session,
union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
union perf_event *event);
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
+ const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
@@ -691,9 +705,26 @@ int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
return 0;
}
-#define perf_event__process_auxtrace_info 0
-#define perf_event__process_auxtrace 0
-#define perf_event__process_auxtrace_error 0
+static inline
+int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
+
+static inline
+int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ return 0;
+}
static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
@@ -710,6 +741,14 @@ void events_stats__auxtrace_error_warn(const struct events_stats *stats
}
static inline
+int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
+ const char *str __maybe_unused, int unset __maybe_unused)
+{
+ pr_err("AUX area tracing not supported\n");
+ return -EINVAL;
+}
+
+static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
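The read-only auxtrace_buffer__get_data() above is now a thin wrapper over the _rw variant. A small sketch of the distinction, assuming "buffer" and "fd" come from the usual auxtrace queue iteration; the wrapper function is hypothetical:

static void *map_aux_buffer(struct auxtrace_buffer *buffer, int fd, bool writable)
{
	if (writable)
		/* PROT_READ | PROT_WRITE, e.g. for rewriting trace data in place */
		return auxtrace_buffer__get_data_rw(buffer, fd, true);

	/* PROT_READ only, same behaviour as before this change */
	return auxtrace_buffer__get_data(buffer, fd);
}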
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 5ed674a2f55e..21c8e71162b1 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -7,12 +7,8 @@
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>
@@ -37,13 +33,6 @@ static inline void *u64_to_ptr(__u64 ptr)
return (void *)(unsigned long)ptr;
}
-static void set_max_rlimit(void)
-{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-}
-
static struct bpf_counter *bpf_counter_alloc(void)
{
struct bpf_counter *counter;
@@ -297,33 +286,6 @@ struct bpf_counter_ops bpf_program_profiler_ops = {
.install_pe = bpf_program_profiler__install_pe,
};
-static __u32 bpf_link_get_id(int fd)
-{
- struct bpf_link_info link_info = {0};
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.id;
-}
-
-static __u32 bpf_link_get_prog_id(int fd)
-{
- struct bpf_link_info link_info = {0};
- __u32 link_info_len = sizeof(link_info);
-
- bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
- return link_info.prog_id;
-}
-
-static __u32 bpf_map_get_id(int fd)
-{
- struct bpf_map_info map_info = {0};
- __u32 map_info_len = sizeof(map_info);
-
- bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
- return map_info.id;
-}
-
static bool bperf_attr_map_compatible(int attr_map_fd)
{
struct bpf_map_info map_info = {0};
@@ -385,20 +347,6 @@ static int bperf_lock_attr_map(struct target *target)
return map_fd;
}
-/* trigger the leader program on a cpu */
-static int bperf_trigger_reading(int prog_fd, int cpu)
-{
- DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
- .ctx_in = NULL,
- .ctx_size_in = 0,
- .flags = BPF_F_TEST_RUN_ON_CPU,
- .cpu = cpu,
- .retval = 0,
- );
-
- return bpf_prog_test_run_opts(prog_fd, &opts);
-}
-
static int bperf_check_target(struct evsel *evsel,
struct target *target,
enum bperf_filter_type *filter_type,
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index d6d907c3dcf9..65ebaa6694fb 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -3,6 +3,10 @@
#define __PERF_BPF_COUNTER_H 1
#include <linux/list.h>
+#include <sys/resource.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
struct evsel;
struct target;
@@ -76,4 +80,52 @@ static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
#endif /* HAVE_BPF_SKEL */
+static inline void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static inline __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static inline __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static inline __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = { .id = 0, };
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+/* trigger the leader program on a cpu */
+static inline int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
#endif /* __PERF_BPF_COUNTER_H */
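The helpers moved into this header are plain wrappers around libbpf calls, so other BPF counter code can share them; set_max_rlimit() is normally called before loading BPF objects to lift RLIMIT_MEMLOCK. A hedged sketch of a typical read path; the function name, prog_fd and nr_cpus are placeholders:

static int trigger_all_cpus(int prog_fd, int nr_cpus)
{
	int cpu, err;

	/* Run the leader program on each CPU so per-CPU values get updated */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = bperf_trigger_reading(prog_fd, cpu);
		if (err)
			return err;
	}
	return 0;
}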
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index f24ab4585553..e819a4f30fc2 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -9,6 +9,7 @@
#include <linux/zalloc.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/statfs.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
@@ -45,6 +46,49 @@ static int open_cgroup(const char *name)
return fd;
}
+#ifdef HAVE_FILE_HANDLE
+int read_cgroup_id(struct cgroup *cgrp)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+ struct {
+ struct file_handle fh;
+ uint64_t cgroup_id;
+ } handle;
+ int mount_id;
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
+ return -1;
+
+ scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
+
+ handle.fh.handle_bytes = sizeof(handle.cgroup_id);
+ if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
+ return -1;
+
+ cgrp->id = handle.cgroup_id;
+ return 0;
+}
+#endif /* HAVE_FILE_HANDLE */
+
+#ifndef CGROUP2_SUPER_MAGIC
+#define CGROUP2_SUPER_MAGIC 0x63677270
+#endif
+
+int cgroup_is_v2(const char *subsys)
+{
+ char mnt[PATH_MAX + 1];
+ struct statfs stbuf;
+
+ if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, subsys))
+ return -1;
+
+ if (statfs(mnt, &stbuf) < 0)
+ return -1;
+
+ return (stbuf.f_type == CGROUP2_SUPER_MAGIC);
+}
+
static struct cgroup *evlist__find_cgroup(struct evlist *evlist, const char *str)
{
struct evsel *counter;
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index 162906f3412a..de5b272560ab 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -2,6 +2,7 @@
#ifndef __CGROUP_H__
#define __CGROUP_H__
+#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/rbtree.h>
#include "util/env.h"
@@ -38,4 +39,15 @@ struct cgroup *cgroup__find(struct perf_env *env, uint64_t id);
void perf_env__purge_cgroups(struct perf_env *env);
+#ifdef HAVE_FILE_HANDLE
+int read_cgroup_id(struct cgroup *cgrp);
+#else
+static inline int read_cgroup_id(struct cgroup *cgrp __maybe_unused)
+{
+ return -1;
+}
+#endif /* HAVE_FILE_HANDLE */
+
+int cgroup_is_v2(const char *subsys);
+
#endif /* __CGROUP_H__ */
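One plausible way the two new helpers might be combined; the subsystem name "perf_event" mirrors read_cgroup_id() above and the wrapper is hypothetical:

static int setup_cgroup_id(struct cgroup *cgrp)
{
	/* 1 for cgroup v2, 0 for v1, -1 if the mountpoint cannot be resolved */
	if (cgroup_is_v2("perf_event") <= 0)
		return -1;

	/* Fills cgrp->id via name_to_handle_at(); returns -1 without HAVE_FILE_HANDLE */
	return read_cgroup_id(cgrp);
}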
diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c
index 1b52402a8923..ec77e2a7b3ca 100644
--- a/tools/perf/util/cputopo.c
+++ b/tools/perf/util/cputopo.c
@@ -12,6 +12,7 @@
#include "cpumap.h"
#include "debug.h"
#include "env.h"
+#include "pmu-hybrid.h"
#define CORE_SIB_FMT \
"%s/devices/system/cpu/cpu%d/topology/core_siblings_list"
@@ -351,3 +352,82 @@ void numa_topology__delete(struct numa_topology *tp)
free(tp);
}
+
+static int load_hybrid_node(struct hybrid_topology_node *node,
+ struct perf_pmu *pmu)
+{
+ const char *sysfs;
+ char path[PATH_MAX];
+ char *buf = NULL, *p;
+ FILE *fp;
+ size_t len = 0;
+
+ node->pmu_name = strdup(pmu->name);
+ if (!node->pmu_name)
+ return -1;
+
+ sysfs = sysfs__mountpoint();
+ if (!sysfs)
+ goto err;
+
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, pmu->name);
+ fp = fopen(path, "r");
+ if (!fp)
+ goto err;
+
+ if (getline(&buf, &len, fp) <= 0) {
+ fclose(fp);
+ goto err;
+ }
+
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+
+ fclose(fp);
+ node->cpus = buf;
+ return 0;
+
+err:
+ zfree(&node->pmu_name);
+ free(buf);
+ return -1;
+}
+
+struct hybrid_topology *hybrid_topology__new(void)
+{
+ struct perf_pmu *pmu;
+ struct hybrid_topology *tp = NULL;
+ u32 nr, i = 0;
+
+ nr = perf_pmu__hybrid_pmu_num();
+ if (nr == 0)
+ return NULL;
+
+ tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
+ if (!tp)
+ return NULL;
+
+ tp->nr = nr;
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (load_hybrid_node(&tp->nodes[i], pmu)) {
+ hybrid_topology__delete(tp);
+ return NULL;
+ }
+ i++;
+ }
+
+ return tp;
+}
+
+void hybrid_topology__delete(struct hybrid_topology *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->nr; i++) {
+ zfree(&tp->nodes[i].pmu_name);
+ zfree(&tp->nodes[i].cpus);
+ }
+
+ free(tp);
+}
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
index 6201c3790d86..d9af97177068 100644
--- a/tools/perf/util/cputopo.h
+++ b/tools/perf/util/cputopo.h
@@ -25,10 +25,23 @@ struct numa_topology {
struct numa_topology_node nodes[];
};
+struct hybrid_topology_node {
+ char *pmu_name;
+ char *cpus;
+};
+
+struct hybrid_topology {
+ u32 nr;
+ struct hybrid_topology_node nodes[];
+};
+
struct cpu_topology *cpu_topology__new(void);
void cpu_topology__delete(struct cpu_topology *tp);
struct numa_topology *numa_topology__new(void);
void numa_topology__delete(struct numa_topology *tp);
+struct hybrid_topology *hybrid_topology__new(void);
+void hybrid_topology__delete(struct hybrid_topology *tp);
+
#endif /* __PERF_CPUTOPO_H */
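A minimal sketch of walking the hybrid topology introduced above, assuming <stdio.h> and the usual perf util headers; the printed format is illustrative only (on an Alderlake-style system it would show something like "cpu_core" and "cpu_atom" with their CPU lists):

static void print_hybrid_pmus(void)
{
	struct hybrid_topology *tp = hybrid_topology__new();
	u32 i;

	if (!tp)	/* NULL on non-hybrid systems or on allocation failure */
		return;

	for (i = 0; i < tp->nr; i++)
		printf("%s: %s\n", tp->nodes[i].pmu_name, tp->nodes[i].cpus);

	hybrid_topology__delete(tp);
}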
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 059bcec3f651..3e1a05bc82cc 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -6,6 +6,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
@@ -17,6 +18,7 @@
#include "cs-etm.h"
#include "cs-etm-decoder.h"
+#include "debug.h"
#include "intlist.h"
/* use raw logging */
@@ -276,13 +278,13 @@ cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
const uint8_t trace_chan_id)
{
/* No timestamp packet has been received, nothing to do */
- if (!packet_queue->timestamp)
+ if (!packet_queue->cs_timestamp)
return OCSD_RESP_CONT;
- packet_queue->timestamp = packet_queue->next_timestamp;
+ packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
/* Estimate the timestamp for the next range packet */
- packet_queue->next_timestamp += packet_queue->instr_count;
+ packet_queue->next_cs_timestamp += packet_queue->instr_count;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
@@ -294,7 +296,8 @@ cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
- const uint8_t trace_chan_id)
+ const uint8_t trace_chan_id,
+ const ocsd_trc_index_t indx)
{
struct cs_etm_packet_queue *packet_queue;
@@ -308,20 +311,39 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
* Function do_soft_timestamp() will report the value to the front end,
* hence asking the decoder to keep decoding rather than stopping.
*/
- if (packet_queue->timestamp) {
- packet_queue->next_timestamp = elem->timestamp;
+ if (packet_queue->cs_timestamp) {
+ packet_queue->next_cs_timestamp = elem->timestamp;
return OCSD_RESP_CONT;
}
- /*
- * This is the first timestamp we've seen since the beginning of traces
- * or a discontinuity. Since timestamps packets are generated *after*
- * range packets have been generated, we need to estimate the time at
- * which instructions started by subtracting the number of instructions
- * executed to the timestamp.
- */
- packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
- packet_queue->next_timestamp = elem->timestamp;
+
+ if (!elem->timestamp) {
+ /*
+ * Zero timestamps can be seen due to misconfiguration or hardware bugs.
+ * Warn once, and don't try to subtract instr_count as it would result in an
+ * underflow.
+ */
+ packet_queue->cs_timestamp = 0;
+ WARN_ONCE(true, "Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
+ ". Decoding may be improved with --itrace=Z...\n", indx);
+ } else if (packet_queue->instr_count > elem->timestamp) {
+ /*
+ * Sanity check that elem->timestamp - packet_queue->instr_count would
+ * not result in an underflow; warn and clamp to 0 if it would.
+ */
+ packet_queue->cs_timestamp = 0;
+ pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
+ } else {
+ /*
+ * This is the first timestamp we've seen since the beginning of the
+ * trace or a discontinuity. Since timestamp packets are generated
+ * *after* range packets, estimate the time at which instructions
+ * started by subtracting the number of instructions executed from
+ * the timestamp.
+ */
+ packet_queue->cs_timestamp = elem->timestamp - packet_queue->instr_count;
+ }
+ packet_queue->next_cs_timestamp = elem->timestamp;
packet_queue->instr_count = 0;
/* Tell the front end which traceid_queue needs attention */
@@ -334,8 +356,8 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
- packet_queue->timestamp = 0;
- packet_queue->next_timestamp = 0;
+ packet_queue->cs_timestamp = 0;
+ packet_queue->next_cs_timestamp = 0;
packet_queue->instr_count = 0;
}
@@ -542,7 +564,7 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
const void *context,
- const ocsd_trc_index_t indx __maybe_unused,
+ const ocsd_trc_index_t indx,
const u8 trace_chan_id __maybe_unused,
const ocsd_generic_trace_elem *elem)
{
@@ -579,7 +601,8 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
break;
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
- trace_chan_id);
+ trace_chan_id,
+ indx);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
resp = cs_etm_decoder__set_tid(etmq, packet_queue,
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 7e63e7dedc33..32ad92d3e454 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -38,8 +38,6 @@
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"
-#define MAX_TIMESTAMP (~0ULL)
-
struct cs_etm_auxtrace {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@@ -56,6 +54,7 @@ struct cs_etm_auxtrace {
u8 sample_instructions;
int num_cpu;
+ u64 latest_kernel_timestamp;
u32 auxtrace_type;
u64 branches_sample_type;
u64 branches_id;
@@ -86,7 +85,7 @@ struct cs_etm_queue {
struct cs_etm_decoder *decoder;
struct auxtrace_buffer *buffer;
unsigned int queue_nr;
- u8 pending_timestamp;
+ u8 pending_timestamp_chan_id;
u64 offset;
const unsigned char *buf;
size_t buf_len, buf_used;
@@ -208,7 +207,7 @@ void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
* be more than one channel per cs_etm_queue, we need to specify
* what traceID queue needs servicing.
*/
- etmq->pending_timestamp = trace_chan_id;
+ etmq->pending_timestamp_chan_id = trace_chan_id;
}
static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
@@ -216,22 +215,22 @@ static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
{
struct cs_etm_packet_queue *packet_queue;
- if (!etmq->pending_timestamp)
+ if (!etmq->pending_timestamp_chan_id)
return 0;
if (trace_chan_id)
- *trace_chan_id = etmq->pending_timestamp;
+ *trace_chan_id = etmq->pending_timestamp_chan_id;
packet_queue = cs_etm__etmq_get_packet_queue(etmq,
- etmq->pending_timestamp);
+ etmq->pending_timestamp_chan_id);
if (!packet_queue)
return 0;
/* Acknowledge pending status */
- etmq->pending_timestamp = 0;
+ etmq->pending_timestamp_chan_id = 0;
/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
- return packet_queue->timestamp;
+ return packet_queue->cs_timestamp;
}
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
@@ -814,7 +813,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
int ret = 0;
unsigned int cs_queue_nr;
u8 trace_chan_id;
- u64 timestamp;
+ u64 cs_timestamp;
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
@@ -854,7 +853,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
/*
* Run decoder on the trace block. The decoder will stop when
- * encountering a timestamp, a full packet queue or the end of
+ * encountering a CS timestamp, a full packet queue or the end of
* trace for that block.
*/
ret = cs_etm__decode_data_block(etmq);
@@ -865,10 +864,10 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
* the timestamp calculation for us.
*/
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
/* We found a timestamp, no need to continue. */
- if (timestamp)
+ if (cs_timestamp)
break;
/*
@@ -892,7 +891,7 @@ static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
* queue and will be processed in cs_etm__process_queues().
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
return ret;
}
@@ -1194,6 +1193,8 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
event->sample.header.size = sizeof(struct perf_event_header);
+ if (!etm->timeless_decoding)
+ sample.time = etm->latest_kernel_timestamp;
sample.ip = addr;
sample.pid = tidq->pid;
sample.tid = tidq->tid;
@@ -1250,6 +1251,8 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
event->sample.header.size = sizeof(struct perf_event_header);
+ if (!etm->timeless_decoding)
+ sample.time = etm->latest_kernel_timestamp;
sample.ip = ip;
sample.pid = tidq->pid;
sample.tid = tidq->tid;
@@ -2221,7 +2224,7 @@ static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
int ret = 0;
unsigned int cs_queue_nr, queue_nr;
u8 trace_chan_id;
- u64 timestamp;
+ u64 cs_timestamp;
struct auxtrace_queue *queue;
struct cs_etm_queue *etmq;
struct cs_etm_traceid_queue *tidq;
@@ -2283,9 +2286,9 @@ refetch:
if (ret)
goto out;
- timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
+ cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
- if (!timestamp) {
+ if (!cs_timestamp) {
/*
* Function cs_etm__decode_data_block() returns when
* there is no more traces to decode in the current
@@ -2308,7 +2311,7 @@ refetch:
* this queue/traceID.
*/
cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
- ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
+ ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
}
out:
@@ -2380,7 +2383,7 @@ static int cs_etm__process_event(struct perf_session *session,
struct perf_tool *tool)
{
int err = 0;
- u64 timestamp;
+ u64 sample_kernel_timestamp;
struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
struct cs_etm_auxtrace,
auxtrace);
@@ -2394,16 +2397,21 @@ static int cs_etm__process_event(struct perf_session *session,
}
if (sample->time && (sample->time != (u64) -1))
- timestamp = sample->time;
+ sample_kernel_timestamp = sample->time;
else
- timestamp = 0;
+ sample_kernel_timestamp = 0;
- if (timestamp || etm->timeless_decoding) {
+ if (sample_kernel_timestamp || etm->timeless_decoding) {
err = cs_etm__update_queues(etm);
if (err)
return err;
}
+ /*
+ * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
+ * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
+ * ETM_OPT_CTXTID is not enabled.
+ */
if (etm->timeless_decoding &&
event->header.type == PERF_RECORD_EXIT)
return cs_etm__process_timeless_queues(etm,
@@ -2414,9 +2422,14 @@ static int cs_etm__process_event(struct perf_session *session,
else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
return cs_etm__process_switch_cpu_wide(etm, event);
- if (!etm->timeless_decoding &&
- event->header.type == PERF_RECORD_AUX)
- return cs_etm__process_queues(etm);
+ if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
+ /*
+ * Record the latest kernel timestamp available in the header
+ * for samples so that synthesised samples occur from this point
+ * onwards.
+ */
+ etm->latest_kernel_timestamp = sample_kernel_timestamp;
+ }
return 0;
}
@@ -2464,6 +2477,10 @@ static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
struct evlist *evlist = etm->session->evlist;
bool timeless_decoding = true;
+ /* Override timeless mode with user input from --itrace=Z */
+ if (etm->synth_opts.timeless_decoding)
+ return true;
+
/*
* Circle through the list of event and complain if we find one
* with the time bit set.
@@ -2810,6 +2827,14 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
if (err)
goto err_free_etm;
+ if (session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+ etm->synth_opts.callchain = false;
+ }
+
etm->session = session;
etm->machine = &session->machines.host;
@@ -2854,14 +2879,6 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
return 0;
}
- if (session->itrace_synth_opts->set) {
- etm->synth_opts = *session->itrace_synth_opts;
- } else {
- itrace_synth_opts__set_default(&etm->synth_opts,
- session->itrace_synth_opts->default_no_sample);
- etm->synth_opts.callchain = false;
- }
-
err = cs_etm__synth_events(etm, session);
if (err)
goto err_delete_thread;
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 36428918411e..d65c7b19407d 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -171,8 +171,8 @@ struct cs_etm_packet_queue {
u32 head;
u32 tail;
u32 instr_count;
- u64 timestamp;
- u64 next_timestamp;
+ u64 cs_timestamp;
+ u64 next_cs_timestamp;
struct cs_etm_packet packet_buffer[CS_ETM_PACKET_MAX_BUFFER];
};
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 8fca4779ae6a..a9c102e8e3c0 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -240,11 +240,12 @@ static bool is_dir(struct perf_data *data)
static int open_file_read(struct perf_data *data)
{
+ int flags = data->in_place_update ? O_RDWR : O_RDONLY;
struct stat st;
int fd;
char sbuf[STRERR_BUFSIZE];
- fd = open(data->file.path, O_RDONLY);
+ fd = open(data->file.path, flags);
if (fd < 0) {
int err = errno;
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 62a3e66fbee8..c9de82af5584 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -31,6 +31,7 @@ struct perf_data {
bool is_dir;
bool force;
bool use_stdio;
+ bool in_place_update;
enum perf_data_mode mode;
struct {
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 5cd189172525..e0d4f08839fb 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -343,7 +343,7 @@ static int db_export__threads(struct db_export *dbe, struct thread *thread,
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al, struct addr_location *addr_al)
{
struct thread *thread = al->thread;
struct export_sample es = {
@@ -389,18 +389,14 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
}
}
- if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
- sample_addr_correlates_sym(&evsel->core.attr)) {
- struct addr_location addr_al;
-
- thread__resolve(thread, &addr_al, sample);
- err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
+ if (addr_al) {
+ err = db_ids_from_al(dbe, addr_al, &es.addr_dso_db_id,
&es.addr_sym_db_id, &es.addr_offset);
if (err)
goto out_put;
if (dbe->crp) {
err = thread_stack__process(thread, comm, sample, al,
- &addr_al, es.db_id,
+ addr_al, es.db_id,
dbe->crp);
if (err)
goto out_put;
diff --git a/tools/perf/util/db-export.h b/tools/perf/util/db-export.h
index 9c3d38f5a40d..23983cb35706 100644
--- a/tools/perf/util/db-export.h
+++ b/tools/perf/util/db-export.h
@@ -97,7 +97,7 @@ int db_export__branch_type(struct db_export *dbe, u32 branch_type,
const char *name);
int db_export__sample(struct db_export *dbe, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
- struct addr_location *al);
+ struct addr_location *al, struct addr_location *addr_al);
int db_export__branch_types(struct db_export *dbe);
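With the extra parameter, resolving the data address now happens in the caller. A sketch of the expected call pattern, mirroring the code removed from db_export__sample() above; the wrapper name is hypothetical and error handling is trimmed:

static int export_one_sample(struct db_export *dbe, union perf_event *event,
			     struct perf_sample *sample, struct evsel *evsel,
			     struct addr_location *al)
{
	struct addr_location addr_al, *addr_al_ptr = NULL;

	/* Resolve the data address once, only when it correlates to a symbol */
	if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
	    sample_addr_correlates_sym(&evsel->core.attr)) {
		thread__resolve(al->thread, &addr_al, sample);
		addr_al_ptr = &addr_al;
	}

	return db_export__sample(dbe, event, sample, evsel, al, addr_al_ptr);
}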
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
new file mode 100644
index 000000000000..ca33fbc5efde
--- /dev/null
+++ b/tools/perf/util/dlfilter.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dlfilter.c: Interface to perf script --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <subcmd/exec-cmd.h>
+#include <linux/zalloc.h>
+#include <linux/build_bug.h>
+
+#include "debug.h"
+#include "event.h"
+#include "evsel.h"
+#include "dso.h"
+#include "map.h"
+#include "thread.h"
+#include "trace-event.h"
+#include "symbol.h"
+#include "srcline.h"
+#include "dlfilter.h"
+#include "perf_dlfilter.h"
+
+static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
+{
+ struct symbol *sym = al->sym;
+
+ d_al->size = sizeof(*d_al);
+ if (al->map) {
+ struct dso *dso = al->map->dso;
+
+ if (symbol_conf.show_kernel_path && dso->long_name)
+ d_al->dso = dso->long_name;
+ else
+ d_al->dso = dso->name;
+ d_al->is_64_bit = dso->is_64_bit;
+ d_al->buildid_size = dso->bid.size;
+ d_al->buildid = dso->bid.data;
+ } else {
+ d_al->dso = NULL;
+ d_al->is_64_bit = 0;
+ d_al->buildid_size = 0;
+ d_al->buildid = NULL;
+ }
+ if (sym) {
+ d_al->sym = sym->name;
+ d_al->sym_start = sym->start;
+ d_al->sym_end = sym->end;
+ if (al->addr < sym->end)
+ d_al->symoff = al->addr - sym->start;
+ else
+ d_al->symoff = al->addr - al->map->start - sym->start;
+ d_al->sym_binding = sym->binding;
+ } else {
+ d_al->sym = NULL;
+ d_al->sym_start = 0;
+ d_al->sym_end = 0;
+ d_al->symoff = 0;
+ d_al->sym_binding = 0;
+ }
+ d_al->addr = al->addr;
+ d_al->comm = NULL;
+ d_al->filtered = 0;
+}
+
+static struct addr_location *get_al(struct dlfilter *d)
+{
+ struct addr_location *al = d->al;
+
+ if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
+ return NULL;
+ return al;
+}
+
+static struct thread *get_thread(struct dlfilter *d)
+{
+ struct addr_location *al = get_al(d);
+
+ return al ? al->thread : NULL;
+}
+
+static const struct perf_dlfilter_al *dlfilter__resolve_ip(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al *d_al = d->d_ip_al;
+ struct addr_location *al;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ /* 'size' is also used to indicate already initialized */
+ if (d_al->size)
+ return d_al;
+
+ al = get_al(d);
+ if (!al)
+ return NULL;
+
+ al_to_d_al(al, d_al);
+
+ d_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->ip);
+ d_al->comm = al->thread ? thread__comm_str(al->thread) : ":-1";
+ d_al->filtered = al->filtered;
+
+ return d_al;
+}
+
+static const struct perf_dlfilter_al *dlfilter__resolve_addr(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al *d_addr_al = d->d_addr_al;
+ struct addr_location *addr_al = d->addr_al;
+
+ if (!d->ctx_valid || !d->d_sample->addr_correlates_sym)
+ return NULL;
+
+ /* 'size' is also used to indicate already initialized */
+ if (d_addr_al->size)
+ return d_addr_al;
+
+ if (!addr_al->thread) {
+ struct thread *thread = get_thread(d);
+
+ if (!thread)
+ return NULL;
+ thread__resolve(thread, addr_al, d->sample);
+ }
+
+ al_to_d_al(addr_al, d_addr_al);
+
+ d_addr_al->is_kernel_ip = machine__kernel_ip(d->machine, d->sample->addr);
+
+ return d_addr_al;
+}
+
+static char **dlfilter__args(void *ctx, int *dlargc)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (dlargc)
+ *dlargc = 0;
+ else
+ return NULL;
+
+ if (!d->ctx_valid && !d->in_start && !d->in_stop)
+ return NULL;
+
+ *dlargc = d->dlargc;
+ return d->dlargv;
+}
+
+static __s32 dlfilter__resolve_address(void *ctx, __u64 address, struct perf_dlfilter_al *d_al_p)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct perf_dlfilter_al d_al;
+ struct addr_location al;
+ struct thread *thread;
+ __u32 sz;
+
+ if (!d->ctx_valid || !d_al_p)
+ return -1;
+
+ thread = get_thread(d);
+ if (!thread)
+ return -1;
+
+ thread__find_symbol_fb(thread, d->sample->cpumode, address, &al);
+
+ al_to_d_al(&al, &d_al);
+
+ d_al.is_kernel_ip = machine__kernel_ip(d->machine, address);
+
+ sz = d_al_p->size;
+ memcpy(d_al_p, &d_al, min((size_t)sz, sizeof(d_al)));
+ d_al_p->size = sz;
+
+ return 0;
+}
+
+static const __u8 *dlfilter__insn(void *ctx, __u32 *len)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (!len)
+ return NULL;
+
+ *len = 0;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ if (d->sample->ip && !d->sample->insn_len) {
+ struct addr_location *al = d->al;
+
+ if (!al->thread && machine__resolve(d->machine, al, d->sample) < 0)
+ return NULL;
+
+ if (al->thread->maps && al->thread->maps->machine)
+ script_fetch_insn(d->sample, al->thread, al->thread->maps->machine);
+ }
+
+ if (!d->sample->insn_len)
+ return NULL;
+
+ *len = d->sample->insn_len;
+
+ return (__u8 *)d->sample->insn;
+}
+
+static const char *dlfilter__srcline(void *ctx, __u32 *line_no)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct addr_location *al;
+ unsigned int line = 0;
+ char *srcfile = NULL;
+ struct map *map;
+ u64 addr;
+
+ if (!d->ctx_valid || !line_no)
+ return NULL;
+
+ al = get_al(d);
+ if (!al)
+ return NULL;
+
+ map = al->map;
+ addr = al->addr;
+
+ if (map && map->dso)
+ srcfile = get_srcline_split(map->dso, map__rip_2objdump(map, addr), &line);
+
+ *line_no = line;
+ return srcfile;
+}
+
+static struct perf_event_attr *dlfilter__attr(void *ctx)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+
+ if (!d->ctx_valid)
+ return NULL;
+
+ return &d->evsel->core.attr;
+}
+
+static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
+{
+ struct dlfilter *d = (struct dlfilter *)ctx;
+ struct addr_location *al;
+ struct addr_location a;
+ struct map *map;
+ u64 offset;
+
+ if (!d->ctx_valid)
+ return -1;
+
+ al = get_al(d);
+ if (!al)
+ return -1;
+
+ map = al->map;
+
+ if (map && ip >= map->start && ip < map->end &&
+ machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
+ goto have_map;
+
+ thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
+ if (!a.map)
+ return -1;
+
+ map = a.map;
+have_map:
+ offset = map->map_ip(map, ip);
+ if (ip + len >= map->end)
+ len = map->end - ip;
+ return dso__data_read_offset(map->dso, d->machine, offset, buf, len);
+}
+
+static const struct perf_dlfilter_fns perf_dlfilter_fns = {
+ .resolve_ip = dlfilter__resolve_ip,
+ .resolve_addr = dlfilter__resolve_addr,
+ .args = dlfilter__args,
+ .resolve_address = dlfilter__resolve_address,
+ .insn = dlfilter__insn,
+ .srcline = dlfilter__srcline,
+ .attr = dlfilter__attr,
+ .object_code = dlfilter__object_code,
+};
+
+static char *find_dlfilter(const char *file)
+{
+ char path[PATH_MAX];
+ char *exec_path;
+
+ if (strchr(file, '/'))
+ goto out;
+
+ if (!access(file, R_OK)) {
+ /*
+ * Prepend "./" so that dlopen will find the file in the
+ * current directory.
+ */
+ snprintf(path, sizeof(path), "./%s", file);
+ file = path;
+ goto out;
+ }
+
+ exec_path = get_argv_exec_path();
+ if (!exec_path)
+ goto out;
+ snprintf(path, sizeof(path), "%s/dlfilters/%s", exec_path, file);
+ free(exec_path);
+ if (!access(path, R_OK))
+ file = path;
+out:
+ return strdup(file);
+}
+
+#define CHECK_FLAG(x) BUILD_BUG_ON((u64)PERF_DLFILTER_FLAG_ ## x != (u64)PERF_IP_FLAG_ ## x)
+
+static int dlfilter__init(struct dlfilter *d, const char *file, int dlargc, char **dlargv)
+{
+ CHECK_FLAG(BRANCH);
+ CHECK_FLAG(CALL);
+ CHECK_FLAG(RETURN);
+ CHECK_FLAG(CONDITIONAL);
+ CHECK_FLAG(SYSCALLRET);
+ CHECK_FLAG(ASYNC);
+ CHECK_FLAG(INTERRUPT);
+ CHECK_FLAG(TX_ABORT);
+ CHECK_FLAG(TRACE_BEGIN);
+ CHECK_FLAG(TRACE_END);
+ CHECK_FLAG(IN_TX);
+ CHECK_FLAG(VMENTRY);
+ CHECK_FLAG(VMEXIT);
+
+ memset(d, 0, sizeof(*d));
+ d->file = find_dlfilter(file);
+ if (!d->file)
+ return -1;
+ d->dlargc = dlargc;
+ d->dlargv = dlargv;
+ return 0;
+}
+
+static void dlfilter__exit(struct dlfilter *d)
+{
+ zfree(&d->file);
+}
+
+static int dlfilter__open(struct dlfilter *d)
+{
+ d->handle = dlopen(d->file, RTLD_NOW);
+ if (!d->handle) {
+ pr_err("dlopen failed for: '%s'\n", d->file);
+ return -1;
+ }
+ d->start = dlsym(d->handle, "start");
+ d->filter_event = dlsym(d->handle, "filter_event");
+ d->filter_event_early = dlsym(d->handle, "filter_event_early");
+ d->stop = dlsym(d->handle, "stop");
+ d->fns = dlsym(d->handle, "perf_dlfilter_fns");
+ if (d->fns)
+ memcpy(d->fns, &perf_dlfilter_fns, sizeof(struct perf_dlfilter_fns));
+ return 0;
+}
+
+static int dlfilter__close(struct dlfilter *d)
+{
+ return dlclose(d->handle);
+}
+
+struct dlfilter *dlfilter__new(const char *file, int dlargc, char **dlargv)
+{
+ struct dlfilter *d = malloc(sizeof(*d));
+
+ if (!d)
+ return NULL;
+
+ if (dlfilter__init(d, file, dlargc, dlargv))
+ goto err_free;
+
+ if (dlfilter__open(d))
+ goto err_exit;
+
+ return d;
+
+err_exit:
+ dlfilter__exit(d);
+err_free:
+ free(d);
+ return NULL;
+}
+
+static void dlfilter__free(struct dlfilter *d)
+{
+ if (d) {
+ dlfilter__exit(d);
+ free(d);
+ }
+}
+
+int dlfilter__start(struct dlfilter *d, struct perf_session *session)
+{
+ if (d) {
+ d->session = session;
+ if (d->start) {
+ int ret;
+
+ d->in_start = true;
+ ret = d->start(&d->data, d);
+ d->in_start = false;
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int dlfilter__stop(struct dlfilter *d)
+{
+ if (d && d->stop) {
+ int ret;
+
+ d->in_stop = true;
+ ret = d->stop(d->data, d);
+ d->in_stop = false;
+ return ret;
+ }
+ return 0;
+}
+
+void dlfilter__cleanup(struct dlfilter *d)
+{
+ if (d) {
+ dlfilter__stop(d);
+ dlfilter__close(d);
+ dlfilter__free(d);
+ }
+}
+
+#define ASSIGN(x) d_sample.x = sample->x
+
+int dlfilter__do_filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ bool early)
+{
+ struct perf_dlfilter_sample d_sample;
+ struct perf_dlfilter_al d_ip_al;
+ struct perf_dlfilter_al d_addr_al;
+ int ret;
+
+ d->event = event;
+ d->sample = sample;
+ d->evsel = evsel;
+ d->machine = machine;
+ d->al = al;
+ d->addr_al = addr_al;
+ d->d_sample = &d_sample;
+ d->d_ip_al = &d_ip_al;
+ d->d_addr_al = &d_addr_al;
+
+ d_sample.size = sizeof(d_sample);
+ d_ip_al.size = 0; /* To indicate d_ip_al is not initialized */
+ d_addr_al.size = 0; /* To indicate d_addr_al is not initialized */
+
+ ASSIGN(ip);
+ ASSIGN(pid);
+ ASSIGN(tid);
+ ASSIGN(time);
+ ASSIGN(addr);
+ ASSIGN(id);
+ ASSIGN(stream_id);
+ ASSIGN(period);
+ ASSIGN(weight);
+ ASSIGN(ins_lat);
+ ASSIGN(p_stage_cyc);
+ ASSIGN(transaction);
+ ASSIGN(insn_cnt);
+ ASSIGN(cyc_cnt);
+ ASSIGN(cpu);
+ ASSIGN(flags);
+ ASSIGN(data_src);
+ ASSIGN(phys_addr);
+ ASSIGN(data_page_size);
+ ASSIGN(code_page_size);
+ ASSIGN(cgroup);
+ ASSIGN(cpumode);
+ ASSIGN(misc);
+ ASSIGN(raw_size);
+ ASSIGN(raw_data);
+
+ if (sample->branch_stack) {
+ d_sample.brstack_nr = sample->branch_stack->nr;
+ d_sample.brstack = (struct perf_branch_entry *)perf_sample__branch_entries(sample);
+ } else {
+ d_sample.brstack_nr = 0;
+ d_sample.brstack = NULL;
+ }
+
+ if (sample->callchain) {
+ d_sample.raw_callchain_nr = sample->callchain->nr;
+ d_sample.raw_callchain = (__u64 *)sample->callchain->ips;
+ } else {
+ d_sample.raw_callchain_nr = 0;
+ d_sample.raw_callchain = NULL;
+ }
+
+ d_sample.addr_correlates_sym =
+ (evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
+ sample_addr_correlates_sym(&evsel->core.attr);
+
+ d_sample.event = evsel__name(evsel);
+
+ d->ctx_valid = true;
+
+ if (early)
+ ret = d->filter_event_early(d->data, &d_sample, d);
+ else
+ ret = d->filter_event(d->data, &d_sample, d);
+
+ d->ctx_valid = false;
+
+ return ret;
+}
+
+static bool get_filter_desc(const char *dirname, const char *name,
+ char **desc, char **long_desc)
+{
+ char path[PATH_MAX];
+ void *handle;
+ const char *(*desc_fn)(const char **long_description);
+
+ snprintf(path, sizeof(path), "%s/%s", dirname, name);
+ handle = dlopen(path, RTLD_NOW);
+ if (!handle || !(dlsym(handle, "filter_event") || dlsym(handle, "filter_event_early")))
+ return false;
+ desc_fn = dlsym(handle, "filter_description");
+ if (desc_fn) {
+ const char *dsc;
+ const char *long_dsc;
+
+ dsc = desc_fn(&long_dsc);
+ if (dsc)
+ *desc = strdup(dsc);
+ if (long_dsc)
+ *long_desc = strdup(long_dsc);
+ }
+ dlclose(handle);
+ return true;
+}
+
+static void list_filters(const char *dirname)
+{
+ struct dirent *entry;
+ DIR *dir;
+
+ dir = opendir(dirname);
+ if (!dir)
+ return;
+
+ while ((entry = readdir(dir)) != NULL) {
+ size_t n = strlen(entry->d_name);
+ char *long_desc = NULL;
+ char *desc = NULL;
+
+ if (entry->d_type == DT_DIR || n < 4 ||
+ strcmp(".so", entry->d_name + n - 3))
+ continue;
+ if (!get_filter_desc(dirname, entry->d_name, &desc, &long_desc))
+ continue;
+ printf(" %-36s %s\n", entry->d_name, desc ? desc : "");
+ if (verbose) {
+ char *p = long_desc;
+ char *line;
+
+ while ((line = strsep(&p, "\n")) != NULL)
+ printf("%39s%s\n", "", line);
+ }
+ free(long_desc);
+ free(desc);
+ }
+
+ closedir(dir);
+}
+
+int list_available_dlfilters(const struct option *opt __maybe_unused,
+ const char *s __maybe_unused,
+ int unset __maybe_unused)
+{
+ char path[PATH_MAX];
+ char *exec_path;
+
+ printf("List of available dlfilters:\n");
+
+ list_filters(".");
+
+ exec_path = get_argv_exec_path();
+ if (!exec_path)
+ goto out;
+ snprintf(path, sizeof(path), "%s/dlfilters", exec_path);
+
+ list_filters(path);
+
+ free(exec_path);
+out:
+ exit(0);
+}
diff --git a/tools/perf/util/dlfilter.h b/tools/perf/util/dlfilter.h
new file mode 100644
index 000000000000..505980442360
--- /dev/null
+++ b/tools/perf/util/dlfilter.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dlfilter.h: Interface to perf script --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#ifndef PERF_UTIL_DLFILTER_H
+#define PERF_UTIL_DLFILTER_H
+
+struct perf_session;
+union perf_event;
+struct perf_sample;
+struct evsel;
+struct machine;
+struct addr_location;
+struct perf_dlfilter_fns;
+struct perf_dlfilter_sample;
+struct perf_dlfilter_al;
+
+struct dlfilter {
+ char *file;
+ void *handle;
+ void *data;
+ struct perf_session *session;
+ bool ctx_valid;
+ bool in_start;
+ bool in_stop;
+ int dlargc;
+ char **dlargv;
+
+ union perf_event *event;
+ struct perf_sample *sample;
+ struct evsel *evsel;
+ struct machine *machine;
+ struct addr_location *al;
+ struct addr_location *addr_al;
+ struct perf_dlfilter_sample *d_sample;
+ struct perf_dlfilter_al *d_ip_al;
+ struct perf_dlfilter_al *d_addr_al;
+
+ int (*start)(void **data, void *ctx);
+ int (*stop)(void *data, void *ctx);
+
+ int (*filter_event)(void *data,
+ const struct perf_dlfilter_sample *sample,
+ void *ctx);
+ int (*filter_event_early)(void *data,
+ const struct perf_dlfilter_sample *sample,
+ void *ctx);
+
+ struct perf_dlfilter_fns *fns;
+};
+
+struct dlfilter *dlfilter__new(const char *file, int dlargc, char **dlargv);
+
+int dlfilter__start(struct dlfilter *d, struct perf_session *session);
+
+int dlfilter__do_filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ bool early);
+
+void dlfilter__cleanup(struct dlfilter *d);
+
+static inline int dlfilter__filter_event(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ if (!d || !d->filter_event)
+ return 0;
+ return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, false);
+}
+
+static inline int dlfilter__filter_event_early(struct dlfilter *d,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ if (!d || !d->filter_event_early)
+ return 0;
+ return dlfilter__do_filter_event(d, event, sample, evsel, machine, al, addr_al, true);
+}
+
+int list_available_dlfilters(const struct option *opt, const char *s, int unset);
+
+#endif
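Putting the two new files together, a toy filter might look like the sketch below. The header location, the build command and the return convention (non-zero assumed to drop the sample) are assumptions, not taken from this patch; the callback names and signatures follow the loader code above.

/* my-filter.c: toy "perf script --dlfilter" plugin (sketch) */
#include <perf/perf_dlfilter.h>		/* install location is an assumption */

/* perf copies its callback table in here at load time, see dlfilter__open() */
struct perf_dlfilter_fns perf_dlfilter_fns;

int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
	const struct perf_dlfilter_al *al = perf_dlfilter_fns.resolve_ip(ctx);

	(void)data;
	(void)sample;
	/* Assumed convention: non-zero drops the sample, so kernel IPs are filtered out */
	return al && al->is_kernel_ip;
}

const char *filter_description(const char **long_description)
{
	*long_description = "Drops samples whose IP resolves to the kernel";
	return "kernel-ip filter (sketch)";
}

It would then be built as a shared object (for example with something like "cc -shared -fPIC -o my-filter.so my-filter.c") and loaded with "perf script --dlfilter my-filter.so"; find_dlfilter() above takes care of prepending "./" or falling back to the installed dlfilters directory.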
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index bc5e4f294e9e..ebc5e9ad35db 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -203,6 +203,18 @@ void perf_env__exit(struct perf_env *env)
for (i = 0; i < env->nr_memory_nodes; i++)
zfree(&env->memory_nodes[i].set);
zfree(&env->memory_nodes);
+
+ for (i = 0; i < env->nr_hybrid_nodes; i++) {
+ zfree(&env->hybrid_nodes[i].pmu_name);
+ zfree(&env->hybrid_nodes[i].cpus);
+ }
+ zfree(&env->hybrid_nodes);
+
+ for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
+ zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
+ zfree(&env->hybrid_cpc_nodes[i].pmu_name);
+ }
+ zfree(&env->hybrid_cpc_nodes);
}
void perf_env__init(struct perf_env *env __maybe_unused)
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index ca249bf5e984..6824a7423a2d 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -37,6 +37,18 @@ struct memory_node {
unsigned long *set;
};
+struct hybrid_node {
+ char *pmu_name;
+ char *cpus;
+};
+
+struct hybrid_cpc_node {
+ int nr_cpu_pmu_caps;
+ unsigned int max_branches;
+ char *cpu_pmu_caps;
+ char *pmu_name;
+};
+
struct perf_env {
char *hostname;
char *os_release;
@@ -59,6 +71,8 @@ struct perf_env {
int nr_pmu_mappings;
int nr_groups;
int nr_cpu_pmu_caps;
+ int nr_hybrid_nodes;
+ int nr_hybrid_cpc_nodes;
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
@@ -77,6 +91,8 @@ struct perf_env {
struct numa_node *numa_nodes;
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
+ struct hybrid_node *hybrid_nodes;
+ struct hybrid_cpc_node *hybrid_cpc_nodes;
#ifdef HAVE_LIBBPF_SUPPORT
/*
* bpf_info_lock protects bpf rbtrees. This is needed because the
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6ea3e677dc1e..6ba9664089bd 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -2161,3 +2161,28 @@ int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
return printed;
}
+
+void evlist__check_mem_load_aux(struct evlist *evlist)
+{
+ struct evsel *leader, *evsel, *pos;
+
+ /*
+ * On some platforms the 'mem-loads' event must be used together
+ * with 'mem-loads-aux' within a group, with 'mem-loads-aux' as the
+ * group leader. Dissolve such a group before reporting, because
+ * 'mem-loads-aux' is only an auxiliary event and doesn't carry any
+ * valid memory load information.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ leader = evsel->leader;
+ if (leader == evsel)
+ continue;
+
+ if (leader->name && strstr(leader->name, "mem-loads-aux")) {
+ for_each_group_evsel(pos, leader) {
+ pos->leader = pos;
+ pos->core.nr_members = 0;
+ }
+ }
+ }
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a8b97b50cceb..2073cfa79f79 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -367,4 +367,5 @@ int evlist__ctlfd_ack(struct evlist *evlist);
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
+void evlist__check_mem_load_aux(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
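A short sketch of where the new helper is meant to be called, based on the comment in evlist.c above; the wrapper name and the use of session->evlist are illustrative:

static void prepare_mem_report(struct perf_session *session)
{
	/* Dissolve 'mem-loads-aux' led groups before report output */
	evlist__check_mem_load_aux(session->evlist);
}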
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a8d8463f8ee5..b1c930eca40f 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1582,6 +1582,27 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale)
return 0;
}
+static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
+ int cpu)
+{
+ int cpuid;
+
+ cpuid = perf_cpu_map__cpu(evsel->core.cpus, cpu);
+ return perf_cpu_map__idx(other->core.cpus, cpuid);
+}
+
+static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu)
+{
+ struct evsel *leader = evsel->leader;
+
+ if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
+ (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
+ return evsel__match_other_cpu(evsel, leader, cpu);
+ }
+
+ return cpu;
+}
+
static int get_group_fd(struct evsel *evsel, int cpu, int thread)
{
struct evsel *leader = evsel->leader;
@@ -1596,6 +1617,10 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
*/
BUG_ON(!leader->core.fd);
+ cpu = evsel__hybrid_group_cpu(evsel, cpu);
+ if (cpu == -1)
+ return -1;
+
fd = FD(leader, cpu, thread);
BUG_ON(fd == -1);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index aa1e42518d37..0158d2945bab 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -49,6 +49,7 @@
#include "cputopo.h"
#include "bpf-event.h"
#include "clockid.h"
+#include "pmu-hybrid.h"
#include <linux/ctype.h>
#include <internal/lib.h>
@@ -932,6 +933,40 @@ static int write_clock_data(struct feat_fd *ff,
return do_write(ff, data64, sizeof(*data64));
}
+static int write_hybrid_topology(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct hybrid_topology *tp;
+ int ret;
+ u32 i;
+
+ tp = hybrid_topology__new();
+ if (!tp)
+ return -ENOENT;
+
+ ret = do_write(ff, &tp->nr, sizeof(u32));
+ if (ret < 0)
+ goto err;
+
+ for (i = 0; i < tp->nr; i++) {
+ struct hybrid_topology_node *n = &tp->nodes[i];
+
+ ret = do_write_string(ff, n->pmu_name);
+ if (ret < 0)
+ goto err;
+
+ ret = do_write_string(ff, n->cpus);
+ if (ret < 0)
+ goto err;
+ }
+
+ ret = 0;
+
+err:
+ hybrid_topology__delete(tp);
+ return ret;
+}
+
static int write_dir_format(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
{
@@ -1425,18 +1460,14 @@ static int write_compressed(struct feat_fd *ff __maybe_unused,
return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
-static int write_cpu_pmu_caps(struct feat_fd *ff,
- struct evlist *evlist __maybe_unused)
+static int write_per_cpu_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
+ bool write_pmu)
{
- struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
struct perf_pmu_caps *caps = NULL;
int nr_caps;
int ret;
- if (!cpu_pmu)
- return -ENOENT;
-
- nr_caps = perf_pmu__caps_parse(cpu_pmu);
+ nr_caps = perf_pmu__caps_parse(pmu);
if (nr_caps < 0)
return nr_caps;
@@ -1444,7 +1475,7 @@ static int write_cpu_pmu_caps(struct feat_fd *ff,
if (ret < 0)
return ret;
- list_for_each_entry(caps, &cpu_pmu->caps, list) {
+ list_for_each_entry(caps, &pmu->caps, list) {
ret = do_write_string(ff, caps->name);
if (ret < 0)
return ret;
@@ -1454,9 +1485,49 @@ static int write_cpu_pmu_caps(struct feat_fd *ff,
return ret;
}
+ if (write_pmu) {
+ ret = do_write_string(ff, pmu->name);
+ if (ret < 0)
+ return ret;
+ }
+
return ret;
}
+static int write_cpu_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
+
+ if (!cpu_pmu)
+ return -ENOENT;
+
+ return write_per_cpu_pmu_caps(ff, cpu_pmu, false);
+}
+
+static int write_hybrid_cpu_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct perf_pmu *pmu;
+ u32 nr_pmu = perf_pmu__hybrid_pmu_num();
+ int ret;
+
+ if (nr_pmu == 0)
+ return -ENOENT;
+
+ ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
+ if (ret < 0)
+ return ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ ret = write_per_cpu_pmu_caps(ff, pmu, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1623,6 +1694,18 @@ static void print_clock_data(struct feat_fd *ff, FILE *fp)
clockid_name(clockid));
}
+static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
+{
+ int i;
+ struct hybrid_node *n;
+
+ fprintf(fp, "# hybrid cpu system:\n");
+ for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
+ n = &ff->ph->env.hybrid_nodes[i];
+ fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
+ }
+}
+
static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
struct perf_session *session;
@@ -1916,18 +1999,28 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
-static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+static void print_per_cpu_pmu_caps(FILE *fp, int nr_caps, char *cpu_pmu_caps,
+ char *pmu_name)
{
- const char *delimiter = "# cpu pmu capabilities: ";
- u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
- char *str;
+ const char *delimiter;
+ char *str, buf[128];
if (!nr_caps) {
- fprintf(fp, "# cpu pmu capabilities: not available\n");
+ if (!pmu_name)
+ fprintf(fp, "# cpu pmu capabilities: not available\n");
+ else
+ fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
return;
}
- str = ff->ph->env.cpu_pmu_caps;
+ if (!pmu_name)
+ scnprintf(buf, sizeof(buf), "# cpu pmu capabilities: ");
+ else
+ scnprintf(buf, sizeof(buf), "# %s pmu capabilities: ", pmu_name);
+
+ delimiter = buf;
+
+ str = cpu_pmu_caps;
while (nr_caps--) {
fprintf(fp, "%s%s", delimiter, str);
delimiter = ", ";
@@ -1937,6 +2030,24 @@ static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
fprintf(fp, "\n");
}
+static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ print_per_cpu_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
+ ff->ph->env.cpu_pmu_caps, NULL);
+}
+
+static void print_hybrid_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ struct hybrid_cpc_node *n;
+
+ for (int i = 0; i < ff->ph->env.nr_hybrid_cpc_nodes; i++) {
+ n = &ff->ph->env.hybrid_cpc_nodes[i];
+ print_per_cpu_pmu_caps(fp, n->nr_cpu_pmu_caps,
+ n->cpu_pmu_caps,
+ n->pmu_name);
+ }
+}
+
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
@@ -2849,6 +2960,46 @@ static int process_clock_data(struct feat_fd *ff,
return 0;
}
+static int process_hybrid_topology(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ struct hybrid_node *nodes, *n;
+ u32 nr, i;
+
+ /* nr nodes */
+ if (do_read_u32(ff, &nr))
+ return -1;
+
+ nodes = zalloc(sizeof(*nodes) * nr);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nr; i++) {
+ n = &nodes[i];
+
+ n->pmu_name = do_read_string(ff);
+ if (!n->pmu_name)
+ goto error;
+
+ n->cpus = do_read_string(ff);
+ if (!n->cpus)
+ goto error;
+ }
+
+ ff->ph->env.nr_hybrid_nodes = nr;
+ ff->ph->env.hybrid_nodes = nodes;
+ return 0;
+
+error:
+ for (i = 0; i < nr; i++) {
+ free(nodes[i].pmu_name);
+ free(nodes[i].cpus);
+ }
+
+ free(nodes);
+ return -1;
+}
+
static int process_dir_format(struct feat_fd *ff,
void *_data __maybe_unused)
{
@@ -3002,8 +3153,9 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
-static int process_cpu_pmu_caps(struct feat_fd *ff,
- void *data __maybe_unused)
+static int process_per_cpu_pmu_caps(struct feat_fd *ff, int *nr_cpu_pmu_caps,
+ char **cpu_pmu_caps,
+ unsigned int *max_branches)
{
char *name, *value;
struct strbuf sb;
@@ -3017,7 +3169,7 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
return 0;
}
- ff->ph->env.nr_cpu_pmu_caps = nr_caps;
+ *nr_cpu_pmu_caps = nr_caps;
if (strbuf_init(&sb, 128) < 0)
return -1;
@@ -3039,12 +3191,12 @@ static int process_cpu_pmu_caps(struct feat_fd *ff,
goto free_value;
if (!strcmp(name, "branches"))
- ff->ph->env.max_branches = atoi(value);
+ *max_branches = atoi(value);
free(value);
free(name);
}
- ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ *cpu_pmu_caps = strbuf_detach(&sb, NULL);
return 0;
free_value:
@@ -3056,6 +3208,63 @@ error:
return -1;
}
+static int process_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ return process_per_cpu_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps,
+ &ff->ph->env.cpu_pmu_caps,
+ &ff->ph->env.max_branches);
+}
+
+static int process_hybrid_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ struct hybrid_cpc_node *nodes;
+ u32 nr_pmu, i;
+ int ret;
+
+ if (do_read_u32(ff, &nr_pmu))
+ return -1;
+
+ if (!nr_pmu) {
+ pr_debug("hybrid cpu pmu capabilities not available\n");
+ return 0;
+ }
+
+ nodes = zalloc(sizeof(*nodes) * nr_pmu);
+ if (!nodes)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_pmu; i++) {
+ struct hybrid_cpc_node *n = &nodes[i];
+
+ ret = process_per_cpu_pmu_caps(ff, &n->nr_cpu_pmu_caps,
+ &n->cpu_pmu_caps,
+ &n->max_branches);
+ if (ret)
+ goto err;
+
+ n->pmu_name = do_read_string(ff);
+ if (!n->pmu_name) {
+ ret = -1;
+ goto err;
+ }
+ }
+
+ ff->ph->env.nr_hybrid_cpc_nodes = nr_pmu;
+ ff->ph->env.hybrid_cpc_nodes = nodes;
+ return 0;
+
+err:
+ for (i = 0; i < nr_pmu; i++) {
+ free(nodes[i].cpu_pmu_caps);
+ free(nodes[i].pmu_name);
+ }
+
+ free(nodes);
+ return ret;
+}
+
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -3117,6 +3326,8 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(COMPRESSED, compressed, false),
FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
FEAT_OPR(CLOCK_DATA, clock_data, false),
+ FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
+ FEAT_OPR(HYBRID_CPU_PMU_CAPS, hybrid_cpu_pmu_caps, false),
};
struct header_print_data {
@@ -3814,6 +4025,11 @@ int perf_session__read_header(struct perf_session *session)
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
+ if (header->needs_swap && data->in_place_update) {
+ pr_err("In-place update not supported when byte-swapping is required\n");
+ return -EINVAL;
+ }
+
/*
* Sanity check that perf.data was written cleanly; data size is
* initialized to 0 and updated only if the on_exit function is run.
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 2aca71763ecf..ae6b1cf19a7d 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -45,6 +45,8 @@ enum {
HEADER_COMPRESSED,
HEADER_CPU_PMU_CAPS,
HEADER_CLOCK_DATA,
+ HEADER_HYBRID_TOPOLOGY,
+ HEADER_HYBRID_CPU_PMU_CAPS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 20ad663978cc..cb2520abf261 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -35,6 +35,10 @@
#define BIT63 (((uint64_t)1 << 63))
+#define SEVEN_BYTES 0xffffffffffffffULL
+
+#define NO_VMCS 0xffffffffffULL
+
#define INTEL_PT_RETURN 1
/* Maximum number of loops with no packets consumed i.e. stuck in a loop */
@@ -51,6 +55,11 @@ struct intel_pt_stack {
int pos;
};
+enum intel_pt_p_once {
+ INTEL_PT_PRT_ONCE_UNK_VMCS,
+ INTEL_PT_PRT_ONCE_ERANGE,
+};
+
enum intel_pt_pkt_state {
INTEL_PT_STATE_NO_PSB,
INTEL_PT_STATE_NO_IP,
@@ -64,6 +73,7 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_FUP_NO_TIP,
INTEL_PT_STATE_FUP_IN_PSB,
INTEL_PT_STATE_RESAMPLE,
+ INTEL_PT_STATE_VM_TIME_CORRELATION,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
@@ -75,6 +85,7 @@ static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
case INTEL_PT_STATE_IN_SYNC:
case INTEL_PT_STATE_TNT_CONT:
case INTEL_PT_STATE_RESAMPLE:
+ case INTEL_PT_STATE_VM_TIME_CORRELATION:
return true;
case INTEL_PT_STATE_TNT:
case INTEL_PT_STATE_TIP:
@@ -107,6 +118,7 @@ struct intel_pt_decoder {
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
+ struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
void *data;
struct intel_pt_state state;
const unsigned char *buf;
@@ -122,6 +134,11 @@ struct intel_pt_decoder {
bool in_psb;
bool hop;
bool leap;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ bool vm_tm_corr_reliable;
+ bool vm_tm_corr_same_buf;
+ bool vm_tm_corr_continuous;
bool nr;
bool next_nr;
enum intel_pt_param_flags flags;
@@ -139,6 +156,11 @@ struct intel_pt_decoder {
uint64_t ctc_delta;
uint64_t cycle_cnt;
uint64_t cyc_ref_timestamp;
+ uint64_t first_timestamp;
+ uint64_t last_reliable_timestamp;
+ uint64_t vmcs;
+ uint64_t print_once;
+ uint64_t last_ctc;
uint32_t last_mtc;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
@@ -217,6 +239,31 @@ static uint64_t intel_pt_lower_power_of_2(uint64_t x)
return x << i;
}
+__printf(1, 2)
+static void p_log(const char *fmt, ...)
+{
+ char buf[512];
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ fprintf(stderr, "%s\n", buf);
+ intel_pt_log("%s\n", buf);
+}
+
+static bool intel_pt_print_once(struct intel_pt_decoder *decoder,
+ enum intel_pt_p_once id)
+{
+ uint64_t bit = 1ULL << id;
+
+ if (decoder->print_once & bit)
+ return false;
+ decoder->print_once |= bit;
+ return true;
+}
+
static uint64_t intel_pt_cyc_threshold(uint64_t ctl)
{
if (!(ctl & INTEL_PT_CYC_ENABLE))
@@ -258,11 +305,16 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->walk_insn = params->walk_insn;
decoder->pgd_ip = params->pgd_ip;
decoder->lookahead = params->lookahead;
+ decoder->findnew_vmcs_info = params->findnew_vmcs_info;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
decoder->hop = params->quick >= 1;
decoder->leap = params->quick >= 2;
+ decoder->vm_time_correlation = params->vm_time_correlation;
+ decoder->vm_tm_corr_dry_run = params->vm_tm_corr_dry_run;
+ decoder->first_timestamp = params->first_timestamp;
+ decoder->last_reliable_timestamp = params->first_timestamp;
decoder->flags = params->flags;
@@ -312,6 +364,12 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
return decoder;
}
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp)
+{
+ decoder->first_timestamp = first_timestamp;
+}
+
static void intel_pt_pop_blk(struct intel_pt_stack *stack)
{
struct intel_pt_blk *blk = stack->blk;
@@ -577,6 +635,7 @@ static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
intel_pt_reposition(decoder);
decoder->ref_timestamp = buffer.ref_timestamp;
decoder->state.trace_nr = buffer.trace_nr;
+ decoder->vm_tm_corr_same_buf = false;
intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
decoder->ref_timestamp);
return -ENOLINK;
@@ -1469,9 +1528,24 @@ static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
return timestamp;
}
+/* For use only when decoder->vm_time_correlation is true */
+static bool intel_pt_time_in_range(struct intel_pt_decoder *decoder,
+ uint64_t timestamp)
+{
+ uint64_t max_timestamp = decoder->buf_timestamp;
+
+ if (!max_timestamp) {
+ max_timestamp = decoder->last_reliable_timestamp +
+ 0x400000000ULL;
+ }
+ return timestamp >= decoder->last_reliable_timestamp &&
+ timestamp < max_timestamp;
+}
+
static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t timestamp;
+ bool bad = false;
decoder->have_tma = false;
@@ -1493,10 +1567,21 @@ static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
timestamp = decoder->timestamp;
}
if (timestamp < decoder->timestamp) {
- intel_pt_log_to("Wraparound timestamp", timestamp);
- timestamp += (1ULL << 56);
- decoder->tsc_timestamp = timestamp;
+ if (!decoder->buf_timestamp ||
+ (timestamp + (1ULL << 56) < decoder->buf_timestamp)) {
+ intel_pt_log_to("Wraparound timestamp", timestamp);
+ timestamp += (1ULL << 56);
+ decoder->tsc_timestamp = timestamp;
+ } else {
+ intel_pt_log_to("Suppressing bad timestamp", timestamp);
+ timestamp = decoder->timestamp;
+ bad = true;
+ }
}
+ if (decoder->vm_time_correlation &&
+ (bad || !intel_pt_time_in_range(decoder, timestamp)) &&
+ intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
+ p_log("Timestamp out of range");
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
}
@@ -1573,6 +1658,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
intel_pt_mtc_cyc_cnt_upd(decoder);
decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
+ decoder->last_ctc = ctc - ctc_rem;
decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
if (decoder->tsc_ctc_mult) {
decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
@@ -1957,6 +2043,613 @@ static int intel_pt_resample(struct intel_pt_decoder *decoder)
return 0;
}
+struct intel_pt_vm_tsc_info {
+ struct intel_pt_pkt pip_packet;
+ struct intel_pt_pkt vmcs_packet;
+ struct intel_pt_pkt tma_packet;
+ bool tsc, pip, vmcs, tma, psbend;
+ uint64_t ctc_delta;
+ uint64_t last_ctc;
+ int max_lookahead;
+};
+
+/* Lookahead and get the PIP, VMCS and TMA packets from PSB+ */
+static int intel_pt_vm_psb_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_vm_tsc_info *data = pkt_info->data;
+
+ switch (pkt_info->packet.type) {
+ case INTEL_PT_PAD:
+ case INTEL_PT_MNT:
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MTC:
+ case INTEL_PT_FUP:
+ case INTEL_PT_CYC:
+ case INTEL_PT_CBR:
+ break;
+
+ case INTEL_PT_TSC:
+ data->tsc = true;
+ break;
+
+ case INTEL_PT_TMA:
+ data->tma_packet = pkt_info->packet;
+ data->tma = true;
+ break;
+
+ case INTEL_PT_PIP:
+ data->pip_packet = pkt_info->packet;
+ data->pip = true;
+ break;
+
+ case INTEL_PT_VMCS:
+ data->vmcs_packet = pkt_info->packet;
+ data->vmcs = true;
+ break;
+
+ case INTEL_PT_PSBEND:
+ data->psbend = true;
+ return 1;
+
+ case INTEL_PT_TIP_PGE:
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BBP:
+ case INTEL_PT_BIP:
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ case INTEL_PT_OVF:
+ case INTEL_PT_BAD:
+ case INTEL_PT_TNT:
+ case INTEL_PT_TIP_PGD:
+ case INTEL_PT_TIP:
+ case INTEL_PT_PSB:
+ case INTEL_PT_TRACESTOP:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+struct intel_pt_ovf_fup_info {
+ int max_lookahead;
+ bool found;
+};
+
+/* Lookahead to detect a FUP packet after OVF */
+static int intel_pt_ovf_fup_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_ovf_fup_info *data = pkt_info->data;
+
+ if (pkt_info->packet.type == INTEL_PT_CYC ||
+ pkt_info->packet.type == INTEL_PT_MTC ||
+ pkt_info->packet.type == INTEL_PT_TSC)
+ return !--(data->max_lookahead);
+ data->found = pkt_info->packet.type == INTEL_PT_FUP;
+ return 1;
+}
+
+static bool intel_pt_ovf_fup_lookahead(struct intel_pt_decoder *decoder)
+{
+ struct intel_pt_ovf_fup_info data = {
+ .max_lookahead = 16,
+ .found = false,
+ };
+
+ intel_pt_pkt_lookahead(decoder, intel_pt_ovf_fup_lookahead_cb, &data);
+ return data.found;
+}
+
+/* Lookahead and get the TMA packet after TSC */
+static int intel_pt_tma_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct intel_pt_vm_tsc_info *data = pkt_info->data;
+
+ if (pkt_info->packet.type == INTEL_PT_CYC ||
+ pkt_info->packet.type == INTEL_PT_MTC)
+ return !--(data->max_lookahead);
+
+ if (pkt_info->packet.type == INTEL_PT_TMA) {
+ data->tma_packet = pkt_info->packet;
+ data->tma = true;
+ }
+ return 1;
+}
+
+static uint64_t intel_pt_ctc_to_tsc(struct intel_pt_decoder *decoder, uint64_t ctc)
+{
+ if (decoder->tsc_ctc_mult)
+ return ctc * decoder->tsc_ctc_mult;
+ else
+ return multdiv(ctc, decoder->tsc_ctc_ratio_n, decoder->tsc_ctc_ratio_d);
+}
+
+static uint64_t intel_pt_calc_expected_tsc(struct intel_pt_decoder *decoder,
+ uint32_t ctc,
+ uint32_t fc,
+ uint64_t last_ctc_timestamp,
+ uint64_t ctc_delta,
+ uint32_t last_ctc)
+{
+ /* Number of CTC ticks from last_ctc_timestamp to last_mtc */
+ uint64_t last_mtc_ctc = last_ctc + ctc_delta;
+ /*
+ * Number of CTC ticks from there until current TMA packet. We would
+ * expect last_mtc_ctc to be before ctc, but the TSC packet can slip
+ * past an MTC, so a sign-extended value is used.
+ */
+ uint64_t delta = (int16_t)((uint16_t)ctc - (uint16_t)last_mtc_ctc);
+ /* Total CTC ticks from last_ctc_timestamp to current TMA packet */
+ uint64_t new_ctc_delta = ctc_delta + delta;
+ uint64_t expected_tsc;
+
+ /*
+ * Convert CTC ticks to TSC ticks, add the starting point
+ * (last_ctc_timestamp) and the fast counter from the TMA packet.
+ */
+ expected_tsc = last_ctc_timestamp + intel_pt_ctc_to_tsc(decoder, new_ctc_delta) + fc;
+
+ if (intel_pt_enable_logging) {
+ intel_pt_log_x64(last_mtc_ctc);
+ intel_pt_log_x32(last_ctc);
+ intel_pt_log_x64(ctc_delta);
+ intel_pt_log_x64(delta);
+ intel_pt_log_x32(ctc);
+ intel_pt_log_x64(new_ctc_delta);
+ intel_pt_log_x64(last_ctc_timestamp);
+ intel_pt_log_x32(fc);
+ intel_pt_log_x64(intel_pt_ctc_to_tsc(decoder, new_ctc_delta));
+ intel_pt_log_x64(expected_tsc);
+ }
+
+ return expected_tsc;
+}
+
+static uint64_t intel_pt_expected_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ uint32_t ctc = data->tma_packet.payload;
+ uint32_t fc = data->tma_packet.count;
+
+ return intel_pt_calc_expected_tsc(decoder, ctc, fc,
+ decoder->ctc_timestamp,
+ data->ctc_delta, data->last_ctc);
+}
+
+static void intel_pt_translate_vm_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vmcs_info *vmcs_info)
+{
+ uint64_t payload = decoder->packet.payload;
+
+ /* VMX adds the TSC Offset, so subtract to get host TSC */
+ decoder->packet.payload -= vmcs_info->tsc_offset;
+ /* TSC packet has only 7 bytes */
+ decoder->packet.payload &= SEVEN_BYTES;
+
+ /*
+ * The buffer is mmapped from the data file, so this also updates the
+ * data file.
+ */
+ if (!decoder->vm_tm_corr_dry_run)
+ memcpy((void *)decoder->buf + 1, &decoder->packet.payload, 7);
+
+ intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
+ " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
+ payload, decoder->packet.payload, vmcs_info->vmcs,
+ vmcs_info->tsc_offset);
+}
+
+static void intel_pt_translate_vm_tsc_offset(struct intel_pt_decoder *decoder,
+ uint64_t tsc_offset)
+{
+ struct intel_pt_vmcs_info vmcs_info = {
+ .vmcs = NO_VMCS,
+ .tsc_offset = tsc_offset
+ };
+
+ intel_pt_translate_vm_tsc(decoder, &vmcs_info);
+}
+
+static inline bool in_vm(uint64_t pip_payload)
+{
+ return pip_payload & 1;
+}
+
+static inline bool pip_in_vm(struct intel_pt_pkt *pip_packet)
+{
+ return pip_packet->payload & 1;
+}
+
+static void intel_pt_print_vmcs_info(struct intel_pt_vmcs_info *vmcs_info)
+{
+ p_log("VMCS: %#" PRIx64 " TSC Offset %#" PRIx64,
+ vmcs_info->vmcs, vmcs_info->tsc_offset);
+}
+
+static void intel_pt_vm_tm_corr_psb(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ memset(data, 0, sizeof(*data));
+ data->ctc_delta = decoder->ctc_delta;
+ data->last_ctc = decoder->last_ctc;
+ intel_pt_pkt_lookahead(decoder, intel_pt_vm_psb_lookahead_cb, data);
+ if (data->tsc && !data->psbend)
+ p_log("ERROR: PSB without PSBEND");
+ decoder->in_psb = data->psbend;
+}
+
+static void intel_pt_vm_tm_corr_first_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data,
+ struct intel_pt_vmcs_info *vmcs_info,
+ uint64_t host_tsc)
+{
+ if (!decoder->in_psb) {
+ /* Can't happen */
+ p_log("ERROR: First TSC is not in PSB+");
+ }
+
+ if (data->pip) {
+ if (pip_in_vm(&data->pip_packet)) { /* Guest */
+ if (vmcs_info && vmcs_info->tsc_offset) {
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ decoder->vm_tm_corr_reliable = true;
+ } else {
+ p_log("ERROR: First TSC, unknown TSC Offset");
+ }
+ } else { /* Host */
+ decoder->vm_tm_corr_reliable = true;
+ }
+ } else { /* Host or Guest */
+ decoder->vm_tm_corr_reliable = false;
+ if (intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Assume Host */
+ } else {
+ /* Assume Guest */
+ if (vmcs_info && vmcs_info->tsc_offset)
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ else
+ p_log("ERROR: First TSC, no PIP, unknown TSC Offset");
+ }
+ }
+}
+
+static void intel_pt_vm_tm_corr_tsc(struct intel_pt_decoder *decoder,
+ struct intel_pt_vm_tsc_info *data)
+{
+ struct intel_pt_vmcs_info *vmcs_info;
+ uint64_t tsc_offset = 0;
+ uint64_t vmcs;
+ bool reliable = true;
+ uint64_t expected_tsc;
+ uint64_t host_tsc;
+ uint64_t ref_timestamp;
+
+ bool assign = false;
+ bool assign_reliable = false;
+
+ /* Already have 'data' for the in_psb case */
+ if (!decoder->in_psb) {
+ memset(data, 0, sizeof(*data));
+ data->ctc_delta = decoder->ctc_delta;
+ data->last_ctc = decoder->last_ctc;
+ data->max_lookahead = 16;
+ intel_pt_pkt_lookahead(decoder, intel_pt_tma_lookahead_cb, data);
+ if (decoder->pge) {
+ data->pip = true;
+ data->pip_packet.payload = decoder->pip_payload;
+ }
+ }
+
+ /* Calculations depend on having TMA packets */
+ if (!data->tma) {
+ p_log("ERROR: TSC without TMA");
+ return;
+ }
+
+ vmcs = data->vmcs ? data->vmcs_packet.payload : decoder->vmcs;
+ if (vmcs == NO_VMCS)
+ vmcs = 0;
+
+ vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
+
+ ref_timestamp = decoder->timestamp ? decoder->timestamp : decoder->buf_timestamp;
+ host_tsc = intel_pt_8b_tsc(decoder->packet.payload, ref_timestamp);
+
+ if (!decoder->ctc_timestamp) {
+ intel_pt_vm_tm_corr_first_tsc(decoder, data, vmcs_info, host_tsc);
+ return;
+ }
+
+ expected_tsc = intel_pt_expected_tsc(decoder, data);
+
+ tsc_offset = host_tsc - expected_tsc;
+
+ /* Determine if TSC is from Host or Guest */
+ if (data->pip) {
+ if (pip_in_vm(&data->pip_packet)) { /* Guest */
+ if (!vmcs_info) {
+ /* PIP NR=1 without VMCS cannot happen */
+ p_log("ERROR: Missing VMCS");
+ intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
+ decoder->vm_tm_corr_reliable = false;
+ return;
+ }
+ } else { /* Host */
+ decoder->last_reliable_timestamp = host_tsc;
+ decoder->vm_tm_corr_reliable = true;
+ return;
+ }
+ } else { /* Host or Guest */
+ reliable = false; /* Host/Guest is a guess, so not reliable */
+ if (decoder->in_psb) {
+ if (!tsc_offset)
+ return; /* Zero TSC Offset, assume Host */
+ /*
+ * TSC packet has only 7 bytes of TSC. We have no
+ * information about the Guest's 8th byte, but it
+ * doesn't matter because we only need 7 bytes.
+ * Here, since the 8th byte is unreliable and
+ * irrelevant, compare only 7 bytes.
+ */
+ if (vmcs_info &&
+ (tsc_offset & SEVEN_BYTES) ==
+ (vmcs_info->tsc_offset & SEVEN_BYTES)) {
+ /* Same TSC Offset as last VMCS, assume Guest */
+ goto guest;
+ }
+ }
+ /*
+ * Check if the host_tsc is within the expected range.
+ * Note, we could narrow the range more by looking ahead for
+ * the next host TSC in the same buffer, but we don't bother to
+ * do that because this is probably good enough.
+ */
+ if (host_tsc >= expected_tsc && intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Within expected range for Host TSC, assume Host */
+ decoder->vm_tm_corr_reliable = false;
+ return;
+ }
+ }
+
+guest: /* Assuming Guest */
+
+ /* Determine whether to assign TSC Offset */
+ if (vmcs_info && vmcs_info->vmcs) {
+ if (vmcs_info->tsc_offset && vmcs_info->reliable) {
+ assign = false;
+ } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_reliable &&
+ decoder->vm_tm_corr_continuous && decoder->vm_tm_corr_same_buf) {
+ /* Continuous tracing, TSC in a PSB is not a time loss */
+ assign = true;
+ assign_reliable = true;
+ } else if (decoder->in_psb && data->pip && decoder->vm_tm_corr_same_buf) {
+ /*
+ * Unlikely to be a time loss TSC in a PSB which is not
+ * at the start of a buffer.
+ */
+ assign = true;
+ assign_reliable = false;
+ }
+ }
+
+ /* Record VMCS TSC Offset */
+ if (assign && (vmcs_info->tsc_offset != tsc_offset ||
+ vmcs_info->reliable != assign_reliable)) {
+ bool print = vmcs_info->tsc_offset != tsc_offset;
+
+ vmcs_info->tsc_offset = tsc_offset;
+ vmcs_info->reliable = assign_reliable;
+ if (print)
+ intel_pt_print_vmcs_info(vmcs_info);
+ }
+
+ /* Determine what TSC Offset to use */
+ if (vmcs_info && vmcs_info->tsc_offset) {
+ if (!vmcs_info->reliable)
+ reliable = false;
+ intel_pt_translate_vm_tsc(decoder, vmcs_info);
+ } else {
+ reliable = false;
+ if (vmcs_info) {
+ if (!vmcs_info->error_printed) {
+ p_log("ERROR: Unknown TSC Offset for VMCS %#" PRIx64,
+ vmcs_info->vmcs);
+ vmcs_info->error_printed = true;
+ }
+ } else {
+ if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
+ p_log("ERROR: Unknown VMCS");
+ }
+ intel_pt_translate_vm_tsc_offset(decoder, tsc_offset);
+ }
+
+ decoder->vm_tm_corr_reliable = reliable;
+}
+
+static void intel_pt_vm_tm_corr_pebs_tsc(struct intel_pt_decoder *decoder)
+{
+ uint64_t host_tsc = decoder->packet.payload;
+ uint64_t guest_tsc = decoder->packet.payload;
+ struct intel_pt_vmcs_info *vmcs_info;
+ uint64_t vmcs;
+
+ vmcs = decoder->vmcs;
+ if (vmcs == NO_VMCS)
+ vmcs = 0;
+
+ vmcs_info = decoder->findnew_vmcs_info(decoder->data, vmcs);
+
+ if (decoder->pge) {
+ if (in_vm(decoder->pip_payload)) { /* Guest */
+ if (!vmcs_info) {
+ /* PIP NR=1 without VMCS cannot happen */
+ p_log("ERROR: Missing VMCS");
+ }
+ } else { /* Host */
+ return;
+ }
+ } else { /* Host or Guest */
+ if (intel_pt_time_in_range(decoder, host_tsc)) {
+ /* Within expected range for Host TSC, assume Host */
+ return;
+ }
+ }
+
+ if (vmcs_info) {
+ /* Translate Guest TSC to Host TSC */
+ host_tsc = ((guest_tsc & SEVEN_BYTES) - vmcs_info->tsc_offset) & SEVEN_BYTES;
+ host_tsc = intel_pt_8b_tsc(host_tsc, decoder->timestamp);
+ intel_pt_log("Translated VM TSC %#" PRIx64 " -> %#" PRIx64
+ " VMCS %#" PRIx64 " TSC Offset %#" PRIx64 "\n",
+ guest_tsc, host_tsc, vmcs_info->vmcs,
+ vmcs_info->tsc_offset);
+ if (!intel_pt_time_in_range(decoder, host_tsc) &&
+ intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_ERANGE))
+ p_log("Timestamp out of range");
+ } else {
+ if (intel_pt_print_once(decoder, INTEL_PT_PRT_ONCE_UNK_VMCS))
+ p_log("ERROR: Unknown VMCS");
+ host_tsc = decoder->timestamp;
+ }
+
+ decoder->packet.payload = host_tsc;
+
+ if (!decoder->vm_tm_corr_dry_run)
+ memcpy((void *)decoder->buf + 1, &host_tsc, 8);
+}
+
+static int intel_pt_vm_time_correlation(struct intel_pt_decoder *decoder)
+{
+ struct intel_pt_vm_tsc_info data = { .psbend = false };
+ bool pge;
+ int err;
+
+ if (decoder->in_psb)
+ intel_pt_vm_tm_corr_psb(decoder, &data);
+
+ while (1) {
+ err = intel_pt_get_next_packet(decoder);
+ if (err == -ENOLINK)
+ continue;
+ if (err)
+ break;
+
+ switch (decoder->packet.type) {
+ case INTEL_PT_TIP_PGD:
+ decoder->pge = false;
+ decoder->vm_tm_corr_continuous = false;
+ break;
+
+ case INTEL_PT_TNT:
+ case INTEL_PT_TIP:
+ case INTEL_PT_TIP_PGE:
+ decoder->pge = true;
+ break;
+
+ case INTEL_PT_OVF:
+ decoder->in_psb = false;
+ pge = decoder->pge;
+ decoder->pge = intel_pt_ovf_fup_lookahead(decoder);
+ if (pge != decoder->pge)
+ intel_pt_log("Surprising PGE change in OVF!");
+ if (!decoder->pge)
+ decoder->vm_tm_corr_continuous = false;
+ break;
+
+ case INTEL_PT_FUP:
+ if (decoder->in_psb)
+ decoder->pge = true;
+ break;
+
+ case INTEL_PT_TRACESTOP:
+ decoder->pge = false;
+ decoder->vm_tm_corr_continuous = false;
+ decoder->have_tma = false;
+ break;
+
+ case INTEL_PT_PSB:
+ intel_pt_vm_tm_corr_psb(decoder, &data);
+ break;
+
+ case INTEL_PT_PIP:
+ decoder->pip_payload = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_MTC:
+ intel_pt_calc_mtc_timestamp(decoder);
+ break;
+
+ case INTEL_PT_TSC:
+ intel_pt_vm_tm_corr_tsc(decoder, &data);
+ intel_pt_calc_tsc_timestamp(decoder);
+ decoder->vm_tm_corr_same_buf = true;
+ decoder->vm_tm_corr_continuous = decoder->pge;
+ break;
+
+ case INTEL_PT_TMA:
+ intel_pt_calc_tma(decoder);
+ break;
+
+ case INTEL_PT_CYC:
+ intel_pt_calc_cyc_timestamp(decoder);
+ break;
+
+ case INTEL_PT_CBR:
+ intel_pt_calc_cbr(decoder);
+ break;
+
+ case INTEL_PT_PSBEND:
+ decoder->in_psb = false;
+ data.psbend = false;
+ break;
+
+ case INTEL_PT_VMCS:
+ if (decoder->packet.payload != NO_VMCS)
+ decoder->vmcs = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_BBP:
+ decoder->blk_type = decoder->packet.payload;
+ break;
+
+ case INTEL_PT_BIP:
+ if (decoder->blk_type == INTEL_PT_PEBS_BASIC &&
+ decoder->packet.count == 2)
+ intel_pt_vm_tm_corr_pebs_tsc(decoder);
+ break;
+
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ decoder->blk_type = 0;
+ break;
+
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MNT:
+ case INTEL_PT_PAD:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BAD: /* Does not happen */
+ default:
+ break;
+ }
+ }
+
+ return err;
+}
+
#define HOP_PROCESS 0
#define HOP_IGNORE 1
#define HOP_RETURN 2
@@ -2898,6 +3591,15 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
if (err)
return err;
+ if (decoder->vm_time_correlation) {
+ decoder->in_psb = true;
+ if (!decoder->timestamp)
+ decoder->timestamp = 1;
+ decoder->state.type = 0;
+ decoder->pkt_state = INTEL_PT_STATE_VM_TIME_CORRELATION;
+ return 0;
+ }
+
decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
@@ -2985,6 +3687,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
case INTEL_PT_STATE_RESAMPLE:
err = intel_pt_resample(decoder);
break;
+ case INTEL_PT_STATE_VM_TIME_CORRELATION:
+ err = intel_pt_vm_time_correlation(decoder);
+ break;
default:
err = intel_pt_bug(decoder);
break;
@@ -3231,6 +3936,7 @@ static unsigned char *adj_for_padding(unsigned char *buf_b,
* @len_b: size of second buffer
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
+ * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* If the trace contains TSC we can look at the last TSC of @buf_a and the
* first TSC of @buf_b in order to determine if the buffers overlap, and then
@@ -3243,7 +3949,8 @@ static unsigned char *adj_for_padding(unsigned char *buf_b,
static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
size_t len_a,
unsigned char *buf_b,
- size_t len_b, bool *consecutive)
+ size_t len_b, bool *consecutive,
+ bool ooo_tsc)
{
uint64_t tsc_a, tsc_b;
unsigned char *p;
@@ -3278,7 +3985,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
start = buf_b + len_b - (rem_b - rem_a);
return adj_for_padding(start, buf_a, len_a);
}
- if (cmp < 0)
+ if (cmp < 0 && !ooo_tsc)
return buf_b; /* tsc_a < tsc_b => no overlap */
}
@@ -3296,6 +4003,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
* @have_tsc: can use TSC packets to detect overlap
* @consecutive: returns true if there is data in buf_b that is consecutive
* to buf_a
+ * @ooo_tsc: out-of-order TSC due to VM TSC offset / scaling
*
* When trace samples or snapshots are recorded there is the possibility that
* the data overlaps. Note that, for the purposes of decoding, data is only
@@ -3306,7 +4014,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
*/
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc, bool *consecutive)
+ bool have_tsc, bool *consecutive,
+ bool ooo_tsc)
{
unsigned char *found;
@@ -3319,7 +4028,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
if (have_tsc) {
found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
- consecutive);
+ consecutive, ooo_tsc);
if (found)
return found;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index d9e62a7f6f0e..714c475808c0 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -11,6 +11,8 @@
#include <stddef.h>
#include <stdbool.h>
+#include <linux/rbtree.h>
+
#include "intel-pt-insn-decoder.h"
#define INTEL_PT_IN_TX (1 << 0)
@@ -199,6 +201,14 @@ struct intel_pt_blk_items {
bool is_32_bit;
};
+struct intel_pt_vmcs_info {
+ struct rb_node rb_node;
+ uint64_t vmcs;
+ uint64_t tsc_offset;
+ bool reliable;
+ bool error_printed;
+};
+
struct intel_pt_state {
enum intel_pt_sample_type type;
bool from_nr;
@@ -244,9 +254,13 @@ struct intel_pt_params {
uint64_t max_insn_cnt, void *data);
bool (*pgd_ip)(uint64_t ip, void *data);
int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
+ struct intel_pt_vmcs_info *(*findnew_vmcs_info)(void *data, uint64_t vmcs);
void *data;
bool return_compression;
bool branch_enable;
+ bool vm_time_correlation;
+ bool vm_tm_corr_dry_run;
+ uint64_t first_timestamp;
uint64_t ctl;
uint64_t period;
enum intel_pt_period_type period_type;
@@ -269,8 +283,12 @@ int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp);
unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
unsigned char *buf_b, size_t len_b,
- bool have_tsc, bool *consecutive);
+ bool have_tsc, bool *consecutive,
+ bool ooo_tsc);
int intel_pt__strerror(int code, char *buf, size_t buflen);
+void intel_pt_set_first_timestamp(struct intel_pt_decoder *decoder,
+ uint64_t first_timestamp);
+
#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
index 388661f89c44..d900aab24b21 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -67,4 +67,9 @@ static inline void intel_pt_log_to(const char *msg, uint64_t u)
intel_pt_log("%s to " x64_fmt "\n", msg, u);
}
+#define intel_pt_log_var(var, fmt) intel_pt_log("%s: " #var " " fmt "\n", __func__, var)
+
+#define intel_pt_log_x32(var) intel_pt_log_var(var, "%#x")
+#define intel_pt_log_x64(var) intel_pt_log_var(var, "%#" PRIx64)
+
#endif
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 0dfec8761b9a..154a1077f22e 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -78,6 +78,7 @@ struct intel_pt {
u64 kernel_start;
u64 switch_ip;
u64 ptss_ip;
+ u64 first_timestamp;
struct perf_tsc_conversion tc;
bool cap_user_time_zero;
@@ -133,6 +134,9 @@ struct intel_pt {
struct ip_callchain *chain;
struct branch_stack *br_stack;
+
+ u64 dflt_tsc_offset;
+ struct rb_root vmcs_info;
};
enum switch_state {
@@ -271,6 +275,65 @@ static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
return !n || !perf_time__ranges_skip_sample(range, n, tm);
}
+static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
+ u64 vmcs,
+ u64 dflt_tsc_offset)
+{
+ struct rb_node **p = &rb_root->rb_node;
+ struct rb_node *parent = NULL;
+ struct intel_pt_vmcs_info *v;
+
+ while (*p) {
+ parent = *p;
+ v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
+
+ if (v->vmcs == vmcs)
+ return v;
+
+ if (vmcs < v->vmcs)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ v = zalloc(sizeof(*v));
+ if (v) {
+ v->vmcs = vmcs;
+ v->tsc_offset = dflt_tsc_offset;
+ v->reliable = dflt_tsc_offset;
+
+ rb_link_node(&v->rb_node, parent, p);
+ rb_insert_color(&v->rb_node, rb_root);
+ }
+
+ return v;
+}
+
+static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
+{
+ struct intel_pt_queue *ptq = data;
+ struct intel_pt *pt = ptq->pt;
+
+ if (!vmcs && !pt->dflt_tsc_offset)
+ return NULL;
+
+ return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
+}
+
+static void intel_pt_free_vmcs_info(struct intel_pt *pt)
+{
+ struct intel_pt_vmcs_info *v;
+ struct rb_node *n;
+
+ n = rb_first(&pt->vmcs_info);
+ while (n) {
+ v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
+ n = rb_next(n);
+ rb_erase(&v->rb_node, &pt->vmcs_info);
+ free(v);
+ }
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
@@ -278,9 +341,17 @@ static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *
void *start;
start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
- pt->have_tsc, &consecutive);
+ pt->have_tsc, &consecutive,
+ pt->synth_opts.vm_time_correlation);
if (!start)
return -EINVAL;
+ /*
+ * In the case of vm_time_correlation, the overlap might contain TSC
+ * packets that will not be fixed, and that will then no longer work for
+ * overlap detection. Avoid that by zeroing out the overlap.
+ */
+ if (pt->synth_opts.vm_time_correlation)
+ memset(b->data, 0, start - b->data);
b->use_size = b->data + b->size - start;
b->use_data = start;
if (b->use_size && consecutive)
@@ -901,7 +972,7 @@ static bool intel_pt_timeless_decoding(struct intel_pt *pt)
bool timeless_decoding = true;
u64 config;
- if (!pt->tsc_bit || !pt->cap_user_time_zero)
+ if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
return true;
evlist__for_each_entry(pt->session->evlist, evsel) {
@@ -949,6 +1020,19 @@ static bool intel_pt_have_tsc(struct intel_pt *pt)
return have_tsc;
}
+static bool intel_pt_have_mtc(struct intel_pt *pt)
+{
+ struct evsel *evsel;
+ u64 config;
+
+ evlist__for_each_entry(pt->session->evlist, evsel) {
+ if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
+ (config & pt->mtc_bit))
+ return true;
+ }
+ return false;
+}
+
static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
struct evsel *evsel;
@@ -1103,6 +1187,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.get_trace = intel_pt_get_trace;
params.walk_insn = intel_pt_walk_next_insn;
params.lookahead = intel_pt_lookahead;
+ params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
@@ -1112,6 +1197,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
params.quick = pt->synth_opts.quick;
+ params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
+ params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
+ params.first_timestamp = pt->first_timestamp;
if (pt->filts.cnt > 0)
params.pgd_ip = intel_pt_pgd_ip;
@@ -1176,6 +1264,21 @@ static void intel_pt_free_queue(void *priv)
free(ptq);
}
+static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
+{
+ unsigned int i;
+
+ pt->first_timestamp = timestamp;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq && ptq->decoder)
+ intel_pt_set_first_timestamp(ptq->decoder, timestamp);
+ }
+}
+
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
struct auxtrace_queue *queue)
{
@@ -2379,7 +2482,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
if (pt->per_cpu_mmaps &&
(pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
!pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
- !pt->sampling_mode) {
+ !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
@@ -2878,6 +2981,8 @@ static int intel_pt_process_event(struct perf_session *session,
sample->time);
}
} else if (timestamp) {
+ if (!pt->first_timestamp)
+ intel_pt_first_timestamp(pt, timestamp);
err = intel_pt_process_queues(pt, timestamp);
}
if (err)
@@ -2964,6 +3069,7 @@ static void intel_pt_free(struct perf_session *session)
auxtrace_heap__free(&pt->heap);
intel_pt_free_events(session);
session->auxtrace = NULL;
+ intel_pt_free_vmcs_info(pt);
thread__put(pt->unknown_thread);
addr_filters__exit(&pt->filts);
zfree(&pt->chain);
@@ -3407,6 +3513,65 @@ static int intel_pt_setup_time_ranges(struct intel_pt *pt,
return 0;
}
+static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
+{
+ struct intel_pt_vmcs_info *vmcs_info;
+ u64 tsc_offset, vmcs;
+ char *p = *args;
+
+ errno = 0;
+
+ p = skip_spaces(p);
+ if (!*p)
+ return 1;
+
+ tsc_offset = strtoull(p, &p, 0);
+ if (errno)
+ return -errno;
+ p = skip_spaces(p);
+ if (*p != ':') {
+ pt->dflt_tsc_offset = tsc_offset;
+ *args = p;
+ return 0;
+ }
+ p += 1;
+ while (1) {
+ vmcs = strtoull(p, &p, 0);
+ if (errno)
+ return -errno;
+ if (!vmcs)
+ return -EINVAL;
+ vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
+ if (!vmcs_info)
+ return -ENOMEM;
+ p = skip_spaces(p);
+ if (*p != ',')
+ break;
+ p += 1;
+ }
+ *args = p;
+ return 0;
+}
+
+static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
+{
+ char *args = pt->synth_opts.vm_tm_corr_args;
+ int ret;
+
+ if (!args)
+ return 0;
+
+ do {
+ ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
+ } while (!ret);
+
+ if (ret < 0) {
+ pr_err("Failed to parse VM Time Correlation options\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static const char * const intel_pt_info_fmts[] = {
[INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n",
[INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n",
@@ -3469,6 +3634,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
if (!pt)
return -ENOMEM;
+ pt->vmcs_info = RB_ROOT;
+
addr_filters__init(&pt->filts);
err = perf_config(intel_pt_perf_config, pt);
@@ -3481,6 +3648,20 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
intel_pt_log_set_name(INTEL_PT_PMU_NAME);
+ if (session->itrace_synth_opts->set) {
+ pt->synth_opts = *session->itrace_synth_opts;
+ } else {
+ struct itrace_synth_opts *opts = session->itrace_synth_opts;
+
+ itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
+ if (!opts->default_no_sample && !opts->inject) {
+ pt->synth_opts.branches = false;
+ pt->synth_opts.callchain = true;
+ pt->synth_opts.add_callchain = true;
+ }
+ pt->synth_opts.thread_stack = opts->thread_stack;
+ }
+
pt->session = session;
pt->machine = &session->machines.host; /* No kvm support */
pt->auxtrace_type = auxtrace_info->type;
@@ -3562,6 +3743,28 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
pt->sampling_mode = intel_pt_sampling_mode(pt);
pt->est_tsc = !pt->timeless_decoding;
+ if (pt->synth_opts.vm_time_correlation) {
+ if (pt->timeless_decoding) {
+ pr_err("Intel PT has no time information for VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ if (session->itrace_synth_opts->ptime_range) {
+ pr_err("Time ranges cannot be specified with VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ /* Currently TSC Offset is calculated using MTC packets */
+ if (!intel_pt_have_mtc(pt)) {
+ pr_err("MTC packets must have been enabled for VM Time Correlation\n");
+ err = -EINVAL;
+ goto err_free_queues;
+ }
+ err = intel_pt_parse_vm_tm_corr_args(pt);
+ if (err)
+ goto err_free_queues;
+ }
+
pt->unknown_thread = thread__new(999999999, 999999999);
if (!pt->unknown_thread) {
err = -ENOMEM;
@@ -3611,21 +3814,6 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
goto err_delete_thread;
}
- if (session->itrace_synth_opts->set) {
- pt->synth_opts = *session->itrace_synth_opts;
- } else {
- itrace_synth_opts__set_default(&pt->synth_opts,
- session->itrace_synth_opts->default_no_sample);
- if (!session->itrace_synth_opts->default_no_sample &&
- !session->itrace_synth_opts->inject) {
- pt->synth_opts.branches = false;
- pt->synth_opts.callchain = true;
- pt->synth_opts.add_callchain = true;
- }
- pt->synth_opts.thread_stack =
- session->itrace_synth_opts->thread_stack;
- }
-
if (pt->synth_opts.log)
intel_pt_log_enable();
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 3ceaf7ef3301..cbd9b268f168 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -504,6 +504,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
goto errout;
}
+ err = -ENOMEM;
if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
template, llc_path, opts) < 0) {
pr_err("ERROR:\tnot enough memory to setup command line\n");
@@ -524,6 +525,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
pr_debug("llvm compiling command template: %s\n", template);
+ err = -ENOMEM;
if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
goto errout;
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index f93a852ad838..f0e75df72b80 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -12,6 +12,8 @@
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
unsigned int perf_mem_events__loads_ldlat = 30;
@@ -24,8 +26,6 @@ static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
};
#undef E
-#undef E
-
static char mem_loads_name[100];
static bool mem_loads_name__init;
@@ -37,7 +37,7 @@ struct perf_mem_event * __weak perf_mem_events__ptr(int i)
return &perf_mem_events[i];
}
-char * __weak perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
struct perf_mem_event *e = perf_mem_events__ptr(i);
@@ -100,6 +100,15 @@ int perf_mem_events__parse(const char *str)
return -1;
}
+static bool perf_mem_event__supported(const char *mnt, char *sysfs_name)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
+ return !stat(path, &st);
+}
+
int perf_mem_events__init(void)
{
const char *mnt = sysfs__mount();
@@ -110,9 +119,9 @@ int perf_mem_events__init(void)
return -ENOENT;
for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- char path[PATH_MAX];
struct perf_mem_event *e = perf_mem_events__ptr(j);
- struct stat st;
+ struct perf_pmu *pmu;
+ char sysfs_name[100];
/*
* If the event entry isn't valid, skip initialization
@@ -121,11 +130,20 @@ int perf_mem_events__init(void)
if (!e->tag)
continue;
- scnprintf(path, PATH_MAX, "%s/devices/%s",
- mnt, e->sysfs_name);
+ if (!perf_pmu__has_hybrid()) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, "cpu");
+ e->supported = perf_mem_event__supported(mnt, sysfs_name);
+ } else {
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, pmu->name);
+ e->supported |= perf_mem_event__supported(mnt, sysfs_name);
+ }
+ }
- if (!stat(path, &st))
- e->supported = found = true;
+ if (e->supported)
+ found = true;
}
return found ? 0 : -ENOENT;
@@ -141,11 +159,76 @@ void perf_mem_events__list(void)
fprintf(stderr, "%-13s%-*s%s\n",
e->tag ?: "",
verbose > 0 ? 25 : 0,
- verbose > 0 ? perf_mem_events__name(j) : "",
+ verbose > 0 ? perf_mem_events__name(j, NULL) : "",
e->supported ? ": available" : "");
}
}
+static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
+ int idx)
+{
+ const char *mnt = sysfs__mount();
+ char sysfs_name[100];
+ struct perf_pmu *pmu;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
+ pmu->name);
+ if (!perf_mem_event__supported(mnt, sysfs_name)) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(idx, pmu->name));
+ }
+ }
+}
+
+int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ char **rec_tmp, int *tmp_nr)
+{
+ int i = *argv_nr, k = 0;
+ struct perf_mem_event *e;
+ struct perf_pmu *pmu;
+ char *s;
+
+ for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ e = perf_mem_events__ptr(j);
+ if (!e->record)
+ continue;
+
+ if (!perf_pmu__has_hybrid()) {
+ if (!e->supported) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events__name(j, NULL));
+ return -1;
+ }
+
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = perf_mem_events__name(j, NULL);
+ } else {
+ if (!e->supported) {
+ perf_mem_events__print_unsupport_hybrid(e, j);
+ return -1;
+ }
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ rec_argv[i++] = "-e";
+ s = perf_mem_events__name(j, pmu->name);
+ if (s) {
+ s = strdup(s);
+ if (!s)
+ return -1;
+
+ rec_argv[i++] = s;
+ rec_tmp[k++] = s;
+ }
+ }
+ }
+ }
+
+ *argv_nr = i;
+ *tmp_nr = k;
+ return 0;
+}
+
static const char * const tlb_access[] = {
"N/A",
"HIT",
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index cacdebd65b8a..916242f8020a 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -38,11 +38,13 @@ extern unsigned int perf_mem_events__loads_ldlat;
int perf_mem_events__parse(const char *str);
int perf_mem_events__init(void);
-char *perf_mem_events__name(int i);
+char *perf_mem_events__name(int i, char *pmu_name);
struct perf_mem_event *perf_mem_events__ptr(int i);
bool is_mem_loads_aux_event(struct evsel *leader);
void perf_mem_events__list(void);
+int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
+ char **rec_tmp, int *tmp_nr);
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
diff --git a/tools/perf/util/perf_dlfilter.h b/tools/perf/util/perf_dlfilter.h
new file mode 100644
index 000000000000..3eef03d661b4
--- /dev/null
+++ b/tools/perf/util/perf_dlfilter.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf_dlfilter.h: API for perf --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef _LINUX_PERF_DLFILTER_H
+#define _LINUX_PERF_DLFILTER_H
+
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+};
+
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* True if this sample event will be filtered out */
+ const char *comm;
+};
+
+struct perf_dlfilter_fns {
+ /* Return information about ip */
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ /* Return information about addr (if addr_correlates_sym) */
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ /* Return arguments from --dlarg option */
+ char **(*args)(void *ctx, int *dlargc);
+ /*
+ * Return information about address (al->size must be set before
+ * calling). Returns 0 on success, -1 otherwise.
+ */
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ /* Return instruction bytes and length */
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ /* Return source file name and line number */
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ /* Return perf_event_attr, refer <linux/perf_event.h> */
+ struct perf_event_attr *(*attr)(void *ctx);
+ /* Read object code, return the number of bytes read */
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /* Reserved */
+ void *(*reserved[120])(void *);
+};
+
+/*
+ * If implemented, 'start' will be called at the beginning,
+ * before any calls to 'filter_event'. Return 0 to indicate success,
+ * or return a negative error code. '*data' can be assigned for use
+ * by other functions. 'ctx' is needed for calls to perf_dlfilter_fns,
+ * but most perf_dlfilter_fns are not valid when called from 'start'.
+ */
+int start(void **data, void *ctx);
+
+/*
+ * If implemented, 'stop' will be called at the end,
+ * after any calls to 'filter_event'. Return 0 to indicate success, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns, but most perf_dlfilter_fns
+ * are not valid when called from 'stop'.
+ */
+int stop(void *data, void *ctx);
+
+/*
+ * If implemented, 'filter_event' will be called for each sample
+ * event. Return 0 to keep the sample event, 1 to filter it out, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns.
+ */
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * The same as 'filter_event' except it is called before internal
+ * filtering.
+ */
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * If implemented, return a one-line description of the filter, and optionally
+ * a longer description.
+ */
+const char *filter_description(const char **long_description);
+
+#endif
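
The comments in perf_dlfilter.h above describe the entry points a --dlfilter shared object may implement. As a purely illustrative sketch (hypothetical file name; uses only the filter_event() hook and the sample fields defined in this header, and assumes the installed header is on the include path), a filter that keeps only kernel-space samples could look like this:

/* SPDX-License-Identifier: GPL-2.0 */
/* kfilter.c (hypothetical): keep kernel-space samples, drop everything else */
#include <linux/perf_event.h>
#include "perf_dlfilter.h"

int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
	/* start()/stop() are optional and omitted; 'data' and 'ctx' are unused here */
	(void)data;
	(void)ctx;

	/* Return 0 to keep the sample event, 1 to filter it out */
	return (sample->cpumode & PERF_RECORD_MISC_CPUMODE_MASK) ==
	       PERF_RECORD_MISC_KERNEL ? 0 : 1;
}

It could be built with something like 'cc -shared -fPIC -o kfilter.so kfilter.c' (exact flags may vary) and passed to perf script with --dlfilter ./kfilter.so.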
diff --git a/tools/perf/util/pmu-hybrid.h b/tools/perf/util/pmu-hybrid.h
index d0fa7bc50a76..2b186c26a43e 100644
--- a/tools/perf/util/pmu-hybrid.h
+++ b/tools/perf/util/pmu-hybrid.h
@@ -19,4 +19,15 @@ struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name);
bool perf_pmu__is_hybrid(const char *name);
char *perf_pmu__hybrid_type_to_pmu(const char *type);
+static inline int perf_pmu__hybrid_pmu_num(void)
+{
+ struct perf_pmu *pmu;
+ int num = 0;
+
+ perf_pmu__for_each_hybrid_pmu(pmu)
+ num++;
+
+ return num;
+}
+
#endif /* __PMU_HYBRID_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a78c8d59a555..c14e1d228e56 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -108,7 +108,6 @@ void exit_probe_symbol_maps(void)
static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
{
- /* kmap->ref_reloc_sym should be set if host_machine is initialized */
struct kmap *kmap;
struct map *map = machine__kernel_map(host_machine);
@@ -683,8 +682,13 @@ static int post_process_probe_trace_point(struct probe_trace_point *tp,
u64 addr = tp->address - offs;
sym = map__find_symbol(map, addr);
- if (!sym)
- return -ENOENT;
+ if (!sym) {
+ /*
+ * If the address is in the inittext section, the map cannot
+ * find it. Ignore it if we are probing an offline kernel.
+ */
+ return (symbol_conf.ignore_vmlinux_buildid) ? 0 : -ENOENT;
+ }
if (strcmp(sym->name, tp->symbol)) {
/* If we have no realname, use symbol for it */
@@ -819,7 +823,10 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
reloc_sym = kernel_get_ref_reloc_sym(&map);
if (!reloc_sym) {
- pr_warning("Relocated base symbol is not found!\n");
+ pr_warning("Relocated base symbol is not found! "
+ "Check /proc/sys/kernel/kptr_restrict\n"
+ "and /proc/sys/kernel/perf_event_paranoid. "
+ "Or run as privileged perf user.\n\n");
return -EINVAL;
}
@@ -2120,19 +2127,55 @@ static int synthesize_probe_trace_arg(struct probe_trace_arg *arg,
}
static int
-synthesize_uprobe_trace_def(struct probe_trace_event *tev, struct strbuf *buf)
+synthesize_probe_trace_args(struct probe_trace_event *tev, struct strbuf *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < tev->nargs && ret >= 0; i++)
+ ret = synthesize_probe_trace_arg(&tev->args[i], buf);
+
+ return ret;
+}
+
+static int
+synthesize_uprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
{
- struct probe_trace_point *tp = &tev->point;
int err;
+ /* Uprobes must have tp->module */
+ if (!tp->module)
+ return -EINVAL;
+ /*
+ * If tp->address == 0, then this point must be an
+ * absolute address uprobe.
+ * try_to_find_absolute_address() should have set
+ * tp->symbol to "0x0".
+ */
+ if (!tp->address && (!tp->symbol || strcmp(tp->symbol, "0x0")))
+ return -EINVAL;
+
+ /* Use the tp->address for uprobes */
err = strbuf_addf(buf, "%s:0x%lx", tp->module, tp->address);
if (err >= 0 && tp->ref_ctr_offset) {
if (!uprobe_ref_ctr_is_supported())
- return -1;
+ return -EINVAL;
err = strbuf_addf(buf, "(0x%lx)", tp->ref_ctr_offset);
}
- return err >= 0 ? 0 : -1;
+ return err >= 0 ? 0 : err;
+}
+
+static int
+synthesize_kprobe_trace_def(struct probe_trace_point *tp, struct strbuf *buf)
+{
+ if (!strncmp(tp->symbol, "0x", 2)) {
+ /* Absolute address. See try_to_find_absolute_address() */
+ return strbuf_addf(buf, "%s%s0x%lx", tp->module ?: "",
+ tp->module ? ":" : "", tp->address);
+ } else {
+ return strbuf_addf(buf, "%s%s%s+%lu", tp->module ?: "",
+ tp->module ? ":" : "", tp->symbol, tp->offset);
+ }
}
char *synthesize_probe_trace_command(struct probe_trace_event *tev)
@@ -2140,11 +2183,7 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
struct probe_trace_point *tp = &tev->point;
struct strbuf buf;
char *ret = NULL;
- int i, err;
-
- /* Uprobes must have tp->module */
- if (tev->uprobes && !tp->module)
- return NULL;
+ int err;
if (strbuf_init(&buf, 32) < 0)
return NULL;
@@ -2152,37 +2191,17 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
if (strbuf_addf(&buf, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
tev->group, tev->event) < 0)
goto error;
- /*
- * If tp->address == 0, then this point must be a
- * absolute address uprobe.
- * try_to_find_absolute_address() should have made
- * tp->symbol to "0x0".
- */
- if (tev->uprobes && !tp->address) {
- if (!tp->symbol || strcmp(tp->symbol, "0x0"))
- goto error;
- }
- /* Use the tp->address for uprobes */
- if (tev->uprobes) {
- err = synthesize_uprobe_trace_def(tev, &buf);
- } else if (!strncmp(tp->symbol, "0x", 2)) {
- /* Absolute address. See try_to_find_absolute_address() */
- err = strbuf_addf(&buf, "%s%s0x%lx", tp->module ?: "",
- tp->module ? ":" : "", tp->address);
- } else {
- err = strbuf_addf(&buf, "%s%s%s+%lu", tp->module ?: "",
- tp->module ? ":" : "", tp->symbol, tp->offset);
- }
-
- if (err)
- goto error;
+ if (tev->uprobes)
+ err = synthesize_uprobe_trace_def(tp, &buf);
+ else
+ err = synthesize_kprobe_trace_def(tp, &buf);
- for (i = 0; i < tev->nargs; i++)
- if (synthesize_probe_trace_arg(&tev->args[i], &buf) < 0)
- goto error;
+ if (err >= 0)
+ err = synthesize_probe_trace_args(tev, &buf);
- ret = strbuf_detach(&buf, NULL);
+ if (err >= 0)
+ ret = strbuf_detach(&buf, NULL);
error:
strbuf_release(&buf);
return ret;
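
For reference, a sketch of the command strings these helpers now synthesize (event names, module/binary paths, offsets and the ref_ctr offset are invented for illustration). synthesize_probe_trace_command() emits the 'p:'/'r:' flag plus group/event, synthesize_kprobe_trace_def() or synthesize_uprobe_trace_def() adds the location, and synthesize_probe_trace_args() appends any probe arguments:

        p:probe/vfs_read vfs_read+0                                  kprobe on a symbol
        p:probe/my_func mymod:my_func+16                             kprobe in a module
        p:probe/raw_probe 0xffffffff81234560                         kprobe on an absolute address
        p:probe_libc/malloc /usr/lib/libc.so.6:0x84130               uprobe; tp->module is the binary path
        p:probe_libc/malloc /usr/lib/libc.so.6:0x84130(0x1d8cc0)     uprobe with a ref_ctr offset
        r:probe/vfs_read__return vfs_read+0                          retprobe variant
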
@@ -2934,7 +2953,7 @@ static int find_probe_functions(struct map *map, char *name,
bool cut_version = true;
if (map__load(map) < 0)
- return 0;
+ return -EACCES; /* Possibly a permission error while loading symbols */
/* If user gives a version, don't cut off the version from symbols */
if (strchr(name, '@'))
@@ -2973,6 +2992,17 @@ void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
struct map *map __maybe_unused,
struct symbol *sym __maybe_unused) { }
+
+static void pr_kallsyms_access_error(void)
+{
+ pr_err("Please ensure you can read the /proc/kallsyms symbol addresses.\n"
+ "If /proc/sys/kernel/kptr_restrict is '2', you can not read\n"
+ "kernel symbol addresses even if you are a superuser. Please change\n"
+ "it to '1'. If kptr_restrict is '1', the superuser can read the\n"
+ "symbol addresses.\n"
+ "In that case, please run this command again with sudo.\n");
+}
+
/*
* Find probe function addresses from map.
* Return an error or the number of found probe_trace_event
@@ -3009,8 +3039,16 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
if (num_matched_functions <= 0) {
- pr_err("Failed to find symbol %s in %s\n", pp->function,
- pev->target ? : "kernel");
+ if (num_matched_functions == -EACCES) {
+ pr_err("Failed to load symbols from %s\n",
+ pev->target ?: "/proc/kallsyms");
+ if (pev->target)
+ pr_err("Please ensure the file is not stripped.\n");
+ else
+ pr_kallsyms_access_error();
+ } else
+ pr_err("Failed to find symbol %s in %s\n", pp->function,
+ pev->target ? : "kernel");
ret = -ENOENT;
goto out;
} else if (num_matched_functions > probe_conf.max_probes) {
@@ -3025,7 +3063,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
(!pp->retprobe || kretprobe_offset_is_supported())) {
reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (!reloc_sym) {
- pr_warning("Relocated base symbol is not found!\n");
+ pr_warning("Relocated base symbol is not found! "
+ "Check /proc/sys/kernel/kptr_restrict\n"
+ "and /proc/sys/kernel/perf_event_paranoid. "
+ "Or run as privileged perf user.\n\n");
ret = -EINVAL;
goto out;
}
@@ -3523,6 +3564,78 @@ int show_probe_trace_events(struct perf_probe_event *pevs, int npevs)
return ret;
}
+static int show_bootconfig_event(struct probe_trace_event *tev)
+{
+ struct probe_trace_point *tp = &tev->point;
+ struct strbuf buf;
+ char *ret = NULL;
+ int err;
+
+ if (strbuf_init(&buf, 32) < 0)
+ return -ENOMEM;
+
+ err = synthesize_kprobe_trace_def(tp, &buf);
+ if (err >= 0)
+ err = synthesize_probe_trace_args(tev, &buf);
+ if (err >= 0)
+ ret = strbuf_detach(&buf, NULL);
+ strbuf_release(&buf);
+
+ if (ret) {
+ printf("'%s'", ret);
+ free(ret);
+ }
+
+ return err;
+}
+
+int show_bootconfig_events(struct perf_probe_event *pevs, int npevs)
+{
+ struct strlist *namelist = strlist__new(NULL, NULL);
+ struct probe_trace_event *tev;
+ struct perf_probe_event *pev;
+ char *cur_name = NULL;
+ int i, j, ret = 0;
+
+ if (!namelist)
+ return -ENOMEM;
+
+ for (j = 0; j < npevs && !ret; j++) {
+ pev = &pevs[j];
+ if (pev->group && strcmp(pev->group, "probe"))
+ pr_warning("WARN: Group name %s is ignored\n", pev->group);
+ if (pev->uprobes) {
+ pr_warning("ERROR: Bootconfig doesn't support uprobes\n");
+ ret = -EINVAL;
+ break;
+ }
+ for (i = 0; i < pev->ntevs && !ret; i++) {
+ tev = &pev->tevs[i];
+ /* Skip if the symbol is out of .text or blacklisted */
+ if (!tev->point.symbol && !pev->uprobes)
+ continue;
+
+ /* Set new name for tev (and update namelist) */
+ ret = probe_trace_event__set_name(tev, pev,
+ namelist, true);
+ if (ret)
+ break;
+
+ if (!cur_name || strcmp(cur_name, tev->event)) {
+ printf("%sftrace.event.kprobes.%s.probe = ",
+ cur_name ? "\n" : "", tev->event);
+ cur_name = tev->event;
+ } else
+ printf(", ");
+ ret = show_bootconfig_event(tev);
+ }
+ }
+ printf("\n");
+ strlist__delete(namelist);
+
+ return ret;
+}
+
int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, ret = 0;
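
To make the new bootconfig output concrete: show_bootconfig_events() prints one 'ftrace.event.kprobes.<event>.probe =' key per event name, and show_bootconfig_event() quotes each synthesized kprobe definition, with consecutive definitions that end up under the same event name joined by commas. A sketch of the result for hypothetical probes (symbols and offsets invented):

        ftrace.event.kprobes.vfs_read.probe = 'vfs_read+0'
        ftrace.event.kprobes.initcall_probe.probe = 'do_one_initcall+0'
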
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 4f0eb3a20c36..65769d7949a3 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -15,6 +15,7 @@ struct probe_conf {
bool force_add;
bool no_inlines;
bool cache;
+ bool bootconfig;
int max_probes;
unsigned long magic_num;
};
@@ -163,6 +164,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int convert_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int apply_perf_probe_events(struct perf_probe_event *pevs, int npevs);
int show_probe_trace_events(struct perf_probe_event *pevs, int npevs);
+int show_bootconfig_events(struct perf_probe_event *pevs, int npevs);
void cleanup_perf_probe_events(struct perf_probe_event *pevs, int npevs);
struct strfilter;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 52273542e6ef..f9a6cbcd6415 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -22,6 +22,7 @@
#include "symbol.h"
#include "strbuf.h"
#include <api/fs/tracing_path.h>
+#include <api/fs/fs.h>
#include "probe-event.h"
#include "probe-file.h"
#include "session.h"
@@ -31,44 +32,78 @@
/* 4096 - 2 ('\n' + '\0') */
#define MAX_CMDLEN 4094
-static void print_open_warning(int err, bool uprobe)
+static bool print_common_warning(int err, bool readwrite)
{
- char sbuf[STRERR_BUFSIZE];
+ if (err == -EACCES)
+ pr_warning("No permission to %s tracefs.\nPlease %s\n",
+ readwrite ? "write" : "read",
+ readwrite ? "run this command again with sudo." :
+ "try 'sudo mount -o remount,mode=755 /sys/kernel/tracing/'");
+ else
+ return false;
- if (err == -ENOENT) {
- const char *config;
+ return true;
+}
- if (uprobe)
- config = "CONFIG_UPROBE_EVENTS";
- else
- config = "CONFIG_KPROBE_EVENTS";
+static bool print_configure_probe_event(int kerr, int uerr)
+{
+ const char *config, *file;
+
+ if (kerr == -ENOENT && uerr == -ENOENT) {
+ file = "{k,u}probe_events";
+ config = "CONFIG_KPROBE_EVENTS=y and CONFIG_UPROBE_EVENTS=y";
+ } else if (kerr == -ENOENT) {
+ file = "kprobe_events";
+ config = "CONFIG_KPROBE_EVENTS=y";
+ } else if (uerr == -ENOENT) {
+ file = "uprobe_events";
+ config = "CONFIG_UPROBE_EVENTS=y";
+ } else
+ return false;
- pr_warning("%cprobe_events file does not exist"
- " - please rebuild kernel with %s.\n",
- uprobe ? 'u' : 'k', config);
- } else if (err == -ENOTSUP)
- pr_warning("Tracefs or debugfs is not mounted.\n");
+ if (!debugfs__configured() && !tracefs__configured())
+ pr_warning("Debugfs or tracefs is not mounted\n"
+ "Please try 'sudo mount -t tracefs nodev /sys/kernel/tracing/'\n");
else
- pr_warning("Failed to open %cprobe_events: %s\n",
- uprobe ? 'u' : 'k',
- str_error_r(-err, sbuf, sizeof(sbuf)));
+ pr_warning("%s/%s does not exist.\nPlease rebuild kernel with %s.\n",
+ tracing_path_mount(), file, config);
+
+ return true;
+}
+
+static void print_open_warning(int err, bool uprobe, bool readwrite)
+{
+ char sbuf[STRERR_BUFSIZE];
+
+ if (print_common_warning(err, readwrite))
+ return;
+
+ if (print_configure_probe_event(uprobe ? 0 : err, uprobe ? err : 0))
+ return;
+
+ pr_warning("Failed to open %s/%cprobe_events: %s\n",
+ tracing_path_mount(), uprobe ? 'u' : 'k',
+ str_error_r(-err, sbuf, sizeof(sbuf)));
}
-static void print_both_open_warning(int kerr, int uerr)
+static void print_both_open_warning(int kerr, int uerr, bool readwrite)
{
- /* Both kprobes and uprobes are disabled, warn it. */
- if (kerr == -ENOTSUP && uerr == -ENOTSUP)
- pr_warning("Tracefs or debugfs is not mounted.\n");
- else if (kerr == -ENOENT && uerr == -ENOENT)
- pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
- "or/and CONFIG_UPROBE_EVENTS.\n");
- else {
- char sbuf[STRERR_BUFSIZE];
- pr_warning("Failed to open kprobe events: %s.\n",
+ char sbuf[STRERR_BUFSIZE];
+
+ if (kerr == uerr && print_common_warning(kerr, readwrite))
+ return;
+
+ if (print_configure_probe_event(kerr, uerr))
+ return;
+
+ if (kerr < 0)
+ pr_warning("Failed to open %s/kprobe_events: %s.\n",
+ tracing_path_mount(),
str_error_r(-kerr, sbuf, sizeof(sbuf)));
- pr_warning("Failed to open uprobe events: %s.\n",
+ if (uerr < 0)
+ pr_warning("Failed to open %s/uprobe_events: %s.\n",
+ tracing_path_mount(),
str_error_r(-uerr, sbuf, sizeof(sbuf)));
- }
}
int open_trace_file(const char *trace_file, bool readwrite)
@@ -109,7 +144,7 @@ int probe_file__open(int flag)
else
fd = open_kprobe_events(flag & PF_FL_RW);
if (fd < 0)
- print_open_warning(fd, flag & PF_FL_UPROBE);
+ print_open_warning(fd, flag & PF_FL_UPROBE, flag & PF_FL_RW);
return fd;
}
@@ -122,7 +157,7 @@ int probe_file__open_both(int *kfd, int *ufd, int flag)
*kfd = open_kprobe_events(flag & PF_FL_RW);
*ufd = open_uprobe_events(flag & PF_FL_RW);
if (*kfd < 0 && *ufd < 0) {
- print_both_open_warning(*kfd, *ufd);
+ print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);
return *kfd;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 0e608a5ef599..32a721b3e9a5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -371,9 +371,6 @@ static void perl_process_tracepoint(struct perf_sample *sample,
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
- scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->tep;
-
ENTER;
SAVETMPS;
PUSHMARK(SP);
@@ -456,8 +453,10 @@ static void perl_process_event_generic(union perf_event *event,
static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
+ scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
perl_process_tracepoint(sample, evsel, al);
perl_process_event_generic(event, sample, evsel);
}
@@ -474,11 +473,14 @@ static void run_start_sub(void)
/*
* Start trace script
*/
-static int perl_start_script(const char *script, int argc, const char **argv)
+static int perl_start_script(const char *script, int argc, const char **argv,
+ struct perf_session *session)
{
const char **command_line;
int i, err = 0;
+ scripting_context->session = session;
+
command_line = malloc((argc + 2) * sizeof(const char *));
command_line[0] = "";
command_line[1] = script;
@@ -750,6 +752,7 @@ sub print_backtrace\n\
struct scripting_ops perl_scripting_ops = {
.name = "Perl",
+ .dirname = "perl",
.start_script = perl_start_script,
.flush_script = perl_flush_script,
.stop_script = perl_stop_script,
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 4e4aa4c97ac5..164d2f45028c 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -726,9 +726,49 @@ static void set_regs_in_dict(PyObject *dict,
_PyUnicode_FromString(bf));
}
+static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
+ const char *dso_field, const char *sym_field,
+ const char *symoff_field)
+{
+ if (al->map) {
+ pydict_set_item_string_decref(dict, dso_field,
+ _PyUnicode_FromString(al->map->dso->name));
+ }
+ if (al->sym) {
+ pydict_set_item_string_decref(dict, sym_field,
+ _PyUnicode_FromString(al->sym->name));
+ pydict_set_item_string_decref(dict, symoff_field,
+ PyLong_FromUnsignedLong(get_offset(al->sym, al)));
+ }
+}
+
+static void set_sample_flags(PyObject *dict, u32 flags)
+{
+ const char *ch = PERF_IP_FLAG_CHARS;
+ char *p, str[33];
+
+ for (p = str; *ch; ch++, flags >>= 1) {
+ if (flags & 1)
+ *p++ = *ch;
+ }
+ *p = 0;
+ pydict_set_item_string_decref(dict, "flags", _PyUnicode_FromString(str));
+}
+
+static void python_process_sample_flags(struct perf_sample *sample, PyObject *dict_sample)
+{
+ char flags_disp[SAMPLE_FLAGS_BUF_SIZE];
+
+ set_sample_flags(dict_sample, sample->flags);
+ perf_sample__sprintf_flags(sample->flags, flags_disp, sizeof(flags_disp));
+ pydict_set_item_string_decref(dict_sample, "flags_disp",
+ _PyUnicode_FromString(flags_disp));
+}
+
static PyObject *get_perf_sample_dict(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al,
+ struct addr_location *addr_al,
PyObject *callchain)
{
PyObject *dict, *dict_sample, *brstack, *brstacksym;
@@ -772,14 +812,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
_PyUnicode_FromString(thread__comm_str(al->thread)));
- if (al->map) {
- pydict_set_item_string_decref(dict, "dso",
- _PyUnicode_FromString(al->map->dso->name));
- }
- if (al->sym) {
- pydict_set_item_string_decref(dict, "symbol",
- _PyUnicode_FromString(al->sym->name));
- }
+ set_sym_in_dict(dict, al, "dso", "symbol", "symoff");
pydict_set_item_string_decref(dict, "callchain", callchain);
@@ -789,6 +822,26 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
brstacksym = python_process_brstacksym(sample, al->thread);
pydict_set_item_string_decref(dict, "brstacksym", brstacksym);
+ pydict_set_item_string_decref(dict_sample, "cpumode",
+ _PyLong_FromLong((unsigned long)sample->cpumode));
+
+ if (addr_al) {
+ pydict_set_item_string_decref(dict_sample, "addr_correlates_sym",
+ PyBool_FromLong(1));
+ set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_symbol", "addr_symoff");
+ }
+
+ if (sample->flags)
+ python_process_sample_flags(sample, dict_sample);
+
+ /* Instructions per cycle (IPC) */
+ if (sample->insn_cnt && sample->cyc_cnt) {
+ pydict_set_item_string_decref(dict_sample, "insn_cnt",
+ PyLong_FromUnsignedLongLong(sample->insn_cnt));
+ pydict_set_item_string_decref(dict_sample, "cyc_cnt",
+ PyLong_FromUnsignedLongLong(sample->cyc_cnt));
+ }
+
set_regs_in_dict(dict, sample, evsel);
return dict;
@@ -796,7 +849,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
static void python_process_tracepoint(struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
struct tep_event *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
@@ -843,9 +897,6 @@ static void python_process_tracepoint(struct perf_sample *sample,
s = nsecs / NSEC_PER_SEC;
ns = nsecs - s * NSEC_PER_SEC;
- scripting_context->event_data = data;
- scripting_context->pevent = evsel->tp_format->tep;
-
context = _PyCapsule_New(scripting_context, NULL, NULL);
PyTuple_SetItem(t, n++, _PyUnicode_FromString(handler_name));
@@ -906,7 +957,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
PyTuple_SetItem(t, n++, dict);
if (get_argument_count(handler) == (int) n + 1) {
- all_entries_dict = get_perf_sample_dict(sample, evsel, al,
+ all_entries_dict = get_perf_sample_dict(sample, evsel, al, addr_al,
callchain);
PyTuple_SetItem(t, n++, all_entries_dict);
} else {
@@ -934,7 +985,7 @@ static PyObject *tuple_new(unsigned int sz)
return t;
}
-static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
+static int tuple_set_s64(PyObject *t, unsigned int pos, s64 val)
{
#if BITS_PER_LONG == 64
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
@@ -944,11 +995,37 @@ static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
#endif
}
+/*
+ * Databases support only signed 64-bit numbers, so even though we are
+ * exporting a u64, it must be stored as an s64.
+ */
+#define tuple_set_d64 tuple_set_s64
+
+static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
+{
+#if BITS_PER_LONG == 64
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
+#endif
+#if BITS_PER_LONG == 32
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
+#endif
+}
+
+static int tuple_set_u32(PyObject *t, unsigned int pos, u32 val)
+{
+ return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLong(val));
+}
+
static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
{
return PyTuple_SetItem(t, pos, _PyLong_FromLong(val));
}
+static int tuple_set_bool(PyObject *t, unsigned int pos, bool val)
+{
+ return PyTuple_SetItem(t, pos, PyBool_FromLong(val));
+}
+
static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
{
return PyTuple_SetItem(t, pos, _PyUnicode_FromString(s));
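
The comment above tuple_set_d64() deserves a concrete check: on the two's-complement targets perf supports, storing a u64 in a signed 64-bit database column preserves the bit pattern, so a consumer can cast back to recover the original value. A standalone sketch, with an invented id value:

/*
 * Standalone illustration (not part of the patch): a u64 database id above
 * INT64_MAX survives the signed round trip that tuple_set_d64() implies,
 * so the reading side can cast back to recover the original value.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t db_id = 0xfedcba9876543210ULL;   /* greater than INT64_MAX */
        int64_t stored = (int64_t)db_id;          /* what the database column holds */
        uint64_t recovered = (uint64_t)stored;    /* what a consumer casts back to */

        printf("stored %" PRId64 ", recovered 0x%" PRIx64 "\n", stored, recovered);
        return recovered == db_id ? 0 : 1;
}
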
@@ -967,7 +1044,7 @@ static int python_export_evsel(struct db_export *dbe, struct evsel *evsel)
t = tuple_new(2);
- tuple_set_u64(t, 0, evsel->db_id);
+ tuple_set_d64(t, 0, evsel->db_id);
tuple_set_string(t, 1, evsel__name(evsel));
call_object(tables->evsel_handler, t, "evsel_table");
@@ -985,7 +1062,7 @@ static int python_export_machine(struct db_export *dbe,
t = tuple_new(3);
- tuple_set_u64(t, 0, machine->db_id);
+ tuple_set_d64(t, 0, machine->db_id);
tuple_set_s32(t, 1, machine->pid);
tuple_set_string(t, 2, machine->root_dir ? machine->root_dir : "");
@@ -1004,9 +1081,9 @@ static int python_export_thread(struct db_export *dbe, struct thread *thread,
t = tuple_new(5);
- tuple_set_u64(t, 0, thread->db_id);
- tuple_set_u64(t, 1, machine->db_id);
- tuple_set_u64(t, 2, main_thread_db_id);
+ tuple_set_d64(t, 0, thread->db_id);
+ tuple_set_d64(t, 1, machine->db_id);
+ tuple_set_d64(t, 2, main_thread_db_id);
tuple_set_s32(t, 3, thread->pid_);
tuple_set_s32(t, 4, thread->tid);
@@ -1025,10 +1102,10 @@ static int python_export_comm(struct db_export *dbe, struct comm *comm,
t = tuple_new(5);
- tuple_set_u64(t, 0, comm->db_id);
+ tuple_set_d64(t, 0, comm->db_id);
tuple_set_string(t, 1, comm__str(comm));
- tuple_set_u64(t, 2, thread->db_id);
- tuple_set_u64(t, 3, comm->start);
+ tuple_set_d64(t, 2, thread->db_id);
+ tuple_set_d64(t, 3, comm->start);
tuple_set_s32(t, 4, comm->exec);
call_object(tables->comm_handler, t, "comm_table");
@@ -1046,9 +1123,9 @@ static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
t = tuple_new(3);
- tuple_set_u64(t, 0, db_id);
- tuple_set_u64(t, 1, comm->db_id);
- tuple_set_u64(t, 2, thread->db_id);
+ tuple_set_d64(t, 0, db_id);
+ tuple_set_d64(t, 1, comm->db_id);
+ tuple_set_d64(t, 2, thread->db_id);
call_object(tables->comm_thread_handler, t, "comm_thread_table");
@@ -1068,8 +1145,8 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
t = tuple_new(5);
- tuple_set_u64(t, 0, dso->db_id);
- tuple_set_u64(t, 1, machine->db_id);
+ tuple_set_d64(t, 0, dso->db_id);
+ tuple_set_d64(t, 1, machine->db_id);
tuple_set_string(t, 2, dso->short_name);
tuple_set_string(t, 3, dso->long_name);
tuple_set_string(t, 4, sbuild_id);
@@ -1090,10 +1167,10 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
t = tuple_new(6);
- tuple_set_u64(t, 0, *sym_db_id);
- tuple_set_u64(t, 1, dso->db_id);
- tuple_set_u64(t, 2, sym->start);
- tuple_set_u64(t, 3, sym->end);
+ tuple_set_d64(t, 0, *sym_db_id);
+ tuple_set_d64(t, 1, dso->db_id);
+ tuple_set_d64(t, 2, sym->start);
+ tuple_set_d64(t, 3, sym->end);
tuple_set_s32(t, 4, sym->binding);
tuple_set_string(t, 5, sym->name);
@@ -1130,30 +1207,30 @@ static void python_export_sample_table(struct db_export *dbe,
t = tuple_new(24);
- tuple_set_u64(t, 0, es->db_id);
- tuple_set_u64(t, 1, es->evsel->db_id);
- tuple_set_u64(t, 2, es->al->maps->machine->db_id);
- tuple_set_u64(t, 3, es->al->thread->db_id);
- tuple_set_u64(t, 4, es->comm_db_id);
- tuple_set_u64(t, 5, es->dso_db_id);
- tuple_set_u64(t, 6, es->sym_db_id);
- tuple_set_u64(t, 7, es->offset);
- tuple_set_u64(t, 8, es->sample->ip);
- tuple_set_u64(t, 9, es->sample->time);
+ tuple_set_d64(t, 0, es->db_id);
+ tuple_set_d64(t, 1, es->evsel->db_id);
+ tuple_set_d64(t, 2, es->al->maps->machine->db_id);
+ tuple_set_d64(t, 3, es->al->thread->db_id);
+ tuple_set_d64(t, 4, es->comm_db_id);
+ tuple_set_d64(t, 5, es->dso_db_id);
+ tuple_set_d64(t, 6, es->sym_db_id);
+ tuple_set_d64(t, 7, es->offset);
+ tuple_set_d64(t, 8, es->sample->ip);
+ tuple_set_d64(t, 9, es->sample->time);
tuple_set_s32(t, 10, es->sample->cpu);
- tuple_set_u64(t, 11, es->addr_dso_db_id);
- tuple_set_u64(t, 12, es->addr_sym_db_id);
- tuple_set_u64(t, 13, es->addr_offset);
- tuple_set_u64(t, 14, es->sample->addr);
- tuple_set_u64(t, 15, es->sample->period);
- tuple_set_u64(t, 16, es->sample->weight);
- tuple_set_u64(t, 17, es->sample->transaction);
- tuple_set_u64(t, 18, es->sample->data_src);
+ tuple_set_d64(t, 11, es->addr_dso_db_id);
+ tuple_set_d64(t, 12, es->addr_sym_db_id);
+ tuple_set_d64(t, 13, es->addr_offset);
+ tuple_set_d64(t, 14, es->sample->addr);
+ tuple_set_d64(t, 15, es->sample->period);
+ tuple_set_d64(t, 16, es->sample->weight);
+ tuple_set_d64(t, 17, es->sample->transaction);
+ tuple_set_d64(t, 18, es->sample->data_src);
tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK);
tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));
- tuple_set_u64(t, 21, es->call_path_id);
- tuple_set_u64(t, 22, es->sample->insn_cnt);
- tuple_set_u64(t, 23, es->sample->cyc_cnt);
+ tuple_set_d64(t, 21, es->call_path_id);
+ tuple_set_d64(t, 22, es->sample->insn_cnt);
+ tuple_set_d64(t, 23, es->sample->cyc_cnt);
call_object(tables->sample_handler, t, "sample_table");
@@ -1167,8 +1244,8 @@ static void python_export_synth(struct db_export *dbe, struct export_sample *es)
t = tuple_new(3);
- tuple_set_u64(t, 0, es->db_id);
- tuple_set_u64(t, 1, es->evsel->core.attr.config);
+ tuple_set_d64(t, 0, es->db_id);
+ tuple_set_d64(t, 1, es->evsel->core.attr.config);
tuple_set_bytes(t, 2, es->sample->raw_data, es->sample->raw_size);
call_object(tables->synth_handler, t, "synth_data");
@@ -1200,10 +1277,10 @@ static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
t = tuple_new(4);
- tuple_set_u64(t, 0, cp->db_id);
- tuple_set_u64(t, 1, parent_db_id);
- tuple_set_u64(t, 2, sym_db_id);
- tuple_set_u64(t, 3, cp->ip);
+ tuple_set_d64(t, 0, cp->db_id);
+ tuple_set_d64(t, 1, parent_db_id);
+ tuple_set_d64(t, 2, sym_db_id);
+ tuple_set_d64(t, 3, cp->ip);
call_object(tables->call_path_handler, t, "call_path_table");
@@ -1221,20 +1298,20 @@ static int python_export_call_return(struct db_export *dbe,
t = tuple_new(14);
- tuple_set_u64(t, 0, cr->db_id);
- tuple_set_u64(t, 1, cr->thread->db_id);
- tuple_set_u64(t, 2, comm_db_id);
- tuple_set_u64(t, 3, cr->cp->db_id);
- tuple_set_u64(t, 4, cr->call_time);
- tuple_set_u64(t, 5, cr->return_time);
- tuple_set_u64(t, 6, cr->branch_count);
- tuple_set_u64(t, 7, cr->call_ref);
- tuple_set_u64(t, 8, cr->return_ref);
- tuple_set_u64(t, 9, cr->cp->parent->db_id);
+ tuple_set_d64(t, 0, cr->db_id);
+ tuple_set_d64(t, 1, cr->thread->db_id);
+ tuple_set_d64(t, 2, comm_db_id);
+ tuple_set_d64(t, 3, cr->cp->db_id);
+ tuple_set_d64(t, 4, cr->call_time);
+ tuple_set_d64(t, 5, cr->return_time);
+ tuple_set_d64(t, 6, cr->branch_count);
+ tuple_set_d64(t, 7, cr->call_ref);
+ tuple_set_d64(t, 8, cr->return_ref);
+ tuple_set_d64(t, 9, cr->cp->parent->db_id);
tuple_set_s32(t, 10, cr->flags);
- tuple_set_u64(t, 11, cr->parent_db_id);
- tuple_set_u64(t, 12, cr->insn_count);
- tuple_set_u64(t, 13, cr->cyc_count);
+ tuple_set_d64(t, 11, cr->parent_db_id);
+ tuple_set_d64(t, 12, cr->insn_count);
+ tuple_set_d64(t, 13, cr->cyc_count);
call_object(tables->call_return_handler, t, "call_return_table");
@@ -1254,14 +1331,14 @@ static int python_export_context_switch(struct db_export *dbe, u64 db_id,
t = tuple_new(9);
- tuple_set_u64(t, 0, db_id);
- tuple_set_u64(t, 1, machine->db_id);
- tuple_set_u64(t, 2, sample->time);
+ tuple_set_d64(t, 0, db_id);
+ tuple_set_d64(t, 1, machine->db_id);
+ tuple_set_d64(t, 2, sample->time);
tuple_set_s32(t, 3, sample->cpu);
- tuple_set_u64(t, 4, th_out_id);
- tuple_set_u64(t, 5, comm_out_id);
- tuple_set_u64(t, 6, th_in_id);
- tuple_set_u64(t, 7, comm_in_id);
+ tuple_set_d64(t, 4, th_out_id);
+ tuple_set_d64(t, 5, comm_out_id);
+ tuple_set_d64(t, 6, th_in_id);
+ tuple_set_d64(t, 7, comm_in_id);
tuple_set_s32(t, 8, flags);
call_object(tables->context_switch_handler, t, "context_switch");
@@ -1281,7 +1358,8 @@ static int python_process_call_return(struct call_return *cr, u64 *parent_db_id,
static void python_process_general_event(struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
PyObject *handler, *t, *dict, *callchain;
static char handler_name[64];
@@ -1303,7 +1381,7 @@ static void python_process_general_event(struct perf_sample *sample,
/* ip unwinding */
callchain = python_process_callchain(sample, evsel, al);
- dict = get_perf_sample_dict(sample, evsel, al, callchain);
+ dict = get_perf_sample_dict(sample, evsel, al, addr_al, callchain);
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
@@ -1317,23 +1395,64 @@ static void python_process_general_event(struct perf_sample *sample,
static void python_process_event(union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
struct tables *tables = &tables_global;
+ scripting_context__update(scripting_context, event, sample, evsel, al, addr_al);
+
switch (evsel->core.attr.type) {
case PERF_TYPE_TRACEPOINT:
- python_process_tracepoint(sample, evsel, al);
+ python_process_tracepoint(sample, evsel, al, addr_al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
if (tables->db_export_mode)
- db_export__sample(&tables->dbe, event, sample, evsel, al);
+ db_export__sample(&tables->dbe, event, sample, evsel, al, addr_al);
else
- python_process_general_event(sample, evsel, al);
+ python_process_general_event(sample, evsel, al, addr_al);
}
}
+static void python_do_process_switch(union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ const char *handler_name = "context_switch";
+ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+ bool out_preempt = out && (event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT);
+ pid_t np_pid = -1, np_tid = -1;
+ PyObject *handler, *t;
+
+ handler = get_handler(handler_name);
+ if (!handler)
+ return;
+
+ if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
+ np_pid = event->context_switch.next_prev_pid;
+ np_tid = event->context_switch.next_prev_tid;
+ }
+
+ t = tuple_new(9);
+ if (!t)
+ return;
+
+ tuple_set_u64(t, 0, sample->time);
+ tuple_set_s32(t, 1, sample->cpu);
+ tuple_set_s32(t, 2, sample->pid);
+ tuple_set_s32(t, 3, sample->tid);
+ tuple_set_s32(t, 4, np_pid);
+ tuple_set_s32(t, 5, np_tid);
+ tuple_set_s32(t, 6, machine->pid);
+ tuple_set_bool(t, 7, out);
+ tuple_set_bool(t, 8, out_preempt);
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
+}
+
static void python_process_switch(union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
@@ -1342,6 +1461,44 @@ static void python_process_switch(union perf_event *event,
if (tables->db_export_mode)
db_export__switch(&tables->dbe, event, sample, machine);
+ else
+ python_do_process_switch(event, sample, machine);
+}
+
+static void python_process_auxtrace_error(struct perf_session *session __maybe_unused,
+ union perf_event *event)
+{
+ struct perf_record_auxtrace_error *e = &event->auxtrace_error;
+ u8 cpumode = e->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ const char *handler_name = "auxtrace_error";
+ unsigned long long tm = e->time;
+ const char *msg = e->msg;
+ PyObject *handler, *t;
+
+ handler = get_handler(handler_name);
+ if (!handler)
+ return;
+
+ if (!e->fmt) {
+ tm = 0;
+ msg = (const char *)&e->time;
+ }
+
+ t = tuple_new(9);
+
+ tuple_set_u32(t, 0, e->type);
+ tuple_set_u32(t, 1, e->code);
+ tuple_set_s32(t, 2, e->cpu);
+ tuple_set_s32(t, 3, e->pid);
+ tuple_set_s32(t, 4, e->tid);
+ tuple_set_u64(t, 5, e->ip);
+ tuple_set_u64(t, 6, tm);
+ tuple_set_string(t, 7, msg);
+ tuple_set_u32(t, 8, cpumode);
+
+ call_object(handler, t, handler_name);
+
+ Py_DECREF(t);
}
static void get_handler_name(char *str, size_t size,
@@ -1442,6 +1599,31 @@ static void python_process_stat_interval(u64 tstamp)
Py_DECREF(t);
}
+static int perf_script_context_init(void)
+{
+ PyObject *perf_script_context;
+ PyObject *perf_trace_context;
+ PyObject *dict;
+ int ret;
+
+ perf_trace_context = PyImport_AddModule("perf_trace_context");
+ if (!perf_trace_context)
+ return -1;
+ dict = PyModule_GetDict(perf_trace_context);
+ if (!dict)
+ return -1;
+
+ perf_script_context = _PyCapsule_New(scripting_context, NULL, NULL);
+ if (!perf_script_context)
+ return -1;
+
+ ret = PyDict_SetItemString(dict, "perf_script_context", perf_script_context);
+ if (!ret)
+ ret = PyDict_SetItemString(main_dict, "perf_script_context", perf_script_context);
+ Py_DECREF(perf_script_context);
+ return ret;
+}
+
static int run_start_sub(void)
{
main_module = PyImport_AddModule("__main__");
@@ -1454,6 +1636,9 @@ static int run_start_sub(void)
goto error;
Py_INCREF(main_dict);
+ if (perf_script_context_init())
+ goto error;
+
try_call_object("trace_begin", NULL);
return 0;
@@ -1589,7 +1774,8 @@ static void _free_command_line(wchar_t **command_line, int num)
/*
* Start trace script
*/
-static int python_start_script(const char *script, int argc, const char **argv)
+static int python_start_script(const char *script, int argc, const char **argv,
+ struct perf_session *session)
{
struct tables *tables = &tables_global;
#if PY_MAJOR_VERSION < 3
@@ -1605,6 +1791,7 @@ static int python_start_script(const char *script, int argc, const char **argv)
int i, err = 0;
FILE *fp;
+ scripting_context->session = session;
#if PY_MAJOR_VERSION < 3
command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script;
@@ -1876,11 +2063,13 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
struct scripting_ops python_scripting_ops = {
.name = "Python",
+ .dirname = "python",
.start_script = python_start_script,
.flush_script = python_flush_script,
.stop_script = python_stop_script,
.process_event = python_process_event,
.process_switch = python_process_switch,
+ .process_auxtrace_error = python_process_auxtrace_error,
.process_stat = python_process_stat,
.process_stat_interval = python_process_stat_interval,
.generate_script = python_generate_script,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index e59242c361ce..e9c929a39973 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -301,8 +301,11 @@ void perf_session__delete(struct perf_session *session)
perf_session__release_decomp_events(session);
perf_env__exit(&session->header.env);
machines__exit(&session->machines);
- if (session->data)
+ if (session->data) {
+ if (perf_data__is_read(session->data))
+ evlist__delete(session->evlist);
perf_data__close(session->data);
+ }
free(session);
}
@@ -2156,6 +2159,7 @@ struct reader {
u64 data_size;
u64 data_offset;
reader_cb_t process;
+ bool in_place_update;
};
static int
@@ -2189,7 +2193,9 @@ reader__process_events(struct reader *rd, struct perf_session *session,
mmap_prot = PROT_READ;
mmap_flags = MAP_SHARED;
- if (session->header.needs_swap) {
+ if (rd->in_place_update) {
+ mmap_prot |= PROT_WRITE;
+ } else if (session->header.needs_swap) {
mmap_prot |= PROT_WRITE;
mmap_flags = MAP_PRIVATE;
}
@@ -2275,6 +2281,7 @@ static int __perf_session__process_events(struct perf_session *session)
.data_size = session->header.data_size,
.data_offset = session->header.data_offset,
.process = process_simple,
+ .in_place_update = session->data->in_place_update,
};
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
index c29edaaca863..476e99896d5e 100644
--- a/tools/perf/util/srccode.c
+++ b/tools/perf/util/srccode.c
@@ -97,8 +97,7 @@ static struct srcfile *find_srcfile(char *fn)
hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
if (!strcmp(fn, h->fn)) {
/* Move to front */
- list_del(&h->nd);
- list_add(&h->nd, &srcfile_list);
+ list_move(&h->nd, &srcfile_list);
return h;
}
}
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index ca326f98c7a2..c588a6b7a8db 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -825,11 +825,11 @@ static void counter_aggr_cb(struct perf_stat_config *config __maybe_unused,
bool first __maybe_unused)
{
struct caggr_data *cd = data;
- struct perf_stat_evsel *ps = counter->stats;
+ struct perf_counts_values *aggr = &counter->counts->aggr;
- cd->avg += avg_stats(&ps->res_stats[0]);
- cd->avg_enabled += avg_stats(&ps->res_stats[1]);
- cd->avg_running += avg_stats(&ps->res_stats[2]);
+ cd->avg += aggr->val;
+ cd->avg_enabled += aggr->ena;
+ cd->avg_running += aggr->run;
}
/*
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2db46b9bebd0..d3ec2624e036 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -437,18 +437,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
aggr->val = aggr->ena = aggr->run = 0;
- /*
- * We calculate counter's data every interval,
- * and the display code shows ps->res_stats
- * avg value. We need to zero the stats for
- * interval mode, otherwise overall avg running
- * averages will be shown for each interval.
- */
- if (config->interval || config->summary) {
- for (i = 0; i < 3; i++)
- init_stats(&ps->res_stats[i]);
- }
-
if (counter->per_pkg)
evsel__zero_per_pkg(counter);
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 714581b0de65..7172ca05265f 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -12,10 +12,31 @@
#include "debug.h"
#include "trace-event.h"
+#include "event.h"
+#include "evsel.h"
#include <linux/zalloc.h>
struct scripting_context *scripting_context;
+void scripting_context__update(struct scripting_context *c,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct addr_location *al,
+ struct addr_location *addr_al)
+{
+ c->event_data = sample->raw_data;
+ if (evsel->tp_format)
+ c->pevent = evsel->tp_format->tep;
+ else
+ c->pevent = NULL;
+ c->event = event;
+ c->sample = sample;
+ c->evsel = evsel;
+ c->al = al;
+ c->addr_al = addr_al;
+}
+
static int flush_script_unsupported(void)
{
return 0;
@@ -29,7 +50,8 @@ static int stop_script_unsupported(void)
static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct evsel *evsel __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct addr_location *al __maybe_unused,
+ struct addr_location *addr_al __maybe_unused)
{
}
@@ -44,7 +66,8 @@ static void print_python_unsupported_msg(void)
static int python_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
- const char **argv __maybe_unused)
+ const char **argv __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
print_python_unsupported_msg();
@@ -63,6 +86,7 @@ static int python_generate_script_unsupported(struct tep_handle *pevent
struct scripting_ops python_scripting_unsupported_ops = {
.name = "Python",
+ .dirname = "python",
.start_script = python_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
@@ -108,7 +132,8 @@ static void print_perl_unsupported_msg(void)
static int perl_start_script_unsupported(const char *script __maybe_unused,
int argc __maybe_unused,
- const char **argv __maybe_unused)
+ const char **argv __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
print_perl_unsupported_msg();
@@ -126,6 +151,7 @@ static int perl_generate_script_unsupported(struct tep_handle *pevent
struct scripting_ops perl_scripting_unsupported_ops = {
.name = "Perl",
+ .dirname = "perl",
.start_script = perl_start_script_unsupported,
.flush_script = flush_script_unsupported,
.stop_script = stop_script_unsupported,
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 72fdf2a3577c..54aadeedf28c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -11,6 +11,7 @@ union perf_event;
struct perf_tool;
struct thread;
struct tep_plugin_list;
+struct evsel;
struct trace_event {
struct tep_handle *pevent;
@@ -71,16 +72,21 @@ struct perf_stat_config;
struct scripting_ops {
const char *name;
- int (*start_script) (const char *script, int argc, const char **argv);
+ const char *dirname; /* For script path .../scripts/<dirname>/... */
+ int (*start_script)(const char *script, int argc, const char **argv,
+ struct perf_session *session);
int (*flush_script) (void);
int (*stop_script) (void);
void (*process_event) (union perf_event *event,
struct perf_sample *sample,
struct evsel *evsel,
- struct addr_location *al);
+ struct addr_location *al,
+ struct addr_location *addr_al);
void (*process_switch)(union perf_event *event,
struct perf_sample *sample,
struct machine *machine);
+ void (*process_auxtrace_error)(struct perf_session *session,
+ union perf_event *event);
void (*process_stat)(struct perf_stat_config *config,
struct evsel *evsel, u64 tstamp);
void (*process_stat_interval)(u64 tstamp);
@@ -91,16 +97,35 @@ extern unsigned int scripting_max_stack;
int script_spec_register(const char *spec, struct scripting_ops *ops);
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine);
+
void setup_perl_scripting(void);
void setup_python_scripting(void);
struct scripting_context {
struct tep_handle *pevent;
void *event_data;
+ union perf_event *event;
+ struct perf_sample *sample;
+ struct evsel *evsel;
+ struct addr_location *al;
+ struct addr_location *addr_al;
+ struct perf_session *session;
};
+void scripting_context__update(struct scripting_context *scripting_context,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct addr_location *al,
+ struct addr_location *addr_al);
+
int common_pc(struct scripting_context *context);
int common_flags(struct scripting_context *context);
int common_lock_depth(struct scripting_context *context);
+#define SAMPLE_FLAGS_BUF_SIZE 64
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz);
+
#endif /* _PERF_UTIL_TRACE_EVENT_H */