diff options
461 files changed, 18567 insertions, 13982 deletions
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html index 18f179807563..c30c1957c7e6 100644 --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html @@ -155,8 +155,7 @@ keeping lock contention under control at all tree levels regardless of the level of loading on the system. </p><p>RCU updaters wait for normal grace periods by registering -RCU callbacks, either directly via <tt>call_rcu()</tt> and -friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>), +RCU callbacks, either directly via <tt>call_rcu()</tt> or indirectly via <tt>synchronize_rcu()</tt> and friends. RCU callbacks are represented by <tt>rcu_head</tt> structures, which are queued on <tt>rcu_data</tt> structures while they are diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html index 19e7a5fb6b73..57300db4b5ff 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html @@ -56,6 +56,7 @@ sections. RCU-preempt Expedited Grace Periods</a></h2> <p> +<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt. The overall flow of the handling of a given CPU by an RCU-preempt expedited grace period is shown in the following diagram: @@ -139,6 +140,7 @@ or offline, among other things. RCU-sched Expedited Grace Periods</a></h2> <p> +<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched. The overall flow of the handling of a given CPU by an RCU-sched expedited grace period is shown in the following diagram: @@ -146,7 +148,7 @@ expedited grace period is shown in the following diagram: <p> As with RCU-preempt, RCU-sched's -<tt>synchronize_sched_expedited()</tt> ignores offline and +<tt>synchronize_rcu_expedited()</tt> ignores offline and idle CPUs, again because they are in remotely detectable quiescent states. However, because the diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html index 8d21af02b1f0..c64f8d26609f 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html @@ -34,12 +34,11 @@ Similarly, any code that happens before the beginning of a given RCU grace period is guaranteed to see the effects of all accesses following the end of that grace period that are within RCU read-side critical sections. -<p>This guarantee is particularly pervasive for <tt>synchronize_sched()</tt>, -for which RCU-sched read-side critical sections include any region +<p>Note well that RCU-sched read-side critical sections include any region of code for which preemption is disabled. Given that each individual machine instruction can be thought of as an extremely small region of preemption-disabled code, one can think of -<tt>synchronize_sched()</tt> as <tt>smp_mb()</tt> on steroids. +<tt>synchronize_rcu()</tt> as <tt>smp_mb()</tt> on steroids. 
<p>RCU updaters use this guarantee by splitting their updates into two phases, one of which is executed before the grace period and diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt index 687777f83b23..881353fd5bff 100644 --- a/Documentation/RCU/NMI-RCU.txt +++ b/Documentation/RCU/NMI-RCU.txt @@ -81,18 +81,19 @@ currently executing on some other CPU. We therefore cannot free up any data structures used by the old NMI handler until execution of it completes on all other CPUs. -One way to accomplish this is via synchronize_sched(), perhaps as +One way to accomplish this is via synchronize_rcu(), perhaps as follows: unset_nmi_callback(); - synchronize_sched(); + synchronize_rcu(); kfree(my_nmi_data); -This works because synchronize_sched() blocks until all CPUs complete -any preemption-disabled segments of code that they were executing. -Since NMI handlers disable preemption, synchronize_sched() is guaranteed +This works because (as of v4.20) synchronize_rcu() blocks until all +CPUs complete any preemption-disabled segments of code that they were +executing. +Since NMI handlers disable preemption, synchronize_rcu() is guaranteed not to return until all ongoing NMI handlers exit. It is therefore safe -to free up the handler's data as soon as synchronize_sched() returns. +to free up the handler's data as soon as synchronize_rcu() returns. Important note: for this to work, the architecture in question must invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively. diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt index 90ec5341ee98..53bde717017b 100644 --- a/Documentation/RCU/UP.txt +++ b/Documentation/RCU/UP.txt @@ -86,10 +86,8 @@ even on a UP system. So do not do it! Even on a UP system, the RCU infrastructure -must- respect grace periods, and -must- invoke callbacks from a known environment in which no locks are held. -It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return -immediately on an UP system. It is also safe for synchronize_rcu() -to return immediately on UP systems, except when running preemptable -RCU. +Note that it -is- safe for synchronize_rcu() to return immediately on +UP systems, including !PREEMPT SMP builds running on UP systems. Quick Quiz #3: Why can't synchronize_rcu() return immediately on UP systems running preemptable RCU? diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt index 6f469864d9f5..e98ff261a438 100644 --- a/Documentation/RCU/checklist.txt +++ b/Documentation/RCU/checklist.txt @@ -182,16 +182,13 @@ over a rather long period of time, but improvements are always welcome! when publicizing a pointer to a structure that can be traversed by an RCU read-side critical section. -5. If call_rcu(), or a related primitive such as call_rcu_bh(), - call_rcu_sched(), or call_srcu() is used, the callback function - will be called from softirq context. In particular, it cannot - block. +5. If call_rcu() or call_srcu() is used, the callback function will + be called from softirq context. In particular, it cannot block. -6. Since synchronize_rcu() can block, it cannot be called from - any sort of irq context. The same rule applies for - synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(), - synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(), - synchronize_sched_expedite(), and synchronize_srcu_expedited(). +6. Since synchronize_rcu() can block, it cannot be called + from any sort of irq context. 
The same rule applies + for synchronize_srcu(), synchronize_rcu_expedited(), and + synchronize_srcu_expedited(). The expedited forms of these primitives have the same semantics as the non-expedited forms, but expediting is both expensive and @@ -212,20 +209,20 @@ over a rather long period of time, but improvements are always welcome! of the system, especially to real-time workloads running on the rest of the system. -7. If the updater uses call_rcu() or synchronize_rcu(), then the - corresponding readers must use rcu_read_lock() and - rcu_read_unlock(). If the updater uses call_rcu_bh() or - synchronize_rcu_bh(), then the corresponding readers must - use rcu_read_lock_bh() and rcu_read_unlock_bh(). If the - updater uses call_rcu_sched() or synchronize_sched(), then - the corresponding readers must disable preemption, possibly - by calling rcu_read_lock_sched() and rcu_read_unlock_sched(). - If the updater uses synchronize_srcu() or call_srcu(), then - the corresponding readers must use srcu_read_lock() and +7. As of v4.20, a given kernel implements only one RCU flavor, + which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y. + If the updater uses call_rcu() or synchronize_rcu(), + then the corresponding readers my use rcu_read_lock() and + rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(), + or any pair of primitives that disables and re-enables preemption, + for example, rcu_read_lock_sched() and rcu_read_unlock_sched(). + If the updater uses synchronize_srcu() or call_srcu(), + then the corresponding readers must use srcu_read_lock() and srcu_read_unlock(), and with the same srcu_struct. The rules for the expedited primitives are the same as for their non-expedited counterparts. Mixing things up will result in confusion and - broken kernels. + broken kernels, and has even resulted in an exploitable security + issue. One exception to this rule: rcu_read_lock() and rcu_read_unlock() may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh() @@ -288,8 +285,7 @@ over a rather long period of time, but improvements are always welcome! d. Periodically invoke synchronize_rcu(), permitting a limited number of updates per grace period. - The same cautions apply to call_rcu_bh(), call_rcu_sched(), - call_srcu(), and kfree_rcu(). + The same cautions apply to call_srcu() and kfree_rcu(). Note that although these primitives do take action to avoid memory exhaustion when any given CPU has too many callbacks, a determined @@ -322,7 +318,7 @@ over a rather long period of time, but improvements are always welcome! 11. Any lock acquired by an RCU callback must be acquired elsewhere with softirq disabled, e.g., via spin_lock_irqsave(), - spin_lock_bh(), etc. Failing to disable irq on a given + spin_lock_bh(), etc. Failing to disable softirq on a given acquisition of that lock will result in deadlock as soon as the RCU softirq handler happens to run your RCU callback while interrupting that acquisition's critical section. @@ -335,13 +331,16 @@ over a rather long period of time, but improvements are always welcome! must use whatever locking or other synchronization is required to safely access and/or modify that data structure. - RCU callbacks are -usually- executed on the same CPU that executed - the corresponding call_rcu(), call_rcu_bh(), or call_rcu_sched(), - but are by -no- means guaranteed to be. For example, if a given - CPU goes offline while having an RCU callback pending, then that - RCU callback will execute on some surviving CPU. 
(If this was - not the case, a self-spawning RCU callback would prevent the - victim CPU from ever going offline.) + Do not assume that RCU callbacks will be executed on the same + CPU that executed the corresponding call_rcu() or call_srcu(). + For example, if a given CPU goes offline while having an RCU + callback pending, then that RCU callback will execute on some + surviving CPU. (If this was not the case, a self-spawning RCU + callback would prevent the victim CPU from ever going offline.) + Furthermore, CPUs designated by rcu_nocbs= might well -always- + have their RCU callbacks executed on some other CPUs, in fact, + for some real-time workloads, this is the whole point of using + the rcu_nocbs= kernel boot parameter. 13. Unlike other forms of RCU, it -is- permissible to block in an SRCU read-side critical section (demarked by srcu_read_lock() @@ -381,11 +380,11 @@ over a rather long period of time, but improvements are always welcome! SRCU's expedited primitive (synchronize_srcu_expedited()) never sends IPIs to other CPUs, so it is easier on - real-time workloads than is synchronize_rcu_expedited(), - synchronize_rcu_bh_expedited() or synchronize_sched_expedited(). + real-time workloads than is synchronize_rcu_expedited(). - Note that rcu_dereference() and rcu_assign_pointer() relate to - SRCU just as they do to other forms of RCU. + Note that rcu_assign_pointer() relates to SRCU just as it does to + other forms of RCU, but instead of rcu_dereference() you should + use srcu_dereference() in order to avoid lockdep splats. 14. The whole point of call_rcu(), synchronize_rcu(), and friends is to wait until all pre-existing readers have finished before @@ -405,6 +404,9 @@ over a rather long period of time, but improvements are always welcome! read-side critical sections. It is the responsibility of the RCU update-side primitives to deal with this. + For SRCU readers, you can use smp_mb__after_srcu_read_unlock() + immediately after an srcu_read_unlock() to get a full barrier. + 16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the __rcu sparse checks to validate your RCU code. These can help find problems as follows: @@ -428,22 +430,19 @@ over a rather long period of time, but improvements are always welcome! These debugging aids can help you find problems that are otherwise extremely difficult to spot. -17. If you register a callback using call_rcu(), call_rcu_bh(), - call_rcu_sched(), or call_srcu(), and pass in a function defined - within a loadable module, then it in necessary to wait for - all pending callbacks to be invoked after the last invocation - and before unloading that module. Note that it is absolutely - -not- sufficient to wait for a grace period! The current (say) - synchronize_rcu() implementation waits only for all previous - callbacks registered on the CPU that synchronize_rcu() is running - on, but it is -not- guaranteed to wait for callbacks registered - on other CPUs. +17. If you register a callback using call_rcu() or call_srcu(), and + pass in a function defined within a loadable module, then it in + necessary to wait for all pending callbacks to be invoked after + the last invocation and before unloading that module. Note that + it is absolutely -not- sufficient to wait for a grace period! + The current (say) synchronize_rcu() implementation is -not- + guaranteed to wait for callbacks registered on other CPUs. + Or even on the current CPU if that CPU recently went offline + and came back online. 
You instead need to use one of the barrier functions: o call_rcu() -> rcu_barrier() - o call_rcu_bh() -> rcu_barrier() - o call_rcu_sched() -> rcu_barrier() o call_srcu() -> srcu_barrier() However, these barrier functions are absolutely -not- guaranteed diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt index 721b3e426515..c818cf65c5a9 100644 --- a/Documentation/RCU/rcu.txt +++ b/Documentation/RCU/rcu.txt @@ -52,10 +52,10 @@ o If I am running on a uniprocessor kernel, which can only do one o How can I see where RCU is currently used in the Linux kernel? Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu", - "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh", - "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu", - "synchronize_net", "synchronize_srcu", and the other RCU - primitives. Or grab one of the cscope databases from: + "rcu_read_lock_bh", "rcu_read_unlock_bh", "srcu_read_lock", + "srcu_read_unlock", "synchronize_rcu", "synchronize_net", + "synchronize_srcu", and the other RCU primitives. Or grab one + of the cscope databases from: http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt index ab96227bad42..bf699e8cfc75 100644 --- a/Documentation/RCU/rcu_dereference.txt +++ b/Documentation/RCU/rcu_dereference.txt @@ -351,3 +351,106 @@ garbage values. In short, rcu_dereference() is -not- optional when you are going to dereference the resulting pointer. + + +WHICH MEMBER OF THE rcu_dereference() FAMILY SHOULD YOU USE? + +First, please avoid using rcu_dereference_raw() and also please avoid +using rcu_dereference_check() and rcu_dereference_protected() with a +second argument with a constant value of 1 (or true, for that matter). +With that caution out of the way, here is some guidance for which +member of the rcu_dereference() to use in various situations: + +1. If the access needs to be within an RCU read-side critical + section, use rcu_dereference(). With the new consolidated + RCU flavors, an RCU read-side critical section is entered + using rcu_read_lock(), anything that disables bottom halves, + anything that disables interrupts, or anything that disables + preemption. + +2. If the access might be within an RCU read-side critical section + on the one hand, or protected by (say) my_lock on the other, + use rcu_dereference_check(), for example: + + p1 = rcu_dereference_check(p->rcu_protected_pointer, + lockdep_is_held(&my_lock)); + + +3. If the access might be within an RCU read-side critical section + on the one hand, or protected by either my_lock or your_lock on + the other, again use rcu_dereference_check(), for example: + + p1 = rcu_dereference_check(p->rcu_protected_pointer, + lockdep_is_held(&my_lock) || + lockdep_is_held(&your_lock)); + +4. If the access is on the update side, so that it is always protected + by my_lock, use rcu_dereference_protected(): + + p1 = rcu_dereference_protected(p->rcu_protected_pointer, + lockdep_is_held(&my_lock)); + + This can be extended to handle multiple locks as in #3 above, + and both can be extended to check other conditions as well. + +5. If the protection is supplied by the caller, and is thus unknown + to this code, that is the rare case when rcu_dereference_raw() + is appropriate. In addition, rcu_dereference_raw() might be + appropriate when the lockdep expression would be excessively + complex, except that a better approach in that case might be to + take a long hard look at your synchronization design. 
Still, + there are data-locking cases where any one of a very large number + of locks or reference counters suffices to protect the pointer, + so rcu_dereference_raw() does have its place. + + However, its place is probably quite a bit smaller than one + might expect given the number of uses in the current kernel. + Ditto for its synonym, rcu_dereference_check( ... , 1), and + its close relative, rcu_dereference_protected(... , 1). + + +SPARSE CHECKING OF RCU-PROTECTED POINTERS + +The sparse static-analysis tool checks for direct access to RCU-protected +pointers, which can result in "interesting" bugs due to compiler +optimizations involving invented loads and perhaps also load tearing. +For example, suppose someone mistakenly does something like this: + + p = q->rcu_protected_pointer; + do_something_with(p->a); + do_something_else_with(p->b); + +If register pressure is high, the compiler might optimize "p" out +of existence, transforming the code to something like this: + + do_something_with(q->rcu_protected_pointer->a); + do_something_else_with(q->rcu_protected_pointer->b); + +This could fatally disappoint your code if q->rcu_protected_pointer +changed in the meantime. Nor is this a theoretical problem: Exactly +this sort of bug cost Paul E. McKenney (and several of his innocent +colleagues) a three-day weekend back in the early 1990s. + +Load tearing could of course result in dereferencing a mashup of a pair +of pointers, which also might fatally disappoint your code. + +These problems could have been avoided simply by making the code instead +read as follows: + + p = rcu_dereference(q->rcu_protected_pointer); + do_something_with(p->a); + do_something_else_with(p->b); + +Unfortunately, these sorts of bugs can be extremely hard to spot during +review. This is where the sparse tool comes into play, along with the +"__rcu" marker. If you mark a pointer declaration, whether in a structure +or as a formal parameter, with "__rcu", which tells sparse to complain if +this pointer is accessed directly. It will also cause sparse to complain +if a pointer not marked with "__rcu" is accessed using rcu_dereference() +and friends. For example, ->rcu_protected_pointer might be declared as +follows: + + struct foo __rcu *rcu_protected_pointer; + +Use of "__rcu" is opt-in. If you choose not to use it, then you should +ignore the sparse warnings. diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt index 5d7759071a3e..a2782df69732 100644 --- a/Documentation/RCU/rcubarrier.txt +++ b/Documentation/RCU/rcubarrier.txt @@ -83,16 +83,15 @@ Pseudo-code using rcu_barrier() is as follows: 2. Execute rcu_barrier(). 3. Allow the module to be unloaded. -There are also rcu_barrier_bh(), rcu_barrier_sched(), and srcu_barrier() -functions for the other flavors of RCU, and you of course must match -the flavor of rcu_barrier() with that of call_rcu(). If your module -uses multiple flavors of call_rcu(), then it must also use multiple +There is also an srcu_barrier() function for SRCU, and you of course +must match the flavor of rcu_barrier() with that of call_rcu(). If your +module uses multiple flavors of call_rcu(), then it must also use multiple flavors of rcu_barrier() when unloading that module. 
For example, if -it uses call_rcu_bh(), call_srcu() on srcu_struct_1, and call_srcu() on +it uses call_rcu(), call_srcu() on srcu_struct_1, and call_srcu() on srcu_struct_2(), then the following three lines of code will be required when unloading: - 1 rcu_barrier_bh(); + 1 rcu_barrier(); 2 srcu_barrier(&srcu_struct_1); 3 srcu_barrier(&srcu_struct_2); @@ -185,12 +184,12 @@ module invokes call_rcu() from timers, you will need to first cancel all the timers, and only then invoke rcu_barrier() to wait for any remaining RCU callbacks to complete. -Of course, if you module uses call_rcu_bh(), you will need to invoke -rcu_barrier_bh() before unloading. Similarly, if your module uses -call_rcu_sched(), you will need to invoke rcu_barrier_sched() before -unloading. If your module uses call_rcu(), call_rcu_bh(), -and- -call_rcu_sched(), then you will need to invoke each of rcu_barrier(), -rcu_barrier_bh(), and rcu_barrier_sched(). +Of course, if you module uses call_rcu(), you will need to invoke +rcu_barrier() before unloading. Similarly, if your module uses +call_srcu(), you will need to invoke srcu_barrier() before unloading, +and on the same srcu_struct structure. If your module uses call_rcu() +-and- call_srcu(), then you will need to invoke rcu_barrier() -and- +srcu_barrier(). Implementing rcu_barrier() @@ -223,8 +222,8 @@ shown below. Note that the final "1" in on_each_cpu()'s argument list ensures that all the calls to rcu_barrier_func() will have completed before on_each_cpu() returns. Line 9 then waits for the completion. -This code was rewritten in 2008 to support rcu_barrier_bh() and -rcu_barrier_sched() in addition to the original rcu_barrier(). +This code was rewritten in 2008 and several times thereafter, but this +still gives the general idea. The rcu_barrier_func() runs on each CPU, where it invokes call_rcu() to post an RCU callback, as follows: diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 1ace20815bb1..981651a8b65d 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -310,7 +310,7 @@ reader, updater, and reclaimer. rcu_assign_pointer() - +--------+ + +--------+ +---------------------->| reader |---------+ | +--------+ | | | | @@ -318,12 +318,12 @@ reader, updater, and reclaimer. | | | rcu_read_lock() | | | rcu_read_unlock() | rcu_dereference() | | - +---------+ | | - | updater |<---------------------+ | - +---------+ V + +---------+ | | + | updater |<----------------+ | + +---------+ V | +-----------+ +----------------------------------->| reclaimer | - +-----------+ + +-----------+ Defer: synchronize_rcu() & call_rcu() diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 2b8ee90bb644..b7e23e9d1770 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2544,6 +2544,38 @@ in the "bleeding edge" mini2440 support kernel at http://repo.or.cz/w/linux-2.6/mini2440.git + mitigations= + [X86,PPC,S390] Control optional mitigations for CPU + vulnerabilities. This is a set of curated, + arch-independent options, each of which is an + aggregation of existing arch-specific options. + + off + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. 
+ Equivalent to: nopti [X86,PPC] + nospectre_v1 [PPC] + nobp=0 [S390] + nospectre_v2 [X86,PPC,S390] + spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] + l1tf=off [X86] + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT + enabled, even if it's vulnerable. This is for + users who don't want to be surprised by SMT + getting disabled across kernel upgrades, or who + have other ways of avoiding SMT-based attacks. + Equivalent to: (default behavior) + + auto,nosmt + Mitigate all CPU vulnerabilities, disabling SMT + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. + Equivalent to: l1tf=flush,nosmt [X86] + mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this parameter allows control of the logging verbosity for @@ -3623,7 +3655,9 @@ see CONFIG_RAS_CEC help text. rcu_nocbs= [KNL] - The argument is a cpu list, as described above. + The argument is a cpu list, as described above, + except that the string "all" can be used to + specify every CPU on the system. In kernels built with CONFIG_RCU_NOCB_CPU=y, set the specified list of CPUs to be no-callback CPUs. diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt index 913396ac5824..dca3fb0554db 100644 --- a/Documentation/atomic_t.txt +++ b/Documentation/atomic_t.txt @@ -56,6 +56,23 @@ Barriers: smp_mb__{before,after}_atomic() +TYPES (signed vs unsigned) +----- + +While atomic_t, atomic_long_t and atomic64_t use int, long and s64 +respectively (for hysterical raisins), the kernel uses -fno-strict-overflow +(which implies -fwrapv) and defines signed overflow to behave like +2s-complement. + +Therefore, an explicitly unsigned variant of the atomic ops is strictly +unnecessary and we can simply cast, there is no UB. + +There was a bug in UBSAN prior to GCC-8 that would generate UB warnings for +signed types. + +With this we also conform to the C/C++ _Atomic behaviour and things like +P1236R1. + SEMANTICS --------- diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst index 6eb9d3f090cd..93cb65d52720 100644 --- a/Documentation/core-api/cachetlb.rst +++ b/Documentation/core-api/cachetlb.rst @@ -101,16 +101,6 @@ changes occur: translations for software managed TLB configurations. The sparc64 port currently does this. -6) ``void tlb_migrate_finish(struct mm_struct *mm)`` - - This interface is called at the end of an explicit - process migration. This interface provides a hook - to allow a platform to update TLB or context-specific - information for the address space. - - The ia64 sn2 platform is one example of a platform - that uses this interface. - Next, we have the cache flushing interfaces. In general, when Linux is changing an existing virtual-->physical mapping to a new value, the sequence will be in one of the following forms:: diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 10f4499e677c..ee60e519438a 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -243,10 +243,10 @@ Optimization ^^^^^^^^^^^^ The Kprobe-optimizer doesn't insert the jump instruction immediately; -rather, it calls synchronize_sched() for safety first, because it's +rather, it calls synchronize_rcu() for safety first, because it's possible for a CPU to be interrupted in the middle of executing the -optimized region [3]_. As you know, synchronize_sched() can ensure -that all interruptions that were active when synchronize_sched() +optimized region [3]_. 
As you know, synchronize_rcu() can ensure +that all interruptions that were active when synchronize_rcu() was called are done, but only if CONFIG_PREEMPT=n. So, this version of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_. diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index 7f01fb1c1084..db0b9d8619f1 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -493,10 +493,8 @@ CPU ì—게 ê¸°ëŒ€í• ìˆ˜ 있는 ìµœì†Œí•œì˜ ë³´ìž¥ì‚¬í• ëª‡ê°€ì§€ê°€ 있습니 ì´ íƒ€ìž…ì˜ ì˜¤í¼ë ˆì´ì…˜ì€ ë‹¨ë°©í–¥ì˜ íˆ¬ê³¼ì„± 배리어처럼 ë™ìž‘합니다. ACQUIRE 오í¼ë ˆì´ì…˜ ë’¤ì˜ ëª¨ë“ ë©”ëª¨ë¦¬ 오í¼ë ˆì´ì…˜ë“¤ì´ ACQUIRE 오í¼ë ˆì´ì…˜ í›„ì— ì¼ì–´ë‚œ 것으로 ì‹œìŠ¤í…œì˜ ë‚˜ë¨¸ì§€ ì»´í¬ë„ŒíŠ¸ë“¤ì— ë³´ì´ê²Œ ë ê²ƒì´ ë³´ìž¥ë©ë‹ˆë‹¤. - LOCK 오í¼ë ˆì´ì…˜ê³¼ smp_load_acquire(), smp_cond_acquire() 오í¼ë ˆì´ì…˜ë„ - ACQUIRE 오í¼ë ˆì´ì…˜ì— í¬í•¨ë©ë‹ˆë‹¤. smp_cond_acquire() 오í¼ë ˆì´ì…˜ì€ 컨트롤 - ì˜ì¡´ì„±ê³¼ smp_rmb() 를 사용해서 ACQUIRE ì˜ ì˜ë¯¸ì 요구사í•(semantic)ì„ - 충족시킵니다. + LOCK 오í¼ë ˆì´ì…˜ê³¼ smp_load_acquire(), smp_cond_load_acquire() 오í¼ë ˆì´ì…˜ë„ + ACQUIRE 오í¼ë ˆì´ì…˜ì— í¬í•¨ë©ë‹ˆë‹¤. ACQUIRE 오í¼ë ˆì´ì…˜ ì•žì˜ ë©”ëª¨ë¦¬ 오í¼ë ˆì´ì…˜ë“¤ì€ ACQUIRE 오í¼ë ˆì´ì…˜ 완료 í›„ì— ìˆ˜í–‰ëœ ê²ƒì²˜ëŸ¼ ë³´ì¼ ìˆ˜ 있습니다. @@ -2146,33 +2144,40 @@ set_current_state() 는 다ìŒì˜ 것들로 ê°ì‹¸ì§ˆ ìˆ˜ë„ ìžˆìŠµë‹ˆë‹¤: event_indicated = 1; wake_up_process(event_daemon); -wake_up() ë¥˜ì— ì˜í•´ 쓰기 메모리 배리어가 ë‚´í¬ë©ë‹ˆë‹¤. 만약 ê·¸ê²ƒë“¤ì´ ë”가를 -깨운다면요. ì´ ë°°ë¦¬ì–´ëŠ” íƒœìŠ¤í¬ ìƒíƒœê°€ 지워지기 ì „ì— ìˆ˜í–‰ë˜ë¯€ë¡œ, ì´ë²¤íŠ¸ë¥¼ -알리기 위한 STORE 와 íƒœìŠ¤í¬ ìƒíƒœë¥¼ TASK_RUNNING 으로 ì„¤ì •í•˜ëŠ” STORE 사ì´ì— -위치하게 ë©ë‹ˆë‹¤. +wake_up() ì´ ë¬´ì–¸ê°€ë¥¼ 깨우게 ë˜ë©´, ì´ í•¨ìˆ˜ëŠ” 범용 메모리 배리어를 수행합니다. +ì´ í•¨ìˆ˜ê°€ ì•„ë¬´ê²ƒë„ ê¹¨ìš°ì§€ 않는다면 메모리 배리어는 수행ë 수ë„, 수행ë˜ì§€ ì•Šì„ +ìˆ˜ë„ ìžˆìŠµë‹ˆë‹¤; ì´ ê²½ìš°ì— ë©”ëª¨ë¦¬ 배리어를 ìˆ˜í–‰í• ê±°ë¼ ì˜¤í•´í•´ì„ ì•ˆë©ë‹ˆë‹¤. ì´ +배리어는 íƒœìŠ¤í¬ ìƒíƒœê°€ ì ‘ê·¼ë˜ê¸° ì „ì— ìˆ˜í–‰ë˜ëŠ”ë°, ìžì„¸ížˆ ë§í•˜ë©´ ì´ ì´ë²¤íŠ¸ë¥¼ +알리기 위한 STORE 와 TASK_RUNNING 으로 ìƒíƒœë¥¼ 쓰는 STORE 사ì´ì— 수행ë©ë‹ˆë‹¤: - CPU 1 CPU 2 + CPU 1 (Sleeper) CPU 2 (Waker) =============================== =============================== set_current_state(); STORE event_indicated smp_store_mb(); wake_up(); - STORE current->state <쓰기 배리어> - <범용 배리어> STORE current->state - LOAD event_indicated + STORE current->state ... + <범용 배리어> <범용 배리어> + LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL) + STORE task->state -í•œë²ˆë” ë§í•©ë‹ˆë‹¤ë§Œ, ì´ ì“°ê¸° 메모리 배리어는 ì´ ì½”ë“œê°€ ì •ë§ë¡œ ë”가를 깨울 ë•Œì—만 -실행ë©ë‹ˆë‹¤. ì´ê±¸ 설명하기 위해, X 와 Y 는 ëª¨ë‘ 0 으로 초기화 ë˜ì–´ 있다는 ê°€ì • -í•˜ì— ì•„ëž˜ì˜ ì´ë²¤íŠ¸ 시퀀스를 ìƒê°í•´ 봅시다: +여기서 "task" 는 깨어나지는 ì“°ë ˆë“œì´ê³ CPU 1 ì˜ "current" 와 같습니다. + +반복하지만, wake_up() ì´ ë¬´ì–¸ê°€ë¥¼ ì •ë§ ê¹¨ìš´ë‹¤ë©´ 범용 메모리 배리어가 수행ë +ê²ƒì´ ë³´ìž¥ë˜ì§€ë§Œ, ê·¸ë ‡ì§€ 않다면 그런 ë³´ìž¥ì´ ì—†ìŠµë‹ˆë‹¤. ì´ê±¸ ì´í•´í•˜ê¸° 위해, X 와 +Y 는 ëª¨ë‘ 0 으로 초기화 ë˜ì–´ 있다는 ê°€ì • í•˜ì— ì•„ëž˜ì˜ ì´ë²¤íŠ¸ 시퀀스를 ìƒê°í•´ +봅시다: CPU 1 CPU 2 =============================== =============================== - X = 1; STORE event_indicated + X = 1; Y = 1; smp_mb(); wake_up(); - Y = 1; wait_event(wq, Y == 1); - wake_up(); load from Y sees 1, no memory barrier - load from X might see 0 + LOAD Y LOAD X + +ì •ë§ë¡œ 깨우기가 행해졌다면, ë‘ ë¡œë“œ 중 (최소한) 하나는 1 ì„ ë³´ê²Œ ë©ë‹ˆë‹¤. +반면ì—, ì‹¤ì œ 깨우기가 행해지지 않았다면, ë‘ ë¡œë“œ ëª¨ë‘ 0ì„ ë³¼ ìˆ˜ë„ ìžˆìŠµë‹ˆë‹¤. -위 ì˜ˆì œì—ì„œì˜ ê²½ìš°ì™€ 달리 깨우기가 ì •ë§ë¡œ 행해졌다면, CPU 2 ì˜ X 로드는 1 ì„ -ë³¸ë‹¤ê³ ë³´ìž¥ë 수 ìžˆì„ ê²ë‹ˆë‹¤. +wake_up_process() 는 í•ìƒ 범용 메모리 배리어를 수행합니다. 
ì´ ë°°ë¦¬ì–´ ì—ì‹œ +íƒœìŠ¤í¬ ìƒíƒœê°€ ì ‘ê·¼ë˜ê¸° ì „ì— ìˆ˜í–‰ë©ë‹ˆë‹¤. 특히, ì•žì˜ ì˜ˆì œ 코드ì—ì„œ wake_up() ì´ +wake_up_process() ë¡œ 대체ëœë‹¤ë©´ ë‘ ë¡œë“œ 중 하나는 1ì„ ë³¼ ê²ƒì´ ë³´ìž¥ë©ë‹ˆë‹¤. 사용 가능한 깨우기류 함수들로 다ìŒê³¼ ê°™ì€ ê²ƒë“¤ì´ ìžˆìŠµë‹ˆë‹¤: @@ -2192,6 +2197,8 @@ wake_up() ë¥˜ì— ì˜í•´ 쓰기 메모리 배리어가 ë‚´í¬ë©ë‹ˆë‹¤. 만약 ê wake_up_poll(); wake_up_process(); +메모리 순서규칙 ê´€ì ì—ì„œ, ì´ í•¨ìˆ˜ë“¤ì€ ëª¨ë‘ wake_up() ê³¼ 같거나 보다 ê°•í•œ 순서 +ë³´ìž¥ì„ ì œê³µí•©ë‹ˆë‹¤. [!] ìž ìž¬ìš°ëŠ” 코드와 깨우는 ì½”ë“œì— ë‚´í¬ë˜ëŠ” 메모리 ë°°ë¦¬ì–´ë“¤ì€ ê¹¨ìš°ê¸° ì „ì— ì´ë£¨ì–´ì§„ ìŠ¤í† ì–´ë¥¼ ìž ìž¬ìš°ëŠ” 코드가 set_current_state() 를 호출한 í›„ì— í–‰í•˜ëŠ” diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 67068c47c591..64b38dfcc243 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -321,7 +321,7 @@ cpu's hardware control block. 4.8 KVM_GET_DIRTY_LOG (vm ioctl) Capability: basic -Architectures: x86 +Architectures: all Type: vm ioctl Parameters: struct kvm_dirty_log (in/out) Returns: 0 on success, -1 on error @@ -3810,7 +3810,7 @@ to I/O ports. 4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl) Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT -Architectures: x86 +Architectures: x86, arm, arm64, mips Type: vm ioctl Parameters: struct kvm_dirty_log (in) Returns: 0 on success, -1 on error @@ -3830,8 +3830,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap field. Bit 0 of the bitmap corresponds to page "first_page" in the memory slot, and num_pages is the size in bits of the input bitmap. -Both first_page and num_pages must be a multiple of 64. For each bit -that is set in the input bitmap, the corresponding page is marked "clean" +first_page must be a multiple of 64; num_pages must also be a multiple of +64 unless first_page + num_pages is the size of the memory slot. For each +bit that is set in the input bitmap, the corresponding page is marked "clean" in KVM's dirty bitmap, and dirty tracking is re-enabled for that page (for example via write-protection, or by clearing the dirty bit in a page table entry). @@ -4799,7 +4800,7 @@ and injected exceptions. 
7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT -Architectures: all +Architectures: x86, arm, arm64, mips Parameters: args[0] whether feature should be enabled or not With this capability enabled, KVM_GET_DIRTY_LOG will not automatically diff --git a/MAINTAINERS b/MAINTAINERS index 5c38f21aee78..7be412e1a380 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6462,7 +6462,7 @@ S: Maintained F: drivers/media/radio/radio-gemtek* GENERIC GPIO I2C DRIVER -M: Haavard Skinnemoen <hskinnemoen@gmail.com> +M: Wolfram Sang <wsa+renesas@sang-engineering.com> S: Supported F: drivers/i2c/busses/i2c-gpio.c F: include/linux/platform_data/i2c-gpio.h @@ -8994,7 +8994,7 @@ R: Daniel Lustig <dlustig@nvidia.com> L: linux-kernel@vger.kernel.org L: linux-arch@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: tools/memory-model/ F: Documentation/atomic_bitops.txt F: Documentation/atomic_t.txt @@ -9100,7 +9100,6 @@ F: arch/*/include/asm/spinlock*.h F: include/linux/rwlock*.h F: include/linux/mutex*.h F: include/linux/rwsem*.h -F: arch/*/include/asm/rwsem.h F: include/linux/seqlock.h F: lib/locking*.[ch] F: kernel/locking/ @@ -12176,6 +12175,7 @@ F: arch/*/kernel/*/*/perf_event*.c F: arch/*/include/asm/perf_event.h F: arch/*/kernel/perf_callchain.c F: arch/*/events/* +F: arch/*/events/*/* F: tools/perf/ PERSONALITY HANDLING @@ -13042,9 +13042,9 @@ M: Josh Triplett <josh@joshtriplett.org> R: Steven Rostedt <rostedt@goodmis.org> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Lai Jiangshan <jiangshanlai@gmail.com> -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: tools/testing/selftests/rcutorture RDC R-321X SoC @@ -13090,10 +13090,10 @@ R: Steven Rostedt <rostedt@goodmis.org> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> R: Lai Jiangshan <jiangshanlai@gmail.com> R: Joel Fernandes <joel@joelfernandes.org> -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org W: http://www.rdrop.com/users/paulmck/RCU/ S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: Documentation/RCU/ X: Documentation/RCU/torture.txt F: include/linux/rcu* @@ -14245,10 +14245,10 @@ M: "Paul E. McKenney" <paulmck@linux.ibm.com> M: Josh Triplett <josh@joshtriplett.org> R: Steven Rostedt <rostedt@goodmis.org> R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> -L: linux-kernel@vger.kernel.org +L: rcu@vger.kernel.org W: http://www.rdrop.com/users/paulmck/RCU/ S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: include/linux/srcu*.h F: kernel/rcu/srcu*.c @@ -15695,7 +15695,7 @@ M: "Paul E. 
McKenney" <paulmck@linux.ibm.com> M: Josh Triplett <josh@joshtriplett.org> L: linux-kernel@vger.kernel.org S: Supported -T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev F: Documentation/RCU/torture.txt F: kernel/torture.c F: kernel/rcu/rcutorture.c @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = NAME = Shy Crocodile # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index 33687dddd86a..3ab446bd12ef 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -383,7 +383,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE config HAVE_RCU_TABLE_FREE bool -config HAVE_RCU_TABLE_INVALIDATE +config HAVE_RCU_TABLE_NO_INVALIDATE + bool + +config HAVE_MMU_GATHER_PAGE_SIZE + bool + +config HAVE_MMU_GATHER_NO_GATHER bool config ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -901,6 +907,15 @@ config HAVE_ARCH_PREL32_RELOCATIONS config ARCH_USE_MEMREMAP_PROT bool +config LOCK_EVENT_COUNTS + bool "Locking event counts collection" + depends on DEBUG_FS + ---help--- + Enable light-weight counting of various locking related events + in the system with minimal performance impact. This reduces + the chance of application behavior change because of timing + differences. The counts are reported via debugfs. + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 584a6e114853..f7b19b813a70 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -36,6 +36,7 @@ config ALPHA select ODD_RT_SIGACTION select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 + select MMU_GATHER_NO_RANGE help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, @@ -49,13 +50,6 @@ config MMU bool default y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config ARCH_HAS_ILOG2_U32 bool default n diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h deleted file mode 100644 index cf8fc8f9a2ed..000000000000 --- a/arch/alpha/include/asm/rwsem.h +++ /dev/null @@ -1,211 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ALPHA_RWSEM_H -#define _ALPHA_RWSEM_H - -/* - * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001. 
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h - */ - -#ifndef _LINUX_RWSEM_H -#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ - -#include <linux/compiler.h> - -#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L -#define RWSEM_ACTIVE_BIAS 0x0000000000000001L -#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL -#define RWSEM_WAITING_BIAS (-0x0000000100000000L) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - -static inline int ___down_read(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count.counter; - sem->count.counter += RWSEM_ACTIVE_READ_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); -#endif - return (oldcount < 0); -} - -static inline void __down_read(struct rw_semaphore *sem) -{ - if (unlikely(___down_read(sem))) - rwsem_down_read_failed(sem); -} - -static inline int __down_read_killable(struct rw_semaphore *sem) -{ - if (unlikely(___down_read(sem))) - if (IS_ERR(rwsem_down_read_failed_killable(sem))) - return -EINTR; - - return 0; -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline int __down_read_trylock(struct rw_semaphore *sem) -{ - long old, new, res; - - res = atomic_long_read(&sem->count); - do { - new = res + RWSEM_ACTIVE_READ_BIAS; - if (new <= 0) - break; - old = res; - res = atomic_long_cmpxchg(&sem->count, old, new); - } while (res != old); - return res >= 0 ? 1 : 0; -} - -static inline long ___down_write(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count.counter; - sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); -#endif - return oldcount; -} - -static inline void __down_write(struct rw_semaphore *sem) -{ - if (unlikely(___down_write(sem))) - rwsem_down_write_failed(sem); -} - -static inline int __down_write_killable(struct rw_semaphore *sem) -{ - if (unlikely(___down_write(sem))) { - if (IS_ERR(rwsem_down_write_failed_killable(sem))) - return -EINTR; - } - - return 0; -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - if (ret == RWSEM_UNLOCKED_VALUE) - return 1; - return 0; -} - -static inline void __up_read(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count.counter; - sem->count.counter -= RWSEM_ACTIVE_READ_BIAS; -#else - long temp; - __asm__ __volatile__( - " mb\n" - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount < 0)) - if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0) - rwsem_wake(sem); -} - -static inline void 
__up_write(struct rw_semaphore *sem) -{ - long count; -#ifndef CONFIG_SMP - sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS; - count = sem->count.counter; -#else - long temp; - __asm__ __volatile__( - " mb\n" - "1: ldq_l %0,%1\n" - " subq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " subq %0,%3,%0\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (count), "=m" (sem->count), "=&r" (temp) - :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(count)) - if ((int)count == 0) - rwsem_wake(sem); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - long oldcount; -#ifndef CONFIG_SMP - oldcount = sem->count.counter; - sem->count.counter -= RWSEM_WAITING_BIAS; -#else - long temp; - __asm__ __volatile__( - "1: ldq_l %0,%1\n" - " addq %0,%3,%2\n" - " stq_c %2,%1\n" - " beq %2,2f\n" - " mb\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) - :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory"); -#endif - if (unlikely(oldcount < 0)) - rwsem_downgrade_wake(sem); -} - -#endif /* __KERNEL__ */ -#endif /* _ALPHA_RWSEM_H */ diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h index 8f5042b61875..4f79e331af5e 100644 --- a/arch/alpha/include/asm/tlb.h +++ b/arch/alpha/include/asm/tlb.h @@ -2,12 +2,6 @@ #ifndef _ALPHA_TLB_H #define _ALPHA_TLB_H -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) - -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index c781e45d1d99..23e063df5d2c 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -63,9 +63,6 @@ config SCHED_OMIT_FRAME_POINTER config GENERIC_CSUM def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - config ARCH_DISCONTIGMEM_ENABLE def_bool n diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h index a9db5f62aaf3..90cac97643a4 100644 --- a/arch/arc/include/asm/tlb.h +++ b/arch/arc/include/asm/tlb.h @@ -9,38 +9,6 @@ #ifndef _ASM_ARC_TLB_H #define _ASM_ARC_TLB_H -#define tlb_flush(tlb) \ -do { \ - if (tlb->fullmm) \ - flush_tlb_mm((tlb)->mm); \ -} while (0) - -/* - * This pair is called at time of munmap/exit to flush cache and TLB entries - * for mappings being torn down. 
- * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$ - * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range - * - * Note, read http://lkml.org/lkml/2004/1/15/6 - */ -#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING -#define tlb_start_vma(tlb, vma) -#else -#define tlb_start_vma(tlb, vma) \ -do { \ - if (!tlb->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ -} while(0) -#endif - -#define tlb_end_vma(tlb, vma) \ -do { \ - if (!tlb->fullmm) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define __tlb_remove_tlb_entry(tlb, ptep, address) - #include <linux/pagemap.h> #include <asm-generic/tlb.h> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9aed25a6019b..dc9855c4a3b4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -178,10 +178,6 @@ config TRACE_IRQFLAGS_SUPPORT bool default !CPU_V7M -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index a8a4eb7f6dae..8fb51b7bf1d5 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += parport.h generic-y += preempt.h -generic-y += rwsem.h generic-y += seccomp.h generic-y += segment.h generic-y += serial.h diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f854148c8d7c..bc6d04a09899 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -33,271 +33,42 @@ #include <asm/pgalloc.h> #include <asm/tlbflush.h> -#define MMU_GATHER_BUNDLE 8 - -#ifdef CONFIG_HAVE_RCU_TABLE_FREE static inline void __tlb_remove_table(void *_table) { free_page_and_swap_cache((struct page *)_table); } -struct mmu_table_batch { - struct rcu_head rcu; - unsigned int nr; - void *tables[0]; -}; - -#define MAX_TABLE_BATCH \ - ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) - -extern void tlb_table_flush(struct mmu_gather *tlb); -extern void tlb_remove_table(struct mmu_gather *tlb, void *table); - -#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry) -#else -#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) -#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ - -/* - * TLB handling. This allows us to remove pages from the page - * tables, and efficiently handle the TLB issues. - */ -struct mmu_gather { - struct mm_struct *mm; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - struct mmu_table_batch *batch; - unsigned int need_flush; -#endif - unsigned int fullmm; - struct vm_area_struct *vma; - unsigned long start, end; - unsigned long range_start; - unsigned long range_end; - unsigned int nr; - unsigned int max; - struct page **pages; - struct page *local[MMU_GATHER_BUNDLE]; -}; - -DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); - -/* - * This is unnecessarily complex. There's three ways the TLB shootdown - * code is used: - * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). - * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. - * tlb->vma will be non-NULL. - * 2. Unmapping all vmas. See exit_mmap(). - * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. - * tlb->vma will be non-NULL. Additionally, page tables will be freed. - * 3. Unmapping argument pages. See shift_arg_pages(). - * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. - * tlb->vma will be NULL. 
- */ -static inline void tlb_flush(struct mmu_gather *tlb) -{ - if (tlb->fullmm || !tlb->vma) - flush_tlb_mm(tlb->mm); - else if (tlb->range_end > 0) { - flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); - tlb->range_start = TASK_SIZE; - tlb->range_end = 0; - } -} - -static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) -{ - if (!tlb->fullmm) { - if (addr < tlb->range_start) - tlb->range_start = addr; - if (addr + PAGE_SIZE > tlb->range_end) - tlb->range_end = addr + PAGE_SIZE; - } -} - -static inline void __tlb_alloc_page(struct mmu_gather *tlb) -{ - unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); - - if (addr) { - tlb->pages = (void *)addr; - tlb->max = PAGE_SIZE / sizeof(struct page *); - } -} - -static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ - tlb_flush(tlb); -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb_table_flush(tlb); -#endif -} - -static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) -{ - free_pages_and_swap_cache(tlb->pages, tlb->nr); - tlb->nr = 0; - if (tlb->pages == tlb->local) - __tlb_alloc_page(tlb); -} - -static inline void tlb_flush_mmu(struct mmu_gather *tlb) -{ - tlb_flush_mmu_tlbonly(tlb); - tlb_flush_mmu_free(tlb); -} - -static inline void -arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->fullmm = !(start | (end+1)); - tlb->start = start; - tlb->end = end; - tlb->vma = NULL; - tlb->max = ARRAY_SIZE(tlb->local); - tlb->pages = tlb->local; - tlb->nr = 0; - __tlb_alloc_page(tlb); +#include <asm-generic/tlb.h> -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb->batch = NULL; +#ifndef CONFIG_HAVE_RCU_TABLE_FREE +#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry) #endif -} - -static inline void -arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) -{ - if (force) { - tlb->range_start = start; - tlb->range_end = end; - } - - tlb_flush_mmu(tlb); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - - if (tlb->pages != tlb->local) - free_pages((unsigned long)tlb->pages, 0); -} - -/* - * Memorize the range for the TLB flush. - */ -static inline void -tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) -{ - tlb_add_flush(tlb, addr); -} - -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - tlb_remove_tlb_entry(tlb, ptep, address) -/* - * In the case of tlb vma handling, we can optimise these away in the - * case where we're doing a full MM flush. When we're doing a munmap, - * the vmas are adjusted to only cover the region to be torn down. 
- */ -static inline void -tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (!tlb->fullmm) { - flush_cache_range(vma, vma->vm_start, vma->vm_end); - tlb->vma = vma; - tlb->range_start = TASK_SIZE; - tlb->range_end = 0; - } -} static inline void -tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (!tlb->fullmm) - tlb_flush(tlb); -} - -static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - tlb->pages[tlb->nr++] = page; - VM_WARN_ON(tlb->nr > tlb->max); - if (tlb->nr == tlb->max) - return true; - return false; -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - if (__tlb_remove_page(tlb, page)) - tlb_flush_mmu(tlb); -} - -static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return __tlb_remove_page(tlb, page); -} - -static inline void tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return tlb_remove_page(tlb, page); -} - -static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, - unsigned long addr) +__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { pgtable_page_dtor(pte); -#ifdef CONFIG_ARM_LPAE - tlb_add_flush(tlb, addr); -#else +#ifndef CONFIG_ARM_LPAE /* * With the classic ARM MMU, a pte page has two corresponding pmd * entries, each covering 1MB. */ - addr &= PMD_MASK; - tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); - tlb_add_flush(tlb, addr + SZ_1M); + addr = (addr & PMD_MASK) + SZ_1M; + __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE); #endif - tlb_remove_entry(tlb, pte); -} - -static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, - unsigned long addr) -{ -#ifdef CONFIG_ARM_LPAE - tlb_add_flush(tlb, addr); - tlb_remove_entry(tlb, virt_to_page(pmdp)); -#endif + tlb_remove_table(tlb, pte); } static inline void -tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) -{ - tlb_add_flush(tlb, addr); -} - -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) -#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) - -#define tlb_migrate_finish(mm) do { } while (0) - -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) +__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { -} - -static inline void tlb_flush_remove_tables(struct mm_struct *mm) -{ -} +#ifdef CONFIG_ARM_LPAE + struct page *page = virt_to_page(pmdp); -static inline void tlb_flush_remove_tables_local(void *arg) -{ + tlb_remove_table(tlb, page); +#endif } #endif /* CONFIG_MMU */ diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 76bb8de6bf6b..be5edfdde558 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -549,8 +549,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) int ret; /* - * Increment event counter and perform fixup for the pre-signal - * frame. + * Perform fixup for the pre-signal frame. */ rseq_signal_deliver(ksig, regs); diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index a56e7c856ab5..86870f40f9a0 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk, * running on another CPU? 
For now, ignore it as we * can't guarantee we won't explode. */ - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; return; #else frame.fp = thread_saved_fp(tsk); @@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk, } walk_stackframe(&frame, save_trace, &data); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) @@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) frame.pc = regs->ARM_pc; walk_stackframe(&frame, save_trace, &data); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7e34b9eba5de..d81adca1b04d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -149,7 +149,6 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE - select HAVE_RCU_TABLE_INVALIDATE select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS @@ -237,9 +236,6 @@ config LOCKDEP_SUPPORT config TRACE_IRQFLAGS_SUPPORT def_bool y -config RWSEM_XCHGADD_ALGORITHM - def_bool y - config GENERIC_BUG def_bool y depends on BUG diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 1e17ea5c372b..60a933b07001 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += mm-arch-hooks.h generic-y += msi.h generic-y += qrwlock.h generic-y += qspinlock.h -generic-y += rwsem.h generic-y += segment.h generic-y += serial.h generic-y += set_memory.h diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 106fdc951b6e..37603b5616a5 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table) free_page_and_swap_cache((struct page *)_table); } +#define tlb_flush tlb_flush static void tlb_flush(struct mmu_gather *tlb); #include <asm-generic/tlb.h> diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index d908b5e9e949..b00ec7d483d1 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -140,8 +140,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) #endif walk_stackframe(current, &frame, save_trace, &data); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_regs); @@ -172,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk, #endif walk_stackframe(tsk, &frame, save_trace, &data); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; put_task_stack(tsk); } diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index e5cd3c5f8399..eeb0471268a0 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -20,6 +20,7 @@ config C6X select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select ARCH_NO_COHERENT_DMA_MMAP + select MMU_GATHER_NO_RANGE if MMU config MMU def_bool n @@ -27,9 +28,6 @@ config MMU config FPU def_bool n -config RWSEM_GENERIC_SPINLOCK - def_bool y - config GENERIC_CALIBRATE_DELAY def_bool y diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h index 34525dea1356..240ba0febb57 100644 --- 
a/arch/c6x/include/asm/tlb.h +++ b/arch/c6x/include/asm/tlb.h @@ -2,8 +2,6 @@ #ifndef _ASM_C6X_TLB_H #define _ASM_C6X_TLB_H -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #endif /* _ASM_C6X_TLB_H */ diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 725a115759c9..6555d1781132 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -92,9 +92,6 @@ config GENERIC_HWEIGHT config MMU def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - config STACKTRACE_SUPPORT def_bool y diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index c071da34e081..61c01db6c292 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -27,9 +27,6 @@ config H8300 config CPU_BIG_ENDIAN def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - config GENERIC_HWEIGHT def_bool y diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h index 98f344279904..d8201ca31206 100644 --- a/arch/h8300/include/asm/tlb.h +++ b/arch/h8300/include/asm/tlb.h @@ -2,8 +2,6 @@ #ifndef __H8300_TLB_H__ #define __H8300_TLB_H__ -#define tlb_flush(tlb) do { } while (0) - #include <asm-generic/tlb.h> #endif diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index ac441680dcc0..3e54a53208d5 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -65,12 +65,6 @@ config GENERIC_CSUM config GENERIC_IRQ_PROBE def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool n - -config RWSEM_XCHGADD_ALGORITHM - def_bool y - config GENERIC_HWEIGHT def_bool y diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index d046e8ccdf78..3ff5f297acda 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -27,7 +27,6 @@ generic-y += mm-arch-hooks.h generic-y += pci.h generic-y += percpu.h generic-y += preempt.h -generic-y += rwsem.h generic-y += sections.h generic-y += segment.h generic-y += serial.h diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h index 2f00772cc08a..f71c4ba83614 100644 --- a/arch/hexagon/include/asm/tlb.h +++ b/arch/hexagon/include/asm/tlb.h @@ -22,18 +22,6 @@ #include <linux/pagemap.h> #include <asm/tlbflush.h> -/* - * We don't need any special per-pte or per-vma handling... - */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) - -/* - * .. 
because we flush the whole mm when it fills up - */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #endif diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 8d7396bd1790..73a26f04644e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -83,10 +83,6 @@ config STACKTRACE_SUPPORT config GENERIC_LOCKBREAK def_bool n -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config HUGETLB_PAGE_SIZE_VARIABLE bool depends on HUGETLB_PAGE diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 5133739966bc..beae261fbcb4 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void); typedef void ia64_mv_send_ipi_t (int, int, int, int); typedef void ia64_mv_timer_interrupt_t (int, void *); typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long); -typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *); typedef u8 ia64_mv_irq_to_vector (int); typedef unsigned int ia64_mv_local_vector_to_irq (u8); typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *); @@ -80,11 +79,6 @@ machvec_noop (void) } static inline void -machvec_noop_mm (struct mm_struct *mm) -{ -} - -static inline void machvec_noop_task (struct task_struct *task) { } @@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus) extern void machvec_setup (char **); extern void machvec_timer_interrupt (int, void *); -extern void machvec_tlb_migrate_finish (struct mm_struct *); # if defined (CONFIG_IA64_HP_SIM) # include <asm/machvec_hpsim.h> @@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); # define platform_send_ipi ia64_mv.send_ipi # define platform_timer_interrupt ia64_mv.timer_interrupt # define platform_global_tlb_purge ia64_mv.global_tlb_purge -# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish # define platform_dma_init ia64_mv.dma_init # define platform_dma_get_ops ia64_mv.dma_get_ops # define platform_irq_to_vector ia64_mv.irq_to_vector @@ -167,7 +159,6 @@ struct ia64_machine_vector { ia64_mv_send_ipi_t *send_ipi; ia64_mv_timer_interrupt_t *timer_interrupt; ia64_mv_global_tlb_purge_t *global_tlb_purge; - ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; ia64_mv_dma_init *dma_init; ia64_mv_dma_get_ops *dma_get_ops; ia64_mv_irq_to_vector *irq_to_vector; @@ -206,7 +197,6 @@ struct ia64_machine_vector { platform_send_ipi, \ platform_timer_interrupt, \ platform_global_tlb_purge, \ - platform_tlb_migrate_finish, \ platform_dma_init, \ platform_dma_get_ops, \ platform_irq_to_vector, \ @@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *); #ifndef platform_global_tlb_purge # define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ #endif -#ifndef platform_tlb_migrate_finish -# define platform_tlb_migrate_finish machvec_noop_mm -#endif #ifndef platform_kernel_launch_event # define platform_kernel_launch_event machvec_noop #endif diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h index b5153d300289..a243e4fb4877 100644 --- a/arch/ia64/include/asm/machvec_sn2.h +++ b/arch/ia64/include/asm/machvec_sn2.h @@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init; extern ia64_mv_send_ipi_t sn2_send_IPI; extern ia64_mv_timer_interrupt_t sn_timer_interrupt; extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; -extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish; extern ia64_mv_irq_to_vector 
sn_irq_to_vector; extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem; @@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; #define platform_send_ipi sn2_send_IPI #define platform_timer_interrupt sn_timer_interrupt #define platform_global_tlb_purge sn2_global_tlb_purge -#define platform_tlb_migrate_finish sn_tlb_migrate_finish #define platform_pci_fixup sn_pci_fixup #define platform_inb __sn_inb #define platform_inw __sn_inw diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h deleted file mode 100644 index 917910607e0e..000000000000 --- a/arch/ia64/include/asm/rwsem.h +++ /dev/null @@ -1,172 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * R/W semaphores for ia64 - * - * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com> - * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com> - * Copyright (C) 2005 Christoph Lameter <cl@linux.com> - * - * Based on asm-i386/rwsem.h and other architecture implementation. - * - * The MSW of the count is the negated number of active writers and - * waiting lockers, and the LSW is the total number of active locks. - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for - * the case of an uncontended lock. Readers increment by 1 and see a positive - * value when uncontended, negative if there are writers (and maybe) readers - * waiting (in which case it goes to sleep). - */ - -#ifndef _ASM_IA64_RWSEM_H -#define _ASM_IA64_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." -#endif - -#include <asm/intrinsics.h> - -#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000) -#define RWSEM_ACTIVE_BIAS (1L) -#define RWSEM_ACTIVE_MASK (0xffffffffL) -#define RWSEM_WAITING_BIAS (-0x100000000L) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - -/* - * lock for reading - */ -static inline int -___down_read (struct rw_semaphore *sem) -{ - long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1); - - return (result < 0); -} - -static inline void -__down_read (struct rw_semaphore *sem) -{ - if (___down_read(sem)) - rwsem_down_read_failed(sem); -} - -static inline int -__down_read_killable (struct rw_semaphore *sem) -{ - if (___down_read(sem)) - if (IS_ERR(rwsem_down_read_failed_killable(sem))) - return -EINTR; - - return 0; -} - -/* - * lock for writing - */ -static inline long -___down_write (struct rw_semaphore *sem) -{ - long old, new; - - do { - old = atomic_long_read(&sem->count); - new = old + RWSEM_ACTIVE_WRITE_BIAS; - } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old); - - return old; -} - -static inline void -__down_write (struct rw_semaphore *sem) -{ - if (___down_write(sem)) - rwsem_down_write_failed(sem); -} - -static inline int -__down_write_killable (struct rw_semaphore *sem) -{ - if (___down_write(sem)) { - if (IS_ERR(rwsem_down_write_failed_killable(sem))) - return -EINTR; - } - - return 0; -} - -/* - * unlock after reading - */ -static inline void -__up_read (struct rw_semaphore *sem) -{ - long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1); - - if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0) - rwsem_wake(sem); -} - -/* - * unlock after writing - */ -static inline void -__up_write (struct rw_semaphore *sem) -{ - long old, new; - - do 
{ - old = atomic_long_read(&sem->count); - new = old - RWSEM_ACTIVE_WRITE_BIAS; - } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old); - - if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0) - rwsem_wake(sem); -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline int -__down_read_trylock (struct rw_semaphore *sem) -{ - long tmp; - while ((tmp = atomic_long_read(&sem->count)) >= 0) { - if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) { - return 1; - } - } - return 0; -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline int -__down_write_trylock (struct rw_semaphore *sem) -{ - long tmp = atomic_long_cmpxchg_acquire(&sem->count, - RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS); - return tmp == RWSEM_UNLOCKED_VALUE; -} - -/* - * downgrade write lock to read lock - */ -static inline void -__downgrade_write (struct rw_semaphore *sem) -{ - long old, new; - - do { - old = atomic_long_read(&sem->count); - new = old - RWSEM_WAITING_BIAS; - } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old); - - if (old < 0) - rwsem_downgrade_wake(sem); -} - -#endif /* _ASM_IA64_RWSEM_H */ diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index 516355a774bf..86ec034ba499 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -47,263 +47,6 @@ #include <asm/tlbflush.h> #include <asm/machvec.h> -/* - * If we can't allocate a page to make a big batch of page pointers - * to work on, then just handle a few from the on-stack structure. - */ -#define IA64_GATHER_BUNDLE 8 - -struct mmu_gather { - struct mm_struct *mm; - unsigned int nr; - unsigned int max; - unsigned char fullmm; /* non-zero means full mm flush */ - unsigned char need_flush; /* really unmapped some PTEs? */ - unsigned long start, end; - unsigned long start_addr; - unsigned long end_addr; - struct page **pages; - struct page *local[IA64_GATHER_BUNDLE]; -}; - -struct ia64_tr_entry { - u64 ifa; - u64 itir; - u64 pte; - u64 rr; -}; /*Record for tr entry!*/ - -extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); -extern void ia64_ptr_entry(u64 target_mask, int slot); - -extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; - -/* - region register macros -*/ -#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001) -#define RR_VE(val) (((val) & 0x0000000000000001) << 0) -#define RR_VE_MASK 0x0000000000000001L -#define RR_VE_SHIFT 0 -#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f) -#define RR_PS(val) (((val) & 0x000000000000003f) << 2) -#define RR_PS_MASK 0x00000000000000fcL -#define RR_PS_SHIFT 2 -#define RR_RID_MASK 0x00000000ffffff00L -#define RR_TO_RID(val) ((val >> 8) & 0xffffff) - -static inline void -ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end) -{ - tlb->need_flush = 0; - - if (tlb->fullmm) { - /* - * Tearing down the entire address space. This happens both as a result - * of exit() and execve(). The latter case necessitates the call to - * flush_tlb_mm() here. - */ - flush_tlb_mm(tlb->mm); - } else if (unlikely (end - start >= 1024*1024*1024*1024UL - || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) - { - /* - * If we flush more than a tera-byte or across regions, we're probably - * better off just flushing the entire TLB(s). This should be very rare - * and is not worth optimizing for. 
- */ - flush_tlb_all(); - } else { - /* - * flush_tlb_range() takes a vma instead of a mm pointer because - * some architectures want the vm_flags for ITLB/DTLB flush. - */ - struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0); - - /* flush the address range from the tlb: */ - flush_tlb_range(&vma, start, end); - /* now flush the virt. page-table area mapping the address range: */ - flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end)); - } - -} - -static inline void -ia64_tlb_flush_mmu_free(struct mmu_gather *tlb) -{ - unsigned long i; - unsigned int nr; - - /* lastly, release the freed pages */ - nr = tlb->nr; - - tlb->nr = 0; - tlb->start_addr = ~0UL; - for (i = 0; i < nr; ++i) - free_page_and_swap_cache(tlb->pages[i]); -} - -/* - * Flush the TLB for address range START to END and, if not in fast mode, release the - * freed pages that where gathered up to this point. - */ -static inline void -ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) -{ - if (!tlb->need_flush) - return; - ia64_tlb_flush_mmu_tlbonly(tlb, start, end); - ia64_tlb_flush_mmu_free(tlb); -} - -static inline void __tlb_alloc_page(struct mmu_gather *tlb) -{ - unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); - - if (addr) { - tlb->pages = (void *)addr; - tlb->max = PAGE_SIZE / sizeof(void *); - } -} - - -static inline void -arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->max = ARRAY_SIZE(tlb->local); - tlb->pages = tlb->local; - tlb->nr = 0; - tlb->fullmm = !(start | (end+1)); - tlb->start = start; - tlb->end = end; - tlb->start_addr = ~0UL; -} - -/* - * Called at the end of the shootdown operation to free up any resources that were - * collected. - */ -static inline void -arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) -{ - if (force) - tlb->need_flush = 1; - /* - * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and - * tlb->end_addr. - */ - ia64_tlb_flush_mmu(tlb, start, end); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - - if (tlb->pages != tlb->local) - free_pages((unsigned long)tlb->pages, 0); -} - -/* - * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page - * must be delayed until after the TLB has been flushed (see comments at the beginning of - * this file). 
- */ -static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - tlb->need_flush = 1; - - if (!tlb->nr && tlb->pages == tlb->local) - __tlb_alloc_page(tlb); - - tlb->pages[tlb->nr++] = page; - VM_WARN_ON(tlb->nr > tlb->max); - if (tlb->nr == tlb->max) - return true; - return false; -} - -static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ - ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr); -} - -static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) -{ - ia64_tlb_flush_mmu_free(tlb); -} - -static inline void tlb_flush_mmu(struct mmu_gather *tlb) -{ - ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - if (__tlb_remove_page(tlb, page)) - tlb_flush_mmu(tlb); -} - -static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return __tlb_remove_page(tlb, page); -} - -static inline void tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return tlb_remove_page(tlb, page); -} - -/* - * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any - * PTE, not just those pointing to (normal) physical memory. - */ -static inline void -__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address) -{ - if (tlb->start_addr == ~0UL) - tlb->start_addr = address; - tlb->end_addr = address + PAGE_SIZE; -} - -#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm) - -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) - -#define tlb_remove_tlb_entry(tlb, ptep, addr) \ -do { \ - tlb->need_flush = 1; \ - __tlb_remove_tlb_entry(tlb, ptep, addr); \ -} while (0) - -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - tlb_remove_tlb_entry(tlb, ptep, address) - -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) -{ -} - -#define pte_free_tlb(tlb, ptep, address) \ -do { \ - tlb->need_flush = 1; \ - __pte_free_tlb(tlb, ptep, address); \ -} while (0) - -#define pmd_free_tlb(tlb, ptep, address) \ -do { \ - tlb->need_flush = 1; \ - __pmd_free_tlb(tlb, ptep, address); \ -} while (0) - -#define pud_free_tlb(tlb, pudp, address) \ -do { \ - tlb->need_flush = 1; \ - __pud_free_tlb(tlb, pudp, address); \ -} while (0) +#include <asm-generic/tlb.h> #endif /* _ASM_IA64_TLB_H */ diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h index 25e280810f6c..ceac10c4d6e2 100644 --- a/arch/ia64/include/asm/tlbflush.h +++ b/arch/ia64/include/asm/tlbflush.h @@ -14,6 +14,31 @@ #include <asm/mmu_context.h> #include <asm/page.h> +struct ia64_tr_entry { + u64 ifa; + u64 itir; + u64 pte; + u64 rr; +}; /*Record for tr entry!*/ + +extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); +extern void ia64_ptr_entry(u64 target_mask, int slot); +extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; + +/* + region register macros +*/ +#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001) +#define RR_VE(val) (((val) & 0x0000000000000001) << 0) +#define RR_VE_MASK 0x0000000000000001L +#define RR_VE_SHIFT 0 +#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f) +#define RR_PS(val) (((val) & 0x000000000000003f) << 2) +#define RR_PS_MASK 0x00000000000000fcL +#define RR_PS_SHIFT 2 +#define RR_RID_MASK 0x00000000ffffff00L +#define 
RR_TO_RID(val) ((val >> 8) & 0xffffff) + /* * Now for some TLB flushing routines. This is the kind of stuff that * can be very expensive, so try to avoid them whenever possible. diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 583a3746d70b..c9cfa760cd57 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -1058,9 +1058,7 @@ check_bugs (void) static int __init run_dmi_scan(void) { - dmi_scan_machine(); - dmi_memdev_walk(); - dmi_set_dump_stack_arch_desc(); + dmi_setup(); return 0; } core_initcall(run_dmi_scan); diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 5fc89aabdce1..5158bd28de05 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -305,8 +305,8 @@ local_flush_tlb_all (void) ia64_srlz_i(); /* srlz.i implies srlz.d */ } -void -flush_tlb_range (struct vm_area_struct *vma, unsigned long start, +static void +__flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; @@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, preempt_enable(); ia64_srlz_i(); /* srlz.i implies srlz.d */ } + +void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + if (unlikely(end - start >= 1024*1024*1024*1024UL + || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) { + /* + * If we flush more than a tera-byte or across regions, we're + * probably better off just flushing the entire TLB(s). This + * should be very rare and is not worth optimizing for. + */ + flush_tlb_all(); + } else { + /* flush the address range from the tlb */ + __flush_tlb_range(vma, start, end); + /* flush the virt. page-table area mapping the addr range */ + __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end)); + } +} EXPORT_SYMBOL(flush_tlb_range); void ia64_tlb_init(void) diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index b73b0ebf8214..b510f4f17fd4 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c @@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task) cpu_relax(); } -void sn_tlb_migrate_finish(struct mm_struct *mm) -{ - /* flush_tlb_mm is inefficient if more than 1 users of mm */ - if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1) - flush_tlb_mm(mm); -} - static void sn2_ipi_flush_all_tlb(struct mm_struct *mm) { diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index b54206408f91..735b9679fe6f 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -28,17 +28,11 @@ config M68K select OLD_SIGSUSPEND3 select OLD_SIGACTION select ARCH_DISCARD_MEMBLOCK + select MMU_GATHER_NO_RANGE if MMU config CPU_BIG_ENDIAN def_bool y -config RWSEM_GENERIC_SPINLOCK - bool - default y - -config RWSEM_XCHGADD_ALGORITHM - bool - config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/m68k/include/asm/tlb.h b/arch/m68k/include/asm/tlb.h index b4b9efb6f963..3c81f6adfc8b 100644 --- a/arch/m68k/include/asm/tlb.h +++ b/arch/m68k/include/asm/tlb.h @@ -2,20 +2,6 @@ #ifndef _M68K_TLB_H #define _M68K_TLB_H -/* - * m68k doesn't need any special per-pte or - * per-vma handling.. - */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) - -/* - * .. because we flush the whole mm when it - * fills up. 
- */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #endif /* _M68K_TLB_H */ diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index a51b965b3b82..adb179f519f9 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -41,6 +41,7 @@ config MICROBLAZE select TRACING_SUPPORT select VIRT_TO_BUS select CPU_NO_EFFICIENT_FFS + select MMU_GATHER_NO_RANGE if MMU # Endianness selection choice @@ -58,15 +59,9 @@ config CPU_LITTLE_ENDIAN endchoice -config RWSEM_GENERIC_SPINLOCK - def_bool y - config ZONE_DMA def_bool y -config RWSEM_XCHGADD_ALGORITHM - bool - config ARCH_HAS_ILOG2_U32 def_bool n diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h index 99b6ded54849..628a78ee0a72 100644 --- a/arch/microblaze/include/asm/tlb.h +++ b/arch/microblaze/include/asm/tlb.h @@ -11,16 +11,7 @@ #ifndef _ASM_MICROBLAZE_TLB_H #define _ASM_MICROBLAZE_TLB_H -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <linux/pagemap.h> - -#ifdef CONFIG_MMU -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) -#endif - #include <asm-generic/tlb.h> #endif /* _ASM_MICROBLAZE_TLB_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4a5f5b0ee9a9..b9c48b27162d 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1037,13 +1037,6 @@ source "arch/mips/paravirt/Kconfig" endmenu -config RWSEM_GENERIC_SPINLOCK - bool - default y - -config RWSEM_XCHGADD_ALGORITHM - bool - config GENERIC_HWEIGHT bool default y diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h index b6823b9e94da..90f3ad76d9e0 100644 --- a/arch/mips/include/asm/tlb.h +++ b/arch/mips/include/asm/tlb.h @@ -5,23 +5,6 @@ #include <asm/cpu-features.h> #include <asm/mipsregs.h> -/* - * MIPS doesn't need any special per-pte or per-vma handling, except - * we need to flush cache for area to be unmapped. - */ -#define tlb_start_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ - } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) - -/* - * .. because we flush the whole mm when it fills up. - */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #define _UNIQUE_ENTRYHI(base, idx) \ (((base) + ((idx) << (PAGE_SHIFT + 1))) | \ (cpu_has_tlbinv ? 
MIPS_ENTRYHI_EHINV : 0)) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index addb7f5f5264..55559ca0efe4 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -60,9 +60,6 @@ config GENERIC_LOCKBREAK def_bool y depends on PREEMPT -config RWSEM_GENERIC_SPINLOCK - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h index b35ae5eae3ab..d5ae571c8d30 100644 --- a/arch/nds32/include/asm/tlb.h +++ b/arch/nds32/include/asm/tlb.h @@ -4,22 +4,6 @@ #ifndef __ASMNDS32_TLB_H #define __ASMNDS32_TLB_H -#define tlb_start_vma(tlb,vma) \ - do { \ - if (!tlb->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ - } while (0) - -#define tlb_end_vma(tlb,vma) \ - do { \ - if(!tlb->fullmm) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ - } while (0) - -#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) - -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h index 9b411f401903..38ee769b18d8 100644 --- a/arch/nds32/include/asm/tlbflush.h +++ b/arch/nds32/include/asm/tlbflush.h @@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * pte); -void tlb_migrate_finish(struct mm_struct *mm); #endif diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 4ef15a61b7bc..ea37394ff3ea 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -24,6 +24,7 @@ config NIOS2 select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS select ARCH_DISCARD_MEMBLOCK + select MMU_GATHER_NO_RANGE if MMU config GENERIC_CSUM def_bool y @@ -40,9 +41,6 @@ config NO_IOPORT_MAP config FPU def_bool n -config RWSEM_GENERIC_SPINLOCK - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool n diff --git a/arch/nios2/include/asm/tlb.h b/arch/nios2/include/asm/tlb.h index d3bc648e08b5..f9f2e27e32dd 100644 --- a/arch/nios2/include/asm/tlb.h +++ b/arch/nios2/include/asm/tlb.h @@ -11,22 +11,12 @@ #ifndef _ASM_NIOS2_TLB_H #define _ASM_NIOS2_TLB_H -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - extern void set_mmu_pid(unsigned long pid); /* - * NiosII doesn't need any special per-pte or per-vma handling, except - * we need to flush cache for the area to be unmapped. + * NIOS32 does have flush_tlb_range(), but it lacks a limit and fallback to + * full mm invalidation. So use flush_tlb_mm() for everything. 
*/ -#define tlb_start_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ - } while (0) - -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) #include <linux/pagemap.h> #include <asm-generic/tlb.h> diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index a5e361fbb75a..7cfb20555b10 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -36,6 +36,7 @@ config OPENRISC select OMPIC if SMP select ARCH_WANT_FRAME_POINTERS select GENERIC_IRQ_MULTI_HANDLER + select MMU_GATHER_NO_RANGE if MMU config CPU_BIG_ENDIAN def_bool y @@ -43,12 +44,6 @@ config CPU_BIG_ENDIAN config MMU def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - -config RWSEM_XCHGADD_ALGORITHM - def_bool n - config GENERIC_HWEIGHT def_bool y diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h index fa4376a4515d..92d8a4209884 100644 --- a/arch/openrisc/include/asm/tlb.h +++ b/arch/openrisc/include/asm/tlb.h @@ -20,14 +20,10 @@ #define __ASM_OPENRISC_TLB_H__ /* - * or32 doesn't need any special per-pte or - * per-vma handling.. + * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm() + * for everything. */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) #include <linux/pagemap.h> #include <asm-generic/tlb.h> diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index c8e621296092..f1ed8ddfe486 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -75,12 +75,6 @@ config GENERIC_LOCKBREAK default y depends on SMP && PREEMPT -config RWSEM_GENERIC_SPINLOCK - def_bool y - -config RWSEM_XCHGADD_ALGORITHM - bool - config ARCH_HAS_ILOG2_U32 bool default n diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h index 0c881e74d8a6..8c0446b04c9e 100644 --- a/arch/parisc/include/asm/tlb.h +++ b/arch/parisc/include/asm/tlb.h @@ -2,24 +2,6 @@ #ifndef _PARISC_TLB_H #define _PARISC_TLB_H -#define tlb_flush(tlb) \ -do { if ((tlb)->fullmm) \ - flush_tlb_mm((tlb)->mm);\ -} while (0) - -#define tlb_start_vma(tlb, vma) \ -do { if (!(tlb)->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define tlb_end_vma(tlb, vma) \ -do { if (!(tlb)->fullmm) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define __tlb_remove_tlb_entry(tlb, pte, address) \ - do { } while (0) - #include <asm-generic/tlb.h> #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c index ec5835e83a7a..6f0b9c8d8052 100644 --- a/arch/parisc/kernel/stacktrace.c +++ b/arch/parisc/kernel/stacktrace.c @@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace) } } - /* * Save stack-backtrace addresses into a stack_trace buffer. 
*/ void save_stack_trace(struct stack_trace *trace) { dump_trace(current, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace); void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { dump_trace(tsk, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index bc98b0e37a10..88a4fb3647a2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -103,13 +103,6 @@ config LOCKDEP_SUPPORT bool default y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - bool - default y - config GENERIC_LOCKBREAK bool default y @@ -218,6 +211,8 @@ config PPC select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP + select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE + select HAVE_MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN select HAVE_SYSCALL_TRACEPOINTS diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index a0c132bedfae..36bda391e549 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -8,6 +8,5 @@ generic-y += irq_regs.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += preempt.h -generic-y += rwsem.h generic-y += vtime.h generic-y += msi.h diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index e24c67d5ba75..34fba1ce27f7 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -27,8 +27,8 @@ #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change +#define tlb_flush tlb_flush extern void tlb_flush(struct mmu_gather *tlb); /* Get the generic bits... */ @@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, #endif } -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) -{ - if (!tlb->page_size) - tlb->page_size = page_size; - else if (tlb->page_size != page_size) { - if (!tlb->fullmm) - tlb_flush_mmu(tlb); - /* - * update the page size after flush for the new - * mmu_gather. 
- */ - tlb->page_size = page_size; - } -} - #ifdef CONFIG_SMP static inline int mm_is_core_local(struct mm_struct *mm) { diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index b33bafb8fcea..70568ccbd9fd 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -57,7 +57,7 @@ void setup_barrier_nospec(void) enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); - if (!no_nospec) + if (!no_nospec && !cpu_mitigations_off()) enable_barrier_nospec(enable); } @@ -116,7 +116,7 @@ static int __init handle_nospectre_v2(char *p) early_param("nospectre_v2", handle_nospectre_v2); void setup_spectre_v2(void) { - if (no_spectrev2) + if (no_spectrev2 || cpu_mitigations_off()) do_btb_flush_fixups(); else btb_flush_enabled = true; @@ -300,7 +300,7 @@ void setup_stf_barrier(void) stf_enabled_flush_types = type; - if (!no_stf_barrier) + if (!no_stf_barrier && !cpu_mitigations_off()) stf_barrier_enable(enable); } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index ba404dd9ce1d..4f49e1a3594c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -932,7 +932,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable) enabled_flush_types = types; - if (!no_rfi_flush) + if (!no_rfi_flush && !cpu_mitigations_off()) rfi_flush_enable(enable); } diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index f02b04973710..f100e331e69b 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -543,14 +543,14 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvmppc_tce_validate(stt, tce); if (ret != H_SUCCESS) - return ret; + goto unlock_exit; dir = iommu_tce_direction(tce); - idx = srcu_read_lock(&vcpu->kvm->srcu); - if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { ret = H_PARAMETER; goto unlock_exit; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 06964350b97a..b2b29d4f9842 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3423,7 +3423,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); - mtspr(SPRN_PSSCR, host_psscr); + /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */ + mtspr(SPRN_PSSCR, host_psscr | + (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG)); mtspr(SPRN_HFSCR, host_hfscr); mtspr(SPRN_CIABR, host_ciabr); mtspr(SPRN_DAWR, host_dawr); diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f29d2f118b44..5d9c3ff728c9 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -98,10 +98,20 @@ static int find_free_bat(void) return -1; } +/* + * This function calculates the size of the larger block usable to map the + * beginning of an area based on the start address and size of that area: + * - max block size is 8M on 601 and 256 on other 6xx. + * - base address must be aligned to the block size. So the maximum block size + * is identified by the lowest bit set to 1 in the base address (for instance + * if base is 0x16000000, max size is 0x02000000). + * - block size has to be a power of two. This is calculated by finding the + * highest bit set to 1. 
+ */ static unsigned int block_size(unsigned long base, unsigned long top) { unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20; - unsigned int base_shift = (fls(base) - 1) & 31; + unsigned int base_shift = (ffs(base) - 1) & 31; unsigned int block_shift = (fls(top - base) - 1) & 31; return min3(max_size, 1U << base_shift, 1U << block_shift); @@ -157,7 +167,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { - int done; + unsigned long done; unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; if (__map_without_bats) { @@ -169,10 +179,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) return __mmu_mapin_ram(base, top); done = __mmu_mapin_ram(base, border); - if (done != border - base) + if (done != border) return done; - return done + __mmu_mapin_ram(border, top); + return __mmu_mapin_ram(border, top); } void mmu_mark_initmem_nx(void) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index eb56c82d8aa1..0582260fb6c2 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT config TRACE_IRQFLAGS_SUPPORT def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - config GENERIC_BUG def_bool y depends on BUG diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index 439dc7072e05..1ad8d093c58b 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -18,6 +18,7 @@ struct mmu_gather; static void tlb_flush(struct mmu_gather *tlb); +#define tlb_flush tlb_flush #include <asm-generic/tlb.h> static inline void tlb_flush(struct mmu_gather *tlb) diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index a4b1d94371a0..4d403274c2e8 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -169,8 +169,6 @@ static bool save_trace(unsigned long pc, void *arg) void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { walk_stackframe(tsk, NULL, save_trace, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b6e3d0653002..97b555e772d7 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -14,12 +14,6 @@ config LOCKDEP_SUPPORT config STACKTRACE_SUPPORT def_bool y -config RWSEM_GENERIC_SPINLOCK - bool - -config RWSEM_XCHGADD_ALGORITHM - def_bool y - config ARCH_HAS_ILOG2_U32 def_bool n @@ -164,11 +158,13 @@ config S390 select HAVE_PERF_USER_STACK_DUMP select HAVE_MEMBLOCK_NODE_MAP select HAVE_MEMBLOCK_PHYS_MAP + select HAVE_MMU_GATHER_NO_GATHER select HAVE_MOD_ARCH_SPECIFIC select HAVE_NOP_MCOUNT select HAVE_OPROFILE select HAVE_PCI select HAVE_PERF_EVENTS + select HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RSEQ select HAVE_SYSCALL_TRACEPOINTS diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 12d77cb11fe5..d5fadefea33c 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -20,7 +20,6 @@ generic-y += local.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h -generic-y += rwsem.h generic-y += trace_clock.h generic-y += unaligned.h generic-y += word-at-a-time.h diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b31c779cf581..aa406c05a350 100644 --- a/arch/s390/include/asm/tlb.h +++ 
b/arch/s390/include/asm/tlb.h @@ -22,98 +22,39 @@ * Pages used for the page tables is a different story. FIXME: more */ -#include <linux/mm.h> -#include <linux/pagemap.h> -#include <linux/swap.h> -#include <asm/processor.h> -#include <asm/pgalloc.h> -#include <asm/tlbflush.h> - -struct mmu_gather { - struct mm_struct *mm; - struct mmu_table_batch *batch; - unsigned int fullmm; - unsigned long start, end; -}; - -struct mmu_table_batch { - struct rcu_head rcu; - unsigned int nr; - void *tables[0]; -}; - -#define MAX_TABLE_BATCH \ - ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) - -extern void tlb_table_flush(struct mmu_gather *tlb); -extern void tlb_remove_table(struct mmu_gather *tlb, void *table); - -static inline void -arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->start = start; - tlb->end = end; - tlb->fullmm = !(start | (end+1)); - tlb->batch = NULL; -} - -static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ - __tlb_flush_mm_lazy(tlb->mm); -} - -static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) -{ - tlb_table_flush(tlb); -} - +void __tlb_remove_table(void *_table); +static inline void tlb_flush(struct mmu_gather *tlb); +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size); -static inline void tlb_flush_mmu(struct mmu_gather *tlb) -{ - tlb_flush_mmu_tlbonly(tlb); - tlb_flush_mmu_free(tlb); -} +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) -static inline void -arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) -{ - if (force) { - tlb->start = start; - tlb->end = end; - } +#define tlb_flush tlb_flush +#define pte_free_tlb pte_free_tlb +#define pmd_free_tlb pmd_free_tlb +#define p4d_free_tlb p4d_free_tlb +#define pud_free_tlb pud_free_tlb - tlb_flush_mmu(tlb); -} +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm-generic/tlb.h> /* * Release the page cache reference for a pte removed by * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page * has already been freed, so just do free_page_and_swap_cache. */ -static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - free_page_and_swap_cache(page); - return false; /* avoid calling tlb_flush_mmu */ -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - free_page_and_swap_cache(page); -} - static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { - return __tlb_remove_page(tlb, page); + free_page_and_swap_cache(page); + return false; } -static inline void tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) +static inline void tlb_flush(struct mmu_gather *tlb) { - return tlb_remove_page(tlb, page); + __tlb_flush_mm_lazy(tlb->mm); } /* @@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb, * page table from the tlb. */ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, - unsigned long address) + unsigned long address) { + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_ptes = 1; + /* + * page_table_free_rcu takes care of the allocation bit masks + * of the 2K table fragments in the 4K page table page, + * then calls tlb_remove_table. 
+ */ page_table_free_rcu(tlb, (unsigned long *) pte, address); } @@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, if (mm_pmd_folded(tlb->mm)) return; pgtable_pmd_page_dtor(virt_to_page(pmd)); + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_puds = 1; tlb_remove_table(tlb, pmd); } @@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, { if (mm_p4d_folded(tlb->mm)) return; + __tlb_adjust_range(tlb, address, PAGE_SIZE); + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_p4ds = 1; tlb_remove_table(tlb, p4d); } @@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, { if (mm_pud_folded(tlb->mm)) return; + tlb->mm->context.flush_mm = 1; + tlb->freed_tables = 1; + tlb->cleared_puds = 1; tlb_remove_table(tlb, pud); } -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0) -#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0) -#define tlb_migrate_finish(mm) do { } while (0) -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - tlb_remove_tlb_entry(tlb, ptep, address) - -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) -{ -} #endif /* _S390_TLB_H */ diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index bdddaae96559..649135cbedd5 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/device.h> +#include <linux/cpu.h> #include <asm/nospec-branch.h> static int __init nobp_setup_early(char *str) @@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early); void __init nospec_auto_detect(void) { - if (test_facility(156)) { + if (test_facility(156) || cpu_mitigations_off()) { /* * The machine supports etokens. * Disable expolines and disable nobp. 
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 460dcfba7d4e..cc9ed9787068 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -45,8 +45,6 @@ void save_stack_trace(struct stack_trace *trace) sp = current_stack_pointer(); dump_trace(save_address, trace, NULL, sp); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -58,8 +56,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) if (tsk == current) sp = current_stack_pointer(); dump_trace(save_address_nosched, trace, tsk, sp); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); @@ -69,7 +65,5 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) sp = kernel_stack_pointer(regs); dump_trace(save_address, trace, NULL, sp); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_regs); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index db6bb2f97a2c..99e06213a22b 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, tlb_remove_table(tlb, table); } -static void __tlb_remove_table(void *_table) +void __tlb_remove_table(void *_table) { unsigned int mask = (unsigned long) _table & 3; void *table = (void *)((unsigned long) _table ^ mask); @@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table) } } -static void tlb_remove_table_smp_sync(void *arg) -{ - /* Simply deliver the interrupt */ -} - -static void tlb_remove_table_one(void *table) -{ - /* - * This isn't an RCU grace period and hence the page-tables cannot be - * assumed to be actually RCU-freed. - * - * It is however sufficient for software page-table walkers that rely - * on IRQ disabling. See the comment near struct mmu_table_batch. - */ - smp_call_function(tlb_remove_table_smp_sync, NULL, 1); - __tlb_remove_table(table); -} - -static void tlb_remove_table_rcu(struct rcu_head *head) -{ - struct mmu_table_batch *batch; - int i; - - batch = container_of(head, struct mmu_table_batch, rcu); - - for (i = 0; i < batch->nr; i++) - __tlb_remove_table(batch->tables[i]); - - free_page((unsigned long)batch); -} - -void tlb_table_flush(struct mmu_gather *tlb) -{ - struct mmu_table_batch **batch = &tlb->batch; - - if (*batch) { - call_rcu(&(*batch)->rcu, tlb_remove_table_rcu); - *batch = NULL; - } -} - -void tlb_remove_table(struct mmu_gather *tlb, void *table) -{ - struct mmu_table_batch **batch = &tlb->batch; - - tlb->mm->context.flush_mm = 1; - if (*batch == NULL) { - *batch = (struct mmu_table_batch *) - __get_free_page(GFP_NOWAIT | __GFP_NOWARN); - if (*batch == NULL) { - __tlb_flush_mm_lazy(tlb->mm); - tlb_remove_table_one(table); - return; - } - (*batch)->nr = 0; - } - (*batch)->tables[(*batch)->nr++] = table; - if ((*batch)->nr == MAX_TABLE_BATCH) - tlb_flush_mmu(tlb); -} - /* * Base infrastructure required to generate basic asces, region, segment, * and page tables that do not make use of enhanced features like EDAT1. 
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index b1c91ea9a958..0be08d586d40 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -90,12 +90,6 @@ config ARCH_DEFCONFIG default "arch/sh/configs/shx3_defconfig" if SUPERH32 default "arch/sh/configs/cayman_defconfig" if SUPERH64 -config RWSEM_GENERIC_SPINLOCK - def_bool y - -config RWSEM_XCHGADD_ALGORITHM - bool - config GENERIC_BUG def_bool y depends on BUG && SUPERH32 diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 7bf2cb680d32..73fff39a0122 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -17,7 +17,6 @@ generic-y += mm-arch-hooks.h generic-y += parport.h generic-y += percpu.h generic-y += preempt.h -generic-y += rwsem.h generic-y += serial.h generic-y += sizes.h generic-y += trace_clock.h diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 8ad73cb31121..b56f908b1395 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -70,6 +70,15 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) +#if CONFIG_PGTABLE_LEVELS > 2 +#define __pmd_free_tlb(tlb, pmdp, addr) \ +do { \ + struct page *page = virt_to_page(pmdp); \ + pgtable_pmd_page_dtor(page); \ + tlb_remove_page((tlb), page); \ +} while (0); +#endif + static inline void check_pgt_cache(void) { quicklist_trim(QUICK_PT, NULL, 25, 16); diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 77abe192fb43..bc77f3dd4261 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -11,133 +11,8 @@ #ifdef CONFIG_MMU #include <linux/swap.h> -#include <asm/pgalloc.h> -#include <asm/tlbflush.h> -#include <asm/mmu_context.h> -/* - * TLB handling. This allows us to remove pages from the page - * tables, and efficiently handle the TLB issues. - */ -struct mmu_gather { - struct mm_struct *mm; - unsigned int fullmm; - unsigned long start, end; -}; - -static inline void init_tlb_gather(struct mmu_gather *tlb) -{ - tlb->start = TASK_SIZE; - tlb->end = 0; - - if (tlb->fullmm) { - tlb->start = 0; - tlb->end = TASK_SIZE; - } -} - -static inline void -arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->start = start; - tlb->end = end; - tlb->fullmm = !(start | (end+1)); - - init_tlb_gather(tlb); -} - -static inline void -arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) -{ - if (tlb->fullmm || force) - flush_tlb_mm(tlb->mm); - - /* keep the page table cache within bounds */ - check_pgt_cache(); -} - -static inline void -tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) -{ - if (tlb->start > address) - tlb->start = address; - if (tlb->end < address + PAGE_SIZE) - tlb->end = address + PAGE_SIZE; -} - -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - tlb_remove_tlb_entry(tlb, ptep, address) - -/* - * In the case of tlb vma handling, we can optimise these away in the - * case where we're doing a full MM flush. When we're doing a munmap, - * the vmas are adjusted to only cover the region to be torn down. 
- */ -static inline void -tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (!tlb->fullmm) - flush_cache_range(vma, vma->vm_start, vma->vm_end); -} - -static inline void -tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) -{ - if (!tlb->fullmm && tlb->end) { - flush_tlb_range(vma, tlb->start, tlb->end); - init_tlb_gather(tlb); - } -} - -static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ -} - -static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) -{ -} - -static inline void tlb_flush_mmu(struct mmu_gather *tlb) -{ -} - -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - free_page_and_swap_cache(page); - return false; /* avoid calling tlb_flush_mmu */ -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - __tlb_remove_page(tlb, page); -} - -static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return __tlb_remove_page(tlb, page); -} - -static inline void tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return tlb_remove_page(tlb, page); -} - -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) -{ -} - -#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) -#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) -#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) - -#define tlb_migrate_finish(mm) do { } while (0) +#include <asm-generic/tlb.h> #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64) extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); @@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void) #else /* CONFIG_MMU */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) -#define tlb_flush(tlb) do { } while (0) - #include <asm-generic/tlb.h> #endif /* CONFIG_MMU */ diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index f3cb2cccb262..2950b19ad077 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace) unsigned long *sp = (unsigned long *)current_stack_pointer; unwind_stack(current, NULL, sp, &save_stack_ops, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) unsigned long *sp = (unsigned long *)tsk->thread.sp; unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 40f8f4f73fe8..f6421c9ce5d3 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -63,6 +63,7 @@ config SPARC64 select HAVE_KRETPROBES select HAVE_KPROBES select HAVE_RCU_TABLE_FREE if SMP + select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_MEMBLOCK_NODE_MAP select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_DYNAMIC_FTRACE @@ -191,14 +192,6 @@ config NR_CPUS source "kernel/Kconfig.hz" -config RWSEM_GENERIC_SPINLOCK - bool - default y if SPARC32 - -config RWSEM_XCHGADD_ALGORITHM - bool - default y 
if SPARC64 - config GENERIC_HWEIGHT bool default y diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index a22cfd5c0ee8..2ca3200d3616 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -18,7 +18,6 @@ generic-y += mm-arch-hooks.h generic-y += module.h generic-y += msi.h generic-y += preempt.h -generic-y += rwsem.h generic-y += serial.h generic-y += trace_clock.h generic-y += word-at-a-time.h diff --git a/arch/sparc/include/asm/tlb_32.h b/arch/sparc/include/asm/tlb_32.h index 343cea19e573..5cd28a8793e3 100644 --- a/arch/sparc/include/asm/tlb_32.h +++ b/arch/sparc/include/asm/tlb_32.h @@ -2,24 +2,6 @@ #ifndef _SPARC_TLB_H #define _SPARC_TLB_H -#define tlb_start_vma(tlb, vma) \ -do { \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define tlb_end_vma(tlb, vma) \ -do { \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ -} while (0) - -#define __tlb_remove_tlb_entry(tlb, pte, address) \ - do { } while (0) - -#define tlb_flush(tlb) \ -do { \ - flush_tlb_mm((tlb)->mm); \ -} while (0) - #include <asm-generic/tlb.h> #endif /* _SPARC_TLB_H */ diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index dce6db147f24..70ee60383900 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -2,162 +2,8 @@ #ifndef __UM_TLB_H #define __UM_TLB_H -#include <linux/pagemap.h> -#include <linux/swap.h> -#include <asm/percpu.h> -#include <asm/pgalloc.h> #include <asm/tlbflush.h> - -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - -/* struct mmu_gather is an opaque type used by the mm code for passing around - * any data needed by arch specific code for tlb_remove_page. - */ -struct mmu_gather { - struct mm_struct *mm; - unsigned int need_flush; /* Really unmapped some ptes? */ - unsigned long start; - unsigned long end; - unsigned int fullmm; /* non-zero means full mm flush */ -}; - -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, - unsigned long address) -{ - if (tlb->start > address) - tlb->start = address; - if (tlb->end < address + PAGE_SIZE) - tlb->end = address + PAGE_SIZE; -} - -static inline void init_tlb_gather(struct mmu_gather *tlb) -{ - tlb->need_flush = 0; - - tlb->start = TASK_SIZE; - tlb->end = 0; - - if (tlb->fullmm) { - tlb->start = 0; - tlb->end = TASK_SIZE; - } -} - -static inline void -arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - tlb->start = start; - tlb->end = end; - tlb->fullmm = !(start | (end+1)); - - init_tlb_gather(tlb); -} - -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, - unsigned long end); - -static inline void -tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) -{ - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end); -} - -static inline void -tlb_flush_mmu_free(struct mmu_gather *tlb) -{ - init_tlb_gather(tlb); -} - -static inline void -tlb_flush_mmu(struct mmu_gather *tlb) -{ - if (!tlb->need_flush) - return; - - tlb_flush_mmu_tlbonly(tlb); - tlb_flush_mmu_free(tlb); -} - -/* arch_tlb_finish_mmu - * Called at the end of the shootdown operation to free up any resources - * that were required. 
- */ -static inline void -arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) -{ - if (force) { - tlb->start = start; - tlb->end = end; - tlb->need_flush = 1; - } - tlb_flush_mmu(tlb); - - /* keep the page table cache within bounds */ - check_pgt_cache(); -} - -/* tlb_remove_page - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), - * while handling the additional races in SMP caused by other CPUs - * caching valid mappings in their TLBs. - */ -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - tlb->need_flush = 1; - free_page_and_swap_cache(page); - return false; /* avoid calling tlb_flush_mmu */ -} - -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - __tlb_remove_page(tlb, page); -} - -static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return __tlb_remove_page(tlb, page); -} - -static inline void tlb_remove_page_size(struct mmu_gather *tlb, - struct page *page, int page_size) -{ - return tlb_remove_page(tlb, page); -} - -/** - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. - * - * Record the fact that pte's were really umapped in ->need_flush, so we can - * later optimise away the tlb invalidate. This helps when userspace is - * unmapping already-unmapped pages, which happens quite a lot. - */ -#define tlb_remove_tlb_entry(tlb, ptep, address) \ - do { \ - tlb->need_flush = 1; \ - __tlb_remove_tlb_entry(tlb, ptep, address); \ - } while (0) - -#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ - tlb_remove_tlb_entry(tlb, ptep, address) - -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, - unsigned int page_size) -{ -} - -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) - -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr) - -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) - -#define tlb_migrate_finish(mm) do {} while (0) +#include <asm-generic/cacheflush.h> +#include <asm-generic/tlb.h> #endif diff --git a/arch/um/kernel/stacktrace.c b/arch/um/kernel/stacktrace.c index ebe7bcf62684..bd95e020d509 100644 --- a/arch/um/kernel/stacktrace.c +++ b/arch/um/kernel/stacktrace.c @@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = { static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace) { dump_trace(tsk, &dump_ops, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } void save_stack_trace(struct stack_trace *trace) diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 817d82608712..2445dfcf6444 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -20,6 +20,7 @@ config UNICORE32 select GENERIC_IOMAP select MODULES_USE_ELF_REL select NEED_DMA_MAP_STATE + select MMU_GATHER_NO_RANGE if MMU help UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip @@ -38,12 +39,6 @@ config STACKTRACE_SUPPORT config LOCKDEP_SUPPORT def_bool y -config RWSEM_GENERIC_SPINLOCK - def_bool y - -config RWSEM_XCHGADD_ALGORITHM - bool - config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h index 9cca15cdae94..00a8477333f6 100644 --- a/arch/unicore32/include/asm/tlb.h +++ b/arch/unicore32/include/asm/tlb.h @@ -12,10 
+12,9 @@ #ifndef __UNICORE_TLB_H__ #define __UNICORE_TLB_H__ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) +/* + * unicore32 lacks an efficient flush_tlb_range(), use flush_tlb_mm(). + */ #define __pte_free_tlb(tlb, pte, addr) \ do { \ diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c index 9976e767d51c..e37da8c6837b 100644 --- a/arch/unicore32/kernel/stacktrace.c +++ b/arch/unicore32/kernel/stacktrace.c @@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } walk_stackframe(&frame, save_trace, &data); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } void save_stack_trace(struct stack_trace *trace) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 62fc3fda1a05..90e2640ade75 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -74,6 +74,7 @@ config X86 select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_STACKWALK select ARCH_SUPPORTS_ACPI select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 @@ -183,7 +184,6 @@ config X86 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if PARAVIRT - select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION select HAVE_FUNCTION_ARG_ACCESS_API @@ -268,9 +268,6 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API -config RWSEM_XCHGADD_ALGORITHM - def_bool y - config GENERIC_CALIBRATE_DELAY def_bool y @@ -783,14 +780,6 @@ config PARAVIRT_SPINLOCKS If you are unsure how to answer this question, answer Y. -config QUEUED_LOCK_STAT - bool "Paravirt queued spinlock statistics" - depends on PARAVIRT_SPINLOCKS && DEBUG_FS - ---help--- - Enable the collection of statistical data on the slowpath - behavior of paravirtualized queued spinlocks and report - them on debugfs. 
- source "arch/x86/xen/Kconfig" config KVM_GUEST diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index d309f30cf7af..5fc76b755510 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -650,6 +650,7 @@ ENTRY(__switch_to_asm) pushl %ebx pushl %edi pushl %esi + pushfl /* switch stack */ movl %esp, TASK_threadsp(%eax) @@ -672,6 +673,7 @@ ENTRY(__switch_to_asm) #endif /* restore callee-saved registers */ + popfl popl %esi popl %edi popl %ebx diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index d45f3fbd232e..f15441b07dad 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -116,6 +116,110 @@ static __initconst const u64 amd_hw_cache_event_ids }, }; +static __initconst const u64 amd_hw_cache_event_ids_f17h + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */ + [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */ + [C(RESULT_MISS)] = 0, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */ + [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */ + [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */ + [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. 
*/ + [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */ + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = 0, + [C(RESULT_MISS)] = 0, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = -1, + [C(RESULT_MISS)] = -1, + }, +}, +}; + /* * AMD Performance Monitor K7 and later, up to and including Family 16h: */ @@ -865,9 +969,10 @@ __init int amd_pmu_init(void) x86_pmu.amd_nb_constraints = 0; } - /* Events are common for all AMDs */ - memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); + if (boot_cpu_data.x86 >= 0x17) + memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids)); + else + memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); return 0; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 81911e11a15d..f315425d8468 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -560,6 +560,21 @@ int x86_pmu_hw_config(struct perf_event *event) return -EINVAL; } + /* sample_regs_user never support XMM registers */ + if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS)) + return -EINVAL; + /* + * Besides the general purpose registers, XMM registers may + * be collected in PEBS on some platforms, e.g. Icelake + */ + if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) { + if (x86_pmu.pebs_no_xmm_regs) + return -EINVAL; + + if (!event->attr.precise_ip) + return -EINVAL; + } + return x86_setup_perfctr(event); } @@ -661,6 +676,10 @@ static inline int is_x86_event(struct perf_event *event) return event->pmu == &pmu; } +struct pmu *x86_get_pmu(void) +{ + return &pmu; +} /* * Event scheduler state: * @@ -849,18 +868,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) struct event_constraint *c; unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; struct perf_event *e; - int i, wmin, wmax, unsched = 0; + int n0, i, wmin, wmax, unsched = 0; struct hw_perf_event *hwc; bitmap_zero(used_mask, X86_PMC_IDX_MAX); + /* + * Compute the number of events already present; see x86_pmu_add(), + * validate_group() and x86_pmu_commit_txn(). For the former two + * cpuc->n_events hasn't been updated yet, while for the latter + * cpuc->n_txn contains the number of events added in the current + * transaction. + */ + n0 = cpuc->n_events; + if (cpuc->txn_flags & PERF_PMU_TXN_ADD) + n0 -= cpuc->n_txn; + if (x86_pmu.start_scheduling) x86_pmu.start_scheduling(cpuc); for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { - cpuc->event_constraint[i] = NULL; - c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); - cpuc->event_constraint[i] = c; + c = cpuc->event_constraint[i]; + + /* + * Previously scheduled events should have a cached constraint, + * while new events should not have one. + */ + WARN_ON_ONCE((c && i >= n0) || (!c && i < n0)); + + /* + * Request constraints for new events; or for those events that + * have a dynamic constraint -- for those the constraint can + * change due to external factors (sibling state, allow_tfa). 
+ */ + if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) { + c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); + cpuc->event_constraint[i] = c; + } wmin = min(wmin, c->weight); wmax = max(wmax, c->weight); @@ -925,25 +969,20 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (!unsched && assign) { for (i = 0; i < n; i++) { e = cpuc->event_list[i]; - e->hw.flags |= PERF_X86_EVENT_COMMITTED; if (x86_pmu.commit_scheduling) x86_pmu.commit_scheduling(cpuc, i, assign[i]); } } else { - for (i = 0; i < n; i++) { + for (i = n0; i < n; i++) { e = cpuc->event_list[i]; - /* - * do not put_constraint() on comitted events, - * because they are good to go - */ - if ((e->hw.flags & PERF_X86_EVENT_COMMITTED)) - continue; /* * release events that failed scheduling */ if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(cpuc, e); + + cpuc->event_constraint[i] = NULL; } } @@ -1373,11 +1412,6 @@ static void x86_pmu_del(struct perf_event *event, int flags) int i; /* - * event is descheduled - */ - event->hw.flags &= ~PERF_X86_EVENT_COMMITTED; - - /* * If we're called during a txn, we only need to undo x86_pmu.add. * The events never got scheduled and ->cancel_txn will truncate * the event_list. @@ -1413,6 +1447,7 @@ static void x86_pmu_del(struct perf_event *event, int flags) cpuc->event_list[i-1] = cpuc->event_list[i]; cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; } + cpuc->event_constraint[i-1] = NULL; --cpuc->n_events; perf_event_update_userpage(event); @@ -2024,7 +2059,7 @@ static int validate_event(struct perf_event *event) if (IS_ERR(fake_cpuc)) return PTR_ERR(fake_cpuc); - c = x86_pmu.get_event_constraints(fake_cpuc, -1, event); + c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); if (!c || !c->weight) ret = -EINVAL; @@ -2072,8 +2107,7 @@ static int validate_group(struct perf_event *event) if (n < 0) goto out; - fake_cpuc->n_events = n; - + fake_cpuc->n_events = 0; ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); out: @@ -2348,6 +2382,15 @@ void arch_perf_update_userpage(struct perf_event *event, cyc2ns_read_end(); } +/* + * Determine whether the regs were taken from an irq/exception handler rather + * than from perf_arch_fetch_caller_regs(). 
+ */ +static bool perf_hw_regs(struct pt_regs *regs) +{ + return regs->flags & X86_EFLAGS_FIXED; +} + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { @@ -2359,11 +2402,15 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re return; } - if (perf_callchain_store(entry, regs->ip)) - return; + if (perf_hw_regs(regs)) { + if (perf_callchain_store(entry, regs->ip)) + return; + unwind_start(&state, current, regs, NULL); + } else { + unwind_start(&state, current, NULL, (void *)regs->sp); + } - for (unwind_start(&state, current, regs, NULL); !unwind_done(&state); - unwind_next_frame(&state)) { + for (; !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); if (!addr || perf_callchain_store(entry, addr)) return; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index f9451566cd9b..ef763f535e3a 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -239,6 +239,35 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; +static struct event_constraint intel_icl_event_constraints[] = { + FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ + INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */ + FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ + FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ + FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */ + INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf), + INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */ + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf), + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */ + INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */ + INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */ + INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), + INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), + EVENT_CONSTRAINT_END +}; + +static struct extra_reg intel_icl_extra_regs[] __read_mostly = { + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1), + INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), + EVENT_EXTRA_END +}; + EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3"); EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3"); EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2"); @@ -1827,6 +1856,45 @@ static __initconst const u64 glp_hw_cache_extra_regs }, }; +#define TNT_LOCAL_DRAM BIT_ULL(26) +#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD +#define TNT_DEMAND_WRITE GLM_DEMAND_RFO +#define TNT_LLC_ACCESS GLM_ANY_RESPONSE +#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \ + SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM) +#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM) + +static __initconst const u64 tnt_hw_cache_extra_regs + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = TNT_DEMAND_READ| + TNT_LLC_ACCESS, + [C(RESULT_MISS)] = TNT_DEMAND_READ| + TNT_LLC_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE| + TNT_LLC_ACCESS, + [C(RESULT_MISS)] = 
TNT_DEMAND_WRITE| + TNT_LLC_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 0x0, + [C(RESULT_MISS)] = 0x0, + }, + }, +}; + +static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { + /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1), + EVENT_EXTRA_END +}; + #define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */ #define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */ #define KNL_MCDRAM_LOCAL BIT_ULL(21) @@ -2015,7 +2083,7 @@ static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int /* * We're going to use PMC3, make sure TFA is set before we touch it. */ - if (cntr == 3 && !cpuc->is_fake) + if (cntr == 3) intel_set_tfa(cpuc, true); } @@ -2091,15 +2159,19 @@ static void intel_pmu_disable_event(struct perf_event *event) cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); cpuc->intel_cp_status &= ~(1ull << hwc->idx); - if (unlikely(event->attr.precise_ip)) - intel_pmu_pebs_disable(event); - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { intel_pmu_disable_fixed(hwc); return; } x86_pmu_disable_event(event); + + /* + * Needs to be called after x86_pmu_disable_event, + * so we don't trigger the event without PEBS bit set. + */ + if (unlikely(event->attr.precise_ip)) + intel_pmu_pebs_disable(event); } static void intel_pmu_del_event(struct perf_event *event) @@ -2145,6 +2217,11 @@ static void intel_pmu_enable_fixed(struct perf_event *event) bits <<= (idx * 4); mask = 0xfULL << (idx * 4); + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) { + bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4); + mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4); + } + rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; ctrl_val |= bits; @@ -2688,7 +2765,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (x86_pmu.event_constraints) { for_each_event_constraint(c, x86_pmu.event_constraints) { - if ((event->hw.config & c->cmask) == c->code) { + if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; } @@ -2838,7 +2915,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; struct intel_excl_states *xlo; int tid = cpuc->excl_thread_id; - int is_excl, i; + int is_excl, i, w; /* * validating a group does not require @@ -2894,36 +2971,40 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, * SHARED : sibling counter measuring non-exclusive event * UNUSED : sibling counter unused */ + w = c->weight; for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) { /* * exclusive event in sibling counter * our corresponding counter cannot be used * regardless of our event */ - if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) + if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) { __clear_bit(i, c->idxmsk); + w--; + continue; + } /* * if measuring an exclusive event, sibling * measuring non-exclusive, then counter cannot * be used */ - if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) + if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) { __clear_bit(i, c->idxmsk); + w--; + continue; + } } /* - * recompute actual bit weight for scheduling algorithm - */ - c->weight = hweight64(c->idxmsk64); - - /* * if we return an empty mask, then switch * back to static empty constraint to avoid * the cost of freeing later on */ - if (c->weight == 0) + if (!w) c = &emptyconstraint; 
+ c->weight = w; + return c; } @@ -2931,11 +3012,9 @@ static struct event_constraint * intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { - struct event_constraint *c1 = NULL; - struct event_constraint *c2; + struct event_constraint *c1, *c2; - if (idx >= 0) /* fake does < 0 */ - c1 = cpuc->event_constraint[idx]; + c1 = cpuc->event_constraint[idx]; /* * first time only @@ -2943,7 +3022,8 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, * - dynamic constraint: handled by intel_get_excl_constraints() */ c2 = __intel_get_event_constraints(cpuc, idx, event); - if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) { + if (c1) { + WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC)); bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX); c1->weight = c2->weight; c2 = c1; @@ -3366,6 +3446,12 @@ static struct event_constraint counter0_constraint = static struct event_constraint counter2_constraint = EVENT_CONSTRAINT(0, 0x4, 0); +static struct event_constraint fixed0_constraint = + FIXED_EVENT_CONSTRAINT(0x00c0, 0); + +static struct event_constraint fixed0_counter0_constraint = + INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL); + static struct event_constraint * hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) @@ -3385,6 +3471,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } static struct event_constraint * +icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + /* + * Fixed counter 0 has less skid. + * Force instruction:ppp in Fixed counter 0 + */ + if ((event->attr.precise_ip == 3) && + constraint_match(&fixed0_constraint, event->hw.config)) + return &fixed0_constraint; + + return hsw_get_event_constraints(cpuc, idx, event); +} + +static struct event_constraint * glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { @@ -3399,6 +3500,29 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, return c; } +static struct event_constraint * +tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct event_constraint *c; + + /* + * :ppp means to do reduced skid PEBS, + * which is available on PMC0 and fixed counter 0. + */ + if (event->attr.precise_ip == 3) { + /* Force instruction:ppp on PMC0 and Fixed counter 0 */ + if (constraint_match(&fixed0_constraint, event->hw.config)) + return &fixed0_counter0_constraint; + + return &counter0_constraint; + } + + c = intel_get_event_constraints(cpuc, idx, event); + + return c; +} + static bool allow_tsx_force_abort = true; static struct event_constraint * @@ -3410,7 +3534,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, /* * Without TFA we must not use PMC3. 
*/ - if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) { + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { c = dyn_constraint(cpuc, c, idx); c->idxmsk64 &= ~(1ULL << 3); c->weight--; @@ -3507,6 +3631,8 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) { + cpuc->pebs_record_size = x86_pmu.pebs_record_size; + if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { cpuc->shared_regs = allocate_shared_regs(cpu); if (!cpuc->shared_regs) @@ -4114,6 +4240,42 @@ static struct attribute *hsw_tsx_events_attrs[] = { NULL }; +EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80"); +EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2"); +EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80"); +EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2"); + +static struct attribute *icl_events_attrs[] = { + EVENT_PTR(mem_ld_hsw), + EVENT_PTR(mem_st_hsw), + NULL, +}; + +static struct attribute *icl_tsx_events_attrs[] = { + EVENT_PTR(tx_start), + EVENT_PTR(tx_abort), + EVENT_PTR(tx_commit), + EVENT_PTR(tx_capacity_read), + EVENT_PTR(tx_capacity_write), + EVENT_PTR(tx_conflict), + EVENT_PTR(el_start), + EVENT_PTR(el_abort), + EVENT_PTR(el_commit), + EVENT_PTR(el_capacity_read), + EVENT_PTR(el_capacity_write), + EVENT_PTR(el_conflict), + EVENT_PTR(cycles_t), + EVENT_PTR(cycles_ct), + NULL, +}; + +static __init struct attribute **get_icl_events_attrs(void) +{ + return boot_cpu_has(X86_FEATURE_RTM) ? + merge_attr(icl_events_attrs, icl_tsx_events_attrs) : + icl_events_attrs; +} + static ssize_t freeze_on_smi_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -4153,6 +4315,50 @@ done: return count; } +static void update_tfa_sched(void *ignored) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * check if PMC3 is used + * and if so force schedule out for all event types all contexts + */ + if (test_bit(3, cpuc->active_mask)) + perf_pmu_resched(x86_get_pmu()); +} + +static ssize_t show_sysctl_tfa(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, 40, "%d\n", allow_tsx_force_abort); +} + +static ssize_t set_sysctl_tfa(struct device *cdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool val; + ssize_t ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + /* no change */ + if (val == allow_tsx_force_abort) + return count; + + allow_tsx_force_abort = val; + + get_online_cpus(); + on_each_cpu(update_tfa_sched, NULL, 1); + put_online_cpus(); + + return count; +} + + static DEVICE_ATTR_RW(freeze_on_smi); static ssize_t branches_show(struct device *cdev, @@ -4185,7 +4391,9 @@ static struct attribute *intel_pmu_caps_attrs[] = { NULL }; -static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); +static DEVICE_ATTR(allow_tsx_force_abort, 0644, + show_sysctl_tfa, + set_sysctl_tfa); static struct attribute *intel_pmu_attrs[] = { &dev_attr_freeze_on_smi.attr, @@ -4446,6 +4654,32 @@ __init int intel_pmu_init(void) name = "goldmont_plus"; break; + case INTEL_FAM6_ATOM_TREMONT_X: + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); + hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = 
intel_slm_event_constraints; + x86_pmu.extra_regs = intel_tnt_extra_regs; + /* + * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS + * for precise cycles. + */ + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.get_event_constraints = tnt_get_event_constraints; + extra_attr = slm_format_attr; + pr_cont("Tremont events, "); + name = "Tremont"; + break; + case INTEL_FAM6_WESTMERE: case INTEL_FAM6_WESTMERE_EP: case INTEL_FAM6_WESTMERE_EX: @@ -4694,13 +4928,41 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = tfa_get_event_constraints; x86_pmu.enable_all = intel_tfa_pmu_enable_all; x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; - intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr; + intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr; } pr_cont("Skylake events, "); name = "skylake"; break; + case INTEL_FAM6_ICELAKE_MOBILE: + x86_pmu.late_ack = true; + memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + intel_pmu_lbr_init_skl(); + + x86_pmu.event_constraints = intel_icl_event_constraints; + x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints; + x86_pmu.extra_regs = intel_icl_extra_regs; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + + x86_pmu.hw_config = hsw_hw_config; + x86_pmu.get_event_constraints = icl_get_event_constraints; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; + extra_attr = merge_attr(extra_attr, skl_format_attr); + x86_pmu.cpu_events = get_icl_events_attrs(); + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02); + x86_pmu.lbr_pt_coexist = true; + intel_pmu_pebs_data_source_skl(false); + pr_cont("Icelake events, "); + name = "icelake"; + break; + default: switch (x86_pmu.version) { case 1: diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index d41de9af7a39..6072f92cb8ea 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates), X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates), + + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates), { }, }; MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 10c99ce1fead..7a9f5dac5abe 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -849,6 +849,26 @@ struct event_constraint intel_skl_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_icl_pebs_event_constraints[] = { + INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ + INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */ + + INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ + + INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ + + INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */ + + /* + * 
Everything else is handled by PMU_FL_PEBS_ALL, because we + * need the full constraints from the main table. + */ + + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; @@ -858,7 +878,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event) if (x86_pmu.pebs_constraints) { for_each_event_constraint(c, x86_pmu.pebs_constraints) { - if ((event->hw.config & c->cmask) == c->code) { + if (constraint_match(c, event->hw.config)) { event->hw.flags |= c->flags; return c; } @@ -906,17 +926,87 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc) if (cpuc->n_pebs == cpuc->n_large_pebs) { threshold = ds->pebs_absolute_maximum - - reserved * x86_pmu.pebs_record_size; + reserved * cpuc->pebs_record_size; } else { - threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size; + threshold = ds->pebs_buffer_base + cpuc->pebs_record_size; } ds->pebs_interrupt_threshold = threshold; } +static void adaptive_pebs_record_size_update(void) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + u64 pebs_data_cfg = cpuc->pebs_data_cfg; + int sz = sizeof(struct pebs_basic); + + if (pebs_data_cfg & PEBS_DATACFG_MEMINFO) + sz += sizeof(struct pebs_meminfo); + if (pebs_data_cfg & PEBS_DATACFG_GP) + sz += sizeof(struct pebs_gprs); + if (pebs_data_cfg & PEBS_DATACFG_XMMS) + sz += sizeof(struct pebs_xmm); + if (pebs_data_cfg & PEBS_DATACFG_LBRS) + sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry); + + cpuc->pebs_record_size = sz; +} + +#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \ + PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \ + PERF_SAMPLE_TRANSACTION) + +static u64 pebs_update_adaptive_cfg(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 sample_type = attr->sample_type; + u64 pebs_data_cfg = 0; + bool gprs, tsx_weight; + + if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) && + attr->precise_ip > 1) + return pebs_data_cfg; + + if (sample_type & PERF_PEBS_MEMINFO_TYPE) + pebs_data_cfg |= PEBS_DATACFG_MEMINFO; + + /* + * We need GPRs when: + * + user requested them + * + precise_ip < 2 for the non event IP + * + For RTM TSX weight we need GPRs for the abort code. + */ + gprs = (sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PEBS_GP_REGS); + + tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) && + ((attr->config & INTEL_ARCH_EVENT_MASK) == + x86_pmu.rtm_abort_event); + + if (gprs || (attr->precise_ip < 2) || tsx_weight) + pebs_data_cfg |= PEBS_DATACFG_GP; + + if ((sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PEBS_XMM_REGS)) + pebs_data_cfg |= PEBS_DATACFG_XMMS; + + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { + /* + * For now always log all LBRs. Could configure this + * later. + */ + pebs_data_cfg |= PEBS_DATACFG_LBRS | + ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT); + } + + return pebs_data_cfg; +} + static void -pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu) +pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, + struct perf_event *event, bool add) { + struct pmu *pmu = event->ctx->pmu; /* * Make sure we get updated with the first PEBS * event. It will trigger also during removal, but @@ -933,6 +1023,29 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu) update = true; } + /* + * The PEBS record doesn't shrink on pmu::del(). 
Doing so would require + * iterating all remaining PEBS events to reconstruct the config. + */ + if (x86_pmu.intel_cap.pebs_baseline && add) { + u64 pebs_data_cfg; + + /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */ + if (cpuc->n_pebs == 1) { + cpuc->pebs_data_cfg = 0; + cpuc->pebs_record_size = sizeof(struct pebs_basic); + } + + pebs_data_cfg = pebs_update_adaptive_cfg(event); + + /* Update pebs_record_size if new event requires more data. */ + if (pebs_data_cfg & ~cpuc->pebs_data_cfg) { + cpuc->pebs_data_cfg |= pebs_data_cfg; + adaptive_pebs_record_size_update(); + update = true; + } + } + if (update) pebs_update_threshold(cpuc); } @@ -947,7 +1060,7 @@ void intel_pmu_pebs_add(struct perf_event *event) if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs++; - pebs_update_state(needed_cb, cpuc, event->ctx->pmu); + pebs_update_state(needed_cb, cpuc, event, true); } void intel_pmu_pebs_enable(struct perf_event *event) @@ -960,11 +1073,19 @@ void intel_pmu_pebs_enable(struct perf_event *event) cpuc->pebs_enabled |= 1ULL << hwc->idx; - if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) + if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5)) cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32); else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled |= 1ULL << 63; + if (x86_pmu.intel_cap.pebs_baseline) { + hwc->config |= ICL_EVENTSEL_ADAPTIVE; + if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) { + wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg); + cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg; + } + } + /* * Use auto-reload if possible to save a MSR write in the PMI. * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD. @@ -991,7 +1112,7 @@ void intel_pmu_pebs_del(struct perf_event *event) if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs--; - pebs_update_state(needed_cb, cpuc, event->ctx->pmu); + pebs_update_state(needed_cb, cpuc, event, false); } void intel_pmu_pebs_disable(struct perf_event *event) @@ -1004,7 +1125,8 @@ void intel_pmu_pebs_disable(struct perf_event *event) cpuc->pebs_enabled &= ~(1ULL << hwc->idx); - if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) + if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && + (x86_pmu.version < 5)) cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); @@ -1125,34 +1247,57 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) return 0; } -static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs) +static inline u64 intel_get_tsx_weight(u64 tsx_tuning) { - if (pebs->tsx_tuning) { - union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning }; + if (tsx_tuning) { + union hsw_tsx_tuning tsx = { .value = tsx_tuning }; return tsx.cycles_last_block; } return 0; } -static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs) +static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax) { - u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; + u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32; /* For RTM XABORTs also log the abort code from AX */ - if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1)) - txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; + if ((txn & PERF_TXN_TRANSACTION) && (ax & 1)) + txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; return txn; } -static void setup_pebs_sample_data(struct perf_event *event, - struct pt_regs *iregs, void *__pebs, - struct perf_sample_data *data, - struct pt_regs *regs) +static inline u64 
get_pebs_status(void *n) { + if (x86_pmu.intel_cap.pebs_format < 4) + return ((struct pebs_record_nhm *)n)->status; + return ((struct pebs_basic *)n)->applicable_counters; +} + #define PERF_X86_EVENT_PEBS_HSW_PREC \ (PERF_X86_EVENT_PEBS_ST_HSW | \ PERF_X86_EVENT_PEBS_LD_HSW | \ PERF_X86_EVENT_PEBS_NA_HSW) + +static u64 get_data_src(struct perf_event *event, u64 aux) +{ + u64 val = PERF_MEM_NA; + int fl = event->hw.flags; + bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); + + if (fl & PERF_X86_EVENT_PEBS_LDLAT) + val = load_latency_data(aux); + else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) + val = precise_datala_hsw(event, aux); + else if (fst) + val = precise_store_data(aux); + return val; +} + +static void setup_pebs_fixed_sample_data(struct perf_event *event, + struct pt_regs *iregs, void *__pebs, + struct perf_sample_data *data, + struct pt_regs *regs) +{ /* * We cast to the biggest pebs_record but are careful not to * unconditionally access the 'extra' entries. @@ -1160,17 +1305,13 @@ static void setup_pebs_sample_data(struct perf_event *event, struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct pebs_record_skl *pebs = __pebs; u64 sample_type; - int fll, fst, dsrc; - int fl = event->hw.flags; + int fll; if (pebs == NULL) return; sample_type = event->attr.sample_type; - dsrc = sample_type & PERF_SAMPLE_DATA_SRC; - - fll = fl & PERF_X86_EVENT_PEBS_LDLAT; - fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC); + fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT; perf_sample_data_init(data, 0, event->hw.last_period); @@ -1185,16 +1326,8 @@ static void setup_pebs_sample_data(struct perf_event *event, /* * data.data_src encodes the data source */ - if (dsrc) { - u64 val = PERF_MEM_NA; - if (fll) - val = load_latency_data(pebs->dse); - else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC)) - val = precise_datala_hsw(event, pebs->dse); - else if (fst) - val = precise_store_data(pebs->dse); - data->data_src.val = val; - } + if (sample_type & PERF_SAMPLE_DATA_SRC) + data->data_src.val = get_data_src(event, pebs->dse); /* * We must however always use iregs for the unwinder to stay sane; the @@ -1281,10 +1414,11 @@ static void setup_pebs_sample_data(struct perf_event *event, if (x86_pmu.intel_cap.pebs_format >= 2) { /* Only set the TSX weight when no memory weight. */ if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) - data->weight = intel_hsw_weight(pebs); + data->weight = intel_get_tsx_weight(pebs->tsx_tuning); if (sample_type & PERF_SAMPLE_TRANSACTION) - data->txn = intel_hsw_transaction(pebs); + data->txn = intel_get_tsx_transaction(pebs->tsx_tuning, + pebs->ax); } /* @@ -1301,6 +1435,140 @@ static void setup_pebs_sample_data(struct perf_event *event, data->br_stack = &cpuc->lbr_stack; } +static void adaptive_pebs_save_regs(struct pt_regs *regs, + struct pebs_gprs *gprs) +{ + regs->ax = gprs->ax; + regs->bx = gprs->bx; + regs->cx = gprs->cx; + regs->dx = gprs->dx; + regs->si = gprs->si; + regs->di = gprs->di; + regs->bp = gprs->bp; + regs->sp = gprs->sp; +#ifndef CONFIG_X86_32 + regs->r8 = gprs->r8; + regs->r9 = gprs->r9; + regs->r10 = gprs->r10; + regs->r11 = gprs->r11; + regs->r12 = gprs->r12; + regs->r13 = gprs->r13; + regs->r14 = gprs->r14; + regs->r15 = gprs->r15; +#endif +} + +/* + * With adaptive PEBS the layout depends on what fields are configured. 
+ */ + +static void setup_pebs_adaptive_sample_data(struct perf_event *event, + struct pt_regs *iregs, void *__pebs, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct pebs_basic *basic = __pebs; + void *next_record = basic + 1; + u64 sample_type; + u64 format_size; + struct pebs_meminfo *meminfo = NULL; + struct pebs_gprs *gprs = NULL; + struct x86_perf_regs *perf_regs; + + if (basic == NULL) + return; + + perf_regs = container_of(regs, struct x86_perf_regs, regs); + perf_regs->xmm_regs = NULL; + + sample_type = event->attr.sample_type; + format_size = basic->format_size; + perf_sample_data_init(data, 0, event->hw.last_period); + data->period = event->hw.last_period; + + if (event->attr.use_clockid == 0) + data->time = native_sched_clock_from_tsc(basic->tsc); + + /* + * We must however always use iregs for the unwinder to stay sane; the + * record BP,SP,IP can point into thin air when the record is from a + * previous PMI context or an (I)RET happened between the record and + * PMI. + */ + if (sample_type & PERF_SAMPLE_CALLCHAIN) + data->callchain = perf_callchain(event, iregs); + + *regs = *iregs; + /* The ip in basic is EventingIP */ + set_linear_ip(regs, basic->ip); + regs->flags = PERF_EFLAGS_EXACT; + + /* + * The record for MEMINFO is in front of GP + * But PERF_SAMPLE_TRANSACTION needs gprs->ax. + * Save the pointer here but process later. + */ + if (format_size & PEBS_DATACFG_MEMINFO) { + meminfo = next_record; + next_record = meminfo + 1; + } + + if (format_size & PEBS_DATACFG_GP) { + gprs = next_record; + next_record = gprs + 1; + + if (event->attr.precise_ip < 2) { + set_linear_ip(regs, gprs->ip); + regs->flags &= ~PERF_EFLAGS_EXACT; + } + + if (sample_type & PERF_SAMPLE_REGS_INTR) + adaptive_pebs_save_regs(regs, gprs); + } + + if (format_size & PEBS_DATACFG_MEMINFO) { + if (sample_type & PERF_SAMPLE_WEIGHT) + data->weight = meminfo->latency ?: + intel_get_tsx_weight(meminfo->tsx_tuning); + + if (sample_type & PERF_SAMPLE_DATA_SRC) + data->data_src.val = get_data_src(event, meminfo->aux); + + if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) + data->addr = meminfo->address; + + if (sample_type & PERF_SAMPLE_TRANSACTION) + data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning, + gprs ? 
gprs->ax : 0); + } + + if (format_size & PEBS_DATACFG_XMMS) { + struct pebs_xmm *xmm = next_record; + + next_record = xmm + 1; + perf_regs->xmm_regs = xmm->xmm; + } + + if (format_size & PEBS_DATACFG_LBRS) { + struct pebs_lbr *lbr = next_record; + int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT) + & 0xff) + 1; + next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry); + + if (has_branch_stack(event)) { + intel_pmu_store_pebs_lbrs(lbr); + data->br_stack = &cpuc->lbr_stack; + } + } + + WARN_ONCE(next_record != __pebs + (format_size >> 48), + "PEBS record size %llu, expected %llu, config %llx\n", + format_size >> 48, + (u64)(next_record - __pebs), + basic->format_size); +} + static inline void * get_next_pebs_record_by_bit(void *base, void *top, int bit) { @@ -1318,19 +1586,19 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) if (base == NULL) return NULL; - for (at = base; at < top; at += x86_pmu.pebs_record_size) { - struct pebs_record_nhm *p = at; + for (at = base; at < top; at += cpuc->pebs_record_size) { + unsigned long status = get_pebs_status(at); - if (test_bit(bit, (unsigned long *)&p->status)) { + if (test_bit(bit, (unsigned long *)&status)) { /* PEBS v3 has accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) return at; - if (p->status == (1 << bit)) + if (status == (1 << bit)) return at; /* clear non-PEBS bit and re-check */ - pebs_status = p->status & cpuc->pebs_enabled; + pebs_status = status & cpuc->pebs_enabled; pebs_status &= PEBS_COUNTER_MASK; if (pebs_status == (1 << bit)) return at; @@ -1410,11 +1678,18 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *base, void *top, - int bit, int count) + int bit, int count, + void (*setup_sample)(struct perf_event *, + struct pt_regs *, + void *, + struct perf_sample_data *, + struct pt_regs *)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; struct perf_sample_data data; - struct pt_regs regs; + struct x86_perf_regs perf_regs; + struct pt_regs *regs = &perf_regs.regs; void *at = get_next_pebs_record_by_bit(base, top, bit); if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { @@ -1429,20 +1704,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event, return; while (count > 1) { - setup_pebs_sample_data(event, iregs, at, &data, ®s); - perf_event_output(event, &data, ®s); - at += x86_pmu.pebs_record_size; + setup_sample(event, iregs, at, &data, regs); + perf_event_output(event, &data, regs); + at += cpuc->pebs_record_size; at = get_next_pebs_record_by_bit(at, top, bit); count--; } - setup_pebs_sample_data(event, iregs, at, &data, ®s); + setup_sample(event, iregs, at, &data, regs); /* * All but the last records are processed. * The last one is left to be able to call the overflow handler. */ - if (perf_event_overflow(event, &data, ®s)) { + if (perf_event_overflow(event, &data, regs)) { x86_pmu_stop(event, 0); return; } @@ -1483,7 +1758,27 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) return; } - __intel_pmu_pebs_event(event, iregs, at, top, 0, n); + __intel_pmu_pebs_event(event, iregs, at, top, 0, n, + setup_pebs_fixed_sample_data); +} + +static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size) +{ + struct perf_event *event; + int bit; + + /* + * The drain_pebs() could be called twice in a short period + * for auto-reload event in pmu::read(). 
There are no + * overflows have happened in between. + * It needs to call intel_pmu_save_and_restart_reload() to + * update the event->count for this case. + */ + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); + } } static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) @@ -1513,19 +1808,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) } if (unlikely(base >= top)) { - /* - * The drain_pebs() could be called twice in a short period - * for auto-reload event in pmu::read(). There are no - * overflows have happened in between. - * It needs to call intel_pmu_save_and_restart_reload() to - * update the event->count for this case. - */ - for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, - size) { - event = cpuc->events[bit]; - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_save_and_restart_reload(event, 0); - } + intel_pmu_pebs_event_update_no_drain(cpuc, size); return; } @@ -1538,8 +1821,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) /* PEBS v3 has more accurate status bits */ if (x86_pmu.intel_cap.pebs_format >= 3) { - for_each_set_bit(bit, (unsigned long *)&pebs_status, - size) + for_each_set_bit(bit, (unsigned long *)&pebs_status, size) counts[bit]++; continue; @@ -1578,8 +1860,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) * If collision happened, the record will be dropped. */ if (p->status != (1ULL << bit)) { - for_each_set_bit(i, (unsigned long *)&pebs_status, - x86_pmu.max_pebs_events) + for_each_set_bit(i, (unsigned long *)&pebs_status, size) error[i]++; continue; } @@ -1587,7 +1868,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) counts[bit]++; } - for (bit = 0; bit < size; bit++) { + for_each_set_bit(bit, (unsigned long *)&mask, size) { if ((counts[bit] == 0) && (error[bit] == 0)) continue; @@ -1608,11 +1889,66 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) if (counts[bit]) { __intel_pmu_pebs_event(event, iregs, base, - top, bit, counts[bit]); + top, bit, counts[bit], + setup_pebs_fixed_sample_data); } } } +static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs) +{ + short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {}; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct debug_store *ds = cpuc->ds; + struct perf_event *event; + void *base, *at, *top; + int bit, size; + u64 mask; + + if (!x86_pmu.pebs_active) + return; + + base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; + top = (struct pebs_basic *)(unsigned long)ds->pebs_index; + + ds->pebs_index = ds->pebs_buffer_base; + + mask = ((1ULL << x86_pmu.max_pebs_events) - 1) | + (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED); + size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed; + + if (unlikely(base >= top)) { + intel_pmu_pebs_event_update_no_drain(cpuc, size); + return; + } + + for (at = base; at < top; at += cpuc->pebs_record_size) { + u64 pebs_status; + + pebs_status = get_pebs_status(at) & cpuc->pebs_enabled; + pebs_status &= mask; + + for_each_set_bit(bit, (unsigned long *)&pebs_status, size) + counts[bit]++; + } + + for_each_set_bit(bit, (unsigned long *)&mask, size) { + if (counts[bit] == 0) + continue; + + event = cpuc->events[bit]; + if (WARN_ON_ONCE(!event)) + continue; + + if (WARN_ON_ONCE(!event->attr.precise_ip)) + continue; + + __intel_pmu_pebs_event(event, iregs, base, + top, bit, 
counts[bit], + setup_pebs_adaptive_sample_data); + } +} + /* * BTS, PEBS probe and setup */ @@ -1628,12 +1964,18 @@ void __init intel_ds_init(void) x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; - if (x86_pmu.version <= 4) + if (x86_pmu.version <= 4) { x86_pmu.pebs_no_isolation = 1; + x86_pmu.pebs_no_xmm_regs = 1; + } if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; + char *pebs_qual = ""; int format = x86_pmu.intel_cap.pebs_format; + if (format < 4) + x86_pmu.intel_cap.pebs_baseline = 0; + switch (format) { case 0: pr_cont("PEBS fmt0%c, ", pebs_type); @@ -1669,6 +2011,29 @@ void __init intel_ds_init(void) x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; break; + case 4: + x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl; + x86_pmu.pebs_record_size = sizeof(struct pebs_basic); + if (x86_pmu.intel_cap.pebs_baseline) { + x86_pmu.large_pebs_flags |= + PERF_SAMPLE_BRANCH_STACK | + PERF_SAMPLE_TIME; + x86_pmu.flags |= PMU_FL_PEBS_ALL; + pebs_qual = "-baseline"; + } else { + /* Only basic record supported */ + x86_pmu.pebs_no_xmm_regs = 1; + x86_pmu.large_pebs_flags &= + ~(PERF_SAMPLE_ADDR | + PERF_SAMPLE_TIME | + PERF_SAMPLE_DATA_SRC | + PERF_SAMPLE_TRANSACTION | + PERF_SAMPLE_REGS_USER | + PERF_SAMPLE_REGS_INTR); + } + pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual); + break; + default: pr_cont("no PEBS fmt%d%c, ", format, pebs_type); x86_pmu.pebs = 0; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 580c1b91c454..6f814a27416b 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -488,6 +488,8 @@ void intel_pmu_lbr_add(struct perf_event *event) * be 'new'. Conversely, a new event can get installed through the * context switch path for the first time. */ + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) + cpuc->lbr_pebs_users++; perf_sched_cb_inc(event->ctx->pmu); if (!cpuc->lbr_users++ && !event->total_time_running) intel_pmu_lbr_reset(); @@ -507,8 +509,11 @@ void intel_pmu_lbr_del(struct perf_event *event) task_ctx->lbr_callstack_users--; } + if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0) + cpuc->lbr_pebs_users--; cpuc->lbr_users--; WARN_ON_ONCE(cpuc->lbr_users < 0); + WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->ctx->pmu); } @@ -658,7 +663,13 @@ void intel_pmu_lbr_read(void) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - if (!cpuc->lbr_users) + /* + * Don't read when all LBRs users are using adaptive PEBS. + * + * This could be smarter and actually check the event, + * but this simple approach seems to work for now. 
+ */ + if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users) return; if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) @@ -1080,6 +1091,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) } } +void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + int i; + + cpuc->lbr_stack.nr = x86_pmu.lbr_nr; + for (i = 0; i < x86_pmu.lbr_nr; i++) { + u64 info = lbr->lbr[i].info; + struct perf_branch_entry *e = &cpuc->lbr_entries[i]; + + e->from = lbr->lbr[i].from; + e->to = lbr->lbr[i].to; + e->mispred = !!(info & LBR_INFO_MISPRED); + e->predicted = !(info & LBR_INFO_MISPRED); + e->in_tx = !!(info & LBR_INFO_IN_TX); + e->abort = !!(info & LBR_INFO_ABORT); + e->cycles = info & LBR_INFO_CYCLES; + e->reserved = 0; + } + intel_pmu_lbr_filter(cpuc); +} + /* * Map interface branch filters onto LBR filters */ diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index fb3a2f13fc70..339d7628080c 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1525,8 +1525,7 @@ static __init int pt_init(void) } if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) - pt_pmu.pmu.capabilities = - PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF; + pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG; pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE; pt_pmu.pmu.attr_groups = pt_attr_groups; diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 94dc564146ca..37ebf6fc5415 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init), + + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init), {}, }; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 9fe64c01a2e5..fc40a1473058 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1367,6 +1367,11 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = { .pci_init = skx_uncore_pci_init, }; +static const struct intel_uncore_init_fun icl_uncore_init __initconst = { + .cpu_init = icl_uncore_cpu_init, + .pci_init = skl_uncore_pci_init, +}; + static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init), @@ -1393,6 +1398,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init), X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init), + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init), {}, }; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 853a49a8ccf6..79eb2e21e4f0 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -512,6 +512,7 @@ int skl_uncore_pci_init(void); void snb_uncore_cpu_init(void); void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); +void icl_uncore_cpu_init(void); int snb_pci2phy_map_init(int devid); /* uncore_snbep.c */ diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 13493f43b247..f8431819b3e1 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ 
b/arch/x86/events/intel/uncore_snb.c @@ -34,6 +34,8 @@ #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 +#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02 +#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12 /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -93,6 +95,12 @@ #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) +/* ICL Cbo register */ +#define ICL_UNC_CBO_CONFIG 0x396 +#define ICL_UNC_NUM_CBO_MASK 0xf +#define ICL_UNC_CBO_0_PER_CTR0 0x702 +#define ICL_UNC_CBO_MSR_OFFSET 0x8 + DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); @@ -280,6 +288,70 @@ void skl_uncore_cpu_init(void) snb_uncore_arb.ops = &skl_uncore_msr_ops; } +static struct intel_uncore_type icl_uncore_cbox = { + .name = "cbox", + .num_counters = 4, + .perf_ctr_bits = 44, + .perf_ctr = ICL_UNC_CBO_0_PER_CTR0, + .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, + .event_mask = SNB_UNC_RAW_EVENT_MASK, + .msr_offset = ICL_UNC_CBO_MSR_OFFSET, + .ops = &skl_uncore_msr_ops, + .format_group = &snb_uncore_format_group, +}; + +static struct uncore_event_desc icl_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"), + { /* end: all zeroes */ }, +}; + +static struct attribute *icl_uncore_clock_formats_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group icl_uncore_clock_format_group = { + .name = "format", + .attrs = icl_uncore_clock_formats_attr, +}; + +static struct intel_uncore_type icl_uncore_clockbox = { + .name = "clock", + .num_counters = 1, + .num_boxes = 1, + .fixed_ctr_bits = 48, + .fixed_ctr = SNB_UNC_FIXED_CTR, + .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, + .single_fixed = 1, + .event_mask = SNB_UNC_CTL_EV_SEL_MASK, + .format_group = &icl_uncore_clock_format_group, + .ops = &skl_uncore_msr_ops, + .event_descs = icl_uncore_events, +}; + +static struct intel_uncore_type *icl_msr_uncores[] = { + &icl_uncore_cbox, + &snb_uncore_arb, + &icl_uncore_clockbox, + NULL, +}; + +static int icl_get_cbox_num(void) +{ + u64 num_boxes; + + rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes); + + return num_boxes & ICL_UNC_NUM_CBO_MASK; +} + +void icl_uncore_cpu_init(void) +{ + uncore_msr_uncores = icl_msr_uncores; + icl_uncore_cbox.num_boxes = icl_get_cbox_num(); + snb_uncore_arb.ops = &skl_uncore_msr_ops; +} + enum { SNB_PCI_UNCORE_IMC, }; @@ -668,6 +740,18 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { { /* end: all zeroes */ }, }; +static const struct pci_device_id icl_uncore_pci_ids[] = { + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; + static struct pci_driver snb_uncore_pci_driver = { .name = "snb_uncore", .id_table = snb_uncore_pci_ids, @@ -693,6 +777,11 @@ static struct pci_driver skl_uncore_pci_driver = { .id_table = skl_uncore_pci_ids, }; +static struct pci_driver icl_uncore_pci_driver = { + .name = "icl_uncore", + .id_table = icl_uncore_pci_ids, +}; + struct imc_uncore_pci_dev { __u32 pci_id; struct pci_driver *driver; @@ -732,6 +821,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core 
S 4 Cores Server */ IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ + IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */ + IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */ { /* end marker */ } }; diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index a878e6286e4a..f3f4c2263501 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -89,6 +89,7 @@ static bool test_intel(int idx) case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: + case INTEL_FAM6_ICELAKE_MOBILE: if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF) return true; break; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 1e98a42b560a..07fc84bb85c1 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -49,28 +49,33 @@ struct event_constraint { unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; u64 idxmsk64; }; - u64 code; - u64 cmask; - int weight; - int overlap; - int flags; + u64 code; + u64 cmask; + int weight; + int overlap; + int flags; + unsigned int size; }; + +static inline bool constraint_match(struct event_constraint *c, u64 ecode) +{ + return ((ecode & c->cmask) - c->code) <= (u64)c->size; +} + /* * struct hw_perf_event.flags flags */ #define PERF_X86_EVENT_PEBS_LDLAT 0x0001 /* ld+ldlat data address sampling */ #define PERF_X86_EVENT_PEBS_ST 0x0002 /* st data address sampling */ #define PERF_X86_EVENT_PEBS_ST_HSW 0x0004 /* haswell style datala, store */ -#define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */ -#define PERF_X86_EVENT_PEBS_LD_HSW 0x0010 /* haswell style datala, load */ -#define PERF_X86_EVENT_PEBS_NA_HSW 0x0020 /* haswell style datala, unknown */ -#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */ -#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */ -#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ -#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ -#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */ -#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */ - +#define PERF_X86_EVENT_PEBS_LD_HSW 0x0008 /* haswell style datala, load */ +#define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */ +#define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */ +#define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */ +#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */ +#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */ +#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */ +#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */ struct amd_nb { int nb_id; /* NorthBridge id */ @@ -116,6 +121,24 @@ struct amd_nb { (1ULL << PERF_REG_X86_R14) | \ (1ULL << PERF_REG_X86_R15)) +#define PEBS_XMM_REGS \ + ((1ULL << PERF_REG_X86_XMM0) | \ + (1ULL << PERF_REG_X86_XMM1) | \ + (1ULL << PERF_REG_X86_XMM2) | \ + (1ULL << PERF_REG_X86_XMM3) | \ + (1ULL << PERF_REG_X86_XMM4) | \ + (1ULL << PERF_REG_X86_XMM5) | \ + (1ULL << PERF_REG_X86_XMM6) | \ + (1ULL << PERF_REG_X86_XMM7) | \ + (1ULL << PERF_REG_X86_XMM8) | \ + (1ULL << PERF_REG_X86_XMM9) | \ + (1ULL << PERF_REG_X86_XMM10) | \ + (1ULL << PERF_REG_X86_XMM11) | \ + (1ULL << PERF_REG_X86_XMM12) | \ + (1ULL << PERF_REG_X86_XMM13) | \ + (1ULL << PERF_REG_X86_XMM14) | \ + (1ULL << PERF_REG_X86_XMM15)) 
+ /* * Per register state. */ @@ -207,10 +230,16 @@ struct cpu_hw_events { int n_pebs; int n_large_pebs; + /* Current super set of events hardware configuration */ + u64 pebs_data_cfg; + u64 active_pebs_data_cfg; + int pebs_record_size; + /* * Intel LBR bits */ int lbr_users; + int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; struct er_account *lbr_sel; @@ -257,18 +286,29 @@ struct cpu_hw_events { void *kfree_on_online[X86_PERF_KFREE_MAX]; }; -#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\ +#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \ { .idxmsk64 = (n) }, \ .code = (c), \ + .size = (e) - (c), \ .cmask = (m), \ .weight = (w), \ .overlap = (o), \ .flags = f, \ } +#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \ + __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f) + #define EVENT_CONSTRAINT(c, n, m) \ __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0) +/* + * The constraint_match() function only works for 'simple' event codes + * and not for extended (AMD64_EVENTSEL_EVENT) events codes. + */ +#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \ + __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0) + #define INTEL_EXCLEVT_CONSTRAINT(c, n) \ __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\ 0, PERF_X86_EVENT_EXCL) @@ -304,6 +344,12 @@ struct cpu_hw_events { EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT) /* + * Constraint on a range of Event codes + */ +#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \ + EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT) + +/* * Constraint on the Event code + UMask + fixed-mask * * filter mask to validate fixed counter events. @@ -350,6 +396,9 @@ struct cpu_hw_events { #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \ EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) +#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \ + EVENT_CONSTRAINT_RANGE(c, e, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS) + /* Check only flags, but allow all event/umask */ #define INTEL_ALL_EVENT_CONSTRAINT(code, n) \ EVENT_CONSTRAINT(code, n, X86_ALL_EVENT_FLAGS) @@ -366,6 +415,11 @@ struct cpu_hw_events { ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) +#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \ + __EVENT_CONSTRAINT_RANGE(code, end, n, \ + ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ + HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW) + #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \ __EVENT_CONSTRAINT(code, n, \ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \ @@ -473,6 +527,7 @@ union perf_capabilities { * values > 32bit. 
*/ u64 full_width_write:1; + u64 pebs_baseline:1; }; u64 capabilities; }; @@ -613,14 +668,16 @@ struct x86_pmu { pebs_broken :1, pebs_prec_dist :1, pebs_no_tlb :1, - pebs_no_isolation :1; + pebs_no_isolation :1, + pebs_no_xmm_regs :1; int pebs_record_size; int pebs_buffer_size; + int max_pebs_events; void (*drain_pebs)(struct pt_regs *regs); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); - int max_pebs_events; unsigned long large_pebs_flags; + u64 rtm_abort_event; /* * Intel LBR @@ -714,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \ .event_str_ht = ht, \ } +struct pmu *x86_get_pmu(void); extern struct x86_pmu x86_pmu __read_mostly; static inline bool x86_pmu_has_lbr_callstack(void) @@ -941,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[]; extern struct event_constraint intel_skl_pebs_event_constraints[]; +extern struct event_constraint intel_icl_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_add(struct perf_event *event); @@ -959,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr); + void intel_ds_init(void); void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 321fe5f5d0e9..4d5fcd47ab75 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -61,9 +61,8 @@ } while (0) #define RELOAD_SEG(seg) { \ - unsigned int pre = GET_SEG(seg); \ + unsigned int pre = (seg) | 3; \ unsigned int cur = get_user_seg(seg); \ - pre |= 3; \ if (pre != cur) \ set_user_seg(seg, pre); \ } @@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_32 __user *sc) { unsigned int tmpflags, err = 0; + u16 gs, fs, es, ds; void __user *buf; u32 tmp; @@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, current->restart_block.fn = do_no_restart_syscall; get_user_try { - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ - RELOAD_SEG(gs); - RELOAD_SEG(fs); - RELOAD_SEG(ds); - RELOAD_SEG(es); + gs = GET_SEG(gs); + fs = GET_SEG(fs); + ds = GET_SEG(ds); + es = GET_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); COPY(ax); @@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, buf = compat_ptr(tmp); } get_user_catch(err); + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. + */ + RELOAD_SEG(gs); + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); + err |= fpu__restore_sig(buf, 1); force_iret(); diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 31b627b43a8e..464034db299f 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -20,6 +20,17 @@ #endif /* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +.macro ANNOTATE_IGNORE_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.ignore_alts + .long .Lannotate_\@ - . 
+ .popsection +.endm + +/* * Issue one struct alt_instr descriptor entry (need to put it into * the section .altinstructions, see below). This entry contains * enough information for the alternatives patching code to patch an diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 4c74073a19cc..094fbc9c0b1c 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -45,6 +45,16 @@ #define LOCK_PREFIX "" #endif +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +#define ANNOTATE_IGNORE_ALTERNATIVE \ + "999:\n\t" \ + ".pushsection .discard.ignore_alts\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + struct alt_instr { s32 instr_offset; /* original instruction */ s32 repl_offset; /* offset to replacement instruction */ diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 6467757bb39f..3ff577c0b102 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -148,30 +148,6 @@ _ASM_PTR (entry); \ .popsection -.macro ALIGN_DESTINATION - /* check for bad alignment of destination */ - movl %edi,%ecx - andl $7,%ecx - jz 102f /* already aligned */ - subl $8,%ecx - negl %ecx - subl %ecx,%edx -100: movb (%rsi),%al -101: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 100b -102: - .section .fixup,"ax" -103: addl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail - .previous - - _ASM_EXTABLE_UA(100b, 103b) - _ASM_EXTABLE_UA(101b, 103b) - .endm - #else # define _EXPAND_EXTABLE_HANDLE(x) #x # define _ASM_EXTABLE_HANDLE(from, to, handler) \ diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h index ae26df1c2789..8380c3ddd4b2 100644 --- a/arch/x86/include/asm/intel_ds.h +++ b/arch/x86/include/asm/intel_ds.h @@ -8,7 +8,7 @@ /* The maximal number of PEBS events: */ #define MAX_PEBS_EVENTS 8 -#define MAX_FIXED_PEBS_EVENTS 3 +#define MAX_FIXED_PEBS_EVENTS 4 /* * A debug store configuration. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a9d03af34030..c79abe7ca093 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -295,6 +295,7 @@ union kvm_mmu_extended_role { unsigned int valid:1; unsigned int execonly:1; unsigned int cr0_pg:1; + unsigned int cr4_pae:1; unsigned int cr4_pse:1; unsigned int cr4_pke:1; unsigned int cr4_smap:1; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index ca5bc0eacb95..1378518cf63f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -116,6 +116,7 @@ #define LBR_INFO_CYCLES 0xffff #define MSR_IA32_PEBS_ENABLE 0x000003f1 +#define MSR_PEBS_DATA_CFG 0x000003f2 #define MSR_IA32_DS_AREA 0x00000600 #define MSR_IA32_PERF_CAPABILITIES 0x00000345 #define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index dad12b767ba0..daf25b60c9e3 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -11,6 +11,15 @@ #include <asm/msr-index.h> /* + * This should be used immediately before a retpoline alternative. It tells + * objtool where the retpolines are so that it can make sense of the control + * flow by just reading the original instruction(s) and ignoring the + * alternatives. + */ +#define ANNOTATE_NOSPEC_ALTERNATIVE \ + ANNOTATE_IGNORE_ALTERNATIVE + +/* * Fill the CPU return stack buffer. 
* * Each entry in the RSB, if used for a speculative 'ret', contains an @@ -57,19 +66,6 @@ #ifdef __ASSEMBLY__ /* - * This should be used immediately before a retpoline alternative. It tells - * objtool where the retpolines are so that it can make sense of the control - * flow by just reading the original instruction(s) and ignoring the - * alternatives. - */ -.macro ANNOTATE_NOSPEC_ALTERNATIVE - .Lannotate_\@: - .pushsection .discard.nospec - .long .Lannotate_\@ - . - .popsection -.endm - -/* * This should be used immediately before an indirect jump/call. It tells * objtool the subsequent indirect jump/call is vouched safe for retpoline * builds. @@ -152,12 +148,6 @@ #else /* __ASSEMBLY__ */ -#define ANNOTATE_NOSPEC_ALTERNATIVE \ - "999:\n\t" \ - ".pushsection .discard.nospec\n\t" \ - ".long 999b - .\n\t" \ - ".popsection\n\t" - #define ANNOTATE_RETPOLINE_SAFE \ "999:\n\t" \ ".pushsection .discard.retpoline_safe\n\t" \ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 8bdf74902293..1392d5e6e8d6 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -7,7 +7,7 @@ */ #define INTEL_PMC_MAX_GENERIC 32 -#define INTEL_PMC_MAX_FIXED 3 +#define INTEL_PMC_MAX_FIXED 4 #define INTEL_PMC_IDX_FIXED 32 #define X86_PMC_IDX_MAX 64 @@ -32,6 +32,8 @@ #define HSW_IN_TX (1ULL << 32) #define HSW_IN_TX_CHECKPOINTED (1ULL << 33) +#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34) +#define ICL_FIXED_0_ADAPTIVE (1ULL << 32) #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36) #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40) @@ -87,6 +89,12 @@ #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 #define ARCH_PERFMON_EVENTS_COUNT 7 +#define PEBS_DATACFG_MEMINFO BIT_ULL(0) +#define PEBS_DATACFG_GP BIT_ULL(1) +#define PEBS_DATACFG_XMMS BIT_ULL(2) +#define PEBS_DATACFG_LBRS BIT_ULL(3) +#define PEBS_DATACFG_LBR_SHIFT 24 + /* * Intel "Architectural Performance Monitoring" CPUID * detection/enumeration details: @@ -177,6 +185,41 @@ struct x86_pmu_capability { #define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55) /* + * Adaptive PEBS v4 + */ + +struct pebs_basic { + u64 format_size; + u64 ip; + u64 applicable_counters; + u64 tsc; +}; + +struct pebs_meminfo { + u64 address; + u64 aux; + u64 latency; + u64 tsx_tuning; +}; + +struct pebs_gprs { + u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di; + u64 r8, r9, r10, r11, r12, r13, r14, r15; +}; + +struct pebs_xmm { + u64 xmm[16*2]; /* two entries for each register */ +}; + +struct pebs_lbr_entry { + u64 from, to, info; +}; + +struct pebs_lbr { + struct pebs_lbr_entry lbr[0]; /* Variable length */ +}; + +/* * IBS cpuid feature detection */ @@ -248,6 +291,11 @@ extern void perf_events_lapic_init(void); #define PERF_EFLAGS_VM (1UL << 5) struct pt_regs; +struct x86_perf_regs { + struct pt_regs regs; + u64 *xmm_regs; +}; + extern unsigned long perf_instruction_pointer(struct pt_regs *regs); extern unsigned long perf_misc_flags(struct pt_regs *regs); #define perf_misc_flags(regs) perf_misc_flags(regs) @@ -260,14 +308,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); */ #define perf_arch_fetch_caller_regs(regs, __ip) { \ (regs)->ip = (__ip); \ - (regs)->bp = caller_frame_pointer(); \ + (regs)->sp = (unsigned long)__builtin_frame_address(0); \ (regs)->cs = __KERNEL_CS; \ regs->flags = 0; \ - asm volatile( \ - _ASM_MOV "%%"_ASM_SP ", %0\n" \ - : "=m" ((regs)->sp) \ - :: "memory" \ - ); \ } struct perf_guest_switch_msr { diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h deleted file mode 100644 index 
4c25cf6caefa..000000000000 --- a/arch/x86/include/asm/rwsem.h +++ /dev/null @@ -1,237 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ - * - * Written by David Howells (dhowells@redhat.com). - * - * Derived from asm-x86/semaphore.h - * - * - * The MSW of the count is the negated number of active writers and waiting - * lockers, and the LSW is the total number of active locks - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an - * uncontended lock. This can be determined because XADD returns the old value. - * Readers increment by 1 and see a positive value when uncontended, negative - * if there are writers (and maybe) readers waiting (in which case it goes to - * sleep). - * - * The value of WAITING_BIAS supports up to 32766 waiting processes. This can - * be extended to 65534 by manually checking the whole MSW rather than relying - * on the S flag. - * - * The value of ACTIVE_BIAS supports up to 65535 active processes. - * - * This should be totally fair - if anything is waiting, a process that wants a - * lock will go to the back of the queue. When the currently active lock is - * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consecutive readers at the - * front, then they'll all be woken up, but no other readers will be. - */ - -#ifndef _ASM_X86_RWSEM_H -#define _ASM_X86_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ -#include <asm/asm.h> - -/* - * The bias values and the counter type limits the number of - * potential readers/writers to 32767 for 32 bits and 2147483647 - * for 64 bits. 
- */ - -#ifdef CONFIG_X86_64 -# define RWSEM_ACTIVE_MASK 0xffffffffL -#else -# define RWSEM_ACTIVE_MASK 0x0000ffffL -#endif - -#define RWSEM_UNLOCKED_VALUE 0x00000000L -#define RWSEM_ACTIVE_BIAS 0x00000001L -#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - -/* - * lock for reading - */ -#define ____down_read(sem, slow_path) \ -({ \ - struct rw_semaphore* ret; \ - asm volatile("# beginning down_read\n\t" \ - LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \ - /* adds 0x00000001 */ \ - " jns 1f\n" \ - " call " slow_path "\n" \ - "1:\n\t" \ - "# ending down_read\n\t" \ - : "+m" (sem->count), "=a" (ret), \ - ASM_CALL_CONSTRAINT \ - : [sem] "a" (sem) \ - : "memory", "cc"); \ - ret; \ -}) - -static inline void __down_read(struct rw_semaphore *sem) -{ - ____down_read(sem, "call_rwsem_down_read_failed"); -} - -static inline int __down_read_killable(struct rw_semaphore *sem) -{ - if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable"))) - return -EINTR; - return 0; -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline bool __down_read_trylock(struct rw_semaphore *sem) -{ - long result, tmp; - asm volatile("# beginning __down_read_trylock\n\t" - " mov %[count],%[result]\n\t" - "1:\n\t" - " mov %[result],%[tmp]\n\t" - " add %[inc],%[tmp]\n\t" - " jle 2f\n\t" - LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t" - " jnz 1b\n\t" - "2:\n\t" - "# ending __down_read_trylock\n\t" - : [count] "+m" (sem->count), [result] "=&a" (result), - [tmp] "=&r" (tmp) - : [inc] "i" (RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); - return result >= 0; -} - -/* - * lock for writing - */ -#define ____down_write(sem, slow_path) \ -({ \ - long tmp; \ - struct rw_semaphore* ret; \ - \ - asm volatile("# beginning down_write\n\t" \ - LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \ - /* adds 0xffff0001, returns the old value */ \ - " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \ - /* was the active mask 0 before? */\ - " jz 1f\n" \ - " call " slow_path "\n" \ - "1:\n" \ - "# ending down_write" \ - : "+m" (sem->count), [tmp] "=d" (tmp), \ - "=a" (ret), ASM_CALL_CONSTRAINT \ - : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \ - : "memory", "cc"); \ - ret; \ -}) - -static inline void __down_write(struct rw_semaphore *sem) -{ - ____down_write(sem, "call_rwsem_down_write_failed"); -} - -static inline int __down_write_killable(struct rw_semaphore *sem) -{ - if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable"))) - return -EINTR; - - return 0; -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline bool __down_write_trylock(struct rw_semaphore *sem) -{ - bool result; - long tmp0, tmp1; - asm volatile("# beginning __down_write_trylock\n\t" - " mov %[count],%[tmp0]\n\t" - "1:\n\t" - " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" - /* was the active mask 0 before? 
*/ - " jnz 2f\n\t" - " mov %[tmp0],%[tmp1]\n\t" - " add %[inc],%[tmp1]\n\t" - LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t" - " jnz 1b\n\t" - "2:\n\t" - CC_SET(e) - "# ending __down_write_trylock\n\t" - : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0), - [tmp1] "=&r" (tmp1), CC_OUT(e) (result) - : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS) - : "memory"); - return result; -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - long tmp; - asm volatile("# beginning __up_read\n\t" - LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" - /* subtracts 1, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" /* expects old value in %edx */ - "1:\n" - "# ending __up_read\n" - : "+m" (sem->count), [tmp] "=d" (tmp) - : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - long tmp; - asm volatile("# beginning __up_write\n\t" - LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" - /* subtracts 0xffff0001, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" /* expects old value in %edx */ - "1:\n\t" - "# ending __up_write\n" - : "+m" (sem->count), [tmp] "=d" (tmp) - : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc"); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - asm volatile("# beginning __downgrade_write\n\t" - LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t" - /* - * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) - * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) - */ - " jns 1f\n\t" - " call call_rwsem_downgrade_wake\n" - "1:\n\t" - "# ending __downgrade_write\n" - : "+m" (sem->count) - : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS) - : "memory", "cc"); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_X86_RWSEM_H */ diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index db333300bd4b..f94a7d0ddd49 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -13,13 +13,12 @@ #ifndef _ASM_X86_SMAP_H #define _ASM_X86_SMAP_H -#include <linux/stringify.h> #include <asm/nops.h> #include <asm/cpufeatures.h> /* "Raw" instruction opcodes */ -#define __ASM_CLAC .byte 0x0f,0x01,0xca -#define __ASM_STAC .byte 0x0f,0x01,0xcb +#define __ASM_CLAC ".byte 0x0f,0x01,0xca" +#define __ASM_STAC ".byte 0x0f,0x01,0xcb" #ifdef __ASSEMBLY__ @@ -28,10 +27,10 @@ #ifdef CONFIG_X86_SMAP #define ASM_CLAC \ - ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP + ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP #define ASM_STAC \ - ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP + ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP #else /* CONFIG_X86_SMAP */ @@ -49,26 +48,46 @@ static __always_inline void clac(void) { /* Note: a barrier is implicit in alternative() */ - alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); + alternative("", __ASM_CLAC, X86_FEATURE_SMAP); } static __always_inline void stac(void) { /* Note: a barrier is implicit in alternative() */ - alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); + alternative("", __ASM_STAC, X86_FEATURE_SMAP); +} + +static __always_inline unsigned long smap_save(void) +{ + unsigned long flags; + + asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC, + X86_FEATURE_SMAP) + : "=rm" (flags) : : "memory", "cc"); + + return flags; +} + +static __always_inline void smap_restore(unsigned long flags) +{ + asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP) 
+ : : "g" (flags) : "memory", "cc"); } /* These macros can be used in asm() statements */ #define ASM_CLAC \ - ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP) + ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP) #define ASM_STAC \ - ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP) + ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP) #else /* CONFIG_X86_SMAP */ static inline void clac(void) { } static inline void stac(void) { } +static inline unsigned long smap_save(void) { return 0; } +static inline void smap_restore(unsigned long flags) { } + #define ASM_CLAC #define ASM_STAC diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index f335aad404a4..beef7ad9e43a 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -98,19 +98,6 @@ struct stack_frame_ia32 { u32 return_address; }; -static inline unsigned long caller_frame_pointer(void) -{ - struct stack_frame *frame; - - frame = __builtin_frame_address(0); - -#ifdef CONFIG_FRAME_POINTER - frame = frame->next_frame; -#endif - - return (unsigned long)frame; -} - void show_opcodes(struct pt_regs *regs, const char *loglvl); void show_ip(struct pt_regs *regs, const char *loglvl); #endif /* _ASM_X86_STACKTRACE_H */ diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 7cf1a270d891..18a4b6890fa8 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h @@ -46,6 +46,7 @@ struct inactive_task_frame { unsigned long r13; unsigned long r12; #else + unsigned long flags; unsigned long si; unsigned long di; #endif diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 404b8b1d44f5..f23e7aaff4cd 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -6,6 +6,7 @@ #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +#define tlb_flush tlb_flush static inline void tlb_flush(struct mmu_gather *tlb); #include <asm-generic/tlb.h> diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 1954dd5552a2..bb21913885a3 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -427,10 +427,11 @@ do { \ ({ \ __label__ __pu_label; \ int __pu_err = -EFAULT; \ - __typeof__(*(ptr)) __pu_val; \ - __pu_val = x; \ + __typeof__(*(ptr)) __pu_val = (x); \ + __typeof__(ptr) __pu_ptr = (ptr); \ + __typeof__(size) __pu_size = (size); \ __uaccess_begin(); \ - __put_user_size(__pu_val, (ptr), (size), __pu_label); \ + __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \ __pu_err = 0; \ __pu_label: \ __uaccess_end(); \ @@ -705,7 +706,7 @@ extern struct movsl_mask { * checking before using them, but you have to surround them with the * user_access_begin/end() pair. 
*/ -static __must_check inline bool user_access_begin(const void __user *ptr, size_t len) +static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len) { if (unlikely(!access_ok(ptr,len))) return 0; @@ -715,6 +716,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t #define user_access_begin(a,b) user_access_begin(a,b) #define user_access_end() __uaccess_end() +#define user_access_save() smap_save() +#define user_access_restore(x) smap_restore(x) + #define unsafe_put_user(x, ptr, label) \ __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label) diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index a9d637bc301d..5cd1caa8bc65 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -208,9 +208,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) } unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len); - -unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len); #endif /* _ASM_X86_UACCESS_64_H */ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 2863c2026655..d50c7b747d8b 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -217,6 +217,22 @@ xen_single_call(unsigned int call, return (long)__res; } +static __always_inline void __xen_stac(void) +{ + /* + * Suppress objtool seeing the STAC/CLAC and getting confused about it + * calling random code with AC=1. + */ + asm volatile(ANNOTATE_IGNORE_ALTERNATIVE + ASM_STAC ::: "memory", "flags"); +} + +static __always_inline void __xen_clac(void) +{ + asm volatile(ANNOTATE_IGNORE_ALTERNATIVE + ASM_CLAC ::: "memory", "flags"); +} + static inline long privcmd_call(unsigned int call, unsigned long a1, unsigned long a2, @@ -225,9 +241,9 @@ privcmd_call(unsigned int call, { long res; - stac(); + __xen_stac(); res = xen_single_call(call, a1, a2, a3, a4, a5); - clac(); + __xen_clac(); return res; } @@ -424,9 +440,9 @@ HYPERVISOR_dm_op( domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs) { int ret; - stac(); + __xen_stac(); ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs); - clac(); + __xen_clac(); return ret; } diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index dabfcf7c3941..7a0e64ccd6ff 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -381,6 +381,7 @@ struct kvm_sync_regs { #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) #define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2) +#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3) #define KVM_STATE_NESTED_GUEST_MODE 0x00000001 #define KVM_STATE_NESTED_RUN_PENDING 0x00000002 diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h index f3329cabce5c..ac67bbea10ca 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -27,8 +27,29 @@ enum perf_event_x86_regs { PERF_REG_X86_R13, PERF_REG_X86_R14, PERF_REG_X86_R15, - + /* These are the limits for the GPRs. 
*/ PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, + + /* These all need two bits set because they are 128bit */ + PERF_REG_X86_XMM0 = 32, + PERF_REG_X86_XMM1 = 34, + PERF_REG_X86_XMM2 = 36, + PERF_REG_X86_XMM3 = 38, + PERF_REG_X86_XMM4 = 40, + PERF_REG_X86_XMM5 = 42, + PERF_REG_X86_XMM6 = 44, + PERF_REG_X86_XMM7 = 46, + PERF_REG_X86_XMM8 = 48, + PERF_REG_X86_XMM9 = 50, + PERF_REG_X86_XMM10 = 52, + PERF_REG_X86_XMM11 = 54, + PERF_REG_X86_XMM12 = 56, + PERF_REG_X86_XMM13 = 58, + PERF_REG_X86_XMM14 = 60, + PERF_REG_X86_XMM15 = 62, + + /* These include both GPRs and XMMX registers */ + PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2, }; #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index b91b3bfa5cfb..29630393f300 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -440,7 +440,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) char arg[20]; int ret, i; - if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) + if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || + cpu_mitigations_off()) return SPECTRE_V2_CMD_NONE; ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); @@ -672,7 +673,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) char arg[20]; int ret, i; - if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { + if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || + cpu_mitigations_off()) { return SPEC_STORE_BYPASS_CMD_NONE; } else { ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", @@ -1008,6 +1010,11 @@ static void __init l1tf_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_L1TF)) return; + if (cpu_mitigations_off()) + l1tf_mitigation = L1TF_MITIGATION_OFF; + else if (cpu_mitigations_auto_nosmt()) + l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; + override_cache_bits(&boot_cpu_data); switch (l1tf_mitigation) { diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index c06c4c16c6b6..07c30ee17425 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -59,18 +59,34 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = { u64 perf_reg_value(struct pt_regs *regs, int idx) { + struct x86_perf_regs *perf_regs; + + if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { + perf_regs = container_of(regs, struct x86_perf_regs, regs); + if (!perf_regs->xmm_regs) + return 0; + return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; + } + if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset))) return 0; return regs_get_register(regs, pt_regs_offset[idx]); } -#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL)) - #ifdef CONFIG_X86_32 +#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ + (1ULL << PERF_REG_X86_R9) | \ + (1ULL << PERF_REG_X86_R10) | \ + (1ULL << PERF_REG_X86_R11) | \ + (1ULL << PERF_REG_X86_R12) | \ + (1ULL << PERF_REG_X86_R13) | \ + (1ULL << PERF_REG_X86_R14) | \ + (1ULL << PERF_REG_X86_R15)) + int perf_reg_validate(u64 mask) { - if (!mask || mask & REG_RESERVED) + if (!mask || (mask & REG_NOSUPPORT)) return -EINVAL; return 0; @@ -96,10 +112,7 @@ void perf_get_regs_user(struct perf_regs *regs_user, int perf_reg_validate(u64 mask) { - if (!mask || mask & REG_RESERVED) - return -EINVAL; - - if (mask & REG_NOSUPPORT) + if (!mask || (mask & REG_NOSUPPORT)) return -EINVAL; return 0; diff --git a/arch/x86/kernel/process_32.c 
b/arch/x86/kernel/process_32.c index e471d8e6f0b2..70933193878c 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, struct task_struct *tsk; int err; + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. + */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6a62f4af9fcf..844a28b29967 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; + frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 3d872a527cd9..3773905cd2c1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1005,13 +1005,11 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled(EFI_BOOT)) efi_init(); - dmi_scan_machine(); - dmi_memdev_walk(); - dmi_set_dump_stack_arch_desc(); + dmi_setup(); /* * VMware detection requires dmi to be available, so this - * needs to be done after dmi_scan_machine(), for the boot CPU. + * needs to be done after dmi_setup(), for the boot CPU. */ init_hypervisor_platform(); diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 08dfd4c1a4f9..dff90fb6a9af 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs, COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); -#ifdef CONFIG_X86_64 - /* - * Fix up SS if needed for the benefit of old DOSEMU and - * CRIU. - */ - if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && - user_64bit_mode(regs))) - force_valid_ss(regs); -#endif - get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->orig_ax = -1; /* disable syscall checks */ @@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs, buf = (void __user *)buf_val; } get_user_catch(err); +#ifdef CONFIG_X86_64 + /* + * Fix up SS if needed for the benefit of old DOSEMU and + * CRIU. + */ + if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs))) + force_valid_ss(regs); +#endif + err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32)); force_iret(); @@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, { struct rt_sigframe __user *frame; void __user *fp = NULL; + unsigned long uc_flags; int err = 0; frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp); @@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. 
*/ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); save_altstack_ex(&frame->uc.uc_stack, regs->sp); @@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig, { #ifdef CONFIG_X86_X32_ABI struct rt_sigframe_x32 __user *frame; + unsigned long uc_flags; void __user *restorer; int err = 0; void __user *fpstate = NULL; @@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. */ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); @@ -688,10 +693,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) sigset_t *set = sigmask_to_save(); compat_sigset_t *cset = (compat_sigset_t *) set; - /* - * Increment event counter and perform fixup for the pre-signal - * frame. - */ + /* Perform fixup for the pre-signal frame. */ rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 5c2d71a1dc06..2abf27d7df6b 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -12,78 +12,31 @@ #include <asm/stacktrace.h> #include <asm/unwind.h> -static int save_stack_address(struct stack_trace *trace, unsigned long addr, - bool nosched) -{ - if (nosched && in_sched_functions(addr)) - return 0; - - if (trace->skip > 0) { - trace->skip--; - return 0; - } - - if (trace->nr_entries >= trace->max_entries) - return -1; - - trace->entries[trace->nr_entries++] = addr; - return 0; -} - -static void noinline __save_stack_trace(struct stack_trace *trace, - struct task_struct *task, struct pt_regs *regs, - bool nosched) +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { struct unwind_state state; unsigned long addr; - if (regs) - save_stack_address(trace, regs->ip, nosched); + if (regs && !consume_entry(cookie, regs->ip, false)) + return; for (unwind_start(&state, task, regs, NULL); !unwind_done(&state); unwind_next_frame(&state)) { addr = unwind_get_return_address(&state); - if (!addr || save_stack_address(trace, addr, nosched)) + if (!addr || !consume_entry(cookie, addr, false)) break; } - - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; } /* - * Save stack-backtrace addresses into a stack_trace buffer. + * This function returns an error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. 
*/ -void save_stack_trace(struct stack_trace *trace) -{ - trace->skip++; - __save_stack_trace(trace, current, NULL, false); -} -EXPORT_SYMBOL_GPL(save_stack_trace); - -void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) -{ - __save_stack_trace(trace, current, regs, false); -} - -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) -{ - if (!try_get_task_stack(tsk)) - return; - - if (tsk == current) - trace->skip++; - __save_stack_trace(trace, tsk, NULL, true); - - put_task_stack(tsk); -} -EXPORT_SYMBOL_GPL(save_stack_trace_tsk); - -#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE - -static int __always_inline -__save_stack_trace_reliable(struct stack_trace *trace, - struct task_struct *task) +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) { struct unwind_state state; struct pt_regs *regs; @@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace, if (regs) { /* Success path for user tasks */ if (user_mode(regs)) - goto success; + return 0; /* * Kernel mode registers on the stack indicate an @@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace, if (!addr) return -EINVAL; - if (save_stack_address(trace, addr, false)) + if (!consume_entry(cookie, addr, false)) return -EINVAL; } @@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace, if (!(task->flags & (PF_KTHREAD | PF_IDLE))) return -EINVAL; -success: - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; - return 0; } -/* - * This function returns an error if it detects any unreliable features of the - * stack. Otherwise it guarantees that the stack trace is reliable. - * - * If the task is not 'current', the caller *must* ensure the task is inactive. - */ -int save_stack_trace_tsk_reliable(struct task_struct *tsk, - struct stack_trace *trace) -{ - int ret; - - /* - * If the task doesn't have a stack (e.g., a zombie), the stack is - * "reliably" empty. 
- */ - if (!try_get_task_stack(tsk)) - return 0; - - ret = __save_stack_trace_reliable(trace, tsk); - - put_task_stack(tsk); - - return ret; -} -#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ - /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */ struct stack_frame_user { @@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) return ret; } -static inline void __save_stack_trace_user(struct stack_trace *trace) +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs) { - const struct pt_regs *regs = task_pt_regs(current); const void __user *fp = (const void __user *)regs->bp; - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = regs->ip; + if (!consume_entry(cookie, regs->ip, false)) + return; - while (trace->nr_entries < trace->max_entries) { + while (1) { struct stack_frame_user frame; frame.next_fp = NULL; @@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace) if ((unsigned long)fp < regs->sp) break; if (frame.ret_addr) { - trace->entries[trace->nr_entries++] = - frame.ret_addr; + if (!consume_entry(cookie, frame.ret_addr, false)) + return; } if (fp == frame.next_fp) break; @@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace) } } -void save_stack_trace_user(struct stack_trace *trace) -{ - /* - * Trace user stack if we are not a kernel thread - */ - if (current->mm) { - __save_stack_trace_user(trace); - } - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; -} diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 421899f6ad7b..cc24b3a32c44 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1371,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, valid_bank_mask = BIT_ULL(0); sparse_banks[0] = flush.processor_mask; - all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS; + + /* + * Work around possible WS2012 bug: it sends hypercalls + * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, + * while also expecting us to flush something and crashing if + * we don't. Let's treat processor_mask == 0 same as + * HV_FLUSH_ALL_PROCESSORS. + */ + all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || + flush.processor_mask == 0; } else { if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex, sizeof(flush_ex)))) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 9bf70cf84564..bd13fdddbdc4 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -70,7 +70,6 @@ #define APIC_BROADCAST 0xFF #define X2APIC_BROADCAST 0xFFFFFFFFul -static bool lapic_timer_advance_adjust_done = false; #define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100 /* step-by-step approximation to mitigate fluctuation */ #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8 @@ -1482,14 +1481,32 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) return false; } +static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles) +{ + u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns; + + /* + * If the guest TSC is running at a different ratio than the host, then + * convert the delay to nanoseconds to achieve an accurate delay. Note + * that __delay() uses delay_tsc whenever the hardware has TSC, thus + * always for VMX enabled hardware. 
+ */ + if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) { + __delay(min(guest_cycles, + nsec_to_cycles(vcpu, timer_advance_ns))); + } else { + u64 delay_ns = guest_cycles * 1000000ULL; + do_div(delay_ns, vcpu->arch.virtual_tsc_khz); + ndelay(min_t(u32, delay_ns, timer_advance_ns)); + } +} + void wait_lapic_expire(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; + u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns; u64 guest_tsc, tsc_deadline, ns; - if (!lapic_in_kernel(vcpu)) - return; - if (apic->lapic_timer.expired_tscdeadline == 0) return; @@ -1501,33 +1518,37 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu) guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); - /* __delay is delay_tsc whenever the hardware has TSC, thus always. */ if (guest_tsc < tsc_deadline) - __delay(min(tsc_deadline - guest_tsc, - nsec_to_cycles(vcpu, lapic_timer_advance_ns))); + __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc); - if (!lapic_timer_advance_adjust_done) { + if (!apic->lapic_timer.timer_advance_adjust_done) { /* too early */ if (guest_tsc < tsc_deadline) { ns = (tsc_deadline - guest_tsc) * 1000000ULL; do_div(ns, vcpu->arch.virtual_tsc_khz); - lapic_timer_advance_ns -= min((unsigned int)ns, - lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + timer_advance_ns -= min((u32)ns, + timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); } else { /* too late */ ns = (guest_tsc - tsc_deadline) * 1000000ULL; do_div(ns, vcpu->arch.virtual_tsc_khz); - lapic_timer_advance_ns += min((unsigned int)ns, - lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); + timer_advance_ns += min((u32)ns, + timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP); } if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE) - lapic_timer_advance_adjust_done = true; + apic->lapic_timer.timer_advance_adjust_done = true; + if (unlikely(timer_advance_ns > 5000)) { + timer_advance_ns = 0; + apic->lapic_timer.timer_advance_adjust_done = true; + } + apic->lapic_timer.timer_advance_ns = timer_advance_ns; } } static void start_sw_tscdeadline(struct kvm_lapic *apic) { - u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; + struct kvm_timer *ktimer = &apic->lapic_timer; + u64 guest_tsc, tscdeadline = ktimer->tscdeadline; u64 ns = 0; ktime_t expire; struct kvm_vcpu *vcpu = apic->vcpu; @@ -1542,13 +1563,15 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic) now = ktime_get(); guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); - if (likely(tscdeadline > guest_tsc)) { - ns = (tscdeadline - guest_tsc) * 1000000ULL; - do_div(ns, this_tsc_khz); + + ns = (tscdeadline - guest_tsc) * 1000000ULL; + do_div(ns, this_tsc_khz); + + if (likely(tscdeadline > guest_tsc) && + likely(ns > apic->lapic_timer.timer_advance_ns)) { expire = ktime_add_ns(now, ns); - expire = ktime_sub_ns(expire, lapic_timer_advance_ns); - hrtimer_start(&apic->lapic_timer.timer, - expire, HRTIMER_MODE_ABS_PINNED); + expire = ktime_sub_ns(expire, ktimer->timer_advance_ns); + hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_PINNED); } else apic_timer_expired(apic); @@ -2255,7 +2278,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -int kvm_create_lapic(struct kvm_vcpu *vcpu) +int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) { struct kvm_lapic *apic; @@ -2279,6 +2302,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, 
HRTIMER_MODE_ABS_PINNED); apic->lapic_timer.timer.function = apic_timer_fn; + if (timer_advance_ns == -1) { + apic->lapic_timer.timer_advance_ns = 1000; + apic->lapic_timer.timer_advance_adjust_done = false; + } else { + apic->lapic_timer.timer_advance_ns = timer_advance_ns; + apic->lapic_timer.timer_advance_adjust_done = true; + } + /* * APIC is created enabled. This will prevent kvm_lapic_set_base from diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index ff6ef9c3d760..d6d049ba3045 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -31,8 +31,10 @@ struct kvm_timer { u32 timer_mode_mask; u64 tscdeadline; u64 expired_tscdeadline; + u32 timer_advance_ns; atomic_t pending; /* accumulated triggered timers */ bool hv_timer_in_use; + bool timer_advance_adjust_done; }; struct kvm_lapic { @@ -62,7 +64,7 @@ struct kvm_lapic { struct dest_map; -int kvm_create_lapic(struct kvm_vcpu *vcpu); +int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns); void kvm_free_lapic(struct kvm_vcpu *vcpu); int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e10962dfc203..d9c7b45d231f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4781,6 +4781,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) union kvm_mmu_extended_role ext = {0}; ext.cr0_pg = !!is_paging(vcpu); + ext.cr4_pae = !!is_pae(vcpu); ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); ext.cr4_pse = !!is_pse(vcpu); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 6401eb7ef19c..0c601d079cd2 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5423,7 +5423,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, return ret; /* Empty 'VMXON' state is permitted */ - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12)) + if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) return 0; if (kvm_state->vmx.vmcs_pa != -1ull) { @@ -5467,7 +5467,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, vmcs12->vmcs_link_pointer != -1ull) { struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); - if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12)) + if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12)) return -EINVAL; if (copy_from_user(shadow_vmcs12, diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 7b272738c576..d4cb1945b2e3 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -3,6 +3,7 @@ #include <asm/asm.h> #include <asm/bitsperlong.h> #include <asm/kvm_vcpu_regs.h> +#include <asm/nospec-branch.h> #define WORD_SIZE (BITS_PER_LONG / 8) @@ -77,6 +78,17 @@ ENDPROC(vmx_vmenter) * referred to by VMCS.HOST_RIP. */ ENTRY(vmx_vmexit) +#ifdef CONFIG_RETPOLINE + ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE + /* Preserve guest's RAX, it's used to stuff the RSB. */ + push %_ASM_AX + + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! 
*/ + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE + + pop %_ASM_AX +.Lvmexit_skip_rsb: +#endif ret ENDPROC(vmx_vmexit) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b4e7d645275a..0c955bb286ff 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6462,9 +6462,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); - /* Eliminate branch target predictions from guest mode */ - vmexit_fill_RSB(); - /* All fields are clean at this point */ if (static_branch_unlikely(&enable_evmcs)) current_evmcs->hv_clean_fields |= @@ -7032,6 +7029,7 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { struct vcpu_vmx *vmx; u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles; + struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; if (kvm_mwait_in_guest(vcpu->kvm)) return -EOPNOTSUPP; @@ -7040,7 +7038,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) tscl = rdtsc(); guest_tscl = kvm_read_l1_tsc(vcpu, tscl); delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; - lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns); + lapic_timer_advance_cycles = nsec_to_cycles(vcpu, + ktimer->timer_advance_ns); if (delta_tsc > lapic_timer_advance_cycles) delta_tsc -= lapic_timer_advance_cycles; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index a0d1fc80ac5a..b5edc8e3ce1d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -136,10 +136,14 @@ EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio); static u32 __read_mostly tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); -/* lapic timer advance (tscdeadline mode only) in nanoseconds */ -unsigned int __read_mostly lapic_timer_advance_ns = 1000; +/* + * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables + * adaptive tuning starting from default advancment of 1000ns. '0' disables + * advancement entirely. Any other value is used as-is and disables adaptive + * tuning, i.e. allows priveleged userspace to set an exact advancement time. + */ +static int __read_mostly lapic_timer_advance_ns = -1; module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR); -EXPORT_SYMBOL_GPL(lapic_timer_advance_ns); static bool __read_mostly vector_hashing = true; module_param(vector_hashing, bool, S_IRUGO); @@ -6535,6 +6539,12 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); +static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) +{ + vcpu->arch.pio.count = 0; + return 1; +} + static int complete_fast_pio_out(struct kvm_vcpu *vcpu) { vcpu->arch.pio.count = 0; @@ -6551,12 +6561,23 @@ static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); + if (ret) + return ret; - if (!ret) { + /* + * Workaround userspace that relies on old KVM behavior of %rip being + * incremented prior to exiting to userspace to handle "OUT 0x7e". 
+ */ + if (port == 0x7e && + kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { + vcpu->arch.complete_userspace_io = + complete_fast_pio_out_port_0x7e; + kvm_skip_emulated_instruction(vcpu); + } else { vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); vcpu->arch.complete_userspace_io = complete_fast_pio_out; } - return ret; + return 0; } static int complete_fast_pio_in(struct kvm_vcpu *vcpu) @@ -7873,7 +7894,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) } trace_kvm_entry(vcpu->vcpu_id); - if (lapic_timer_advance_ns) + if (lapic_in_kernel(vcpu) && + vcpu->arch.apic->lapic_timer.timer_advance_ns) wait_lapic_expire(vcpu); guest_enter_irqoff(); @@ -9061,7 +9083,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); - r = kvm_create_lapic(vcpu); + r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); if (r < 0) goto fail_mmu_destroy; } else diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index aedc5d0d4989..534d3f28bb01 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -294,8 +294,6 @@ extern u64 kvm_supported_xcr0(void); extern unsigned int min_timer_period_us; -extern unsigned int lapic_timer_advance_ns; - extern bool enable_vmware_backdoor; extern struct static_key kvm_no_apic_vcpu; diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 140e61843a07..5246db42de45 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -6,6 +6,18 @@ # Produces uninteresting flaky coverage. KCOV_INSTRUMENT_delay.o := n +# Early boot use of cmdline; don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KCOV_INSTRUMENT_cmdline.o := n +KASAN_SANITIZE_cmdline.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_cmdline.o = -pg +endif + +CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector) +endif + inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt quiet_cmd_inat_tables = GEN $@ @@ -23,7 +35,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o lib-y := delay.o misc.o cmdline.o cpu.o lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o -lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index db4e5aa0858b..b2f1822084ae 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -16,6 +16,30 @@ #include <asm/smap.h> #include <asm/export.h> +.macro ALIGN_DESTINATION + /* check for bad alignment of destination */ + movl %edi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +100: movb (%rsi),%al +101: movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 100b +102: + .section .fixup,"ax" +103: addl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + _ASM_EXTABLE_UA(100b, 103b) + _ASM_EXTABLE_UA(101b, 103b) + .endm + /* * copy_user_generic_unrolled - memory copy with exception handling. * This version is for CPUs like P4 that don't have efficient micro @@ -194,6 +218,30 @@ ENDPROC(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* + * Try to copy last bytes and clear the rest if needed. 
+ * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +ALIGN; +copy_user_handle_tail: + movl %edx,%ecx +1: rep movsb +2: mov %ecx,%eax + ASM_CLAC + ret + + _ASM_EXTABLE_UA(1b, 2b) +ENDPROC(copy_user_handle_tail) + +/* * copy_user_nocache - Uncached memory copy with exception handling * This will force destination out of cache for more performance. * diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 3b24dc05251c..9d05572370ed 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe) /* Copy successful. Return zero */ .L_done_memcpy_trap: xorl %eax, %eax +.L_done: ret ENDPROC(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) @@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe) addl %edx, %ecx .E_trailing_bytes: mov %ecx, %eax - ret + jmp .L_done /* * For write fault handling, given the destination is unaligned, diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S deleted file mode 100644 index dc2ab6ea6768..000000000000 --- a/arch/x86/lib/rwsem.S +++ /dev/null @@ -1,156 +0,0 @@ -/* - * x86 semaphore implementation. - * - * (C) Copyright 1999 Linus Torvalds - * - * Portions Copyright 1999 Red Hat, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> - */ - -#include <linux/linkage.h> -#include <asm/alternative-asm.h> -#include <asm/frame.h> - -#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) -#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) - -#ifdef CONFIG_X86_32 - -/* - * The semaphore operations have a special calling sequence that - * allow us to do a simpler in-line version of them. These routines - * need to convert that sequence back into the C sequence when - * there is contention on the semaphore. - * - * %eax contains the semaphore pointer on entry. Save the C-clobbered - * registers (%eax, %edx and %ecx) except %eax which is either a return - * value or just gets clobbered. Same is true for %edx so make sure GCC - * reloads it after the slow path, by making it hold a temporary, for - * example see ____down_write(). - */ - -#define save_common_regs \ - pushl %ecx - -#define restore_common_regs \ - popl %ecx - - /* Avoid uglifying the argument copying x86-64 needs to do. */ - .macro movq src, dst - .endm - -#else - -/* - * x86-64 rwsem wrappers - * - * This interfaces the inline asm code to the slow-path - * C routines. We need to save the call-clobbered regs - * that the asm does not mark as clobbered, and move the - * argument from %rax to %rdi. - * - * NOTE! We don't need to save %rax, because the functions - * will always return the semaphore pointer in %rax (which - * is also the input argument to these helpers) - * - * The following can clobber %rdx because the asm clobbers it: - * call_rwsem_down_write_failed - * call_rwsem_wake - * but %rdi, %rsi, %rcx, %r8-r11 always need saving. 
- */ - -#define save_common_regs \ - pushq %rdi; \ - pushq %rsi; \ - pushq %rcx; \ - pushq %r8; \ - pushq %r9; \ - pushq %r10; \ - pushq %r11 - -#define restore_common_regs \ - popq %r11; \ - popq %r10; \ - popq %r9; \ - popq %r8; \ - popq %rcx; \ - popq %rsi; \ - popq %rdi - -#endif - -/* Fix up special calling conventions */ -ENTRY(call_rwsem_down_read_failed) - FRAME_BEGIN - save_common_regs - __ASM_SIZE(push,) %__ASM_REG(dx) - movq %rax,%rdi - call rwsem_down_read_failed - __ASM_SIZE(pop,) %__ASM_REG(dx) - restore_common_regs - FRAME_END - ret -ENDPROC(call_rwsem_down_read_failed) - -ENTRY(call_rwsem_down_read_failed_killable) - FRAME_BEGIN - save_common_regs - __ASM_SIZE(push,) %__ASM_REG(dx) - movq %rax,%rdi - call rwsem_down_read_failed_killable - __ASM_SIZE(pop,) %__ASM_REG(dx) - restore_common_regs - FRAME_END - ret -ENDPROC(call_rwsem_down_read_failed_killable) - -ENTRY(call_rwsem_down_write_failed) - FRAME_BEGIN - save_common_regs - movq %rax,%rdi - call rwsem_down_write_failed - restore_common_regs - FRAME_END - ret -ENDPROC(call_rwsem_down_write_failed) - -ENTRY(call_rwsem_down_write_failed_killable) - FRAME_BEGIN - save_common_regs - movq %rax,%rdi - call rwsem_down_write_failed_killable - restore_common_regs - FRAME_END - ret -ENDPROC(call_rwsem_down_write_failed_killable) - -ENTRY(call_rwsem_wake) - FRAME_BEGIN - /* do nothing if still outstanding active readers */ - __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) - jnz 1f - save_common_regs - movq %rax,%rdi - call rwsem_wake - restore_common_regs -1: FRAME_END - ret -ENDPROC(call_rwsem_wake) - -ENTRY(call_rwsem_downgrade_wake) - FRAME_BEGIN - save_common_regs - __ASM_SIZE(push,) %__ASM_REG(dx) - movq %rax,%rdi - call rwsem_downgrade_wake - __ASM_SIZE(pop,) %__ASM_REG(dx) - restore_common_regs - FRAME_END - ret -ENDPROC(call_rwsem_downgrade_wake) diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index ee42bb0cbeb3..9952a01cad24 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -55,26 +55,6 @@ unsigned long clear_user(void __user *to, unsigned long n) EXPORT_SYMBOL(clear_user); /* - * Try to copy last bytes and clear the rest if needed. - * Since protection fault in copy_from/to_user is not a normal situation, - * it is not necessary to optimize tail handling. - */ -__visible unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len) -{ - for (; len; --len, to++) { - char c; - - if (__get_user_nocheck(c, from++, sizeof(char))) - break; - if (__put_user_nocheck(c, to, sizeof(char))) - break; - } - clac(); - return len; -} - -/* * Similar to copy_user_handle_tail, probe for the write fault point, * but reuse __memcpy_mcsafe in case a new read error is encountered. * clac() is handled in _copy_to_iter_mcsafe(). 
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 139b28a01ce4..d0255d64edce 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -35,6 +35,7 @@ #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/uaccess.h> +#include <linux/cpu.h> #include <asm/cpufeature.h> #include <asm/hypervisor.h> @@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void) } } - if (cmdline_find_option_bool(boot_command_line, "nopti")) { + if (cmdline_find_option_bool(boot_command_line, "nopti") || + cpu_mitigations_off()) { pti_mode = PTI_FORCE_OFF; pti_print_if_insecure("disabled on command line."); return; diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index a9e80e44178c..a8985e1f7432 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -32,12 +32,6 @@ config ARCH_DEFCONFIG default "arch/um/configs/i386_defconfig" if X86_32 default "arch/um/configs/x86_64_defconfig" if X86_64 -config RWSEM_XCHGADD_ALGORITHM - def_bool 64BIT - -config RWSEM_GENERIC_SPINLOCK - def_bool !RWSEM_XCHGADD_ALGORITHM - config 3_LEVEL_PGTABLES bool "Three-level pagetables" if !64BIT default 64BIT diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile index 2d686ae54681..33c51c064c77 100644 --- a/arch/x86/um/Makefile +++ b/arch/x86/um/Makefile @@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o obj-$(CONFIG_ELF_CORE) += elfcore.o subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o -subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o else obj-y += syscalls_64.o vdso/ -subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \ - ../lib/rwsem.o +subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o endif diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 4b9aafe766c5..35c8d91e6106 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -46,9 +46,6 @@ config XTENSA with reasonable minimum requirements. The Xtensa Linux project has a home page at <http://www.linux-xtensa.org/>. 
-config RWSEM_XCHGADD_ALGORITHM - def_bool y - config GENERIC_HWEIGHT def_bool y diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 3843198e03d4..4148090cafb0 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -25,7 +25,6 @@ generic-y += percpu.h generic-y += preempt.h generic-y += qrwlock.h generic-y += qspinlock.h -generic-y += rwsem.h generic-y += sections.h generic-y += socket.h generic-y += topology.h diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h index 0d766f9c1083..50889935138a 100644 --- a/arch/xtensa/include/asm/tlb.h +++ b/arch/xtensa/include/asm/tlb.h @@ -14,32 +14,6 @@ #include <asm/cache.h> #include <asm/page.h> -#if (DCACHE_WAY_SIZE <= PAGE_SIZE) - -/* Note, read http://lkml.org/lkml/2004/1/15/6 */ - -# define tlb_start_vma(tlb,vma) do { } while (0) -# define tlb_end_vma(tlb,vma) do { } while (0) - -#else - -# define tlb_start_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - flush_cache_range(vma, vma->vm_start, vma->vm_end); \ - } while(0) - -# define tlb_end_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ - } while(0) - -#endif - -#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0) -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - #include <asm-generic/tlb.h> #define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 8c4435c53f09..6e787cc9e5b9 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -46,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) if (con_id) best_possible += 1; + lockdep_assert_held(&clocks_mutex); + list_for_each_entry(p, &clocks, node) { match = 0; if (p->dev_id) { @@ -402,7 +404,10 @@ void devm_clk_release_clkdev(struct device *dev, const char *con_id, struct clk_lookup *cl; int rval; + mutex_lock(&clocks_mutex); cl = clk_find(dev_id, con_id); + mutex_unlock(&clocks_mutex); + WARN_ON(!cl); rval = devres_release(dev, devm_clkdev_release, devm_clk_match_clkdev, cl); diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index 9b49adb20d07..cbcdf664f336 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c @@ -167,7 +167,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw); - u32 n_mask, k_mask, m_mask, p_mask; + u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0; struct _ccu_nkmp _nkmp; unsigned long flags; u32 reg; @@ -186,10 +186,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate, ccu_nkmp_find_best(parent_rate, rate, &_nkmp); - n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift); - k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift); - m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift); - p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift); + /* + * If width is 0, GENMASK() macro may not generate expected mask (0) + * as it falls under undefined behaviour by C standard due to shifts + * which are equal or greater than width of left operand. This can + * be easily avoided by explicitly checking if width is 0. 
+ */ + if (nkmp->n.width) + n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, + nkmp->n.shift); + if (nkmp->k.width) + k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, + nkmp->k.shift); + if (nkmp->m.width) + m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, + nkmp->m.shift); + if (nkmp->p.width) + p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, + nkmp->p.shift); spin_lock_irqsave(nkmp->common.lock, flags); diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 099d83e4e910..fae2d5c43314 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -416,11 +416,8 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v) nr++; } -void __init dmi_memdev_walk(void) +static void __init dmi_memdev_walk(void) { - if (!dmi_available) - return; - if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) { dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr); if (dmi_memdev) @@ -614,7 +611,7 @@ static int __init dmi_smbios3_present(const u8 *buf) return 1; } -void __init dmi_scan_machine(void) +static void __init dmi_scan_machine(void) { char __iomem *p, *q; char buf[32]; @@ -769,15 +766,20 @@ static int __init dmi_init(void) subsys_initcall(dmi_init); /** - * dmi_set_dump_stack_arch_desc - set arch description for dump_stack() + * dmi_setup - scan and setup DMI system information * - * Invoke dump_stack_set_arch_desc() with DMI system information so that - * DMI identifiers are printed out on task dumps. Arch boot code should - * call this function after dmi_scan_machine() if it wants to print out DMI - * identifiers on task dumps. + * Scan the DMI system information. This setups DMI identifiers + * (dmi_system_id) for printing it out on task dumps and prepares + * DIMM entry information (dmi_memdev_info) from the SMBIOS table + * for using this when reporting memory errors. */ -void __init dmi_set_dump_stack_arch_desc(void) +void __init dmi_setup(void) { + dmi_scan_machine(); + if (!dmi_available) + return; + + dmi_memdev_walk(); dump_stack_set_arch_desc("%s", dmi_ids_string); } @@ -841,7 +843,7 @@ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi) * returns non zero or we hit the end. Callback function is called for * each successful match. Returns the number of matches. * - * dmi_scan_machine must be called before this function is called. + * dmi_setup must be called before this function is called. */ int dmi_check_system(const struct dmi_system_id *list) { @@ -871,7 +873,7 @@ EXPORT_SYMBOL(dmi_check_system); * Walk the blacklist table until the first match is found. Return the * pointer to the matching entry or NULL if there's no match. * - * dmi_scan_machine must be called before this function is called. + * dmi_setup must be called before this function is called. */ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list) { diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 0c1af675c338..e2ac5fa5531b 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -162,13 +162,11 @@ void efi_virtmap_unload(void) static int __init arm_dmi_init(void) { /* - * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to + * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to * be called early because dmi_id_init(), which is an arch_initcall * itself, depends on dmi_scan_machine() having been called already. 
*/ - dmi_scan_machine(); - if (dmi_available) - dmi_set_dump_stack_arch_desc(); + dmi_setup(); return 0; } core_initcall(arm_dmi_init); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index b0103e16fc1b..b1f7b64652db 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -71,7 +71,6 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y) lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y)) -STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab* STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS @@ -86,12 +85,13 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE # this time, use objcopy and leave all sections in place. # quiet_cmd_stubcopy = STUBCPY $@ - cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \ - then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \ - then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \ - rm -f $@; /bin/false); \ - else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \ - else /bin/false; fi + cmd_stubcopy = \ + $(STRIP) --strip-debug -o $@ $<; \ + if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \ + echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \ + /bin/false; \ + fi; \ + $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@ # # ARM discards the .data section because it disallows r/w data in the diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 2b4f373736c7..8b4cd31ce7bd 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -106,25 +106,19 @@ static noinline void save_stack(struct drm_mm_node *node) { unsigned long entries[STACKDEPTH]; - struct stack_trace trace = { - .entries = entries, - .max_entries = STACKDEPTH, - .skip = 1 - }; + unsigned int n; - save_stack_trace(&trace); - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries-1] == ULONG_MAX) - trace.nr_entries--; + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); /* May be called under spinlock, so avoid sleeping */ - node->stack = depot_save_stack(&trace, GFP_NOWAIT); + node->stack = stack_depot_save(entries, n, GFP_NOWAIT); } static void show_leaks(struct drm_mm *mm) { struct drm_mm_node *node; - unsigned long entries[STACKDEPTH]; + unsigned long *entries; + unsigned int nr_entries; char *buf; buf = kmalloc(BUFSZ, GFP_KERNEL); @@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm) return; list_for_each_entry(node, drm_mm_nodes(mm), node_list) { - struct stack_trace trace = { - .entries = entries, - .max_entries = STACKDEPTH - }; - if (!node->stack) { DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", node->start, node->size); continue; } - depot_fetch_stack(node->stack, &trace); - snprint_stack_trace(buf, BUFSZ, &trace, 0); + nr_entries = stack_depot_fetch(node->stack, &entries); + stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", node->start, node->size, buf); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 02adcaf6ebea..16f80a448820 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1667,6 +1667,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) len)) { end_user: user_access_end(); +end: kvfree(relocs); err = -EFAULT; goto err; @@ -1686,7 +1687,7 @@ end_user: * relocations were valid. 
*/ if (!user_access_begin(urelocs, size)) - goto end_user; + goto end; for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, @@ -2695,7 +2696,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, * when we did the "copy_from_user()" above. */ if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) - goto end_user; + goto end; for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) @@ -2709,6 +2710,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, } end_user: user_access_end(); +end:; } args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index b713bed20c38..41b5bcb803cb 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -36,11 +36,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) { - unsigned long entries[12]; - struct stack_trace trace = { - .entries = entries, - .max_entries = ARRAY_SIZE(entries), - }; + unsigned long *entries; + unsigned int nr_entries; char buf[512]; if (!vma->node.stack) { @@ -49,8 +46,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) return; } - depot_fetch_stack(vma->node.stack, &trace); - snprint_stack_trace(buf, sizeof(buf), &trace, 0); + nr_entries = stack_depot_fetch(vma->node.stack, &entries); + stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", vma->node.start, vma->node.size, reason, buf); } diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index a017a4232c0f..20c4434474e3 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -60,31 +60,20 @@ static noinline depot_stack_handle_t __save_depot_stack(void) { unsigned long entries[STACKDEPTH]; - struct stack_trace trace = { - .entries = entries, - .max_entries = ARRAY_SIZE(entries), - .skip = 1, - }; + unsigned int n; - save_stack_trace(&trace); - if (trace.nr_entries && - trace.entries[trace.nr_entries - 1] == ULONG_MAX) - trace.nr_entries--; - - return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN); + n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); + return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); } static void __print_depot_stack(depot_stack_handle_t stack, char *buf, int sz, int indent) { - unsigned long entries[STACKDEPTH]; - struct stack_trace trace = { - .entries = entries, - .max_entries = ARRAY_SIZE(entries), - }; + unsigned long *entries; + unsigned int nr_entries; - depot_fetch_stack(stack, &trace); - snprint_stack_trace(buf, sz, &trace, indent); + nr_entries = stack_depot_fetch(stack, &entries); + stack_trace_snprint(buf, sz, entries, nr_entries, indent); } static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 578d867a81d5..f33e349c4ec5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -255,10 +255,14 @@ static struct drm_driver qxl_driver = { #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, #endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = qxl_gem_prime_pin, .gem_prime_unpin = qxl_gem_prime_unpin, + .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, + 
.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c index 8b448eca1cd9..114653b471c6 100644 --- a/drivers/gpu/drm/qxl/qxl_prime.c +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj) qxl_bo_unpin(bo); } +struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *qxl_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + return ERR_PTR(-ENOSYS); +} + void *qxl_gem_prime_vmap(struct drm_gem_object *obj) { struct qxl_bo *bo = gem_to_qxl_bo(obj); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index bb8e3f149979..d464799e40a3 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) pm_runtime_get_sync(dev->dev); - if (dev->suspended) { - dev_err(dev->dev, "Error %s call while suspended\n", __func__); + if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) { ret = -ESHUTDOWN; goto done_nolock; } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index c0c3043b5d61..fd70b110e8f4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct clk_notifier_data *ndata = data; - struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk, + struct imx_i2c_struct *i2c_imx = container_of(nb, struct imx_i2c_struct, - clk); + clk_change_nb); if (action & POST_RATE_CHANGE) i2c_imx_set_clk(i2c_imx, ndata->new_rate); diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index d18b0941b71a..f14d4b3fab44 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev) i2c->adapter = synquacer_i2c_ops; i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.dev.parent = &pdev->dev; + i2c->adapter.dev.of_node = pdev->dev.of_node; + ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev)); i2c->adapter.nr = pdev->id; init_completion(&i2c->completion); diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c480ca385ffb..688aa3b5f3ac 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev) if (client->flags & I2C_CLIENT_HOST_NOTIFY) { dev_dbg(dev, "Using Host Notify IRQ\n"); + /* Keep adapter active when Host Notify is required */ + pm_runtime_get_sync(&client->adapter->dev); irq = i2c_smbus_host_notify_to_irq(client); } else if (dev->of_node) { irq = of_irq_get_byname(dev->of_node, "irq"); @@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev) device_init_wakeup(&client->dev, false); client->irq = client->init_irq; + if (client->flags & I2C_CLIENT_HOST_NOTIFY) + pm_runtime_put(&client->adapter->dev); return status; } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 1ecef76225a1..2a48ea3f1b30 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -150,7 +150,7 @@ struct 
dm_buffer { void (*end_io)(struct dm_buffer *, blk_status_t); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING #define MAX_STACK 10 - struct stack_trace stack_trace; + unsigned int stack_len; unsigned long stack_entries[MAX_STACK]; #endif }; @@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING static void buffer_record_stack(struct dm_buffer *b) { - b->stack_trace.nr_entries = 0; - b->stack_trace.max_entries = MAX_STACK; - b->stack_trace.entries = b->stack_entries; - b->stack_trace.skip = 2; - save_stack_trace(&b->stack_trace); + b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2); } #endif @@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) adjust_total_allocated(b->data_mode, (long)c->block_size); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - memset(&b->stack_trace, 0, sizeof(b->stack_trace)); + b->stack_len = 0; #endif return b; } @@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c) DMERR("leaked buffer %llx, hold count %u, list %d", (unsigned long long)b->block, b->hold_count, i); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - print_stack_trace(&b->stack_trace, 1); - b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */ + stack_trace_print(b->stack_entries, b->stack_len, 1); + /* mark unclaimed to avoid BUG_ON below */ + b->hold_count = 0; #endif } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 3972232b8037..749ec268d957 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -35,7 +35,10 @@ #define MAX_HOLDERS 4 #define MAX_STACK 10 -typedef unsigned long stack_entries[MAX_STACK]; +struct stack_store { + unsigned int nr_entries; + unsigned long entries[MAX_STACK]; +}; struct block_lock { spinlock_t lock; @@ -44,8 +47,7 @@ struct block_lock { struct task_struct *holders[MAX_HOLDERS]; #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - struct stack_trace traces[MAX_HOLDERS]; - stack_entries entries[MAX_HOLDERS]; + struct stack_store traces[MAX_HOLDERS]; #endif }; @@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task) { unsigned h = __find_holder(lock, NULL); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING - struct stack_trace *t; + struct stack_store *t; #endif get_task_struct(task); @@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task) #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING t = lock->traces + h; - t->nr_entries = 0; - t->max_entries = MAX_STACK; - t->entries = lock->entries[h]; - t->skip = 2; - save_stack_trace(t); + t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2); #endif } @@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock) DMERR("recursive lock detected in metadata"); #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING DMERR("previously held here:"); - print_stack_trace(lock->traces + i, 4); + stack_trace_print(lock->traces[i].entries, + lock->traces[i].nr_entries, 4); DMERR("subsequent acquisition attempted here:"); dump_stack(); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2c43e12b70af..6265d9225ec8 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -388,7 +388,7 @@ static void nvme_free_ns_head(struct kref *ref) nvme_mpath_remove_disk(head); ida_simple_remove(&head->subsys->ns_ida, head->instance); list_del_init(&head->entry); - cleanup_srcu_struct_quiesced(&head->srcu); + 
cleanup_srcu_struct(&head->srcu); nvme_put_subsystem(head->subsys); kfree(head); } diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index b121d8f8f3d7..27aeca30eeae 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -266,7 +266,7 @@ MODULE_PARM_DESC(pc104_3, "set interface types for ISA(PC104) board #3 (e.g. pc1 module_param_array(pc104_4, ulong, NULL, 0); MODULE_PARM_DESC(pc104_4, "set interface types for ISA(PC104) board #4 (e.g. pc104_4=232,232,485,485,..."); -static int rp_init(void); +static int __init rp_init(void); static void rp_cleanup_module(void); module_init(rp_init); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index ba906876cc45..9e529cc2b4ff 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { + if (efi_enabled(EFI_BOOT) && + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n", diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index d09b6cdb785a..b283d3a6e837 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -205,28 +205,17 @@ static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid) #ifdef CONFIG_STACKTRACE static void __save_stack_trace(struct ref_action *ra) { - struct stack_trace stack_trace; - - stack_trace.max_entries = MAX_TRACE; - stack_trace.nr_entries = 0; - stack_trace.entries = ra->trace; - stack_trace.skip = 2; - save_stack_trace(&stack_trace); - ra->trace_len = stack_trace.nr_entries; + ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2); } static void __print_stack_trace(struct btrfs_fs_info *fs_info, struct ref_action *ra) { - struct stack_trace trace; - if (ra->trace_len == 0) { btrfs_err(fs_info, " ref-verify: no stacktrace"); return; } - trace.nr_entries = ra->trace_len; - trace.entries = ra->trace; - print_stack_trace(&trace, 2); + stack_trace_print(ra->trace, ra->trace_len, 2); } #else static void inline __save_stack_trace(struct ref_action *ra) diff --git a/fs/inode.c b/fs/inode.c index e9d97add2b36..9a453f3637f8 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file) int kill; int error = 0; - /* Fast path for nothing security related */ - if (IS_NOSEC(inode)) + /* + * Fast path for nothing security related. + * As well for non-regular files, e.g. blkdev inodes. + * For example, blkdev_write_iter() might get here + * trying to remove privs which it is not allowed to. 
+ */ + if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode)) return 0; kill = dentry_needs_remove_privs(dentry); diff --git a/fs/proc/base.c b/fs/proc/base.c index 6a803a0b75df..f179568b4c76 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -407,7 +407,6 @@ static void unlock_trace(struct task_struct *task) static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { - struct stack_trace trace; unsigned long *entries; int err; @@ -430,20 +429,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, if (!entries) return -ENOMEM; - trace.nr_entries = 0; - trace.max_entries = MAX_STACK_TRACE_DEPTH; - trace.entries = entries; - trace.skip = 0; - err = lock_trace(task); if (!err) { - unsigned int i; + unsigned int i, nr_entries; - save_stack_trace_tsk(task, &trace); + nr_entries = stack_trace_save_tsk(task, entries, + MAX_STACK_TRACE_DEPTH, 0); - for (i = 0; i < trace.nr_entries; i++) { + for (i = 0; i < nr_entries; i++) { seq_printf(m, "[<0>] %pB\n", (void *)entries[i]); } + unlock_trace(task); } kfree(entries); @@ -489,10 +485,9 @@ static int lstats_show_proc(struct seq_file *m, void *v) lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long bt = lr->backtrace[q]; + if (!bt) break; - if (bt == ULONG_MAX) - break; seq_printf(m, " %ps", (void *)bt); } seq_putc(m, '\n'); diff --git a/fs/super.c b/fs/super.c index 583a0124bc39..2739f57515f8 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1467,11 +1467,6 @@ int vfs_get_tree(struct fs_context *fc) struct super_block *sb; int error; - if (fc->fs_type->fs_flags & FS_REQUIRES_DEV && !fc->source) { - errorf(fc, "Filesystem requires source device"); - return -ENOENT; - } - if (fc->root) return -EBUSY; diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 1fd3011ea623..7fd4802222b8 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -229,7 +229,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_44BSD: return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid); case UFS_UID_EFT: - if (inode->ui_u1.oldids.ui_suid == 0xFFFF) + if (inode->ui_u1.oldids.ui_sgid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); /* Fall through */ default: diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h deleted file mode 100644 index 93e67a055a4d..000000000000 --- a/include/asm-generic/rwsem.h +++ /dev/null @@ -1,140 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_RWSEM_H -#define _ASM_GENERIC_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead." -#endif - -#ifdef __KERNEL__ - -/* - * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. - * Adapted largely from include/asm-i386/rwsem.h - * by Paul Mackerras <paulus@samba.org>. 
- */ - -/* - * the semaphore definition - */ -#ifdef CONFIG_64BIT -# define RWSEM_ACTIVE_MASK 0xffffffffL -#else -# define RWSEM_ACTIVE_MASK 0x0000ffffL -#endif - -#define RWSEM_UNLOCKED_VALUE 0x00000000L -#define RWSEM_ACTIVE_BIAS 0x00000001L -#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) - rwsem_down_read_failed(sem); -} - -static inline int __down_read_killable(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { - if (IS_ERR(rwsem_down_read_failed_killable(sem))) - return -EINTR; - } - - return 0; -} - -static inline int __down_read_trylock(struct rw_semaphore *sem) -{ - long tmp; - - while ((tmp = atomic_long_read(&sem->count)) >= 0) { - if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, - tmp + RWSEM_ACTIVE_READ_BIAS)) { - return 1; - } - } - return 0; -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count); - if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) - rwsem_down_write_failed(sem); -} - -static inline int __down_write_killable(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count); - if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) - if (IS_ERR(rwsem_down_write_failed_killable(sem))) - return -EINTR; - return 0; -} - -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - return tmp == RWSEM_UNLOCKED_VALUE; -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - long tmp; - - tmp = atomic_long_dec_return_release(&sem->count); - if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) - rwsem_wake(sem); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, - &sem->count) < 0)) - rwsem_wake(sem); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - long tmp; - - /* - * When downgrading from exclusive to shared ownership, - * anything inside the write-locked region cannot leak - * into the read side. In contrast, anything in the - * read-locked region is ok to be re-ordered into the - * write side. As such, rely on RELEASE semantics. - */ - tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count); - if (tmp < 0) - rwsem_downgrade_wake(sem); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_GENERIC_RWSEM_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6be86c1c5c58..b9edc7608d90 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -19,9 +19,131 @@ #include <linux/swap.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> +#include <asm/cacheflush.h> #ifdef CONFIG_MMU +/* + * Generic MMU-gather implementation. + * + * The mmu_gather data structure is used by the mm code to implement the + * correct and efficient ordering of freeing pages and TLB invalidations. 
+ * + * This correct ordering is: + * + * 1) unhook page + * 2) TLB invalidate page + * 3) free page + * + * That is, we must never free a page before we have ensured there are no live + * translations left to it. Otherwise it might be possible to observe (or + * worse, change) the page content after it has been reused. + * + * The mmu_gather API consists of: + * + * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather + * + * Finish in particular will issue a (final) TLB invalidate and free + * all (remaining) queued pages. + * + * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA + * + * Defaults to flushing at tlb_end_vma() to reset the range; helps when + * there's large holes between the VMAs. + * + * - tlb_remove_page() / __tlb_remove_page() + * - tlb_remove_page_size() / __tlb_remove_page_size() + * + * __tlb_remove_page_size() is the basic primitive that queues a page for + * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a + * boolean indicating if the queue is (now) full and a call to + * tlb_flush_mmu() is required. + * + * tlb_remove_page() and tlb_remove_page_size() imply the call to + * tlb_flush_mmu() when required and has no return value. + * + * - tlb_change_page_size() + * + * call before __tlb_remove_page*() to set the current page-size; implies a + * possible tlb_flush_mmu() call. + * + * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() + * + * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets + * related state, like the range) + * + * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees + * whatever pages are still batched. + * + * - mmu_gather::fullmm + * + * A flag set by tlb_gather_mmu() to indicate we're going to free + * the entire mm; this allows a number of optimizations. + * + * - We can ignore tlb_{start,end}_vma(); because we don't + * care about ranges. Everything will be shot down. + * + * - (RISC) architectures that use ASIDs can cycle to a new ASID + * and delay the invalidation until ASID space runs out. + * + * - mmu_gather::need_flush_all + * + * A flag that can be set by the arch code if it wants to force + * flush the entire TLB irrespective of the range. For instance + * x86-PAE needs this when changing top-level entries. + * + * And allows the architecture to provide and implement tlb_flush(): + * + * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make + * use of: + * + * - mmu_gather::start / mmu_gather::end + * + * which provides the range that needs to be flushed to cover the pages to + * be freed. + * + * - mmu_gather::freed_tables + * + * set when we freed page table pages + * + * - tlb_get_unmap_shift() / tlb_get_unmap_size() + * + * returns the smallest TLB entry size unmapped in this range. + * + * If an architecture does not provide tlb_flush() a default implementation + * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is + * specified, in which case we'll default to flush_tlb_mm(). + * + * Additionally there are a few opt-in features: + * + * HAVE_MMU_GATHER_PAGE_SIZE + * + * This ensures we call tlb_flush() every time tlb_change_page_size() actually + * changes the size and provides mmu_gather::page_size to tlb_flush(). + * + * HAVE_RCU_TABLE_FREE + * + * This provides tlb_remove_table(), to be used instead of tlb_remove_page() + * for page directores (__p*_free_tlb()). This provides separate freeing of + * the page-table pages themselves in a semi-RCU fashion (see comment below). 
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates + * and therefore doesn't naturally serialize with software page-table walkers. + * + * When used, an architecture is expected to provide __tlb_remove_table() + * which does the actual freeing of these pages. + * + * HAVE_RCU_TABLE_NO_INVALIDATE + * + * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before + * freeing the page-table pages. This can be avoided if you use + * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux + * page-tables natively. + * + * MMU_GATHER_NO_RANGE + * + * Use this if your architecture lacks an efficient flush_tlb_range(). + */ + #ifdef CONFIG_HAVE_RCU_TABLE_FREE /* * Semi RCU freeing of the page directories. @@ -60,11 +182,11 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) -extern void tlb_table_flush(struct mmu_gather *tlb); extern void tlb_remove_table(struct mmu_gather *tlb, void *table); #endif +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER /* * If we can't allocate a page to make a big batch of page pointers * to work on, then just handle a few from the on-stack structure. @@ -89,14 +211,21 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -/* struct mmu_gather is an opaque type used by the mm code for passing around +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); +#endif + +/* + * struct mmu_gather is an opaque type used by the mm code for passing around * any data needed by arch specific code for tlb_remove_page. */ struct mmu_gather { struct mm_struct *mm; + #ifdef CONFIG_HAVE_RCU_TABLE_FREE struct mmu_table_batch *batch; #endif + unsigned long start; unsigned long end; /* @@ -124,23 +253,30 @@ struct mmu_gather { unsigned int cleared_puds : 1; unsigned int cleared_p4ds : 1; + /* + * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma + */ + unsigned int vma_exec : 1; + unsigned int vma_huge : 1; + + unsigned int batch_count; + +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER struct mmu_gather_batch *active; struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; - unsigned int batch_count; - int page_size; -}; -#define HAVE_GENERIC_MMU_GATHER +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + unsigned int page_size; +#endif +#endif +}; void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); void tlb_flush_mmu(struct mmu_gather *tlb); void arch_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end, bool force); -void tlb_flush_mmu_free(struct mmu_gather *tlb); -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, - int page_size); static inline void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address, @@ -163,8 +299,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) tlb->cleared_pmds = 0; tlb->cleared_puds = 0; tlb->cleared_p4ds = 0; + /* + * Do not reset mmu_gather::vma_* fields here, we do not + * call into tlb_start_vma() again to set them if there is an + * intermediate flush. 
+ */ +} + +#ifdef CONFIG_MMU_GATHER_NO_RANGE + +#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma) +#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma() +#endif + +/* + * When an architecture does not have efficient means of range flushing TLBs + * there is no point in doing intermediate flushes on tlb_end_vma() to keep the + * range small. We equally don't have to worry about page granularity or other + * things. + * + * All we need to do is issue a full flush for any !0 range. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->end) + flush_tlb_mm(tlb->mm); +} + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#define tlb_end_vma tlb_end_vma +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#else /* CONFIG_MMU_GATHER_NO_RANGE */ + +#ifndef tlb_flush + +#if defined(tlb_start_vma) || defined(tlb_end_vma) +#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma() +#endif + +/* + * When an architecture does not provide its own tlb_flush() implementation + * but does have a reasonably efficient flush_vma_range() implementation + * use that. + */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || tlb->need_flush_all) { + flush_tlb_mm(tlb->mm); + } else if (tlb->end) { + struct vm_area_struct vma = { + .vm_mm = tlb->mm, + .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) | + (tlb->vma_huge ? VM_HUGETLB : 0), + }; + + flush_tlb_range(&vma, tlb->start, tlb->end); + } } +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + /* + * flush_tlb_range() implementations that look at VM_HUGETLB (tile, + * mips-4k) flush only large pages. + * + * flush_tlb_range() implementations that flush I-TLB also flush D-TLB + * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing + * range. + * + * We rely on tlb_end_vma() to issue a flush, such that when we reset + * these values the batch is empty. + */ + tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB); + tlb->vma_exec = !!(vma->vm_flags & VM_EXEC); +} + +#else + +static inline void +tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { } + +#endif + +#endif /* CONFIG_MMU_GATHER_NO_RANGE */ + static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { if (!tlb->end) @@ -196,21 +418,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) return tlb_remove_page_size(tlb, page, PAGE_SIZE); } -#ifndef tlb_remove_check_page_size_change -#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change -static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, +static inline void tlb_change_page_size(struct mmu_gather *tlb, unsigned int page_size) { - /* - * We don't care about page size change, just update - * mmu_gather page size here so that debug checks - * doesn't throw false warning. - */ -#ifdef CONFIG_DEBUG_VM +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + if (tlb->page_size && tlb->page_size != page_size) { + if (!tlb->fullmm) + tlb_flush_mmu(tlb); + } + tlb->page_size = page_size; #endif } -#endif static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb) { @@ -237,17 +456,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) * the vmas are adjusted to only cover the region to be torn down. 
*/ #ifndef tlb_start_vma -#define tlb_start_vma(tlb, vma) do { } while (0) -#endif +static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; -#define __tlb_end_vma(tlb, vma) \ - do { \ - if (!tlb->fullmm) \ - tlb_flush_mmu_tlbonly(tlb); \ - } while (0) + tlb_update_vma_flags(tlb, vma); + flush_cache_range(vma, vma->vm_start, vma->vm_end); +} +#endif #ifndef tlb_end_vma -#define tlb_end_vma __tlb_end_vma +static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (tlb->fullmm) + return; + + /* + * Do a TLB flush and reset the range at VMA boundaries; this avoids + * the ranges growing with the unused space between consecutive VMAs, + * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on + * this. + */ + tlb_flush_mmu_tlbonly(tlb); +} #endif #ifndef __tlb_remove_tlb_entry @@ -372,6 +604,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb) #endif /* CONFIG_MMU */ -#define tlb_migrate_finish(mm) do {} while (0) - #endif /* _ASM_GENERIC__TLB_H */ diff --git a/include/linux/clk.h b/include/linux/clk.h index d8bc1a856b39..f689fc58d7be 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -811,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent) return true; } +static inline int clk_set_rate_range(struct clk *clk, unsigned long min, + unsigned long max) +{ + return 0; +} + +static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + +static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) +{ + return 0; +} + static inline int clk_set_parent(struct clk *clk, struct clk *parent) { return 0; diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 445348facea9..d58aa0db05f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, .line = __LINE__, \ }; \ ______r = !!(cond); \ - ______f.miss_hit[______r]++; \ + ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 73baab8535c1..2d62f01108a0 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -204,4 +204,28 @@ static inline void cpu_smt_disable(bool force) { } static inline void cpu_smt_check_topology(void) { } #endif +/* + * These are used for a global "mitigations=" cmdline option for toggling + * optional CPU mitigations. 
+ */ +enum cpu_mitigations { + CPU_MITIGATIONS_OFF, + CPU_MITIGATIONS_AUTO, + CPU_MITIGATIONS_AUTO_NOSMT, +}; + +extern enum cpu_mitigations cpu_mitigations; + +/* mitigations=off */ +static inline bool cpu_mitigations_off(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_OFF; +} + +/* mitigations=auto,nosmt */ +static inline bool cpu_mitigations_auto_nosmt(void) +{ + return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; +} + #endif /* _LINUX_CPU_H_ */ diff --git a/include/linux/dmi.h b/include/linux/dmi.h index c46fdb36700b..8de8c4f15163 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h @@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); extern const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from); -extern void dmi_scan_machine(void); -extern void dmi_memdev_walk(void); -extern void dmi_set_dump_stack_arch_desc(void); +extern void dmi_setup(void); extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); extern int dmi_get_bios_year(void); extern int dmi_name_in_vendors(const char *str); @@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } -static inline void dmi_scan_machine(void) { return; } -static inline void dmi_memdev_walk(void) { } -static inline void dmi_set_dump_stack_arch_desc(void) { } +static inline void dmi_setup(void) { } static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) { if (yearp) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 730876187344..20899919ead8 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { #ifdef CONFIG_STACK_TRACER -#define STACK_TRACE_ENTRIES 500 - -struct stack_trace; - -extern unsigned stack_trace_index[]; -extern struct stack_trace stack_trace_max; -extern unsigned long stack_trace_max_size; -extern arch_spinlock_t stack_trace_max_lock; - extern int stack_tracer_enabled; -void stack_trace_print(void); -int -stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + +int stack_trace_sysctl(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! 
*/ DECLARE_PER_CPU(int, disable_stack_tracer); diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h index a49f2b45b3f0..42710d5949ba 100644 --- a/include/linux/jump_label_ratelimit.h +++ b/include/linux/jump_label_ratelimit.h @@ -12,21 +12,79 @@ struct static_key_deferred { struct delayed_work work; }; -extern void static_key_slow_dec_deferred(struct static_key_deferred *key); -extern void static_key_deferred_flush(struct static_key_deferred *key); +struct static_key_true_deferred { + struct static_key_true key; + unsigned long timeout; + struct delayed_work work; +}; + +struct static_key_false_deferred { + struct static_key_false key; + unsigned long timeout; + struct delayed_work work; +}; + +#define static_key_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout) +#define static_branch_slow_dec_deferred(x) \ + __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout) + +#define static_key_deferred_flush(x) \ + __static_key_deferred_flush((x), &(x)->work) + +extern void +__static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout); +extern void __static_key_deferred_flush(void *key, struct delayed_work *work); extern void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); +extern void jump_label_update_timeout(struct work_struct *work); + +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { \ + .key = { STATIC_KEY_INIT_TRUE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { \ + .key = { STATIC_KEY_INIT_FALSE }, \ + .timeout = (rl), \ + .work = __DELAYED_WORK_INITIALIZER((name).work, \ + jump_label_update_timeout, \ + 0), \ + } + +#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key) + #else /* !CONFIG_JUMP_LABEL */ struct static_key_deferred { struct static_key key; }; +struct static_key_true_deferred { + struct static_key_true key; +}; +struct static_key_false_deferred { + struct static_key_false key; +}; +#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \ + struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT } +#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \ + struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT } + +#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key) + static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) { STATIC_KEY_CHECK_USE(key); static_key_slow_dec(&key->key); } -static inline void static_key_deferred_flush(struct static_key_deferred *key) +static inline void static_key_deferred_flush(void *key) { STATIC_KEY_CHECK_USE(key); } diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 79c3873d58ac..6e2377e6c1d6 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,6 +66,11 @@ struct lock_class_key { extern struct lock_class_key __lockdep_no_validate__; +struct lock_trace { + unsigned int nr_entries; + unsigned int offset; +}; + #define LOCKSTAT_POINTS 4 /* @@ -100,7 +105,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; + struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, @@ -188,7 +193,7 @@ struct lock_list { 
struct list_head entry; struct lock_class *class; struct lock_class *links_to; - struct stack_trace trace; + struct lock_trace trace; int distance; /* @@ -471,7 +476,7 @@ struct pin_cookie { }; #define NIL_COOKIE (struct pin_cookie){ } -#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; }) +#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e47ef764f613..15a82ff0aefe 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -240,7 +240,6 @@ struct perf_event; #define PERF_PMU_CAP_NO_INTERRUPT 0x01 #define PERF_PMU_CAP_NO_NMI 0x02 #define PERF_PMU_CAP_AUX_NO_SG 0x04 -#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 #define PERF_PMU_CAP_EXCLUSIVE 0x10 #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 @@ -464,7 +463,7 @@ enum perf_addr_filter_action_t { /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage - * @inode: object file's inode for file-based filters + * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop @@ -888,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); + +extern void perf_pmu_resched(struct pmu *pmu); + extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); @@ -1055,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo #endif /* - * Take a snapshot of the regs. Skip ip and frame pointer to - * the nth caller. We only need a few of the regs: + * When generating a perf sample in-line, instead of from an interrupt / + * exception, we lack a pt_regs. This is typically used from software events + * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. + * + * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests - * - bp for callchains - * - eflags, for future purposes, just in case + * - sp for PERF_SAMPLE_CALLCHAIN + * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) + * + * NOTE: assumes @regs is otherwise already 0 filled; this is important for + * things like PERF_SAMPLE_REGS_INTR. 
*/ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6cdb1db776cf..922bb6848813 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp) static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { - if (READ_ONCE(rhp->func) == f) + rcu_callback_t func = READ_ONCE(rhp->func); + + if (func == f) return true; - WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L); + WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h deleted file mode 100644 index e47568363e5e..000000000000 --- a/include/linux/rwsem-spinlock.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* rwsem-spinlock.h: fallback C implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). - * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de> - * - Derived also from comments by Linus - */ - -#ifndef _LINUX_RWSEM_SPINLOCK_H -#define _LINUX_RWSEM_SPINLOCK_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ -/* - * the rw-semaphore definition - * - if count is 0 then there are no active readers or writers - * - if count is +ve then that is the number of active readers - * - if count is -1 then there is one active writer - * - if wait_list is not empty, then there are processes waiting for the semaphore - */ -struct rw_semaphore { - __s32 count; - raw_spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#define RWSEM_UNLOCKED_VALUE 0x00000000 - -extern void __down_read(struct rw_semaphore *sem); -extern int __must_check __down_read_killable(struct rw_semaphore *sem); -extern int __down_read_trylock(struct rw_semaphore *sem); -extern void __down_write(struct rw_semaphore *sem); -extern int __must_check __down_write_killable(struct rw_semaphore *sem); -extern int __down_write_trylock(struct rw_semaphore *sem); -extern void __up_read(struct rw_semaphore *sem); -extern void __up_write(struct rw_semaphore *sem); -extern void __downgrade_write(struct rw_semaphore *sem); -extern int rwsem_is_locked(struct rw_semaphore *sem); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 67dbb57508b1..2ea18a3def04 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -20,25 +20,30 @@ #include <linux/osq_lock.h> #endif -struct rw_semaphore; - -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include <linux/rwsem-spinlock.h> /* use a generic implementation */ -#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE -#else -/* All arch specific implementations share the same struct */ +/* + * For an uncontended rwsem, count and owner are the only fields a task + * needs to touch when acquiring the rwsem. So they are put next to each + * other to increase the chance that they will share the same cacheline. + * + * In a contended rwsem, the owner is likely the most frequently accessed + * field in the structure as the optimistic waiter that holds the osq lock + * will spin on owner. 
For an embedded rwsem, other hot fields in the + * containing structure should be moved further away from the rwsem to + * reduce the chance that they will share the same cacheline causing + * cacheline bouncing problem. + */ struct rw_semaphore { atomic_long_t count; - struct list_head wait_list; - raw_spinlock_t wait_lock; #ifdef CONFIG_RWSEM_SPIN_ON_OWNER - struct optimistic_spin_queue osq; /* spinner MCS lock */ /* * Write owner. Used as a speculative check to see * if the owner is running on the cpu. */ struct task_struct *owner; + struct optimistic_spin_queue osq; /* spinner MCS lock */ #endif + raw_spinlock_t wait_lock; + struct list_head wait_list; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -50,24 +55,14 @@ struct rw_semaphore { */ #define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L) -extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); -extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); -extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); - -/* Include the arch specific part */ -#include <asm/rwsem.h> - /* In all implementations count != 0 means locked */ static inline int rwsem_is_locked(struct rw_semaphore *sem) { return atomic_long_read(&sem->count) != 0; } +#define RWSEM_UNLOCKED_VALUE 0L #define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) -#endif /* Common initializer macros and functions */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1549584a1538..50606a6e73d6 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1057,7 +1057,6 @@ struct task_struct { #ifdef CONFIG_RSEQ struct rseq __user *rseq; - u32 rseq_len; u32 rseq_sig; /* * RmW on rseq_event_mask must be performed atomically @@ -1855,12 +1854,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) { if (clone_flags & CLONE_THREAD) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } else { t->rseq = current->rseq; - t->rseq_len = current->rseq_len; t->rseq_sig = current->rseq_sig; t->rseq_event_mask = current->rseq_event_mask; } @@ -1869,7 +1866,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) static inline void rseq_execve(struct task_struct *t) { t->rseq = NULL; - t->rseq_len = 0; t->rseq_sig = 0; t->rseq_event_mask = 0; } diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c495b2d51569..e432cc92c73d 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -56,45 +56,11 @@ struct srcu_struct { }; void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); +void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); -/** - * cleanup_srcu_struct - deconstruct a sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. 
- */ -static inline void cleanup_srcu_struct(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, false); -} - -/** - * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure - * @ssp: structure to clean up. - * - * Must invoke this after you are finished using a given srcu_struct that - * was initialized via init_srcu_struct(), else you leak memory. Also, - * all grace-period processing must have completed. - * - * "Completed" means that the last synchronize_srcu() and - * synchronize_srcu_expedited() calls must have returned before the call - * to cleanup_srcu_struct_quiesced(). It also means that the callback - * from the last call_srcu() must have been invoked before the call to - * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help - * with this last. Violating these rules will get you a WARN_ON() splat - * (with high probability, anyway), and will also cause the srcu_struct - * to be leaked. - */ -static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) -{ - _cleanup_srcu_struct(ssp, true); -} - #ifdef CONFIG_DEBUG_LOCK_ALLOC /** diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 7978b3e2c1e1..0805dee1b6b8 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -23,10 +23,10 @@ typedef u32 depot_stack_handle_t; -struct stack_trace; +depot_stack_handle_t stack_depot_save(unsigned long *entries, + unsigned int nr_entries, gfp_t gfp_flags); -depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); - -void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); +unsigned int stack_depot_fetch(depot_stack_handle_t handle, + unsigned long **entries); #endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index ba29a0613e66..f0cfd12cb45e 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -3,11 +3,64 @@ #define __LINUX_STACKTRACE_H #include <linux/types.h> +#include <asm/errno.h> struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE +void stack_trace_print(unsigned long *trace, unsigned int nr_entries, + int spaces); +int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, + unsigned int nr_entries, int spaces); +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_tsk(struct task_struct *task, + unsigned long *store, unsigned int size, + unsigned int skipnr); +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr); +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); + +/* Internal interfaces. Do not use in generic code */ +#ifdef CONFIG_ARCH_STACKWALK + +/** + * stack_trace_consume_fn - Callback for arch_stack_walk() + * @cookie: Caller supplied pointer handed back by arch_stack_walk() + * @addr: The stack entry address to consume + * @reliable: True when the stack entry is reliable. Required by + * some printk based consumers. + * + * Return: True, if the entry was consumed or skipped + * False, if there is no space left to store + */ +typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr, + bool reliable); +/** + * arch_stack_walk - Architecture specific function to walk the stack + * @consume_entry: Callback which is invoked by the architecture code for + * each entry. 
+ * @cookie: Caller supplied pointer which is handed back to + * @consume_entry + * @task: Pointer to a task struct, can be NULL + * @regs: Pointer to registers, can be NULL + * + * ============ ======= ============================================ + * task regs + * ============ ======= ============================================ + * task NULL Stack trace from task (can be current) + * current regs Stack trace starting on regs->stackpointer + * ============ ======= ============================================ + */ +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs); +int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task); +void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, + const struct pt_regs *regs); + +#else /* CONFIG_ARCH_STACKWALK */ struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; @@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern int save_stack_trace_tsk_reliable(struct task_struct *tsk, struct stack_trace *trace); - -extern void print_stack_trace(struct stack_trace *trace, int spaces); -extern int snprint_stack_trace(char *buf, size_t size, - struct stack_trace *trace, int spaces); - -#ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); +#endif /* !CONFIG_ARCH_STACKWALK */ +#endif /* CONFIG_STACKTRACE */ + +#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE) +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size); #else -# define save_stack_trace_user(trace) do { } while (0) +static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk, + unsigned long *store, + unsigned int size) +{ + return -ENOSYS; +} #endif -#else /* !CONFIG_STACKTRACE */ -# define save_stack_trace(trace) do { } while (0) -# define save_stack_trace_tsk(tsk, trace) do { } while (0) -# define save_stack_trace_user(trace) do { } while (0) -# define print_stack_trace(trace, spaces) do { } while (0) -# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) -# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; }) -#endif /* CONFIG_STACKTRACE */ - #endif /* __LINUX_STACKTRACE_H */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 37b226e8df13..2b70130af585 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) +static inline unsigned long user_access_save(void) { return 0UL; } +static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index fbba478ae522..e335953fa704 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -229,7 +229,7 @@ config MUTEX_SPIN_ON_OWNER config RWSEM_SPIN_ON_OWNER def_bool y - depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW + depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW config LOCK_SPIN_ON_OWNER def_bool y diff --git a/kernel/Makefile b/kernel/Makefile index 6c57e78817da..62471e75a2b0 100644 --- 
a/kernel/Makefile +++ b/kernel/Makefile @@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n # Don't self-instrument. KCOV_INSTRUMENT_kcov.o := n KASAN_SANITIZE_kcov.o := n +CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c index 1323360d90e3..a563c8fdad0d 100644 --- a/kernel/backtracetest.c +++ b/kernel/backtracetest.c @@ -48,19 +48,14 @@ static void backtrace_test_irq(void) #ifdef CONFIG_STACKTRACE static void backtrace_test_saved(void) { - struct stack_trace trace; unsigned long entries[8]; + unsigned int nr_entries; pr_info("Testing a saved backtrace.\n"); pr_info("The following trace is a kernel self test and not a bug!\n"); - trace.nr_entries = 0; - trace.max_entries = ARRAY_SIZE(entries); - trace.entries = entries; - trace.skip = 0; - - save_stack_trace(&trace); - print_stack_trace(&trace, 0); + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + stack_trace_print(entries, nr_entries, 0); } #else static void backtrace_test_saved(void) diff --git a/kernel/cpu.c b/kernel/cpu.c index d1bf6e2b4752..1e890f548df8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -2312,3 +2312,18 @@ void __init boot_cpu_hotplug_init(void) #endif this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); } + +enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; + +static int __init mitigations_parse_cmdline(char *arg) +{ + if (!strcmp(arg, "off")) + cpu_mitigations = CPU_MITIGATIONS_OFF; + else if (!strcmp(arg, "auto")) + cpu_mitigations = CPU_MITIGATIONS_AUTO; + else if (!strcmp(arg, "auto,nosmt")) + cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; + + return 0; +} +early_param("mitigations", mitigations_parse_cmdline); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index a218e43cc382..badd77670d00 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -89,8 +89,8 @@ struct dma_debug_entry { int sg_mapped_ents; enum map_err_types map_err_type; #ifdef CONFIG_STACKTRACE - struct stack_trace stacktrace; - unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; + unsigned int stack_len; + unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; #endif }; @@ -174,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry) #ifdef CONFIG_STACKTRACE if (entry) { pr_warning("Mapped at:\n"); - print_stack_trace(&entry->stacktrace, 0); + stack_trace_print(entry->stack_entries, entry->stack_len, 0); } #endif } @@ -704,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void) spin_unlock_irqrestore(&free_entries_lock, flags); #ifdef CONFIG_STACKTRACE - entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; - entry->stacktrace.entries = entry->st_entries; - entry->stacktrace.skip = 1; - save_stack_trace(&entry->stacktrace); + entry->stack_len = stack_trace_save(entry->stack_entries, + ARRAY_SIZE(entry->stack_entries), + 1); #endif - return entry; } diff --git a/kernel/events/core.c b/kernel/events/core.c index dc7dead2d2cc..abbd4b3b96c2 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2478,6 +2478,16 @@ static void ctx_resched(struct perf_cpu_context *cpuctx, perf_pmu_enable(cpuctx->ctx.pmu); } +void perf_pmu_resched(struct pmu *pmu) +{ + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + struct perf_event_context *task_ctx = cpuctx->task_ctx; + + perf_ctx_lock(cpuctx, task_ctx); + ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); + 
perf_ctx_unlock(cpuctx, task_ctx); +} + /* * Cross CPU call to install and enable a performance event * @@ -11917,7 +11927,7 @@ static void __init perf_event_init_all_cpus(void) } } -void perf_swevent_init_cpu(unsigned int cpu) +static void perf_swevent_init_cpu(unsigned int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 5eedb49a65ea..674b35383491 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -610,8 +610,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, * PMU requests more than one contiguous chunks of memory * for SW double buffering */ - if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) && - !overwrite) { + if (!overwrite) { if (!max_order) return -EINVAL; diff --git a/kernel/iomem.c b/kernel/iomem.c index f7525e14ebc6..93c264444510 100644 --- a/kernel/iomem.c +++ b/kernel/iomem.c @@ -55,7 +55,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size, * * MEMREMAP_WB - matches the default mapping for System RAM on * the architecture. This is usually a read-allocate write-back cache. - * Morever, if MEMREMAP_WB is specified and the requested remap region is RAM + * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM * memremap() will bypass establishing a new mapping and instead return * a pointer into the direct map. * @@ -86,7 +86,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags) /* Try all mapping types requested until one returns non-NULL */ if (flags & MEMREMAP_WB) { /* - * MEMREMAP_WB is special in that it can be satisifed + * MEMREMAP_WB is special in that it can be satisfied * from the direct map. Some archs depend on the * capability of memremap() to autodetect cases where * the requested range is potentially in System RAM. 
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index f808c6a97dcc..f6e5515ee077 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c @@ -220,9 +220,8 @@ devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, irq_flow_handler_t handler) { struct irq_chip_generic *gc; - unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); - gc = devm_kzalloc(dev, sz, GFP_KERNEL); + gc = devm_kzalloc(dev, struct_size(gc, chip_types, num_ct), GFP_KERNEL); if (gc) irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base, handler); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 1401afa0d58a..53a081392115 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -357,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) desc->affinity_notify = notify; raw_spin_unlock_irqrestore(&desc->lock, flags); - if (old_notify) + if (old_notify) { + cancel_work_sync(&old_notify->work); kref_put(&old_notify->kref, old_notify->release); + } return 0; } diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c index 1e4cb63a5c82..90c735da15d0 100644 --- a/kernel/irq/timings.c +++ b/kernel/irq/timings.c @@ -9,6 +9,7 @@ #include <linux/idr.h> #include <linux/irq.h> #include <linux/math64.h> +#include <linux/log2.h> #include <trace/events/irq.h> @@ -18,16 +19,6 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabled); DEFINE_PER_CPU(struct irq_timings, irq_timings); -struct irqt_stat { - u64 next_evt; - u64 last_ts; - u64 variance; - u32 avg; - u32 nr_samples; - int anomalies; - int valid; -}; - static DEFINE_IDR(irqt_stats); void irq_timings_enable(void) @@ -40,75 +31,360 @@ void irq_timings_disable(void) static_branch_disable(&irq_timing_enabled); } -/** - * irqs_update - update the irq timing statistics with a new timestamp +/* + * The main goal of this algorithm is to predict the next interrupt + * occurrence on the current CPU. + * + * Currently, the interrupt timings are stored in a circular array + * buffer every time there is an interrupt, as a tuple: the interrupt + * number and the associated timestamp when the event occurred <irq, + * timestamp>. + * + * For every interrupt occurring in a short period of time, we can + * measure the elapsed time between the occurrences for the same + * interrupt and we end up with a suite of intervals. The experience + * showed the interrupts are often coming following a periodic + * pattern. + * + * The objective of the algorithm is to find out this periodic pattern + * in a fastest way and use its period to predict the next irq event. + * + * When the next interrupt event is requested, we are in the situation + * where the interrupts are disabled and the circular buffer + * containing the timings is filled with the events which happened + * after the previous next-interrupt-event request. + * + * At this point, we read the circular buffer and we fill the irq + * related statistics structure. After this step, the circular array + * containing the timings is empty because all the values are + * dispatched in their corresponding buffers. + * + * Now for each interrupt, we can predict the next event by using the + * suffix array, log interval and exponential moving average + * + * 1. Suffix array + * + * Suffix array is an array of all the suffixes of a string. It is + * widely used as a data structure for compression, text search, ... 
+ * For instance for the word 'banana', the suffixes will be: 'banana' + * 'anana' 'nana' 'ana' 'na' 'a' + * + * Usually, the suffix array is sorted but for our purpose it is + * not necessary and won't provide any improvement in the context of + * the solved problem where we clearly define the boundaries of the + * search by a max period and min period. + * + * The suffix array will build a suite of intervals of different + * length and will look for the repetition of each suite. If the suite + * is repeating then we have the period because it is the length of + * the suite whatever its position in the buffer. + * + * 2. Log interval + * + * We saw the irq timings allow to compute the interval of the + * occurrences for a specific interrupt. We can reasonibly assume the + * longer is the interval, the higher is the error for the next event + * and we can consider storing those interval values into an array + * where each slot in the array correspond to an interval at the power + * of 2 of the index. For example, index 12 will contain values + * between 2^11 and 2^12. + * + * At the end we have an array of values where at each index defines a + * [2^index - 1, 2 ^ index] interval values allowing to store a large + * number of values inside a small array. + * + * For example, if we have the value 1123, then we store it at + * ilog2(1123) = 10 index value. + * + * Storing those value at the specific index is done by computing an + * exponential moving average for this specific slot. For instance, + * for values 1800, 1123, 1453, ... fall under the same slot (10) and + * the exponential moving average is computed every time a new value + * is stored at this slot. + * + * 3. Exponential Moving Average + * + * The EMA is largely used to track a signal for stocks or as a low + * pass filter. The magic of the formula, is it is very simple and the + * reactivity of the average can be tuned with the factors called + * alpha. + * + * The higher the alphas are, the faster the average respond to the + * signal change. In our case, if a slot in the array is a big + * interval, we can have numbers with a big difference between + * them. The impact of those differences in the average computation + * can be tuned by changing the alpha value. + * + * + * -- The algorithm -- + * + * We saw the different processing above, now let's see how they are + * used together. + * + * For each interrupt: + * For each interval: + * Compute the index = ilog2(interval) + * Compute a new_ema(buffer[index], interval) + * Store the index in a circular buffer + * + * Compute the suffix array of the indexes + * + * For each suffix: + * If the suffix is reverse-found 3 times + * Return suffix + * + * Return Not found + * + * However we can not have endless suffix array to be build, it won't + * make sense and it will add an extra overhead, so we can restrict + * this to a maximum suffix length of 5 and a minimum suffix length of + * 2. The experience showed 5 is the majority of the maximum pattern + * period found for different devices. + * + * The result is a pattern finding less than 1us for an interrupt. * - * @irqs: an irqt_stat struct pointer - * @ts: the new timestamp + * Example based on real values: * - * The statistics are computed online, in other words, the code is - * designed to compute the statistics on a stream of values rather - * than doing multiple passes on the values to compute the average, - * then the variance. 
The integer division introduces a loss of - * precision but with an acceptable error margin regarding the results - * we would have with the double floating precision: we are dealing - * with nanosec, so big numbers, consequently the mantisse is - * negligeable, especially when converting the time in usec - * afterwards. + * Example 1 : MMC write/read interrupt interval: * - * The computation happens at idle time. When the CPU is not idle, the - * interrupts' timestamps are stored in the circular buffer, when the - * CPU goes idle and this routine is called, all the buffer's values - * are injected in the statistical model continuying to extend the - * statistics from the previous busy-idle cycle. + * 223947, 1240, 1384, 1386, 1386, + * 217416, 1236, 1384, 1386, 1387, + * 214719, 1241, 1386, 1387, 1384, + * 213696, 1234, 1384, 1386, 1388, + * 219904, 1240, 1385, 1389, 1385, + * 212240, 1240, 1386, 1386, 1386, + * 214415, 1236, 1384, 1386, 1387, + * 214276, 1234, 1384, 1388, ? * - * The observations showed a device will trigger a burst of periodic - * interrupts followed by one or two peaks of longer time, for - * instance when a SD card device flushes its cache, then the periodic - * intervals occur again. A one second inactivity period resets the - * stats, that gives us the certitude the statistical values won't - * exceed 1x10^9, thus the computation won't overflow. + * For each element, apply ilog2(value) * - * Basically, the purpose of the algorithm is to watch the periodic - * interrupts and eliminate the peaks. + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, ? * - * An interrupt is considered periodically stable if the interval of - * its occurences follow the normal distribution, thus the values - * comply with: + * Max period of 5, we take the last (max_period * 3) 15 elements as + * we can be confident if the pattern repeats itself three times it is + * a repeating pattern. * - * avg - 3 x stddev < value < avg + 3 x stddev + * 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, 8, + * 15, 8, 8, 8, ? * - * Which can be simplified to: + * Suffixes are: * - * -3 x stddev < value - avg < 3 x stddev + * 1) 8, 15, 8, 8, 8 <- max period + * 2) 8, 15, 8, 8 + * 3) 8, 15, 8 + * 4) 8, 15 <- min period * - * abs(value - avg) < 3 x stddev + * From there we search the repeating pattern for each suffix. * - * In order to save a costly square root computation, we use the - * variance. For the record, stddev = sqrt(variance). The equation - * above becomes: + * buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8 + * | | | | | | | | | | | | | | | + * 8, 15, 8, 8, 8 | | | | | | | | | | + * 8, 15, 8, 8, 8 | | | | | + * 8, 15, 8, 8, 8 * - * abs(value - avg) < 3 x sqrt(variance) + * When moving the suffix, we found exactly 3 matches. * - * And finally we square it: + * The first suffix with period 5 is repeating. * - * (value - avg) ^ 2 < (3 x sqrt(variance)) ^ 2 + * The next event is (3 * max_period) % suffix_period * - * (value - avg) x (value - avg) < 9 x variance + * In this example, the result 0, so the next event is suffix[0] => 8 * - * Statistically speaking, any values out of this interval is - * considered as an anomaly and is discarded. However, a normal - * distribution appears when the number of samples is 30 (it is the - * rule of thumb in statistics, cf. "30 samples" on Internet). When - * there are three consecutive anomalies, the statistics are resetted. 
+ * However, 8 is the index in the array of exponential moving average + * which was calculated on the fly when storing the values, so the + * interval is ema[8] = 1366 * + * + * Example 2: + * + * 4, 3, 5, 100, + * 3, 3, 5, 117, + * 4, 4, 5, 112, + * 4, 3, 4, 110, + * 3, 5, 3, 117, + * 4, 4, 5, 112, + * 4, 3, 4, 110, + * 3, 4, 5, 112, + * 4, 3, 4, 110 + * + * ilog2 + * + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4 + * + * Max period 5: + * 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4, + * 0, 0, 0, 4 + * + * Suffixes: + * + * 1) 0, 0, 4, 0, 0 + * 2) 0, 0, 4, 0 + * 3) 0, 0, 4 + * 4) 0, 0 + * + * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4 + * | | | | | | X + * 0, 0, 4, 0, 0, | X + * 0, 0 + * + * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4 + * | | | | | | | | | | | | | | | + * 0, 0, 4, 0, | | | | | | | | | | | + * 0, 0, 4, 0, | | | | | | | + * 0, 0, 4, 0, | | | + * 0 0 4 + * + * Pattern is found 3 times, the remaining is 1 which results from + * (max_period * 3) % suffix_period. This value is the index in the + * suffix arrays. The suffix array for a period 4 has the value 4 + * at index 1. + */ +#define EMA_ALPHA_VAL 64 +#define EMA_ALPHA_SHIFT 7 + +#define PREDICTION_PERIOD_MIN 2 +#define PREDICTION_PERIOD_MAX 5 +#define PREDICTION_FACTOR 4 +#define PREDICTION_MAX 10 /* 2 ^ PREDICTION_MAX useconds */ +#define PREDICTION_BUFFER_SIZE 16 /* slots for EMAs, hardly more than 16 */ + +struct irqt_stat { + u64 last_ts; + u64 ema_time[PREDICTION_BUFFER_SIZE]; + int timings[IRQ_TIMINGS_SIZE]; + int circ_timings[IRQ_TIMINGS_SIZE]; + int count; +}; + +/* + * Exponential moving average computation */ -static void irqs_update(struct irqt_stat *irqs, u64 ts) +static u64 irq_timings_ema_new(u64 value, u64 ema_old) +{ + s64 diff; + + if (unlikely(!ema_old)) + return value; + + diff = (value - ema_old) * EMA_ALPHA_VAL; + /* + * We can use a s64 type variable to be added with the u64 + * ema_old variable as this one will never have its topmost + * bit set, it will be always smaller than 2^63 nanosec + * interrupt interval (292 years). + */ + return ema_old + (diff >> EMA_ALPHA_SHIFT); +} + +static int irq_timings_next_event_index(int *buffer, size_t len, int period_max) +{ + int i; + + /* + * The buffer contains the suite of intervals, in a ilog2 + * basis, we are looking for a repetition. We point the + * beginning of the search three times the length of the + * period beginning at the end of the buffer. We do that for + * each suffix. + */ + for (i = period_max; i >= PREDICTION_PERIOD_MIN ; i--) { + + int *begin = &buffer[len - (i * 3)]; + int *ptr = begin; + + /* + * We look if the suite with period 'i' repeat + * itself. If it is truncated at the end, as it + * repeats we can use the period to find out the next + * element. + */ + while (!memcmp(ptr, begin, i * sizeof(*ptr))) { + ptr += i; + if (ptr >= &buffer[len]) + return begin[((i * 3) % i)]; + } + } + + return -1; +} + +static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now) +{ + int index, i, period_max, count, start, min = INT_MAX; + + if ((now - irqs->last_ts) >= NSEC_PER_SEC) { + irqs->count = irqs->last_ts = 0; + return U64_MAX; + } + + /* + * As we want to find three times the repetition, we need a + * number of intervals greater or equal to three times the + * maximum period, otherwise we truncate the max period. + */ + period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ? 
+ PREDICTION_PERIOD_MAX : irqs->count / 3; + + /* + * If we don't have enough irq timings for this prediction, + * just bail out. + */ + if (period_max <= PREDICTION_PERIOD_MIN) + return U64_MAX; + + /* + * 'count' will depends if the circular buffer wrapped or not + */ + count = irqs->count < IRQ_TIMINGS_SIZE ? + irqs->count : IRQ_TIMINGS_SIZE; + + start = irqs->count < IRQ_TIMINGS_SIZE ? + 0 : (irqs->count & IRQ_TIMINGS_MASK); + + /* + * Copy the content of the circular buffer into another buffer + * in order to linearize the buffer instead of dealing with + * wrapping indexes and shifted array which will be prone to + * error and extremelly difficult to debug. + */ + for (i = 0; i < count; i++) { + int index = (start + i) & IRQ_TIMINGS_MASK; + + irqs->timings[i] = irqs->circ_timings[index]; + min = min_t(int, irqs->timings[i], min); + } + + index = irq_timings_next_event_index(irqs->timings, count, period_max); + if (index < 0) + return irqs->last_ts + irqs->ema_time[min]; + + return irqs->last_ts + irqs->ema_time[index]; +} + +static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts) { u64 old_ts = irqs->last_ts; - u64 variance = 0; u64 interval; - s64 diff; + int index; /* * The timestamps are absolute time values, we need to compute @@ -135,87 +411,28 @@ static void irqs_update(struct irqt_stat *irqs, u64 ts) * want as we need another timestamp to compute an interval. */ if (interval >= NSEC_PER_SEC) { - memset(irqs, 0, sizeof(*irqs)); - irqs->last_ts = ts; + irqs->count = 0; return; } /* - * Pre-compute the delta with the average as the result is - * used several times in this function. - */ - diff = interval - irqs->avg; - - /* - * Increment the number of samples. - */ - irqs->nr_samples++; - - /* - * Online variance divided by the number of elements if there - * is more than one sample. Normally the formula is division - * by nr_samples - 1 but we assume the number of element will be - * more than 32 and dividing by 32 instead of 31 is enough - * precise. - */ - if (likely(irqs->nr_samples > 1)) - variance = irqs->variance >> IRQ_TIMINGS_SHIFT; - - /* - * The rule of thumb in statistics for the normal distribution - * is having at least 30 samples in order to have the model to - * apply. Values outside the interval are considered as an - * anomaly. - */ - if ((irqs->nr_samples >= 30) && ((diff * diff) > (9 * variance))) { - /* - * After three consecutive anomalies, we reset the - * stats as it is no longer stable enough. - */ - if (irqs->anomalies++ >= 3) { - memset(irqs, 0, sizeof(*irqs)); - irqs->last_ts = ts; - return; - } - } else { - /* - * The anomalies must be consecutives, so at this - * point, we reset the anomalies counter. - */ - irqs->anomalies = 0; - } - - /* - * The interrupt is considered stable enough to try to predict - * the next event on it. + * Get the index in the ema table for this interrupt. The + * PREDICTION_FACTOR increase the interval size for the array + * of exponential average. */ - irqs->valid = 1; + index = likely(interval) ? + ilog2((interval >> 10) / PREDICTION_FACTOR) : 0; /* - * Online average algorithm: - * - * new_average = average + ((value - average) / count) - * - * The variance computation depends on the new average - * to be computed here first. - * + * Store the index as an element of the pattern in another + * circular array. 
*/ - irqs->avg = irqs->avg + (diff >> IRQ_TIMINGS_SHIFT); + irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index; - /* - * Online variance algorithm: - * - * new_variance = variance + (value - average) x (value - new_average) - * - * Warning: irqs->avg is updated with the line above, hence - * 'interval - irqs->avg' is no longer equal to 'diff' - */ - irqs->variance = irqs->variance + (diff * (interval - irqs->avg)); + irqs->ema_time[index] = irq_timings_ema_new(interval, + irqs->ema_time[index]); - /* - * Update the next event - */ - irqs->next_evt = ts + irqs->avg; + irqs->count++; } /** @@ -259,6 +476,9 @@ u64 irq_timings_next_event(u64 now) */ lockdep_assert_irqs_disabled(); + if (!irqts->count) + return next_evt; + /* * Number of elements in the circular buffer: If it happens it * was flushed before, then the number of elements could be @@ -269,21 +489,19 @@ u64 irq_timings_next_event(u64 now) * type but with the cost of extra computation in the * interrupt handler hot path. We choose efficiency. * - * Inject measured irq/timestamp to the statistical model - * while decrementing the counter because we consume the data - * from our circular buffer. + * Inject measured irq/timestamp to the pattern prediction + * model while decrementing the counter because we consume the + * data from our circular buffer. */ - for (i = irqts->count & IRQ_TIMINGS_MASK, - irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count); - irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) { - irq = irq_timing_decode(irqts->values[i], &ts); + i = (irqts->count & IRQ_TIMINGS_MASK) - 1; + irqts->count = min(IRQ_TIMINGS_SIZE, irqts->count); + for (; irqts->count > 0; irqts->count--, i = (i + 1) & IRQ_TIMINGS_MASK) { + irq = irq_timing_decode(irqts->values[i], &ts); s = idr_find(&irqt_stats, irq); - if (s) { - irqs = this_cpu_ptr(s); - irqs_update(irqs, ts); - } + if (s) + irq_timings_store(irq, this_cpu_ptr(s), ts); } /* @@ -294,26 +512,12 @@ u64 irq_timings_next_event(u64 now) irqs = this_cpu_ptr(s); - if (!irqs->valid) - continue; + ts = __irq_timings_next_event(irqs, i, now); + if (ts <= now) + return now; - if (irqs->next_evt <= now) { - irq = i; - next_evt = now; - - /* - * This interrupt mustn't use in the future - * until new events occur and update the - * statistics. - */ - irqs->valid = 0; - break; - } - - if (irqs->next_evt < next_evt) { - irq = i; - next_evt = irqs->next_evt; - } + if (ts < next_evt) + next_evt = ts; } return next_evt; diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 6b7cdf17ccf8..73288914ed5e 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void) */ } -/* - * Enqueue the irq_work @work on @cpu unless it's already pending - * somewhere. - * - * Can be re-enqueued while the callback is still in progress. 
- */ -bool irq_work_queue_on(struct irq_work *work, int cpu) +/* Enqueue on current CPU, work must already be claimed and preempt disabled */ +static void __irq_work_queue_local(struct irq_work *work) { - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -#ifdef CONFIG_SMP - - /* Arch remote IPI send/receive backend aren't NMI safe */ - WARN_ON_ONCE(in_nmi()); + /* If the work is "lazy", handle it from next tick if any */ + if (work->flags & IRQ_WORK_LAZY) { + if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && + tick_nohz_tick_stopped()) + arch_irq_work_raise(); + } else { + if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) + arch_irq_work_raise(); + } +} +/* Enqueue the irq work @work on the current CPU */ +bool irq_work_queue(struct irq_work *work) +{ /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) - arch_send_call_function_single_ipi(cpu); - -#else /* #ifdef CONFIG_SMP */ - irq_work_queue(work); -#endif /* #else #ifdef CONFIG_SMP */ + /* Queue the entry and raise the IPI if needed. */ + preempt_disable(); + __irq_work_queue_local(work); + preempt_enable(); return true; } +EXPORT_SYMBOL_GPL(irq_work_queue); -/* Enqueue the irq work @work on the current CPU */ -bool irq_work_queue(struct irq_work *work) +/* + * Enqueue the irq_work @work on @cpu unless it's already pending + * somewhere. + * + * Can be re-enqueued while the callback is still in progress. + */ +bool irq_work_queue_on(struct irq_work *work, int cpu) { +#ifndef CONFIG_SMP + return irq_work_queue(work); + +#else /* CONFIG_SMP: */ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; - /* Queue the entry and raise the IPI if needed. */ preempt_disable(); - - /* If the work is "lazy", handle it from next tick if any */ - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) - arch_irq_work_raise(); + if (cpu != smp_processor_id()) { + /* Arch remote IPI send/receive backend aren't NMI safe */ + WARN_ON_ONCE(in_nmi()); + if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) + arch_send_call_function_single_ipi(cpu); } else { - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) - arch_irq_work_raise(); + __irq_work_queue_local(work); } - preempt_enable(); return true; +#endif /* CONFIG_SMP */ } -EXPORT_SYMBOL_GPL(irq_work_queue); + bool irq_work_needs_cpu(void) { diff --git a/kernel/jump_label.c b/kernel/jump_label.c index bad96b476eb6..de6efdecc70d 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -202,11 +202,13 @@ void static_key_disable(struct static_key *key) } EXPORT_SYMBOL_GPL(static_key_disable); -static void __static_key_slow_dec_cpuslocked(struct static_key *key, - unsigned long rate_limit, - struct delayed_work *work) +static bool static_key_slow_try_dec(struct static_key *key) { - lockdep_assert_cpus_held(); + int val; + + val = atomic_fetch_add_unless(&key->enabled, -1, 1); + if (val == 1) + return false; /* * The negative count check is valid even when a negative @@ -215,63 +217,70 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key, * returns is unbalanced, because all other static_key_slow_inc() * instances block while the update is in progress. 
*/ - if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { - WARN(atomic_read(&key->enabled) < 0, - "jump label: negative count!\n"); + WARN(val < 0, "jump label: negative count!\n"); + return true; +} + +static void __static_key_slow_dec_cpuslocked(struct static_key *key) +{ + lockdep_assert_cpus_held(); + + if (static_key_slow_try_dec(key)) return; - } - if (rate_limit) { - atomic_inc(&key->enabled); - schedule_delayed_work(work, rate_limit); - } else { + jump_label_lock(); + if (atomic_dec_and_test(&key->enabled)) jump_label_update(key); - } jump_label_unlock(); } -static void __static_key_slow_dec(struct static_key *key, - unsigned long rate_limit, - struct delayed_work *work) +static void __static_key_slow_dec(struct static_key *key) { cpus_read_lock(); - __static_key_slow_dec_cpuslocked(key, rate_limit, work); + __static_key_slow_dec_cpuslocked(key); cpus_read_unlock(); } -static void jump_label_update_timeout(struct work_struct *work) +void jump_label_update_timeout(struct work_struct *work) { struct static_key_deferred *key = container_of(work, struct static_key_deferred, work.work); - __static_key_slow_dec(&key->key, 0, NULL); + __static_key_slow_dec(&key->key); } +EXPORT_SYMBOL_GPL(jump_label_update_timeout); void static_key_slow_dec(struct static_key *key) { STATIC_KEY_CHECK_USE(key); - __static_key_slow_dec(key, 0, NULL); + __static_key_slow_dec(key); } EXPORT_SYMBOL_GPL(static_key_slow_dec); void static_key_slow_dec_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); - __static_key_slow_dec_cpuslocked(key, 0, NULL); + __static_key_slow_dec_cpuslocked(key); } -void static_key_slow_dec_deferred(struct static_key_deferred *key) +void __static_key_slow_dec_deferred(struct static_key *key, + struct delayed_work *work, + unsigned long timeout) { STATIC_KEY_CHECK_USE(key); - __static_key_slow_dec(&key->key, key->timeout, &key->work); + + if (static_key_slow_try_dec(key)) + return; + + schedule_delayed_work(work, timeout); } -EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); +EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred); -void static_key_deferred_flush(struct static_key_deferred *key) +void __static_key_deferred_flush(void *key, struct delayed_work *work) { STATIC_KEY_CHECK_USE(key); - flush_delayed_work(&key->work); + flush_delayed_work(work); } -EXPORT_SYMBOL_GPL(static_key_deferred_flush); +EXPORT_SYMBOL_GPL(__static_key_deferred_flush); void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 96b4179cee6a..99a5b5f46dc5 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -120,8 +120,8 @@ account_global_scheduler_latency(struct task_struct *tsk, break; } - /* 0 and ULONG_MAX entries mean end of backtrace: */ - if (record == 0 || record == ULONG_MAX) + /* 0 entry marks end of backtrace: */ + if (!record) break; } if (same) { @@ -141,20 +141,6 @@ account_global_scheduler_latency(struct task_struct *tsk, memcpy(&latency_record[i], lat, sizeof(struct latency_record)); } -/* - * Iterator to store a backtrace into a latency record entry - */ -static inline void store_stacktrace(struct task_struct *tsk, - struct latency_record *lat) -{ - struct stack_trace trace; - - memset(&trace, 0, sizeof(trace)); - trace.max_entries = LT_BACKTRACEDEPTH; - trace.entries = &lat->backtrace[0]; - save_stack_trace_tsk(tsk, &trace); -} - /** * __account_scheduler_latency - record an occurred latency * @tsk - the task struct of the task hitting the latency @@ -191,7 +177,8 @@ 
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) lat.count = 1; lat.time = usecs; lat.max = usecs; - store_stacktrace(tsk, &lat); + + stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0); raw_spin_lock_irqsave(&latency_lock, flags); @@ -210,8 +197,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) break; } - /* 0 and ULONG_MAX entries mean end of backtrace: */ - if (record == 0 || record == ULONG_MAX) + /* 0 entry is end of backtrace */ + if (!record) break; } if (same) { @@ -252,10 +239,10 @@ static int lstats_show(struct seq_file *m, void *v) lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long bt = lr->backtrace[q]; + if (!bt) break; - if (bt == ULONG_MAX) - break; + seq_printf(m, " %ps", (void *)bt); } seq_puts(m, "\n"); diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index 9c89ae8b337a..c53370d596be 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -202,15 +202,15 @@ void klp_update_patch_state(struct task_struct *task) * Determine whether the given stack trace includes any references to a * to-be-patched or to-be-unpatched function. */ -static int klp_check_stack_func(struct klp_func *func, - struct stack_trace *trace) +static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, + unsigned int nr_entries) { unsigned long func_addr, func_size, address; struct klp_ops *ops; int i; - for (i = 0; i < trace->nr_entries; i++) { - address = trace->entries[i]; + for (i = 0; i < nr_entries; i++) { + address = entries[i]; if (klp_target_state == KLP_UNPATCHED) { /* @@ -254,29 +254,25 @@ static int klp_check_stack_func(struct klp_func *func, static int klp_check_stack(struct task_struct *task, char *err_buf) { static unsigned long entries[MAX_STACK_ENTRIES]; - struct stack_trace trace; struct klp_object *obj; struct klp_func *func; - int ret; + int ret, nr_entries; - trace.skip = 0; - trace.nr_entries = 0; - trace.max_entries = MAX_STACK_ENTRIES; - trace.entries = entries; - ret = save_stack_trace_tsk_reliable(task, &trace); + ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries)); WARN_ON_ONCE(ret == -ENOSYS); - if (ret) { + if (ret < 0) { snprintf(err_buf, STACK_ERR_BUF_SIZE, "%s: %s:%d has an unreliable stack\n", __func__, task->comm, task->pid); return ret; } + nr_entries = ret; klp_for_each_object(klp_transition_patch, obj) { if (!obj->patched) continue; klp_for_each_func(obj, func) { - ret = klp_check_stack_func(func, &trace); + ret = klp_check_stack_func(func, entries, nr_entries); if (ret) { snprintf(err_buf, STACK_ERR_BUF_SIZE, "%s: %s:%d is sleeping on function %s\n", diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 392c7f23af76..6fe2f333aecb 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -3,7 +3,7 @@ # and is generally not a function of system call inputs. 
KCOV_INSTRUMENT := n -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o +obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o rwsem-xadd.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) @@ -25,8 +25,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o -obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o +obj-$(CONFIG_LOCK_EVENT_COUNTS) += lock_events.o diff --git a/kernel/locking/lock_events.c b/kernel/locking/lock_events.c new file mode 100644 index 000000000000..fa2c2f951c6b --- /dev/null +++ b/kernel/locking/lock_events.c @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: Waiman Long <waiman.long@hpe.com> + */ + +/* + * Collect locking event counts + */ +#include <linux/debugfs.h> +#include <linux/sched.h> +#include <linux/sched/clock.h> +#include <linux/fs.h> + +#include "lock_events.h" + +#undef LOCK_EVENT +#define LOCK_EVENT(name) [LOCKEVENT_ ## name] = #name, + +#define LOCK_EVENTS_DIR "lock_event_counts" + +/* + * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different + * types of locks will be reported under the <debugfs>/lock_event_counts/ + * directory. See lock_events_list.h for the list of available locking + * events. + * + * Writing to the special ".reset_counts" file will reset all the above + * locking event counts. This is a very slow operation and so should not + * be done frequently. + * + * These event counts are implemented as per-cpu variables which are + * summed and computed whenever the corresponding debugfs files are read. This + * minimizes added overhead making the counts usable even in a production + * environment. + */ +static const char * const lockevent_names[lockevent_num + 1] = { + +#include "lock_events_list.h" + + [LOCKEVENT_reset_cnts] = ".reset_counts", +}; + +/* + * Per-cpu counts + */ +DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]); + +/* + * The lockevent_read() function can be overridden. + */ +ssize_t __weak lockevent_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[64]; + int cpu, id, len; + u64 sum = 0; + + /* + * Get the counter ID stored in file->f_inode->i_private + */ + id = (long)file_inode(file)->i_private; + + if (id >= lockevent_num) + return -EBADF; + + for_each_possible_cpu(cpu) + sum += per_cpu(lockevents[id], cpu); + len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +/* + * Function to handle write request + * + * When idx = reset_cnts, reset all the counts. 
+ */ +static ssize_t lockevent_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int cpu; + + /* + * Get the counter ID stored in file->f_inode->i_private + */ + if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts) + return count; + + for_each_possible_cpu(cpu) { + int i; + unsigned long *ptr = per_cpu_ptr(lockevents, cpu); + + for (i = 0 ; i < lockevent_num; i++) + WRITE_ONCE(ptr[i], 0); + } + return count; +} + +/* + * Debugfs data structures + */ +static const struct file_operations fops_lockevent = { + .read = lockevent_read, + .write = lockevent_write, + .llseek = default_llseek, +}; + +#ifdef CONFIG_PARAVIRT_SPINLOCKS +#include <asm/paravirt.h> + +static bool __init skip_lockevent(const char *name) +{ + static int pv_on __initdata = -1; + + if (pv_on < 0) + pv_on = !pv_is_native_spin_unlock(); + /* + * Skip PV qspinlock events on bare metal. + */ + if (!pv_on && !memcmp(name, "pv_", 3)) + return true; + return false; +} +#else +static inline bool skip_lockevent(const char *name) +{ + return false; +} +#endif + +/* + * Initialize debugfs for the locking event counts. + */ +static int __init init_lockevent_counts(void) +{ + struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL); + int i; + + if (!d_counts) + goto out; + + /* + * Create the debugfs files + * + * As reading from and writing to the stat files can be slow, only + * root is allowed to do the read/write to limit impact to system + * performance. + */ + for (i = 0; i < lockevent_num; i++) { + if (skip_lockevent(lockevent_names[i])) + continue; + if (!debugfs_create_file(lockevent_names[i], 0400, d_counts, + (void *)(long)i, &fops_lockevent)) + goto fail_undo; + } + + if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200, + d_counts, (void *)(long)LOCKEVENT_reset_cnts, + &fops_lockevent)) + goto fail_undo; + + return 0; +fail_undo: + debugfs_remove_recursive(d_counts); +out: + pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR); + return -ENOMEM; +} +fs_initcall(init_lockevent_counts); diff --git a/kernel/locking/lock_events.h b/kernel/locking/lock_events.h new file mode 100644 index 000000000000..feb1acc54611 --- /dev/null +++ b/kernel/locking/lock_events.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Authors: Waiman Long <longman@redhat.com> + */ + +#ifndef __LOCKING_LOCK_EVENTS_H +#define __LOCKING_LOCK_EVENTS_H + +enum lock_events { + +#include "lock_events_list.h" + + lockevent_num, /* Total number of lock event counts */ + LOCKEVENT_reset_cnts = lockevent_num, +}; + +#ifdef CONFIG_LOCK_EVENT_COUNTS +/* + * Per-cpu counters + */ +DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]); + +/* + * Increment the PV qspinlock statistical counters + */ +static inline void __lockevent_inc(enum lock_events event, bool cond) +{ + if (cond) + __this_cpu_inc(lockevents[event]); +} + +#define lockevent_inc(ev) __lockevent_inc(LOCKEVENT_ ##ev, true) +#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c) + +static inline void __lockevent_add(enum lock_events event, int inc) +{ + __this_cpu_add(lockevents[event], inc); +} + +#define lockevent_add(ev, c) __lockevent_add(LOCKEVENT_ ##ev, c) + +#else /* CONFIG_LOCK_EVENT_COUNTS */ + +#define lockevent_inc(ev) +#define lockevent_add(ev, c) +#define lockevent_cond_inc(ev, c) + +#endif /* CONFIG_LOCK_EVENT_COUNTS */ +#endif /* __LOCKING_LOCK_EVENTS_H */ diff --git a/kernel/locking/lock_events_list.h b/kernel/locking/lock_events_list.h new file mode 100644 index 000000000000..ad7668cfc9da --- /dev/null +++ b/kernel/locking/lock_events_list.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Authors: Waiman Long <longman@redhat.com> + */ + +#ifndef LOCK_EVENT +#define LOCK_EVENT(name) LOCKEVENT_ ## name, +#endif + +#ifdef CONFIG_QUEUED_SPINLOCKS +#ifdef CONFIG_PARAVIRT_SPINLOCKS +/* + * Locking events for PV qspinlock. + */ +LOCK_EVENT(pv_hash_hops) /* Average # of hops per hashing operation */ +LOCK_EVENT(pv_kick_unlock) /* # of vCPU kicks issued at unlock time */ +LOCK_EVENT(pv_kick_wake) /* # of vCPU kicks for pv_latency_wake */ +LOCK_EVENT(pv_latency_kick) /* Average latency (ns) of vCPU kick */ +LOCK_EVENT(pv_latency_wake) /* Average latency (ns) of kick-to-wakeup */ +LOCK_EVENT(pv_lock_stealing) /* # of lock stealing operations */ +LOCK_EVENT(pv_spurious_wakeup) /* # of spurious wakeups in non-head vCPUs */ +LOCK_EVENT(pv_wait_again) /* # of wait's after queue head vCPU kick */ +LOCK_EVENT(pv_wait_early) /* # of early vCPU wait's */ +LOCK_EVENT(pv_wait_head) /* # of vCPU wait's at the queue head */ +LOCK_EVENT(pv_wait_node) /* # of vCPU wait's at non-head queue node */ +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ + +/* + * Locking events for qspinlock + * + * Subtracting lock_use_node[234] from lock_slowpath will give you + * lock_use_node1. 
+ */ +LOCK_EVENT(lock_pending) /* # of locking ops via pending code */ +LOCK_EVENT(lock_slowpath) /* # of locking ops via MCS lock queue */ +LOCK_EVENT(lock_use_node2) /* # of locking ops that use 2nd percpu node */ +LOCK_EVENT(lock_use_node3) /* # of locking ops that use 3rd percpu node */ +LOCK_EVENT(lock_use_node4) /* # of locking ops that use 4th percpu node */ +LOCK_EVENT(lock_no_node) /* # of locking ops w/o using percpu node */ +#endif /* CONFIG_QUEUED_SPINLOCKS */ + +/* + * Locking events for rwsem + */ +LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps */ +LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps */ +LOCK_EVENT(rwsem_wake_reader) /* # of reader wakeups */ +LOCK_EVENT(rwsem_wake_writer) /* # of writer wakeups */ +LOCK_EVENT(rwsem_opt_wlock) /* # of write locks opt-spin acquired */ +LOCK_EVENT(rwsem_opt_fail) /* # of failed opt-spinnings */ +LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */ +LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */ +LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */ +LOCK_EVENT(rwsem_rtrylock) /* # of read trylock calls */ +LOCK_EVENT(rwsem_wlock) /* # of write locks acquired */ +LOCK_EVENT(rwsem_wlock_fail) /* # of failed write lock acquisitions */ +LOCK_EVENT(rwsem_wtrylock) /* # of write trylock calls */ diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index e221be724fe8..27b992fe8cec 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg) #endif } -static int save_trace(struct stack_trace *trace) +static int save_trace(struct lock_trace *trace) { - trace->nr_entries = 0; - trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; - trace->entries = stack_trace + nr_stack_trace_entries; - - trace->skip = 3; - - save_stack_trace(trace); - - /* - * Some daft arches put -1 at the end to indicate its a full trace. - * - * <rant> this is buggy anyway, since it takes a whole extra entry so a - * complete trace that maxes out the entries provided will be reported - * as incomplete, friggin useless </rant> - */ - if (trace->nr_entries != 0 && - trace->entries[trace->nr_entries-1] == ULONG_MAX) - trace->nr_entries--; - - trace->max_entries = trace->nr_entries; + unsigned long *entries = stack_trace + nr_stack_trace_entries; + unsigned int max_entries; + trace->offset = nr_stack_trace_entries; + max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; + trace->nr_entries = stack_trace_save(entries, max_entries, 3); nr_stack_trace_entries += trace->nr_entries; if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { @@ -516,11 +501,11 @@ static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) { char c = '.'; - if (class->usage_mask & lock_flag(bit + 2)) + if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) c = '+'; if (class->usage_mask & lock_flag(bit)) { c = '-'; - if (class->usage_mask & lock_flag(bit + 2)) + if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) c = '?'; } @@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void) static int add_lock_to_list(struct lock_class *this, struct lock_class *links_to, struct list_head *head, unsigned long ip, int distance, - struct stack_trace *trace) + struct lock_trace *trace) { struct lock_list *entry; /* @@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry, * checking. 
*/ +static void print_lock_trace(struct lock_trace *trace, unsigned int spaces) +{ + unsigned long *entries = stack_trace + trace->offset; + + stack_trace_print(entries, trace->nr_entries, spaces); +} + /* * Print a dependency chain entry (this is only done when a deadlock * has been detected): @@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth) printk("\n-> #%u", depth); print_lock_name(target->class); printk(KERN_CONT ":\n"); - print_stack_trace(&target->trace, 6); - + print_lock_trace(&target->trace, 6); return 0; } @@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data) } static noinline int print_circular_bug(struct lock_list *this, - struct lock_list *target, - struct held_lock *check_src, - struct held_lock *check_tgt, - struct stack_trace *trace) + struct lock_list *target, + struct held_lock *check_src, + struct held_lock *check_tgt) { struct task_struct *curr = current; struct lock_list *parent; @@ -1676,19 +1666,25 @@ check_redundant(struct lock_list *root, struct lock_class *target, } #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) + +static inline int usage_accumulate(struct lock_list *entry, void *mask) +{ + *(unsigned long *)mask |= entry->class->usage_mask; + + return 0; +} + /* * Forwards and backwards subgraph searching, for the purposes of * proving that two subgraphs can be connected by a new dependency * without creating any illegal irq-safe -> irq-unsafe lock dependency. */ -static inline int usage_match(struct lock_list *entry, void *bit) +static inline int usage_match(struct lock_list *entry, void *mask) { - return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); + return entry->class->usage_mask & *(unsigned long *)mask; } - - /* * Find a node in the forwards-direction dependency sub-graph starting * at @root->class that matches @bit. @@ -1700,14 +1696,14 @@ static inline int usage_match(struct lock_list *entry, void *bit) * Return <0 on error. */ static int -find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, +find_usage_forwards(struct lock_list *root, unsigned long usage_mask, struct lock_list **target_entry) { int result; debug_atomic_inc(nr_find_usage_forwards_checks); - result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); + result = __bfs_forwards(root, &usage_mask, usage_match, target_entry); return result; } @@ -1723,14 +1719,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, * Return <0 on error. */ static int -find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, +find_usage_backwards(struct lock_list *root, unsigned long usage_mask, struct lock_list **target_entry) { int result; debug_atomic_inc(nr_find_usage_backwards_checks); - result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); + result = __bfs_backwards(root, &usage_mask, usage_match, target_entry); return result; } @@ -1752,7 +1748,7 @@ static void print_lock_class_header(struct lock_class *class, int depth) len += printk("%*s %s", depth, "", usage_str[bit]); len += printk(KERN_CONT " at:\n"); - print_stack_trace(class->usage_traces + bit, len); + print_lock_trace(class->usage_traces + bit, len); } } printk("%*s }\n", depth, ""); @@ -1777,7 +1773,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf, do { print_lock_class_header(entry->class, depth); printk("%*s ... 
acquired at:\n", depth, ""); - print_stack_trace(&entry->trace, 2); + print_lock_trace(&entry->trace, 2); printk("\n"); if (depth == 0 && (entry != root)) { @@ -1890,14 +1886,14 @@ print_bad_irq_dependency(struct task_struct *curr, print_lock_name(backwards_entry->class); pr_warn("\n... which became %s-irq-safe at:\n", irqclass); - print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); + print_lock_trace(backwards_entry->class->usage_traces + bit1, 1); pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); print_lock_name(forwards_entry->class); pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass); pr_warn("..."); - print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); + print_lock_trace(forwards_entry->class->usage_traces + bit2, 1); pr_warn("\nother info that might help us debug this:\n\n"); print_irq_lock_scenario(backwards_entry, forwards_entry, @@ -1922,39 +1918,6 @@ print_bad_irq_dependency(struct task_struct *curr, return 0; } -static int -check_usage(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next, enum lock_usage_bit bit_backwards, - enum lock_usage_bit bit_forwards, const char *irqclass) -{ - int ret; - struct lock_list this, that; - struct lock_list *uninitialized_var(target_entry); - struct lock_list *uninitialized_var(target_entry1); - - this.parent = NULL; - - this.class = hlock_class(prev); - ret = find_usage_backwards(&this, bit_backwards, &target_entry); - if (ret < 0) - return print_bfs_bug(ret); - if (ret == 1) - return ret; - - that.parent = NULL; - that.class = hlock_class(next); - ret = find_usage_forwards(&that, bit_forwards, &target_entry1); - if (ret < 0) - return print_bfs_bug(ret); - if (ret == 1) - return ret; - - return print_bad_irq_dependency(curr, &this, &that, - target_entry, target_entry1, - prev, next, - bit_backwards, bit_forwards, irqclass); -} - static const char *state_names[] = { #define LOCKDEP_STATE(__STATE) \ __stringify(__STATE), @@ -1971,9 +1934,19 @@ static const char *state_rnames[] = { static inline const char *state_name(enum lock_usage_bit bit) { - return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2]; + if (bit & LOCK_USAGE_READ_MASK) + return state_rnames[bit >> LOCK_USAGE_DIR_MASK]; + else + return state_names[bit >> LOCK_USAGE_DIR_MASK]; } +/* + * The bit number is encoded like: + * + * bit0: 0 exclusive, 1 read lock + * bit1: 0 used in irq, 1 irq enabled + * bit2-n: state + */ static int exclusive_bit(int new_bit) { int state = new_bit & LOCK_USAGE_STATE_MASK; @@ -1985,45 +1958,160 @@ static int exclusive_bit(int new_bit) return state | (dir ^ LOCK_USAGE_DIR_MASK); } +/* + * Observe that when given a bitmask where each bitnr is encoded as above, a + * right shift of the mask transforms the individual bitnrs as -1 and + * conversely, a left shift transforms into +1 for the individual bitnrs. + * + * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can + * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0) + * instead by subtracting the bit number by 2, or shifting the mask right by 2. + * + * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2. + * + * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is + * all bits set) and recompose with bitnr1 flipped. 
+ */ +static unsigned long invert_dir_mask(unsigned long mask) +{ + unsigned long excl = 0; + + /* Invert dir */ + excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK; + excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK; + + return excl; +} + +/* + * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all + * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). + * And then mask out all bitnr0. + */ +static unsigned long exclusive_mask(unsigned long mask) +{ + unsigned long excl = invert_dir_mask(mask); + + /* Strip read */ + excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; + excl &= ~LOCKF_IRQ_READ; + + return excl; +} + +/* + * Retrieve the _possible_ original mask to which @mask is + * exclusive. Ie: this is the opposite of exclusive_mask(). + * Note that 2 possible original bits can match an exclusive + * bit: one has LOCK_USAGE_READ_MASK set, the other has it + * cleared. So both are returned for each exclusive bit. + */ +static unsigned long original_mask(unsigned long mask) +{ + unsigned long excl = invert_dir_mask(mask); + + /* Include read in existing usages */ + excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; + + return excl; +} + +/* + * Find the first pair of bit match between an original + * usage mask and an exclusive usage mask. + */ +static int find_exclusive_match(unsigned long mask, + unsigned long excl_mask, + enum lock_usage_bit *bitp, + enum lock_usage_bit *excl_bitp) +{ + int bit, excl; + + for_each_set_bit(bit, &mask, LOCK_USED) { + excl = exclusive_bit(bit); + if (excl_mask & lock_flag(excl)) { + *bitp = bit; + *excl_bitp = excl; + return 0; + } + } + return -1; +} + +/* + * Prove that the new dependency does not connect a hardirq-safe(-read) + * lock with a hardirq-unsafe lock - to achieve this we search + * the backwards-subgraph starting at <prev>, and the + * forwards-subgraph starting at <next>: + */ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next, enum lock_usage_bit bit) + struct held_lock *next) { + unsigned long usage_mask = 0, forward_mask, backward_mask; + enum lock_usage_bit forward_bit = 0, backward_bit = 0; + struct lock_list *uninitialized_var(target_entry1); + struct lock_list *uninitialized_var(target_entry); + struct lock_list this, that; + int ret; + /* - * Prove that the new dependency does not connect a hardirq-safe - * lock with a hardirq-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: + * Step 1: gather all hard/soft IRQs usages backward in an + * accumulated usage mask. */ - if (!check_usage(curr, prev, next, bit, - exclusive_bit(bit), state_name(bit))) - return 0; + this.parent = NULL; + this.class = hlock_class(prev); - bit++; /* _READ */ + ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL); + if (ret < 0) + return print_bfs_bug(ret); + + usage_mask &= LOCKF_USED_IN_IRQ_ALL; + if (!usage_mask) + return 1; /* - * Prove that the new dependency does not connect a hardirq-safe-read - * lock with a hardirq-unsafe lock - to achieve this we search - * the backwards-subgraph starting at <prev>, and the - * forwards-subgraph starting at <next>: + * Step 2: find exclusive uses forward that match the previous + * backward accumulated mask. 
*/ - if (!check_usage(curr, prev, next, bit, - exclusive_bit(bit), state_name(bit))) - return 0; + forward_mask = exclusive_mask(usage_mask); - return 1; -} + that.parent = NULL; + that.class = hlock_class(next); -static int -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next) -{ -#define LOCKDEP_STATE(__STATE) \ - if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ - return 0; -#include "lockdep_states.h" -#undef LOCKDEP_STATE + ret = find_usage_forwards(&that, forward_mask, &target_entry1); + if (ret < 0) + return print_bfs_bug(ret); + if (ret == 1) + return ret; - return 1; + /* + * Step 3: we found a bad match! Now retrieve a lock from the backward + * list whose usage mask matches the exclusive usage mask from the + * lock found on the forward list. + */ + backward_mask = original_mask(target_entry1->class->usage_mask); + + ret = find_usage_backwards(&this, backward_mask, &target_entry); + if (ret < 0) + return print_bfs_bug(ret); + if (DEBUG_LOCKS_WARN_ON(ret == 1)) + return 1; + + /* + * Step 4: narrow down to a pair of incompatible usage bits + * and report it. + */ + ret = find_exclusive_match(target_entry->class->usage_mask, + target_entry1->class->usage_mask, + &backward_bit, &forward_bit); + if (DEBUG_LOCKS_WARN_ON(ret == -1)) + return 1; + + return print_bad_irq_dependency(curr, &this, &that, + target_entry, target_entry1, + prev, next, + backward_bit, forward_bit, + state_name(backward_bit)); } static void inc_chains(void) @@ -2040,9 +2128,8 @@ static void inc_chains(void) #else -static inline int -check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next) +static inline int check_irq_usage(struct task_struct *curr, + struct held_lock *prev, struct held_lock *next) { return 1; } @@ -2170,8 +2257,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, */ static int check_prev_add(struct task_struct *curr, struct held_lock *prev, - struct held_lock *next, int distance, struct stack_trace *trace, - int (*save)(struct stack_trace *trace)) + struct held_lock *next, int distance, struct lock_trace *trace) { struct lock_list *uninitialized_var(target_entry); struct lock_list *entry; @@ -2209,20 +2295,20 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, this.parent = NULL; ret = check_noncircular(&this, hlock_class(prev), &target_entry); if (unlikely(!ret)) { - if (!trace->entries) { + if (!trace->nr_entries) { /* - * If @save fails here, the printing might trigger - * a WARN but because of the !nr_entries it should - * not do bad things. + * If save_trace fails here, the printing might + * trigger a WARN but because of the !nr_entries it + * should not do bad things. 
*/ - save(trace); + save_trace(trace); } - return print_circular_bug(&this, target_entry, next, prev, trace); + return print_circular_bug(&this, target_entry, next, prev); } else if (unlikely(ret < 0)) return print_bfs_bug(ret); - if (!check_prev_add_irq(curr, prev, next)) + if (!check_irq_usage(curr, prev, next)) return 0; /* @@ -2265,7 +2351,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, return print_bfs_bug(ret); - if (!trace->entries && !save(trace)) + if (!trace->nr_entries && !save_trace(trace)) return 0; /* @@ -2297,14 +2383,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, static int check_prevs_add(struct task_struct *curr, struct held_lock *next) { + struct lock_trace trace = { .nr_entries = 0 }; int depth = curr->lockdep_depth; struct held_lock *hlock; - struct stack_trace trace = { - .nr_entries = 0, - .max_entries = 0, - .entries = NULL, - .skip = 0, - }; /* * Debugging checks. @@ -2330,7 +2411,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) * added: */ if (hlock->read != 2 && hlock->check) { - int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace); + int ret = check_prev_add(curr, hlock, next, distance, + &trace); if (!ret) return 0; @@ -2731,6 +2813,10 @@ static inline int validate_chain(struct task_struct *curr, { return 1; } + +static void print_lock_trace(struct lock_trace *trace, unsigned int spaces) +{ +} #endif /* @@ -2784,6 +2870,12 @@ static void check_chain_key(struct task_struct *curr) #endif } +static int mark_lock(struct task_struct *curr, struct held_lock *this, + enum lock_usage_bit new_bit); + +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) + + static void print_usage_bug_scenario(struct held_lock *lock) { @@ -2827,7 +2919,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, print_lock(this); pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); - print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); + print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1); print_irqtrace_events(curr); pr_warn("\nother info that might help us debug this:\n"); @@ -2853,10 +2945,6 @@ valid_state(struct task_struct *curr, struct held_lock *this, return 1; } -static int mark_lock(struct task_struct *curr, struct held_lock *this, - enum lock_usage_bit new_bit); - -#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) /* * print irq inversion bug: @@ -2936,7 +3024,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, root.parent = NULL; root.class = hlock_class(this); - ret = find_usage_forwards(&root, bit, &target_entry); + ret = find_usage_forwards(&root, lock_flag(bit), &target_entry); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) @@ -2960,7 +3048,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, root.parent = NULL; root.class = hlock_class(this); - ret = find_usage_backwards(&root, bit, &target_entry); + ret = find_usage_backwards(&root, lock_flag(bit), &target_entry); if (ret < 0) return print_bfs_bug(ret); if (ret == 1) @@ -3015,7 +3103,7 @@ static int (*state_verbose_f[])(struct lock_class *class) = { static inline int state_verbose(enum lock_usage_bit bit, struct lock_class *class) { - return state_verbose_f[bit >> 2](class); + return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class); } typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, @@ -3157,7 +3245,7 @@ void lockdep_hardirqs_on(unsigned long ip) /* * See the fine text that 
goes along with this variable definition. */ - if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) + if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled)) return; /* diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index d4c197425f68..150ec3f0c5b5 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -42,13 +42,35 @@ enum { __LOCKF(USED) }; -#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ) -#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) +#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE | +static const unsigned long LOCKF_ENABLED_IRQ = +#include "lockdep_states.h" + 0; +#undef LOCKDEP_STATE + +#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE | +static const unsigned long LOCKF_USED_IN_IRQ = +#include "lockdep_states.h" + 0; +#undef LOCKDEP_STATE + +#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE##_READ | +static const unsigned long LOCKF_ENABLED_IRQ_READ = +#include "lockdep_states.h" + 0; +#undef LOCKDEP_STATE + +#define LOCKDEP_STATE(__STATE) LOCKF_USED_IN_##__STATE##_READ | +static const unsigned long LOCKF_USED_IN_IRQ_READ = +#include "lockdep_states.h" + 0; +#undef LOCKDEP_STATE + +#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ) +#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ) -#define LOCKF_ENABLED_IRQ_READ \ - (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ) -#define LOCKF_USED_IN_IRQ_READ \ - (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) +#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ) +#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ) /* * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text, diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index ad40a2617063..80a463d31a8d 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -829,7 +829,9 @@ static void lock_torture_cleanup(void) "End of test: SUCCESS"); kfree(cxt.lwsa); + cxt.lwsa = NULL; kfree(cxt.lrsa); + cxt.lrsa = NULL; end: torture_cleanup_end(); diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index 883cf1b92d90..f17dad99eec8 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -7,6 +7,8 @@ #include <linux/sched.h> #include <linux/errno.h> +#include "rwsem.h" + int __percpu_init_rwsem(struct percpu_rw_semaphore *sem, const char *name, struct lock_class_key *rwsem_key) { diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 5e9247dc2515..e14b32c69639 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -395,7 +395,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * 0,1,0 -> 0,0,1 */ clear_pending_set_locked(lock); - qstat_inc(qstat_lock_pending, true); + lockevent_inc(lock_pending); return; /* @@ -403,7 +403,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * queuing. */ queue: - qstat_inc(qstat_lock_slowpath, true); + lockevent_inc(lock_slowpath); pv_queue: node = this_cpu_ptr(&qnodes[0].mcs); idx = node->count++; @@ -419,7 +419,7 @@ pv_queue: * simple enough. 
*/ if (unlikely(idx >= MAX_NODES)) { - qstat_inc(qstat_lock_no_node, true); + lockevent_inc(lock_no_node); while (!queued_spin_trylock(lock)) cpu_relax(); goto release; @@ -430,7 +430,7 @@ pv_queue: /* * Keep counts of non-zero index values: */ - qstat_inc(qstat_lock_use_node2 + idx - 1, idx); + lockevent_cond_inc(lock_use_node2 + idx - 1, idx); /* * Ensure that we increment the head node->count before initialising diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 8f36c27c1794..89bab079e7a4 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -89,7 +89,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) if (!(val & _Q_LOCKED_PENDING_MASK) && (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) { - qstat_inc(qstat_pv_lock_stealing, true); + lockevent_inc(pv_lock_stealing); return true; } if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK)) @@ -219,7 +219,7 @@ static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node) hopcnt++; if (!cmpxchg(&he->lock, NULL, lock)) { WRITE_ONCE(he->node, node); - qstat_hop(hopcnt); + lockevent_pv_hop(hopcnt); return &he->lock; } } @@ -320,8 +320,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) smp_store_mb(pn->state, vcpu_halted); if (!READ_ONCE(node->locked)) { - qstat_inc(qstat_pv_wait_node, true); - qstat_inc(qstat_pv_wait_early, wait_early); + lockevent_inc(pv_wait_node); + lockevent_cond_inc(pv_wait_early, wait_early); pv_wait(&pn->state, vcpu_halted); } @@ -339,7 +339,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) * So it is better to spin for a while in the hope that the * MCS lock will be released soon. */ - qstat_inc(qstat_pv_spurious_wakeup, !READ_ONCE(node->locked)); + lockevent_cond_inc(pv_spurious_wakeup, + !READ_ONCE(node->locked)); } /* @@ -416,7 +417,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) /* * Tracking # of slowpath locking operations */ - qstat_inc(qstat_lock_slowpath, true); + lockevent_inc(lock_slowpath); for (;; waitcnt++) { /* @@ -464,8 +465,8 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) } } WRITE_ONCE(pn->state, vcpu_hashed); - qstat_inc(qstat_pv_wait_head, true); - qstat_inc(qstat_pv_wait_again, waitcnt); + lockevent_inc(pv_wait_head); + lockevent_cond_inc(pv_wait_again, waitcnt); pv_wait(&lock->locked, _Q_SLOW_VAL); /* @@ -528,7 +529,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) * vCPU is harmless other than the additional latency in completing * the unlock. */ - qstat_inc(qstat_pv_kick_unlock, true); + lockevent_inc(pv_kick_unlock); pv_kick(node->cpu); } diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index d73f85388d5c..54152670ff24 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -9,262 +9,105 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * Authors: Waiman Long <waiman.long@hpe.com> + * Authors: Waiman Long <longman@redhat.com> */ -/* - * When queued spinlock statistical counters are enabled, the following - * debugfs files will be created for reporting the counter values: - * - * <debugfs>/qlockstat/ - * pv_hash_hops - average # of hops per hashing operation - * pv_kick_unlock - # of vCPU kicks issued at unlock time - * pv_kick_wake - # of vCPU kicks used for computing pv_latency_wake - * pv_latency_kick - average latency (ns) of vCPU kick operation - * pv_latency_wake - average latency (ns) from vCPU kick to wakeup - * pv_lock_stealing - # of lock stealing operations - * pv_spurious_wakeup - # of spurious wakeups in non-head vCPUs - * pv_wait_again - # of wait's after a queue head vCPU kick - * pv_wait_early - # of early vCPU wait's - * pv_wait_head - # of vCPU wait's at the queue head - * pv_wait_node - # of vCPU wait's at a non-head queue node - * lock_pending - # of locking operations via pending code - * lock_slowpath - # of locking operations via MCS lock queue - * lock_use_node2 - # of locking operations that use 2nd per-CPU node - * lock_use_node3 - # of locking operations that use 3rd per-CPU node - * lock_use_node4 - # of locking operations that use 4th per-CPU node - * lock_no_node - # of locking operations without using per-CPU node - * - * Subtracting lock_use_node[234] from lock_slowpath will give you - * lock_use_node1. - * - * Writing to the "reset_counters" file will reset all the above counter - * values. - * - * These statistical counters are implemented as per-cpu variables which are - * summed and computed whenever the corresponding debugfs files are read. This - * minimizes added overhead making the counters usable even in a production - * environment. - * - * There may be slight difference between pv_kick_wake and pv_kick_unlock. 
- */ -enum qlock_stats { - qstat_pv_hash_hops, - qstat_pv_kick_unlock, - qstat_pv_kick_wake, - qstat_pv_latency_kick, - qstat_pv_latency_wake, - qstat_pv_lock_stealing, - qstat_pv_spurious_wakeup, - qstat_pv_wait_again, - qstat_pv_wait_early, - qstat_pv_wait_head, - qstat_pv_wait_node, - qstat_lock_pending, - qstat_lock_slowpath, - qstat_lock_use_node2, - qstat_lock_use_node3, - qstat_lock_use_node4, - qstat_lock_no_node, - qstat_num, /* Total number of statistical counters */ - qstat_reset_cnts = qstat_num, -}; +#include "lock_events.h" -#ifdef CONFIG_QUEUED_LOCK_STAT +#ifdef CONFIG_LOCK_EVENT_COUNTS +#ifdef CONFIG_PARAVIRT_SPINLOCKS /* - * Collect pvqspinlock statistics + * Collect pvqspinlock locking event counts */ -#include <linux/debugfs.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/fs.h> -static const char * const qstat_names[qstat_num + 1] = { - [qstat_pv_hash_hops] = "pv_hash_hops", - [qstat_pv_kick_unlock] = "pv_kick_unlock", - [qstat_pv_kick_wake] = "pv_kick_wake", - [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup", - [qstat_pv_latency_kick] = "pv_latency_kick", - [qstat_pv_latency_wake] = "pv_latency_wake", - [qstat_pv_lock_stealing] = "pv_lock_stealing", - [qstat_pv_wait_again] = "pv_wait_again", - [qstat_pv_wait_early] = "pv_wait_early", - [qstat_pv_wait_head] = "pv_wait_head", - [qstat_pv_wait_node] = "pv_wait_node", - [qstat_lock_pending] = "lock_pending", - [qstat_lock_slowpath] = "lock_slowpath", - [qstat_lock_use_node2] = "lock_use_node2", - [qstat_lock_use_node3] = "lock_use_node3", - [qstat_lock_use_node4] = "lock_use_node4", - [qstat_lock_no_node] = "lock_no_node", - [qstat_reset_cnts] = "reset_counters", -}; +#define EVENT_COUNT(ev) lockevents[LOCKEVENT_ ## ev] /* - * Per-cpu counters + * PV specific per-cpu counter */ -static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]); static DEFINE_PER_CPU(u64, pv_kick_time); /* - * Function to read and return the qlock statistical counter values + * Function to read and return the PV qspinlock counts. * * The following counters are handled specially: - * 1. qstat_pv_latency_kick + * 1. pv_latency_kick * Average kick latency (ns) = pv_latency_kick/pv_kick_unlock - * 2. qstat_pv_latency_wake + * 2. pv_latency_wake * Average wake latency (ns) = pv_latency_wake/pv_kick_wake - * 3. qstat_pv_hash_hops + * 3. 
pv_hash_hops * Average hops/hash = pv_hash_hops/pv_kick_unlock */ -static ssize_t qstat_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) +ssize_t lockevent_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) { char buf[64]; - int cpu, counter, len; - u64 stat = 0, kicks = 0; + int cpu, id, len; + u64 sum = 0, kicks = 0; /* * Get the counter ID stored in file->f_inode->i_private */ - counter = (long)file_inode(file)->i_private; + id = (long)file_inode(file)->i_private; - if (counter >= qstat_num) + if (id >= lockevent_num) return -EBADF; for_each_possible_cpu(cpu) { - stat += per_cpu(qstats[counter], cpu); + sum += per_cpu(lockevents[id], cpu); /* - * Need to sum additional counter for some of them + * Need to sum additional counters for some of them */ - switch (counter) { + switch (id) { - case qstat_pv_latency_kick: - case qstat_pv_hash_hops: - kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu); + case LOCKEVENT_pv_latency_kick: + case LOCKEVENT_pv_hash_hops: + kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu); break; - case qstat_pv_latency_wake: - kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu); + case LOCKEVENT_pv_latency_wake: + kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu); break; } } - if (counter == qstat_pv_hash_hops) { + if (id == LOCKEVENT_pv_hash_hops) { u64 frac = 0; if (kicks) { - frac = 100ULL * do_div(stat, kicks); + frac = 100ULL * do_div(sum, kicks); frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); } /* * Return a X.XX decimal number */ - len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac); + len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", + sum, frac); } else { /* * Round to the nearest ns */ - if ((counter == qstat_pv_latency_kick) || - (counter == qstat_pv_latency_wake)) { + if ((id == LOCKEVENT_pv_latency_kick) || + (id == LOCKEVENT_pv_latency_wake)) { if (kicks) - stat = DIV_ROUND_CLOSEST_ULL(stat, kicks); + sum = DIV_ROUND_CLOSEST_ULL(sum, kicks); } - len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat); + len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum); } return simple_read_from_buffer(user_buf, count, ppos, buf, len); } /* - * Function to handle write request - * - * When counter = reset_cnts, reset all the counter values. - * Since the counter updates aren't atomic, the resetting is done twice - * to make sure that the counters are very likely to be all cleared. - */ -static ssize_t qstat_write(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - int cpu; - - /* - * Get the counter ID stored in file->f_inode->i_private - */ - if ((long)file_inode(file)->i_private != qstat_reset_cnts) - return count; - - for_each_possible_cpu(cpu) { - int i; - unsigned long *ptr = per_cpu_ptr(qstats, cpu); - - for (i = 0 ; i < qstat_num; i++) - WRITE_ONCE(ptr[i], 0); - } - return count; -} - -/* - * Debugfs data structures - */ -static const struct file_operations fops_qstat = { - .read = qstat_read, - .write = qstat_write, - .llseek = default_llseek, -}; - -/* - * Initialize debugfs for the qspinlock statistical counters - */ -static int __init init_qspinlock_stat(void) -{ - struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL); - int i; - - if (!d_qstat) - goto out; - - /* - * Create the debugfs files - * - * As reading from and writing to the stat files can be slow, only - * root is allowed to do the read/write to limit impact to system - * performance. 
- */ - for (i = 0; i < qstat_num; i++) - if (!debugfs_create_file(qstat_names[i], 0400, d_qstat, - (void *)(long)i, &fops_qstat)) - goto fail_undo; - - if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat, - (void *)(long)qstat_reset_cnts, &fops_qstat)) - goto fail_undo; - - return 0; -fail_undo: - debugfs_remove_recursive(d_qstat); -out: - pr_warn("Could not create 'qlockstat' debugfs entries\n"); - return -ENOMEM; -} -fs_initcall(init_qspinlock_stat); - -/* - * Increment the PV qspinlock statistical counters - */ -static inline void qstat_inc(enum qlock_stats stat, bool cond) -{ - if (cond) - this_cpu_inc(qstats[stat]); -} - -/* * PV hash hop count */ -static inline void qstat_hop(int hopcnt) +static inline void lockevent_pv_hop(int hopcnt) { - this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt); + this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt); } /* @@ -276,7 +119,7 @@ static inline void __pv_kick(int cpu) per_cpu(pv_kick_time, cpu) = start; pv_kick(cpu); - this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start); + this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start); } /* @@ -289,18 +132,19 @@ static inline void __pv_wait(u8 *ptr, u8 val) *pkick_time = 0; pv_wait(ptr, val); if (*pkick_time) { - this_cpu_add(qstats[qstat_pv_latency_wake], + this_cpu_add(EVENT_COUNT(pv_latency_wake), sched_clock() - *pkick_time); - qstat_inc(qstat_pv_kick_wake, true); + lockevent_inc(pv_kick_wake); } } #define pv_kick(c) __pv_kick(c) #define pv_wait(p, v) __pv_wait(p, v) -#else /* CONFIG_QUEUED_LOCK_STAT */ +#endif /* CONFIG_PARAVIRT_SPINLOCKS */ + +#else /* CONFIG_LOCK_EVENT_COUNTS */ -static inline void qstat_inc(enum qlock_stats stat, bool cond) { } -static inline void qstat_hop(int hopcnt) { } +static inline void lockevent_pv_hop(int hopcnt) { } -#endif /* CONFIG_QUEUED_LOCK_STAT */ +#endif /* CONFIG_LOCK_EVENT_COUNTS */ diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c deleted file mode 100644 index a7ffb2a96ede..000000000000 --- a/kernel/locking/rwsem-spinlock.c +++ /dev/null @@ -1,339 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* rwsem-spinlock.c: R/W semaphores: contention handling functions for - * generic spinlock implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
- * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> - * - Derived also from comments by Linus - */ -#include <linux/rwsem.h> -#include <linux/sched/signal.h> -#include <linux/sched/debug.h> -#include <linux/export.h> - -enum rwsem_waiter_type { - RWSEM_WAITING_FOR_WRITE, - RWSEM_WAITING_FOR_READ -}; - -struct rwsem_waiter { - struct list_head list; - struct task_struct *task; - enum rwsem_waiter_type type; -}; - -int rwsem_is_locked(struct rw_semaphore *sem) -{ - int ret = 1; - unsigned long flags; - - if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { - ret = (sem->count != 0); - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - } - return ret; -} -EXPORT_SYMBOL(rwsem_is_locked); - -/* - * initialise the semaphore - */ -void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* - * Make sure we are not reinitializing a held semaphore: - */ - debug_check_no_locks_freed((void *)sem, sizeof(*sem)); - lockdep_init_map(&sem->dep_map, name, key, 0); -#endif - sem->count = 0; - raw_spin_lock_init(&sem->wait_lock); - INIT_LIST_HEAD(&sem->wait_list); -} -EXPORT_SYMBOL(__init_rwsem); - -/* - * handle the lock release when processes blocked on it that can now run - * - if we come here, then: - * - the 'active count' _reached_ zero - * - the 'waiting count' is non-zero - * - the spinlock must be held by the caller - * - woken process blocks are discarded from the list after having task zeroed - * - writers are only woken if wakewrite is non-zero - */ -static inline struct rw_semaphore * -__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite) -{ - struct rwsem_waiter *waiter; - struct task_struct *tsk; - int woken; - - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); - - if (waiter->type == RWSEM_WAITING_FOR_WRITE) { - if (wakewrite) - /* Wake up a writer. Note that we do not grant it the - * lock - it will have to acquire it when it runs. */ - wake_up_process(waiter->task); - goto out; - } - - /* grant an infinite number of read locks to the front of the queue */ - woken = 0; - do { - struct list_head *next = waiter->list.next; - - list_del(&waiter->list); - tsk = waiter->task; - /* - * Make sure we do not wakeup the next reader before - * setting the nil condition to grant the next reader; - * otherwise we could miss the wakeup on the other - * side and end up sleeping again. See the pairing - * in rwsem_down_read_failed(). 
- */ - smp_mb(); - waiter->task = NULL; - wake_up_process(tsk); - put_task_struct(tsk); - woken++; - if (next == &sem->wait_list) - break; - waiter = list_entry(next, struct rwsem_waiter, list); - } while (waiter->type != RWSEM_WAITING_FOR_WRITE); - - sem->count += woken; - - out: - return sem; -} - -/* - * wake a single writer - */ -static inline struct rw_semaphore * -__rwsem_wake_one_writer(struct rw_semaphore *sem) -{ - struct rwsem_waiter *waiter; - - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); - wake_up_process(waiter->task); - - return sem; -} - -/* - * get a read lock on the semaphore - */ -int __sched __down_read_common(struct rw_semaphore *sem, int state) -{ - struct rwsem_waiter waiter; - unsigned long flags; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - if (sem->count >= 0 && list_empty(&sem->wait_list)) { - /* granted */ - sem->count++; - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - goto out; - } - - /* set up my own style of waitqueue */ - waiter.task = current; - waiter.type = RWSEM_WAITING_FOR_READ; - get_task_struct(current); - - list_add_tail(&waiter.list, &sem->wait_list); - - /* wait to be given the lock */ - for (;;) { - if (!waiter.task) - break; - if (signal_pending_state(state, current)) - goto out_nolock; - set_current_state(state); - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - schedule(); - raw_spin_lock_irqsave(&sem->wait_lock, flags); - } - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - out: - return 0; - -out_nolock: - /* - * We didn't take the lock, so that there is a writer, which - * is owner or the first waiter of the sem. If it's a waiter, - * it will be woken by current owner. Not need to wake anybody. - */ - list_del(&waiter.list); - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - return -EINTR; -} - -void __sched __down_read(struct rw_semaphore *sem) -{ - __down_read_common(sem, TASK_UNINTERRUPTIBLE); -} - -int __sched __down_read_killable(struct rw_semaphore *sem) -{ - return __down_read_common(sem, TASK_KILLABLE); -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -int __down_read_trylock(struct rw_semaphore *sem) -{ - unsigned long flags; - int ret = 0; - - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - if (sem->count >= 0 && list_empty(&sem->wait_list)) { - /* granted */ - sem->count++; - ret = 1; - } - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - - return ret; -} - -/* - * get a write lock on the semaphore - */ -int __sched __down_write_common(struct rw_semaphore *sem, int state) -{ - struct rwsem_waiter waiter; - unsigned long flags; - int ret = 0; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - /* set up my own style of waitqueue */ - waiter.task = current; - waiter.type = RWSEM_WAITING_FOR_WRITE; - list_add_tail(&waiter.list, &sem->wait_list); - - /* wait for someone to release the lock */ - for (;;) { - /* - * That is the key to support write lock stealing: allows the - * task already on CPU to get the lock soon rather than put - * itself into sleep and waiting for system woke it or someone - * else in the head of the wait list up. 
- */ - if (sem->count == 0) - break; - if (signal_pending_state(state, current)) - goto out_nolock; - - set_current_state(state); - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - schedule(); - raw_spin_lock_irqsave(&sem->wait_lock, flags); - } - /* got the lock */ - sem->count = -1; - list_del(&waiter.list); - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - - return ret; - -out_nolock: - list_del(&waiter.list); - if (!list_empty(&sem->wait_list) && sem->count >= 0) - __rwsem_do_wake(sem, 0); - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - - return -EINTR; -} - -void __sched __down_write(struct rw_semaphore *sem) -{ - __down_write_common(sem, TASK_UNINTERRUPTIBLE); -} - -int __sched __down_write_killable(struct rw_semaphore *sem) -{ - return __down_write_common(sem, TASK_KILLABLE); -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -int __down_write_trylock(struct rw_semaphore *sem) -{ - unsigned long flags; - int ret = 0; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - if (sem->count == 0) { - /* got the lock */ - sem->count = -1; - ret = 1; - } - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); - - return ret; -} - -/* - * release a read lock on the semaphore - */ -void __up_read(struct rw_semaphore *sem) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - if (--sem->count == 0 && !list_empty(&sem->wait_list)) - sem = __rwsem_wake_one_writer(sem); - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); -} - -/* - * release a write lock on the semaphore - */ -void __up_write(struct rw_semaphore *sem) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - sem->count = 0; - if (!list_empty(&sem->wait_list)) - sem = __rwsem_do_wake(sem, 1); - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); -} - -/* - * downgrade a write lock into a read lock - * - just wake up any readers at the front of the queue - */ -void __downgrade_write(struct rw_semaphore *sem) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&sem->wait_lock, flags); - - sem->count = 1; - if (!list_empty(&sem->wait_list)) - sem = __rwsem_do_wake(sem, 0); - - raw_spin_unlock_irqrestore(&sem->wait_lock, flags); -} - diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index fbe96341beee..6b3ee9948bf1 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -147,6 +147,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, * will notice the queued writer. */ wake_q_add(wake_q, waiter->task); + lockevent_inc(rwsem_wake_writer); } return; @@ -176,9 +177,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, goto try_reader_grant; } /* - * It is not really necessary to set it to reader-owned here, - * but it gives the spinners an early indication that the - * readers now have the lock. + * Set it to reader-owned to give spinners an early + * indication that readers now have the lock. 
*/ __rwsem_set_reader_owned(sem, waiter->task); } @@ -215,6 +215,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, } adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment; + lockevent_cond_inc(rwsem_wake_reader, woken); if (list_empty(&sem->wait_list)) { /* hit end of list above */ adjustment -= RWSEM_WAITING_BIAS; @@ -225,92 +226,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem, } /* - * Wait for the read lock to be granted - */ -static inline struct rw_semaphore __sched * -__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state) -{ - long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; - struct rwsem_waiter waiter; - DEFINE_WAKE_Q(wake_q); - - waiter.task = current; - waiter.type = RWSEM_WAITING_FOR_READ; - - raw_spin_lock_irq(&sem->wait_lock); - if (list_empty(&sem->wait_list)) { - /* - * In case the wait queue is empty and the lock isn't owned - * by a writer, this reader can exit the slowpath and return - * immediately as its RWSEM_ACTIVE_READ_BIAS has already - * been set in the count. - */ - if (atomic_long_read(&sem->count) >= 0) { - raw_spin_unlock_irq(&sem->wait_lock); - return sem; - } - adjustment += RWSEM_WAITING_BIAS; - } - list_add_tail(&waiter.list, &sem->wait_list); - - /* we're now waiting on the lock, but no longer actively locking */ - count = atomic_long_add_return(adjustment, &sem->count); - - /* - * If there are no active locks, wake the front queued process(es). - * - * If there are no writers and we are first in the queue, - * wake our own waiter to join the existing active readers ! - */ - if (count == RWSEM_WAITING_BIAS || - (count > RWSEM_WAITING_BIAS && - adjustment != -RWSEM_ACTIVE_READ_BIAS)) - __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); - - raw_spin_unlock_irq(&sem->wait_lock); - wake_up_q(&wake_q); - - /* wait to be given the lock */ - while (true) { - set_current_state(state); - if (!waiter.task) - break; - if (signal_pending_state(state, current)) { - raw_spin_lock_irq(&sem->wait_lock); - if (waiter.task) - goto out_nolock; - raw_spin_unlock_irq(&sem->wait_lock); - break; - } - schedule(); - } - - __set_current_state(TASK_RUNNING); - return sem; -out_nolock: - list_del(&waiter.list); - if (list_empty(&sem->wait_list)) - atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count); - raw_spin_unlock_irq(&sem->wait_lock); - __set_current_state(TASK_RUNNING); - return ERR_PTR(-EINTR); -} - -__visible struct rw_semaphore * __sched -rwsem_down_read_failed(struct rw_semaphore *sem) -{ - return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE); -} -EXPORT_SYMBOL(rwsem_down_read_failed); - -__visible struct rw_semaphore * __sched -rwsem_down_read_failed_killable(struct rw_semaphore *sem) -{ - return __rwsem_down_read_failed_common(sem, TASK_KILLABLE); -} -EXPORT_SYMBOL(rwsem_down_read_failed_killable); - -/* * This function must be called with the sem->wait_lock held to prevent * race conditions between checking the rwsem wait list and setting the * sem->count accordingly. 
@@ -346,21 +261,17 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) */ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) { - long old, count = atomic_long_read(&sem->count); - - while (true) { - if (!(count == 0 || count == RWSEM_WAITING_BIAS)) - return false; + long count = atomic_long_read(&sem->count); - old = atomic_long_cmpxchg_acquire(&sem->count, count, - count + RWSEM_ACTIVE_WRITE_BIAS); - if (old == count) { + while (!count || count == RWSEM_WAITING_BIAS) { + if (atomic_long_try_cmpxchg_acquire(&sem->count, &count, + count + RWSEM_ACTIVE_WRITE_BIAS)) { rwsem_set_owner(sem); + lockevent_inc(rwsem_opt_wlock); return true; } - - count = old; } + return false; } static inline bool owner_on_cpu(struct task_struct *owner) @@ -481,6 +392,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) osq_unlock(&sem->osq); done: preempt_enable(); + lockevent_cond_inc(rwsem_opt_fail, !taken); return taken; } @@ -505,6 +417,97 @@ static inline bool rwsem_has_spinner(struct rw_semaphore *sem) #endif /* + * Wait for the read lock to be granted + */ +static inline struct rw_semaphore __sched * +__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state) +{ + long count, adjustment = -RWSEM_ACTIVE_READ_BIAS; + struct rwsem_waiter waiter; + DEFINE_WAKE_Q(wake_q); + + waiter.task = current; + waiter.type = RWSEM_WAITING_FOR_READ; + + raw_spin_lock_irq(&sem->wait_lock); + if (list_empty(&sem->wait_list)) { + /* + * In case the wait queue is empty and the lock isn't owned + * by a writer, this reader can exit the slowpath and return + * immediately as its RWSEM_ACTIVE_READ_BIAS has already + * been set in the count. + */ + if (atomic_long_read(&sem->count) >= 0) { + raw_spin_unlock_irq(&sem->wait_lock); + rwsem_set_reader_owned(sem); + lockevent_inc(rwsem_rlock_fast); + return sem; + } + adjustment += RWSEM_WAITING_BIAS; + } + list_add_tail(&waiter.list, &sem->wait_list); + + /* we're now waiting on the lock, but no longer actively locking */ + count = atomic_long_add_return(adjustment, &sem->count); + + /* + * If there are no active locks, wake the front queued process(es). + * + * If there are no writers and we are first in the queue, + * wake our own waiter to join the existing active readers ! 
+ */ + if (count == RWSEM_WAITING_BIAS || + (count > RWSEM_WAITING_BIAS && + adjustment != -RWSEM_ACTIVE_READ_BIAS)) + __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); + + raw_spin_unlock_irq(&sem->wait_lock); + wake_up_q(&wake_q); + + /* wait to be given the lock */ + while (true) { + set_current_state(state); + if (!waiter.task) + break; + if (signal_pending_state(state, current)) { + raw_spin_lock_irq(&sem->wait_lock); + if (waiter.task) + goto out_nolock; + raw_spin_unlock_irq(&sem->wait_lock); + break; + } + schedule(); + lockevent_inc(rwsem_sleep_reader); + } + + __set_current_state(TASK_RUNNING); + lockevent_inc(rwsem_rlock); + return sem; +out_nolock: + list_del(&waiter.list); + if (list_empty(&sem->wait_list)) + atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count); + raw_spin_unlock_irq(&sem->wait_lock); + __set_current_state(TASK_RUNNING); + lockevent_inc(rwsem_rlock_fail); + return ERR_PTR(-EINTR); +} + +__visible struct rw_semaphore * __sched +rwsem_down_read_failed(struct rw_semaphore *sem) +{ + return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE); +} +EXPORT_SYMBOL(rwsem_down_read_failed); + +__visible struct rw_semaphore * __sched +rwsem_down_read_failed_killable(struct rw_semaphore *sem) +{ + return __rwsem_down_read_failed_common(sem, TASK_KILLABLE); +} +EXPORT_SYMBOL(rwsem_down_read_failed_killable); + +/* * Wait until we successfully acquire the write lock */ static inline struct rw_semaphore * @@ -580,6 +583,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) goto out_nolock; schedule(); + lockevent_inc(rwsem_sleep_writer); set_current_state(state); } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK); @@ -588,6 +592,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state) __set_current_state(TASK_RUNNING); list_del(&waiter.list); raw_spin_unlock_irq(&sem->wait_lock); + lockevent_inc(rwsem_wlock); return ret; @@ -601,6 +606,7 @@ out_nolock: __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q); raw_spin_unlock_irq(&sem->wait_lock); wake_up_q(&wake_q); + lockevent_inc(rwsem_wlock_fail); return ERR_PTR(-EINTR); } diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index e586f0d03ad3..ccbf18f560ff 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -24,7 +24,6 @@ void __sched down_read(struct rw_semaphore *sem) rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(sem, __down_read_trylock, __down_read); - rwsem_set_reader_owned(sem); } EXPORT_SYMBOL(down_read); @@ -39,7 +38,6 @@ int __sched down_read_killable(struct rw_semaphore *sem) return -EINTR; } - rwsem_set_reader_owned(sem); return 0; } @@ -52,10 +50,8 @@ int down_read_trylock(struct rw_semaphore *sem) { int ret = __down_read_trylock(sem); - if (ret == 1) { + if (ret == 1) rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); - rwsem_set_reader_owned(sem); - } return ret; } @@ -70,7 +66,6 @@ void __sched down_write(struct rw_semaphore *sem) rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(sem, __down_write_trylock, __down_write); - rwsem_set_owner(sem); } EXPORT_SYMBOL(down_write); @@ -88,7 +83,6 @@ int __sched down_write_killable(struct rw_semaphore *sem) return -EINTR; } - rwsem_set_owner(sem); return 0; } @@ -101,10 +95,8 @@ int down_write_trylock(struct rw_semaphore *sem) { int ret = __down_write_trylock(sem); - if (ret == 1) { + if (ret == 1) rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_); - rwsem_set_owner(sem); - } return ret; } @@ -117,9 +109,7 @@ EXPORT_SYMBOL(down_write_trylock); void up_read(struct 
rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); - DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED)); - rwsem_clear_reader_owned(sem); __up_read(sem); } @@ -131,9 +121,7 @@ EXPORT_SYMBOL(up_read); void up_write(struct rw_semaphore *sem) { rwsem_release(&sem->dep_map, 1, _RET_IP_); - DEBUG_RWSEMS_WARN_ON(sem->owner != current); - rwsem_clear_owner(sem); __up_write(sem); } @@ -145,9 +133,7 @@ EXPORT_SYMBOL(up_write); void downgrade_write(struct rw_semaphore *sem) { lock_downgrade(&sem->dep_map, _RET_IP_); - DEBUG_RWSEMS_WARN_ON(sem->owner != current); - rwsem_set_reader_owned(sem); __downgrade_write(sem); } @@ -161,7 +147,6 @@ void down_read_nested(struct rw_semaphore *sem, int subclass) rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(sem, __down_read_trylock, __down_read); - rwsem_set_reader_owned(sem); } EXPORT_SYMBOL(down_read_nested); @@ -172,7 +157,6 @@ void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); LOCK_CONTENDED(sem, __down_write_trylock, __down_write); - rwsem_set_owner(sem); } EXPORT_SYMBOL(_down_write_nest_lock); @@ -193,7 +177,6 @@ void down_write_nested(struct rw_semaphore *sem, int subclass) rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(sem, __down_write_trylock, __down_write); - rwsem_set_owner(sem); } EXPORT_SYMBOL(down_write_nested); @@ -208,7 +191,6 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass) return -EINTR; } - rwsem_set_owner(sem); return 0; } @@ -216,7 +198,8 @@ EXPORT_SYMBOL(down_write_killable_nested); void up_read_non_owner(struct rw_semaphore *sem) { - DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED)); + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), + sem); __up_read(sem); } diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h index bad2bca0268b..64877f5294e3 100644 --- a/kernel/locking/rwsem.h +++ b/kernel/locking/rwsem.h @@ -23,15 +23,44 @@ * is involved. Ideally we would like to track all the readers that own * a rwsem, but the overhead is simply too big. */ +#include "lock_events.h" + #define RWSEM_READER_OWNED (1UL << 0) #define RWSEM_ANONYMOUSLY_OWNED (1UL << 1) #ifdef CONFIG_DEBUG_RWSEMS -# define DEBUG_RWSEMS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c) +# define DEBUG_RWSEMS_WARN_ON(c, sem) do { \ + if (!debug_locks_silent && \ + WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\ + #c, atomic_long_read(&(sem)->count), \ + (long)((sem)->owner), (long)current, \ + list_empty(&(sem)->wait_list) ? "" : "not ")) \ + debug_locks_off(); \ + } while (0) +#else +# define DEBUG_RWSEMS_WARN_ON(c, sem) +#endif + +/* + * R/W semaphores originally for PPC using the stuff in lib/rwsem.c. + * Adapted largely from include/asm-i386/rwsem.h + * by Paul Mackerras <paulus@samba.org>. 
+ */ + +/* + * the semaphore definition + */ +#ifdef CONFIG_64BIT +# define RWSEM_ACTIVE_MASK 0xffffffffL #else -# define DEBUG_RWSEMS_WARN_ON(c) +# define RWSEM_ACTIVE_MASK 0x0000ffffL #endif +#define RWSEM_ACTIVE_BIAS 0x00000001L +#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) +#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS +#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER /* * All writes to owner are protected by WRITE_ONCE() to make sure that @@ -132,3 +161,144 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem) { } #endif + +extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); +extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); + +/* + * lock for reading + */ +static inline void __down_read(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { + rwsem_down_read_failed(sem); + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & + RWSEM_READER_OWNED), sem); + } else { + rwsem_set_reader_owned(sem); + } +} + +static inline int __down_read_killable(struct rw_semaphore *sem) +{ + if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) { + if (IS_ERR(rwsem_down_read_failed_killable(sem))) + return -EINTR; + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & + RWSEM_READER_OWNED), sem); + } else { + rwsem_set_reader_owned(sem); + } + return 0; +} + +static inline int __down_read_trylock(struct rw_semaphore *sem) +{ + /* + * Optimize for the case when the rwsem is not locked at all. 
+ */ + long tmp = RWSEM_UNLOCKED_VALUE; + + lockevent_inc(rwsem_rtrylock); + do { + if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, + tmp + RWSEM_ACTIVE_READ_BIAS)) { + rwsem_set_reader_owned(sem); + return 1; + } + } while (tmp >= 0); + return 0; +} + +/* + * lock for writing + */ +static inline void __down_write(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + rwsem_down_write_failed(sem); + rwsem_set_owner(sem); +} + +static inline int __down_write_killable(struct rw_semaphore *sem) +{ + long tmp; + + tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count); + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) + if (IS_ERR(rwsem_down_write_failed_killable(sem))) + return -EINTR; + rwsem_set_owner(sem); + return 0; +} + +static inline int __down_write_trylock(struct rw_semaphore *sem) +{ + long tmp; + + lockevent_inc(rwsem_wtrylock); + tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + if (tmp == RWSEM_UNLOCKED_VALUE) { + rwsem_set_owner(sem); + return true; + } + return false; +} + +/* + * unlock after reading + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + long tmp; + + DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), + sem); + rwsem_clear_reader_owned(sem); + tmp = atomic_long_dec_return_release(&sem->count); + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) + rwsem_wake(sem); +} + +/* + * unlock after writing + */ +static inline void __up_write(struct rw_semaphore *sem) +{ + DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem); + rwsem_clear_owner(sem); + if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS, + &sem->count) < 0)) + rwsem_wake(sem); +} + +/* + * downgrade write lock to read lock + */ +static inline void __downgrade_write(struct rw_semaphore *sem) +{ + long tmp; + + /* + * When downgrading from exclusive to shared ownership, + * anything inside the write-locked region cannot leak + * into the read side. In contrast, anything in the + * read-locked region is ok to be re-ordered into the + * write side. As such, rely on RELEASE semantics. 
+ */ + DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem); + tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count); + rwsem_set_reader_owned(sem); + if (tmp < 0) + rwsem_downgrade_wake(sem); +} diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index acee72c0b24b..4b58c907b4b7 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -233,6 +233,7 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head) #ifdef CONFIG_RCU_STALL_COMMON extern int rcu_cpu_stall_suppress; +extern int rcu_cpu_stall_timeout; int rcu_jiffies_till_stall_check(void); #define rcu_ftrace_dump_stall_suppress() \ diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c index c29761152874..7a6890b23c5f 100644 --- a/kernel/rcu/rcuperf.c +++ b/kernel/rcu/rcuperf.c @@ -494,6 +494,10 @@ rcu_perf_cleanup(void) if (torture_cleanup_begin()) return; + if (!cur_ops) { + torture_cleanup_end(); + return; + } if (reader_tasks) { for (i = 0; i < nrealreaders; i++) @@ -614,6 +618,7 @@ rcu_perf_init(void) pr_cont("\n"); WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST)); firsterr = -EINVAL; + cur_ops = NULL; goto unwind; } if (cur_ops->init) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index f14d1b18a74f..efaa5b3f4d3f 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -299,7 +299,6 @@ struct rcu_torture_ops { int irq_capable; int can_boost; int extendables; - int ext_irq_conflict; const char *name; }; @@ -592,12 +591,7 @@ static void srcu_torture_init(void) static void srcu_torture_cleanup(void) { - static DEFINE_TORTURE_RANDOM(rand); - - if (torture_random(&rand) & 0x800) - cleanup_srcu_struct(&srcu_ctld); - else - cleanup_srcu_struct_quiesced(&srcu_ctld); + cleanup_srcu_struct(&srcu_ctld); srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ } @@ -1160,7 +1154,7 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) unsigned long randmask2 = randmask1 >> 3; WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); - /* Most of the time lots of bits, half the time only one bit. */ + /* Mostly only one bit (need preemption!), sometimes lots of bits. */ if (!(randmask1 & 0x7)) mask = mask & randmask2; else @@ -1170,10 +1164,6 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; - if ((mask & RCUTORTURE_RDR_IRQ) && - !(mask & cur_ops->ext_irq_conflict) && - (oldmask & cur_ops->ext_irq_conflict)) - mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */ return mask ?: RCUTORTURE_RDR_RCU; } @@ -1848,7 +1838,7 @@ static int rcutorture_oom_notify(struct notifier_block *self, WARN(1, "%s invoked upon OOM during forward-progress testing.\n", __func__); rcu_torture_fwd_cb_hist(); - rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2)); + rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2); WRITE_ONCE(rcu_fwd_emergency_stop, true); smp_mb(); /* Emergency stop before free and wait to avoid hangs. 
*/ pr_info("%s: Freed %lu RCU callbacks.\n", @@ -2094,6 +2084,10 @@ rcu_torture_cleanup(void) cur_ops->cb_barrier(); return; } + if (!cur_ops) { + torture_cleanup_end(); + return; + } rcu_torture_barrier_cleanup(); torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); @@ -2267,6 +2261,7 @@ rcu_torture_init(void) pr_cont("\n"); WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); firsterr = -EINVAL; + cur_ops = NULL; goto unwind; } if (cur_ops->fqs == NULL && fqs_duration != 0) { diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 5d4a39a6505a..44d6606b8325 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -76,19 +76,16 @@ EXPORT_SYMBOL_GPL(init_srcu_struct); * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. */ -void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) +void cleanup_srcu_struct(struct srcu_struct *ssp) { WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]); - if (quiesced) - WARN_ON(work_pending(&ssp->srcu_work)); - else - flush_work(&ssp->srcu_work); + flush_work(&ssp->srcu_work); WARN_ON(ssp->srcu_gp_running); WARN_ON(ssp->srcu_gp_waiting); WARN_ON(ssp->srcu_cb_head); WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail); } -EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); /* * Removes the count for the old reader from the appropriate element of diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index a60b8ba9e1ac..9b761e546de8 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -360,8 +360,14 @@ static unsigned long srcu_get_delay(struct srcu_struct *ssp) return SRCU_INTERVAL; } -/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ -void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) +/** + * cleanup_srcu_struct - deconstruct a sleep-RCU structure + * @ssp: structure to clean up. + * + * Must invoke this after you are finished using a given srcu_struct that + * was initialized via init_srcu_struct(), else you leak memory. + */ +void cleanup_srcu_struct(struct srcu_struct *ssp) { int cpu; @@ -369,24 +375,14 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) return; /* Just leak it! */ if (WARN_ON(srcu_readers_active(ssp))) return; /* Just leak it! */ - if (quiesced) { - if (WARN_ON(delayed_work_pending(&ssp->work))) - return; /* Just leak it! */ - } else { - flush_delayed_work(&ssp->work); - } + flush_delayed_work(&ssp->work); for_each_possible_cpu(cpu) { struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); - if (quiesced) { - if (WARN_ON(timer_pending(&sdp->delay_work))) - return; /* Just leak it! */ - if (WARN_ON(work_pending(&sdp->work))) - return; /* Just leak it! */ - } else { - del_timer_sync(&sdp->delay_work); - flush_work(&sdp->work); - } + del_timer_sync(&sdp->delay_work); + flush_work(&sdp->work); + if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist))) + return; /* Forgot srcu_barrier(), so just leak it! 
*/ } if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || WARN_ON(srcu_readers_active(ssp))) { @@ -397,7 +393,7 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) free_percpu(ssp->sda); ssp->sda = NULL; } -EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); /* * Counts the new reader in the appropriate per-CPU element of the diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index 911bd9076d43..477b4eb44af5 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -52,7 +52,7 @@ void rcu_qs(void) local_irq_save(flags); if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) { rcu_ctrlblk.donetail = rcu_ctrlblk.curtail; - raise_softirq(RCU_SOFTIRQ); + raise_softirq_irqoff(RCU_SOFTIRQ); } local_irq_restore(flags); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index acd6ccf56faf..ec77ec336f58 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -102,11 +102,6 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; /* Number of rcu_nodes at specified level. */ int num_rcu_lvl[] = NUM_RCU_LVL_INIT; int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */ -/* panic() on RCU Stall sysctl. */ -int sysctl_panic_on_rcu_stall __read_mostly; -/* Commandeer a sysrq key to dump RCU's tree. */ -static bool sysrq_rcu; -module_param(sysrq_rcu, bool, 0444); /* * The rcu_scheduler_active variable is initialized to the value @@ -149,7 +144,7 @@ static void sync_sched_exp_online_cleanup(int cpu); /* rcuc/rcub kthread realtime priority */ static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0; -module_param(kthread_prio, int, 0644); +module_param(kthread_prio, int, 0444); /* Delay in jiffies for grace-period initialization delays, debug only. */ @@ -406,7 +401,7 @@ static bool rcu_kick_kthreads; */ static ulong jiffies_till_sched_qs = ULONG_MAX; module_param(jiffies_till_sched_qs, ulong, 0444); -static ulong jiffies_to_sched_qs; /* Adjusted version of above if not default */ +static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */ module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */ /* @@ -424,6 +419,7 @@ static void adjust_jiffies_till_sched_qs(void) WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs); return; } + /* Otherwise, set to third fqs scan, but bound below on large system. */ j = READ_ONCE(jiffies_till_first_fqs) + 2 * READ_ONCE(jiffies_till_next_fqs); if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV) @@ -513,74 +509,6 @@ static const char *gp_state_getname(short gs) } /* - * Show the state of the grace-period kthreads. - */ -void show_rcu_gp_kthreads(void) -{ - int cpu; - unsigned long j; - unsigned long ja; - unsigned long jr; - unsigned long jw; - struct rcu_data *rdp; - struct rcu_node *rnp; - - j = jiffies; - ja = j - READ_ONCE(rcu_state.gp_activity); - jr = j - READ_ONCE(rcu_state.gp_req_activity); - jw = j - READ_ONCE(rcu_state.gp_wake_time); - pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n", - rcu_state.name, gp_state_getname(rcu_state.gp_state), - rcu_state.gp_state, - rcu_state.gp_kthread ? 
rcu_state.gp_kthread->state : 0x1ffffL, - ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq), - (long)READ_ONCE(rcu_state.gp_seq), - (long)READ_ONCE(rcu_get_root()->gp_seq_needed), - READ_ONCE(rcu_state.gp_flags)); - rcu_for_each_node_breadth_first(rnp) { - if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) - continue; - pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n", - rnp->grplo, rnp->grphi, (long)rnp->gp_seq, - (long)rnp->gp_seq_needed); - if (!rcu_is_leaf_node(rnp)) - continue; - for_each_leaf_node_possible_cpu(rnp, cpu) { - rdp = per_cpu_ptr(&rcu_data, cpu); - if (rdp->gpwrap || - ULONG_CMP_GE(rcu_state.gp_seq, - rdp->gp_seq_needed)) - continue; - pr_info("\tcpu %d ->gp_seq_needed %ld\n", - cpu, (long)rdp->gp_seq_needed); - } - } - /* sched_show_task(rcu_state.gp_kthread); */ -} -EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); - -/* Dump grace-period-request information due to commandeered sysrq. */ -static void sysrq_show_rcu(int key) -{ - show_rcu_gp_kthreads(); -} - -static struct sysrq_key_op sysrq_rcudump_op = { - .handler = sysrq_show_rcu, - .help_msg = "show-rcu(y)", - .action_msg = "Show RCU tree", - .enable_mask = SYSRQ_ENABLE_DUMP, -}; - -static int __init rcu_sysrq_init(void) -{ - if (sysrq_rcu) - return register_sysrq_key('y', &sysrq_rcudump_op); - return 0; -} -early_initcall(rcu_sysrq_init); - -/* * Send along grace-period-related data for rcutorture diagnostics. */ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, @@ -1034,27 +962,6 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) } /* - * Handler for the irq_work request posted when a grace period has - * gone on for too long, but not yet long enough for an RCU CPU - * stall warning. Set state appropriately, but just complain if - * there is unexpected state on entry. - */ -static void rcu_iw_handler(struct irq_work *iwp) -{ - struct rcu_data *rdp; - struct rcu_node *rnp; - - rdp = container_of(iwp, struct rcu_data, rcu_iw); - rnp = rdp->mynode; - raw_spin_lock_rcu_node(rnp); - if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) { - rdp->rcu_iw_gp_seq = rnp->gp_seq; - rdp->rcu_iw_pending = false; - } - raw_spin_unlock_rcu_node(rnp); -} - -/* * Return true if the specified CPU has passed through a quiescent * state by virtue of being in or having passed through an dynticks * idle state since the last call to dyntick_save_progress_counter() @@ -1167,295 +1074,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) return 0; } -static void record_gp_stall_check_time(void) -{ - unsigned long j = jiffies; - unsigned long j1; - - rcu_state.gp_start = j; - j1 = rcu_jiffies_till_stall_check(); - /* Record ->gp_start before ->jiffies_stall. */ - smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */ - rcu_state.jiffies_resched = j + j1 / 2; - rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs); -} - -/* - * Complain about starvation of grace-period kthread. - */ -static void rcu_check_gp_kthread_starvation(void) -{ - struct task_struct *gpk = rcu_state.gp_kthread; - unsigned long j; - - j = jiffies - READ_ONCE(rcu_state.gp_activity); - if (j > 2 * HZ) { - pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", - rcu_state.name, j, - (long)rcu_seq_current(&rcu_state.gp_seq), - READ_ONCE(rcu_state.gp_flags), - gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, - gpk ? gpk->state : ~0, gpk ? 
task_cpu(gpk) : -1); - if (gpk) { - pr_err("RCU grace-period kthread stack dump:\n"); - sched_show_task(gpk); - wake_up_process(gpk); - } - } -} - -/* - * Dump stacks of all tasks running on stalled CPUs. First try using - * NMIs, but fall back to manual remote stack tracing on architectures - * that don't support NMI-based stack dumps. The NMI-triggered stack - * traces are more accurate because they are printed by the target CPU. - */ -static void rcu_dump_cpu_stacks(void) -{ - int cpu; - unsigned long flags; - struct rcu_node *rnp; - - rcu_for_each_leaf_node(rnp) { - raw_spin_lock_irqsave_rcu_node(rnp, flags); - for_each_leaf_node_possible_cpu(rnp, cpu) - if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) - if (!trigger_single_cpu_backtrace(cpu)) - dump_cpu_task(cpu); - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } -} - -/* - * If too much time has passed in the current grace period, and if - * so configured, go kick the relevant kthreads. - */ -static void rcu_stall_kick_kthreads(void) -{ - unsigned long j; - - if (!rcu_kick_kthreads) - return; - j = READ_ONCE(rcu_state.jiffies_kick_kthreads); - if (time_after(jiffies, j) && rcu_state.gp_kthread && - (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) { - WARN_ONCE(1, "Kicking %s grace-period kthread\n", - rcu_state.name); - rcu_ftrace_dump(DUMP_ALL); - wake_up_process(rcu_state.gp_kthread); - WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ); - } -} - -static void panic_on_rcu_stall(void) -{ - if (sysctl_panic_on_rcu_stall) - panic("RCU Stall\n"); -} - -static void print_other_cpu_stall(unsigned long gp_seq) -{ - int cpu; - unsigned long flags; - unsigned long gpa; - unsigned long j; - int ndetected = 0; - struct rcu_node *rnp = rcu_get_root(); - long totqlen = 0; - - /* Kick and suppress, if so configured. */ - rcu_stall_kick_kthreads(); - if (rcu_cpu_stall_suppress) - return; - - /* - * OK, time to rat on our buddy... - * See Documentation/RCU/stallwarn.txt for info on how to debug - * RCU CPU stall warnings. - */ - pr_err("INFO: %s detected stalls on CPUs/tasks:", rcu_state.name); - print_cpu_stall_info_begin(); - rcu_for_each_leaf_node(rnp) { - raw_spin_lock_irqsave_rcu_node(rnp, flags); - ndetected += rcu_print_task_stall(rnp); - if (rnp->qsmask != 0) { - for_each_leaf_node_possible_cpu(rnp, cpu) - if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { - print_cpu_stall_info(cpu); - ndetected++; - } - } - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - } - - print_cpu_stall_info_end(); - for_each_possible_cpu(cpu) - totqlen += rcu_get_n_cbs_cpu(cpu); - pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", - smp_processor_id(), (long)(jiffies - rcu_state.gp_start), - (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); - if (ndetected) { - rcu_dump_cpu_stacks(); - - /* Complain about tasks blocking the grace period. */ - rcu_print_detail_task_stall(); - } else { - if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) { - pr_err("INFO: Stall ended before state dump start\n"); - } else { - j = jiffies; - gpa = READ_ONCE(rcu_state.gp_activity); - pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", - rcu_state.name, j - gpa, j, gpa, - READ_ONCE(jiffies_till_next_fqs), - rcu_get_root()->qsmask); - /* In this case, the current CPU might be at fault. */ - sched_show_task(current); - } - } - /* Rewrite if needed in case of slow consoles. 
*/ - if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) - WRITE_ONCE(rcu_state.jiffies_stall, - jiffies + 3 * rcu_jiffies_till_stall_check() + 3); - - rcu_check_gp_kthread_starvation(); - - panic_on_rcu_stall(); - - rcu_force_quiescent_state(); /* Kick them all. */ -} - -static void print_cpu_stall(void) -{ - int cpu; - unsigned long flags; - struct rcu_data *rdp = this_cpu_ptr(&rcu_data); - struct rcu_node *rnp = rcu_get_root(); - long totqlen = 0; - - /* Kick and suppress, if so configured. */ - rcu_stall_kick_kthreads(); - if (rcu_cpu_stall_suppress) - return; - - /* - * OK, time to rat on ourselves... - * See Documentation/RCU/stallwarn.txt for info on how to debug - * RCU CPU stall warnings. - */ - pr_err("INFO: %s self-detected stall on CPU", rcu_state.name); - print_cpu_stall_info_begin(); - raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); - print_cpu_stall_info(smp_processor_id()); - raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); - print_cpu_stall_info_end(); - for_each_possible_cpu(cpu) - totqlen += rcu_get_n_cbs_cpu(cpu); - pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n", - jiffies - rcu_state.gp_start, - (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); - - rcu_check_gp_kthread_starvation(); - - rcu_dump_cpu_stacks(); - - raw_spin_lock_irqsave_rcu_node(rnp, flags); - /* Rewrite if needed in case of slow consoles. */ - if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) - WRITE_ONCE(rcu_state.jiffies_stall, - jiffies + 3 * rcu_jiffies_till_stall_check() + 3); - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - - panic_on_rcu_stall(); - - /* - * Attempt to revive the RCU machinery by forcing a context switch. - * - * A context switch would normally allow the RCU state machine to make - * progress and it could be we're stuck in kernel space without context - * switches for an entirely unreasonable amount of time. - */ - set_tsk_need_resched(current); - set_preempt_need_resched(); -} - -static void check_cpu_stall(struct rcu_data *rdp) -{ - unsigned long gs1; - unsigned long gs2; - unsigned long gps; - unsigned long j; - unsigned long jn; - unsigned long js; - struct rcu_node *rnp; - - if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || - !rcu_gp_in_progress()) - return; - rcu_stall_kick_kthreads(); - j = jiffies; - - /* - * Lots of memory barriers to reject false positives. - * - * The idea is to pick up rcu_state.gp_seq, then - * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally - * another copy of rcu_state.gp_seq. These values are updated in - * the opposite order with memory barriers (or equivalent) during - * grace-period initialization and cleanup. Now, a false positive - * can occur if we get an new value of rcu_state.gp_start and a old - * value of rcu_state.jiffies_stall. But given the memory barriers, - * the only way that this can happen is if one grace period ends - * and another starts between these two fetches. This is detected - * by comparing the second fetch of rcu_state.gp_seq with the - * previous fetch from rcu_state.gp_seq. - * - * Given this check, comparisons of jiffies, rcu_state.jiffies_stall, - * and rcu_state.gp_start suffice to forestall false positives. - */ - gs1 = READ_ONCE(rcu_state.gp_seq); - smp_rmb(); /* Pick up ->gp_seq first... */ - js = READ_ONCE(rcu_state.jiffies_stall); - smp_rmb(); /* ...then ->jiffies_stall before the rest... */ - gps = READ_ONCE(rcu_state.gp_start); - smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. 
*/ - gs2 = READ_ONCE(rcu_state.gp_seq); - if (gs1 != gs2 || - ULONG_CMP_LT(j, js) || - ULONG_CMP_GE(gps, js)) - return; /* No stall or GP completed since entering function. */ - rnp = rdp->mynode; - jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; - if (rcu_gp_in_progress() && - (READ_ONCE(rnp->qsmask) & rdp->grpmask) && - cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { - - /* We haven't checked in, so go dump stack. */ - print_cpu_stall(); - - } else if (rcu_gp_in_progress() && - ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && - cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { - - /* They had a few time units to dump stack, so complain. */ - print_other_cpu_stall(gs2); - } -} - -/** - * rcu_cpu_stall_reset - prevent further stall warnings in current grace period - * - * Set the stall-warning timeout way off into the future, thus preventing - * any RCU CPU stall-warning messages from appearing in the current set of - * RCU grace periods. - * - * The caller must disable hard irqs. - */ -void rcu_cpu_stall_reset(void) -{ - WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2); -} - /* Trace-event wrapper function for trace_rcu_future_grace_period. */ static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, unsigned long gp_seq_req, const char *s) @@ -1585,7 +1203,7 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp) static void rcu_gp_kthread_wake(void) { if ((current == rcu_state.gp_kthread && - !in_interrupt() && !in_serving_softirq()) || + !in_irq() && !in_serving_softirq()) || !READ_ONCE(rcu_state.gp_flags) || !rcu_state.gp_kthread) return; @@ -2295,11 +1913,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) return; } mask = rdp->grpmask; + rdp->core_needs_qs = false; if ((rnp->qsmask & mask) == 0) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } else { - rdp->core_needs_qs = false; - /* * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. @@ -2548,11 +2165,11 @@ void rcu_sched_clock_irq(int user) } /* - * Scan the leaf rcu_node structures, processing dyntick state for any that - * have not yet encountered a quiescent state, using the function specified. - * Also initiate boosting for any threads blocked on the root rcu_node. - * - * The caller must have suppressed start of new grace periods. + * Scan the leaf rcu_node structures. For each structure on which all + * CPUs have reported a quiescent state and on which there are tasks + * blocking the current grace period, initiate RCU priority boosting. + * Otherwise, invoke the specified function to check dyntick state for + * each CPU that has not yet reported a quiescent state. */ static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) { @@ -2635,101 +2252,6 @@ void rcu_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); -/* - * This function checks for grace-period requests that fail to motivate - * RCU to come out of its idle mode. - */ -void -rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, - const unsigned long gpssdelay) -{ - unsigned long flags; - unsigned long j; - struct rcu_node *rnp_root = rcu_get_root(); - static atomic_t warned = ATOMIC_INIT(0); - - if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || - ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed)) - return; - j = jiffies; /* Expensive access, and in common case don't get here. 
*/ - if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || - time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || - atomic_read(&warned)) - return; - - raw_spin_lock_irqsave_rcu_node(rnp, flags); - j = jiffies; - if (rcu_gp_in_progress() || - ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || - time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || - time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || - atomic_read(&warned)) { - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - return; - } - /* Hold onto the leaf lock to make others see warned==1. */ - - if (rnp_root != rnp) - raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ - j = jiffies; - if (rcu_gp_in_progress() || - ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || - time_before(j, rcu_state.gp_req_activity + gpssdelay) || - time_before(j, rcu_state.gp_activity + gpssdelay) || - atomic_xchg(&warned, 1)) { - raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */ - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - return; - } - WARN_ON(1); - if (rnp_root != rnp) - raw_spin_unlock_rcu_node(rnp_root); - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - show_rcu_gp_kthreads(); -} - -/* - * Do a forward-progress check for rcutorture. This is normally invoked - * due to an OOM event. The argument "j" gives the time period during - * which rcutorture would like progress to have been made. - */ -void rcu_fwd_progress_check(unsigned long j) -{ - unsigned long cbs; - int cpu; - unsigned long max_cbs = 0; - int max_cpu = -1; - struct rcu_data *rdp; - - if (rcu_gp_in_progress()) { - pr_info("%s: GP age %lu jiffies\n", - __func__, jiffies - rcu_state.gp_start); - show_rcu_gp_kthreads(); - } else { - pr_info("%s: Last GP end %lu jiffies ago\n", - __func__, jiffies - rcu_state.gp_end); - preempt_disable(); - rdp = this_cpu_ptr(&rcu_data); - rcu_check_gp_start_stall(rdp->mynode, rdp, j); - preempt_enable(); - } - for_each_possible_cpu(cpu) { - cbs = rcu_get_n_cbs_cpu(cpu); - if (!cbs) - continue; - if (max_cpu < 0) - pr_info("%s: callbacks", __func__); - pr_cont(" %d: %lu", cpu, cbs); - if (cbs <= max_cbs) - continue; - max_cbs = cbs; - max_cpu = cpu; - } - if (max_cpu >= 0) - pr_cont("\n"); -} -EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); - /* Perform RCU core processing work for the current CPU. */ static __latent_entropy void rcu_core(struct softirq_action *unused) { @@ -3559,13 +3081,11 @@ static int rcu_pm_notify(struct notifier_block *self, switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: - if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ - rcu_expedite_gp(); + rcu_expedite_gp(); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: - if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */ - rcu_unexpedite_gp(); + rcu_unexpedite_gp(); break; default: break; @@ -3742,8 +3262,7 @@ static void __init rcu_init_geometry(void) jiffies_till_first_fqs = d; if (jiffies_till_next_fqs == ULONG_MAX) jiffies_till_next_fqs = d; - if (jiffies_till_sched_qs == ULONG_MAX) - adjust_jiffies_till_sched_qs(); + adjust_jiffies_till_sched_qs(); /* If the compile-time values are accurate, just leave. 
*/ if (rcu_fanout_leaf == RCU_FANOUT_LEAF && @@ -3858,5 +3377,6 @@ void __init rcu_init(void) srcu_init(); } +#include "tree_stall.h" #include "tree_exp.h" #include "tree_plugin.h" diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index bb4f995f2d3f..e253d11af3c4 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -393,15 +393,13 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name; int rcu_dynticks_snap(struct rcu_data *rdp); -/* Forward declarations for rcutree_plugin.h */ +/* Forward declarations for tree_plugin.h */ static void rcu_bootup_announce(void); static void rcu_qs(void); static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static bool rcu_preempt_has_tasks(struct rcu_node *rnp); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ -static void rcu_print_detail_task_stall(void); -static int rcu_print_task_stall(struct rcu_node *rnp); static int rcu_print_task_exp_stall(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); static void rcu_flavor_sched_clock_irq(int user); @@ -418,9 +416,6 @@ static void rcu_prepare_for_idle(void); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); static bool rcu_preempt_need_deferred_qs(struct task_struct *t); static void rcu_preempt_deferred_qs(struct task_struct *t); -static void print_cpu_stall_info_begin(void); -static void print_cpu_stall_info(int cpu); -static void print_cpu_stall_info_end(void); static void zero_cpu_stall_ticks(struct rcu_data *rdp); static bool rcu_nocb_cpu_needs_barrier(int cpu); static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); @@ -445,3 +440,10 @@ static void rcu_bind_gp_kthread(void); static bool rcu_nohz_full_cpu(void); static void rcu_dynticks_task_enter(void); static void rcu_dynticks_task_exit(void); + +/* Forward declarations for tree_stall.h */ +static void record_gp_stall_check_time(void); +static void rcu_iw_handler(struct irq_work *iwp); +static void check_cpu_stall(struct rcu_data *rdp); +static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, + const unsigned long gpssdelay); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 4c2a0189e748..9c990df880d1 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -10,6 +10,7 @@ #include <linux/lockdep.h> static void rcu_exp_handler(void *unused); +static int rcu_print_task_exp_stall(struct rcu_node *rnp); /* * Record the start of an expedited grace period. @@ -633,7 +634,7 @@ static void rcu_exp_handler(void *unused) raw_spin_lock_irqsave_rcu_node(rnp, flags); if (rnp->expmask & rdp->grpmask) { rdp->deferred_qs = true; - WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true); + t->rcu_read_unlock_special.b.exp_hint = true; } raw_spin_unlock_irqrestore_rcu_node(rnp, flags); return; @@ -648,7 +649,7 @@ static void rcu_exp_handler(void *unused) * * If the CPU is fully enabled (or if some buggy RCU-preempt * read-side critical section is being used from idle), just - * invoke rcu_preempt_defer_qs() to immediately report the + * invoke rcu_preempt_deferred_qs() to immediately report the * quiescent state. We cannot use rcu_read_unlock_special() * because we are in an interrupt handler, which will cause that * function to take an early exit without doing anything. 
@@ -670,6 +671,27 @@ static void sync_sched_exp_online_cleanup(int cpu) { } +/* + * Scan the current list of tasks blocked within RCU read-side critical + * sections, printing out the tid of each that is blocking the current + * expedited grace period. + */ +static int rcu_print_task_exp_stall(struct rcu_node *rnp) +{ + struct task_struct *t; + int ndetected = 0; + + if (!rnp->exp_tasks) + return 0; + t = list_entry(rnp->exp_tasks->prev, + struct task_struct, rcu_node_entry); + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { + pr_cont(" P%d", t->pid); + ndetected++; + } + return ndetected; +} + #else /* #ifdef CONFIG_PREEMPT_RCU */ /* Invoked on each online non-idle CPU for expedited quiescent state. */ @@ -709,6 +731,16 @@ static void sync_sched_exp_online_cleanup(int cpu) WARN_ON_ONCE(ret); } +/* + * Because preemptible RCU does not exist, we never have to check for + * tasks blocked within RCU read-side critical sections that are + * blocking the current expedited grace period. + */ +static int rcu_print_task_exp_stall(struct rcu_node *rnp) +{ + return 0; +} + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /** diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 97dba50f6fb2..1102765f91fd 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -285,7 +285,7 @@ static void rcu_qs(void) TPS("cpuqs")); __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false); barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */ - current->rcu_read_unlock_special.b.need_qs = false; + WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); } } @@ -643,100 +643,6 @@ static void rcu_read_unlock_special(struct task_struct *t) } /* - * Dump detailed information for all tasks blocking the current RCU - * grace period on the specified rcu_node structure. - */ -static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) -{ - unsigned long flags; - struct task_struct *t; - - raw_spin_lock_irqsave_rcu_node(rnp, flags); - if (!rcu_preempt_blocked_readers_cgp(rnp)) { - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - return; - } - t = list_entry(rnp->gp_tasks->prev, - struct task_struct, rcu_node_entry); - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { - /* - * We could be printing a lot while holding a spinlock. - * Avoid triggering hard lockup. - */ - touch_nmi_watchdog(); - sched_show_task(t); - } - raw_spin_unlock_irqrestore_rcu_node(rnp, flags); -} - -/* - * Dump detailed information for all tasks blocking the current RCU - * grace period. - */ -static void rcu_print_detail_task_stall(void) -{ - struct rcu_node *rnp = rcu_get_root(); - - rcu_print_detail_task_stall_rnp(rnp); - rcu_for_each_leaf_node(rnp) - rcu_print_detail_task_stall_rnp(rnp); -} - -static void rcu_print_task_stall_begin(struct rcu_node *rnp) -{ - pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", - rnp->level, rnp->grplo, rnp->grphi); -} - -static void rcu_print_task_stall_end(void) -{ - pr_cont("\n"); -} - -/* - * Scan the current list of tasks blocked within RCU read-side critical - * sections, printing out the tid of each. 
- */ -static int rcu_print_task_stall(struct rcu_node *rnp) -{ - struct task_struct *t; - int ndetected = 0; - - if (!rcu_preempt_blocked_readers_cgp(rnp)) - return 0; - rcu_print_task_stall_begin(rnp); - t = list_entry(rnp->gp_tasks->prev, - struct task_struct, rcu_node_entry); - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { - pr_cont(" P%d", t->pid); - ndetected++; - } - rcu_print_task_stall_end(); - return ndetected; -} - -/* - * Scan the current list of tasks blocked within RCU read-side critical - * sections, printing out the tid of each that is blocking the current - * expedited grace period. - */ -static int rcu_print_task_exp_stall(struct rcu_node *rnp) -{ - struct task_struct *t; - int ndetected = 0; - - if (!rnp->exp_tasks) - return 0; - t = list_entry(rnp->exp_tasks->prev, - struct task_struct, rcu_node_entry); - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { - pr_cont(" P%d", t->pid); - ndetected++; - } - return ndetected; -} - -/* * Check that the list of blocked tasks for the newly completed grace * period is in fact empty. It is a serious bug to complete a grace * period that still has RCU readers blocked! This function must be @@ -804,19 +710,25 @@ static void rcu_flavor_sched_clock_irq(int user) /* * Check for a task exiting while in a preemptible-RCU read-side - * critical section, clean up if so. No need to issue warnings, - * as debug_check_no_locks_held() already does this if lockdep - * is enabled. + * critical section, clean up if so. No need to issue warnings, as + * debug_check_no_locks_held() already does this if lockdep is enabled. + * Besides, if this function does anything other than just immediately + * return, there was a bug of some sort. Spewing warnings from this + * function is like as not to simply obscure important prior warnings. */ void exit_rcu(void) { struct task_struct *t = current; - if (likely(list_empty(¤t->rcu_node_entry))) + if (unlikely(!list_empty(¤t->rcu_node_entry))) { + t->rcu_read_lock_nesting = 1; + barrier(); + WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); + } else if (unlikely(t->rcu_read_lock_nesting)) { + t->rcu_read_lock_nesting = 1; + } else { return; - t->rcu_read_lock_nesting = 1; - barrier(); - t->rcu_read_unlock_special.b.blocked = true; + } __rcu_read_unlock(); rcu_preempt_deferred_qs(current); } @@ -980,33 +892,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t) static void rcu_preempt_deferred_qs(struct task_struct *t) { } /* - * Because preemptible RCU does not exist, we never have to check for - * tasks blocked within RCU read-side critical sections. - */ -static void rcu_print_detail_task_stall(void) -{ -} - -/* - * Because preemptible RCU does not exist, we never have to check for - * tasks blocked within RCU read-side critical sections. - */ -static int rcu_print_task_stall(struct rcu_node *rnp) -{ - return 0; -} - -/* - * Because preemptible RCU does not exist, we never have to check for - * tasks blocked within RCU read-side critical sections that are - * blocking the current expedited grace period. - */ -static int rcu_print_task_exp_stall(struct rcu_node *rnp) -{ - return 0; -} - -/* * Because there is no preemptible RCU, there can be no readers blocked, * so there is no need to check for blocked tasks. So check only for * bogus qsmask values. 
@@ -1185,8 +1070,6 @@ static int rcu_boost_kthread(void *arg) static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) __releases(rnp->lock) { - struct task_struct *t; - raw_lockdep_assert_held_rcu_node(rnp); if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); @@ -1200,9 +1083,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) if (rnp->exp_tasks == NULL) rnp->boost_tasks = rnp->gp_tasks; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); - t = rnp->boost_kthread_task; - if (t) - rcu_wake_cond(t, rnp->boost_kthread_status); + rcu_wake_cond(rnp->boost_kthread_task, + rnp->boost_kthread_status); } else { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } @@ -1649,98 +1531,6 @@ static void rcu_cleanup_after_idle(void) #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ -#ifdef CONFIG_RCU_FAST_NO_HZ - -static void print_cpu_stall_fast_no_hz(char *cp, int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - - sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c", - rdp->last_accelerate & 0xffff, jiffies & 0xffff, - ".l"[rdp->all_lazy], - ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)], - ".D"[!rdp->tick_nohz_enabled_snap]); -} - -#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ - -static void print_cpu_stall_fast_no_hz(char *cp, int cpu) -{ - *cp = '\0'; -} - -#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ - -/* Initiate the stall-info list. */ -static void print_cpu_stall_info_begin(void) -{ - pr_cont("\n"); -} - -/* - * Print out diagnostic information for the specified stalled CPU. - * - * If the specified CPU is aware of the current RCU grace period, then - * print the number of scheduling clock interrupts the CPU has taken - * during the time that it has been aware. Otherwise, print the number - * of RCU grace periods that this CPU is ignorant of, for example, "1" - * if the CPU was aware of the previous grace period. - * - * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. - */ -static void print_cpu_stall_info(int cpu) -{ - unsigned long delta; - char fast_no_hz[72]; - struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); - char *ticks_title; - unsigned long ticks_value; - - /* - * We could be printing a lot while holding a spinlock. Avoid - * triggering hard lockup. - */ - touch_nmi_watchdog(); - - ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq); - if (ticks_value) { - ticks_title = "GPs behind"; - } else { - ticks_title = "ticks this GP"; - ticks_value = rdp->ticks_this_gp; - } - print_cpu_stall_fast_no_hz(fast_no_hz, cpu); - delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq); - pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n", - cpu, - "O."[!!cpu_online(cpu)], - "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], - "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)], - !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' : - rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : - "!."[!delta], - ticks_value, ticks_title, - rcu_dynticks_snap(rdp) & 0xfff, - rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, - rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), - READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, - fast_no_hz); -} - -/* Terminate the stall-info list. */ -static void print_cpu_stall_info_end(void) -{ - pr_err("\t"); -} - -/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. 
*/ -static void zero_cpu_stall_ticks(struct rcu_data *rdp) -{ - rdp->ticks_this_gp = 0; - rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); - WRITE_ONCE(rdp->last_fqs_resched, jiffies); -} - #ifdef CONFIG_RCU_NOCB_CPU /* @@ -1766,11 +1556,22 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) */ -/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */ +/* + * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. + * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a + * comma-separated list of CPUs and/or CPU ranges. If an invalid list is + * given, a warning is emitted and all CPUs are offloaded. + */ static int __init rcu_nocb_setup(char *str) { alloc_bootmem_cpumask_var(&rcu_nocb_mask); - cpulist_parse(str, rcu_nocb_mask); + if (!strcasecmp(str, "all")) + cpumask_setall(rcu_nocb_mask); + else + if (cpulist_parse(str, rcu_nocb_mask)) { + pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n"); + cpumask_setall(rcu_nocb_mask); + } return 1; } __setup("rcu_nocbs=", rcu_nocb_setup); diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h new file mode 100644 index 000000000000..f65a73a97323 --- /dev/null +++ b/kernel/rcu/tree_stall.h @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * RCU CPU stall warnings for normal RCU grace periods + * + * Copyright IBM Corporation, 2019 + * + * Author: Paul E. McKenney <paulmck@linux.ibm.com> + */ + +////////////////////////////////////////////////////////////////////////////// +// +// Controlling CPU stall warnings, including delay calculation. + +/* panic() on RCU Stall sysctl. */ +int sysctl_panic_on_rcu_stall __read_mostly; + +#ifdef CONFIG_PROVE_RCU +#define RCU_STALL_DELAY_DELTA (5 * HZ) +#else +#define RCU_STALL_DELAY_DELTA 0 +#endif + +/* Limit-check stall timeouts specified at boottime and runtime. */ +int rcu_jiffies_till_stall_check(void) +{ + int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); + + /* + * Limit check must be consistent with the Kconfig limits + * for CONFIG_RCU_CPU_STALL_TIMEOUT. + */ + if (till_stall_check < 3) { + WRITE_ONCE(rcu_cpu_stall_timeout, 3); + till_stall_check = 3; + } else if (till_stall_check > 300) { + WRITE_ONCE(rcu_cpu_stall_timeout, 300); + till_stall_check = 300; + } + return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; +} +EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check); + +/* Don't do RCU CPU stall warnings during long sysrq printouts. */ +void rcu_sysrq_start(void) +{ + if (!rcu_cpu_stall_suppress) + rcu_cpu_stall_suppress = 2; +} + +void rcu_sysrq_end(void) +{ + if (rcu_cpu_stall_suppress == 2) + rcu_cpu_stall_suppress = 0; +} + +/* Don't print RCU CPU stall warnings during a kernel panic. */ +static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) +{ + rcu_cpu_stall_suppress = 1; + return NOTIFY_DONE; +} + +static struct notifier_block rcu_panic_block = { + .notifier_call = rcu_panic, +}; + +static int __init check_cpu_stall_init(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); + return 0; +} +early_initcall(check_cpu_stall_init); + +/* If so specified via sysctl, panic, yielding cleaner stall-warning output. 
*/ +static void panic_on_rcu_stall(void) +{ + if (sysctl_panic_on_rcu_stall) + panic("RCU Stall\n"); +} + +/** + * rcu_cpu_stall_reset - prevent further stall warnings in current grace period + * + * Set the stall-warning timeout way off into the future, thus preventing + * any RCU CPU stall-warning messages from appearing in the current set of + * RCU grace periods. + * + * The caller must disable hard irqs. + */ +void rcu_cpu_stall_reset(void) +{ + WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Interaction with RCU grace periods + +/* Start of new grace period, so record stall time (and forcing times). */ +static void record_gp_stall_check_time(void) +{ + unsigned long j = jiffies; + unsigned long j1; + + rcu_state.gp_start = j; + j1 = rcu_jiffies_till_stall_check(); + /* Record ->gp_start before ->jiffies_stall. */ + smp_store_release(&rcu_state.jiffies_stall, j + j1); /* ^^^ */ + rcu_state.jiffies_resched = j + j1 / 2; + rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs); +} + +/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */ +static void zero_cpu_stall_ticks(struct rcu_data *rdp) +{ + rdp->ticks_this_gp = 0; + rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id()); + WRITE_ONCE(rdp->last_fqs_resched, jiffies); +} + +/* + * If too much time has passed in the current grace period, and if + * so configured, go kick the relevant kthreads. + */ +static void rcu_stall_kick_kthreads(void) +{ + unsigned long j; + + if (!rcu_kick_kthreads) + return; + j = READ_ONCE(rcu_state.jiffies_kick_kthreads); + if (time_after(jiffies, j) && rcu_state.gp_kthread && + (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) { + WARN_ONCE(1, "Kicking %s grace-period kthread\n", + rcu_state.name); + rcu_ftrace_dump(DUMP_ALL); + wake_up_process(rcu_state.gp_kthread); + WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ); + } +} + +/* + * Handler for the irq_work request posted about halfway into the RCU CPU + * stall timeout, and used to detect excessive irq disabling. Set state + * appropriately, but just complain if there is unexpected state on entry. + */ +static void rcu_iw_handler(struct irq_work *iwp) +{ + struct rcu_data *rdp; + struct rcu_node *rnp; + + rdp = container_of(iwp, struct rcu_data, rcu_iw); + rnp = rdp->mynode; + raw_spin_lock_rcu_node(rnp); + if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) { + rdp->rcu_iw_gp_seq = rnp->gp_seq; + rdp->rcu_iw_pending = false; + } + raw_spin_unlock_rcu_node(rnp); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Printing RCU CPU stall warnings + +#ifdef CONFIG_PREEMPT + +/* + * Dump detailed information for all tasks blocking the current RCU + * grace period on the specified rcu_node structure. + */ +static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) +{ + unsigned long flags; + struct task_struct *t; + + raw_spin_lock_irqsave_rcu_node(rnp, flags); + if (!rcu_preempt_blocked_readers_cgp(rnp)) { + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + return; + } + t = list_entry(rnp->gp_tasks->prev, + struct task_struct, rcu_node_entry); + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { + /* + * We could be printing a lot while holding a spinlock. + * Avoid triggering hard lockup. 
+ */ + touch_nmi_watchdog(); + sched_show_task(t); + } + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); +} + +/* + * Scan the current list of tasks blocked within RCU read-side critical + * sections, printing out the tid of each. + */ +static int rcu_print_task_stall(struct rcu_node *rnp) +{ + struct task_struct *t; + int ndetected = 0; + + if (!rcu_preempt_blocked_readers_cgp(rnp)) + return 0; + pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", + rnp->level, rnp->grplo, rnp->grphi); + t = list_entry(rnp->gp_tasks->prev, + struct task_struct, rcu_node_entry); + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { + pr_cont(" P%d", t->pid); + ndetected++; + } + pr_cont("\n"); + return ndetected; +} + +#else /* #ifdef CONFIG_PREEMPT */ + +/* + * Because preemptible RCU does not exist, we never have to check for + * tasks blocked within RCU read-side critical sections. + */ +static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) +{ +} + +/* + * Because preemptible RCU does not exist, we never have to check for + * tasks blocked within RCU read-side critical sections. + */ +static int rcu_print_task_stall(struct rcu_node *rnp) +{ + return 0; +} +#endif /* #else #ifdef CONFIG_PREEMPT */ + +/* + * Dump stacks of all tasks running on stalled CPUs. First try using + * NMIs, but fall back to manual remote stack tracing on architectures + * that don't support NMI-based stack dumps. The NMI-triggered stack + * traces are more accurate because they are printed by the target CPU. + */ +static void rcu_dump_cpu_stacks(void) +{ + int cpu; + unsigned long flags; + struct rcu_node *rnp; + + rcu_for_each_leaf_node(rnp) { + raw_spin_lock_irqsave_rcu_node(rnp, flags); + for_each_leaf_node_possible_cpu(rnp, cpu) + if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) + if (!trigger_single_cpu_backtrace(cpu)) + dump_cpu_task(cpu); + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } +} + +#ifdef CONFIG_RCU_FAST_NO_HZ + +static void print_cpu_stall_fast_no_hz(char *cp, int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + + sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c", + rdp->last_accelerate & 0xffff, jiffies & 0xffff, + ".l"[rdp->all_lazy], + ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)], + ".D"[!!rdp->tick_nohz_enabled_snap]); +} + +#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ + +static void print_cpu_stall_fast_no_hz(char *cp, int cpu) +{ + *cp = '\0'; +} + +#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */ + +/* + * Print out diagnostic information for the specified stalled CPU. + * + * If the specified CPU is aware of the current RCU grace period, then + * print the number of scheduling clock interrupts the CPU has taken + * during the time that it has been aware. Otherwise, print the number + * of RCU grace periods that this CPU is ignorant of, for example, "1" + * if the CPU was aware of the previous grace period. + * + * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info. + */ +static void print_cpu_stall_info(int cpu) +{ + unsigned long delta; + char fast_no_hz[72]; + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + char *ticks_title; + unsigned long ticks_value; + + /* + * We could be printing a lot while holding a spinlock. Avoid + * triggering hard lockup. 
+ */ + touch_nmi_watchdog(); + + ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq); + if (ticks_value) { + ticks_title = "GPs behind"; + } else { + ticks_title = "ticks this GP"; + ticks_value = rdp->ticks_this_gp; + } + print_cpu_stall_fast_no_hz(fast_no_hz, cpu); + delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq); + pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n", + cpu, + "O."[!!cpu_online(cpu)], + "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], + "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)], + !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' : + rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : + "!."[!delta], + ticks_value, ticks_title, + rcu_dynticks_snap(rdp) & 0xfff, + rdp->dynticks_nesting, rdp->dynticks_nmi_nesting, + rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), + READ_ONCE(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, + fast_no_hz); +} + +/* Complain about starvation of grace-period kthread. */ +static void rcu_check_gp_kthread_starvation(void) +{ + struct task_struct *gpk = rcu_state.gp_kthread; + unsigned long j; + + j = jiffies - READ_ONCE(rcu_state.gp_activity); + if (j > 2 * HZ) { + pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", + rcu_state.name, j, + (long)rcu_seq_current(&rcu_state.gp_seq), + READ_ONCE(rcu_state.gp_flags), + gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, + gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1); + if (gpk) { + pr_err("RCU grace-period kthread stack dump:\n"); + sched_show_task(gpk); + wake_up_process(gpk); + } + } +} + +static void print_other_cpu_stall(unsigned long gp_seq) +{ + int cpu; + unsigned long flags; + unsigned long gpa; + unsigned long j; + int ndetected = 0; + struct rcu_node *rnp; + long totqlen = 0; + + /* Kick and suppress, if so configured. */ + rcu_stall_kick_kthreads(); + if (rcu_cpu_stall_suppress) + return; + + /* + * OK, time to rat on our buddy... + * See Documentation/RCU/stallwarn.txt for info on how to debug + * RCU CPU stall warnings. + */ + pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name); + rcu_for_each_leaf_node(rnp) { + raw_spin_lock_irqsave_rcu_node(rnp, flags); + ndetected += rcu_print_task_stall(rnp); + if (rnp->qsmask != 0) { + for_each_leaf_node_possible_cpu(rnp, cpu) + if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { + print_cpu_stall_info(cpu); + ndetected++; + } + } + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + } + + for_each_possible_cpu(cpu) + totqlen += rcu_get_n_cbs_cpu(cpu); + pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n", + smp_processor_id(), (long)(jiffies - rcu_state.gp_start), + (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); + if (ndetected) { + rcu_dump_cpu_stacks(); + + /* Complain about tasks blocking the grace period. */ + rcu_for_each_leaf_node(rnp) + rcu_print_detail_task_stall_rnp(rnp); + } else { + if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) { + pr_err("INFO: Stall ended before state dump start\n"); + } else { + j = jiffies; + gpa = READ_ONCE(rcu_state.gp_activity); + pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n", + rcu_state.name, j - gpa, j, gpa, + READ_ONCE(jiffies_till_next_fqs), + rcu_get_root()->qsmask); + /* In this case, the current CPU might be at fault. */ + sched_show_task(current); + } + } + /* Rewrite if needed in case of slow consoles. 
*/ + if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) + WRITE_ONCE(rcu_state.jiffies_stall, + jiffies + 3 * rcu_jiffies_till_stall_check() + 3); + + rcu_check_gp_kthread_starvation(); + + panic_on_rcu_stall(); + + rcu_force_quiescent_state(); /* Kick them all. */ +} + +static void print_cpu_stall(void) +{ + int cpu; + unsigned long flags; + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); + struct rcu_node *rnp = rcu_get_root(); + long totqlen = 0; + + /* Kick and suppress, if so configured. */ + rcu_stall_kick_kthreads(); + if (rcu_cpu_stall_suppress) + return; + + /* + * OK, time to rat on ourselves... + * See Documentation/RCU/stallwarn.txt for info on how to debug + * RCU CPU stall warnings. + */ + pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); + raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); + print_cpu_stall_info(smp_processor_id()); + raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags); + for_each_possible_cpu(cpu) + totqlen += rcu_get_n_cbs_cpu(cpu); + pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n", + jiffies - rcu_state.gp_start, + (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); + + rcu_check_gp_kthread_starvation(); + + rcu_dump_cpu_stacks(); + + raw_spin_lock_irqsave_rcu_node(rnp, flags); + /* Rewrite if needed in case of slow consoles. */ + if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall))) + WRITE_ONCE(rcu_state.jiffies_stall, + jiffies + 3 * rcu_jiffies_till_stall_check() + 3); + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + + panic_on_rcu_stall(); + + /* + * Attempt to revive the RCU machinery by forcing a context switch. + * + * A context switch would normally allow the RCU state machine to make + * progress and it could be we're stuck in kernel space without context + * switches for an entirely unreasonable amount of time. + */ + set_tsk_need_resched(current); + set_preempt_need_resched(); +} + +static void check_cpu_stall(struct rcu_data *rdp) +{ + unsigned long gs1; + unsigned long gs2; + unsigned long gps; + unsigned long j; + unsigned long jn; + unsigned long js; + struct rcu_node *rnp; + + if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) || + !rcu_gp_in_progress()) + return; + rcu_stall_kick_kthreads(); + j = jiffies; + + /* + * Lots of memory barriers to reject false positives. + * + * The idea is to pick up rcu_state.gp_seq, then + * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally + * another copy of rcu_state.gp_seq. These values are updated in + * the opposite order with memory barriers (or equivalent) during + * grace-period initialization and cleanup. Now, a false positive + * can occur if we get an new value of rcu_state.gp_start and a old + * value of rcu_state.jiffies_stall. But given the memory barriers, + * the only way that this can happen is if one grace period ends + * and another starts between these two fetches. This is detected + * by comparing the second fetch of rcu_state.gp_seq with the + * previous fetch from rcu_state.gp_seq. + * + * Given this check, comparisons of jiffies, rcu_state.jiffies_stall, + * and rcu_state.gp_start suffice to forestall false positives. + */ + gs1 = READ_ONCE(rcu_state.gp_seq); + smp_rmb(); /* Pick up ->gp_seq first... */ + js = READ_ONCE(rcu_state.jiffies_stall); + smp_rmb(); /* ...then ->jiffies_stall before the rest... */ + gps = READ_ONCE(rcu_state.gp_start); + smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. 
*/ + gs2 = READ_ONCE(rcu_state.gp_seq); + if (gs1 != gs2 || + ULONG_CMP_LT(j, js) || + ULONG_CMP_GE(gps, js)) + return; /* No stall or GP completed since entering function. */ + rnp = rdp->mynode; + jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3; + if (rcu_gp_in_progress() && + (READ_ONCE(rnp->qsmask) & rdp->grpmask) && + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { + + /* We haven't checked in, so go dump stack. */ + print_cpu_stall(); + + } else if (rcu_gp_in_progress() && + ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) && + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) { + + /* They had a few time units to dump stack, so complain. */ + print_other_cpu_stall(gs2); + } +} + +////////////////////////////////////////////////////////////////////////////// +// +// RCU forward-progress mechanisms, including of callback invocation. + + +/* + * Show the state of the grace-period kthreads. + */ +void show_rcu_gp_kthreads(void) +{ + int cpu; + unsigned long j; + unsigned long ja; + unsigned long jr; + unsigned long jw; + struct rcu_data *rdp; + struct rcu_node *rnp; + + j = jiffies; + ja = j - READ_ONCE(rcu_state.gp_activity); + jr = j - READ_ONCE(rcu_state.gp_req_activity); + jw = j - READ_ONCE(rcu_state.gp_wake_time); + pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n", + rcu_state.name, gp_state_getname(rcu_state.gp_state), + rcu_state.gp_state, + rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL, + ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq), + (long)READ_ONCE(rcu_state.gp_seq), + (long)READ_ONCE(rcu_get_root()->gp_seq_needed), + READ_ONCE(rcu_state.gp_flags)); + rcu_for_each_node_breadth_first(rnp) { + if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed)) + continue; + pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n", + rnp->grplo, rnp->grphi, (long)rnp->gp_seq, + (long)rnp->gp_seq_needed); + if (!rcu_is_leaf_node(rnp)) + continue; + for_each_leaf_node_possible_cpu(rnp, cpu) { + rdp = per_cpu_ptr(&rcu_data, cpu); + if (rdp->gpwrap || + ULONG_CMP_GE(rcu_state.gp_seq, + rdp->gp_seq_needed)) + continue; + pr_info("\tcpu %d ->gp_seq_needed %ld\n", + cpu, (long)rdp->gp_seq_needed); + } + } + /* sched_show_task(rcu_state.gp_kthread); */ +} +EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); + +/* + * This function checks for grace-period requests that fail to motivate + * RCU to come out of its idle mode. + */ +static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, + const unsigned long gpssdelay) +{ + unsigned long flags; + unsigned long j; + struct rcu_node *rnp_root = rcu_get_root(); + static atomic_t warned = ATOMIC_INIT(0); + + if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() || + ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed)) + return; + j = jiffies; /* Expensive access, and in common case don't get here. 
*/ + if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || + time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || + atomic_read(&warned)) + return; + + raw_spin_lock_irqsave_rcu_node(rnp, flags); + j = jiffies; + if (rcu_gp_in_progress() || + ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || + time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) || + time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) || + atomic_read(&warned)) { + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + return; + } + /* Hold onto the leaf lock to make others see warned==1. */ + + if (rnp_root != rnp) + raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */ + j = jiffies; + if (rcu_gp_in_progress() || + ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) || + time_before(j, rcu_state.gp_req_activity + gpssdelay) || + time_before(j, rcu_state.gp_activity + gpssdelay) || + atomic_xchg(&warned, 1)) { + raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */ + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + return; + } + WARN_ON(1); + if (rnp_root != rnp) + raw_spin_unlock_rcu_node(rnp_root); + raw_spin_unlock_irqrestore_rcu_node(rnp, flags); + show_rcu_gp_kthreads(); +} + +/* + * Do a forward-progress check for rcutorture. This is normally invoked + * due to an OOM event. The argument "j" gives the time period during + * which rcutorture would like progress to have been made. + */ +void rcu_fwd_progress_check(unsigned long j) +{ + unsigned long cbs; + int cpu; + unsigned long max_cbs = 0; + int max_cpu = -1; + struct rcu_data *rdp; + + if (rcu_gp_in_progress()) { + pr_info("%s: GP age %lu jiffies\n", + __func__, jiffies - rcu_state.gp_start); + show_rcu_gp_kthreads(); + } else { + pr_info("%s: Last GP end %lu jiffies ago\n", + __func__, jiffies - rcu_state.gp_end); + preempt_disable(); + rdp = this_cpu_ptr(&rcu_data); + rcu_check_gp_start_stall(rdp->mynode, rdp, j); + preempt_enable(); + } + for_each_possible_cpu(cpu) { + cbs = rcu_get_n_cbs_cpu(cpu); + if (!cbs) + continue; + if (max_cpu < 0) + pr_info("%s: callbacks", __func__); + pr_cont(" %d: %lu", cpu, cbs); + if (cbs <= max_cbs) + continue; + max_cbs = cbs; + max_cpu = cpu; + } + if (max_cpu >= 0) + pr_cont("\n"); +} +EXPORT_SYMBOL_GPL(rcu_fwd_progress_check); + +/* Commandeer a sysrq key to dump RCU's tree. */ +static bool sysrq_rcu; +module_param(sysrq_rcu, bool, 0444); + +/* Dump grace-period-request information due to commandeered sysrq. */ +static void sysrq_show_rcu(int key) +{ + show_rcu_gp_kthreads(); +} + +static struct sysrq_key_op sysrq_rcudump_op = { + .handler = sysrq_show_rcu, + .help_msg = "show-rcu(y)", + .action_msg = "Show RCU tree", + .enable_mask = SYSRQ_ENABLE_DUMP, +}; + +static int __init rcu_sysrq_init(void) +{ + if (sysrq_rcu) + return register_sysrq_key('y', &sysrq_rcudump_op); + return 0; +} +early_initcall(rcu_sysrq_init); diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index cbaa976c5945..c3bf44ba42e5 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -424,68 +424,11 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read); #endif #ifdef CONFIG_RCU_STALL_COMMON - -#ifdef CONFIG_PROVE_RCU -#define RCU_STALL_DELAY_DELTA (5 * HZ) -#else -#define RCU_STALL_DELAY_DELTA 0 -#endif - int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. 
*/ EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress); -static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; - module_param(rcu_cpu_stall_suppress, int, 0644); +int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; module_param(rcu_cpu_stall_timeout, int, 0644); - -int rcu_jiffies_till_stall_check(void) -{ - int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); - - /* - * Limit check must be consistent with the Kconfig limits - * for CONFIG_RCU_CPU_STALL_TIMEOUT. - */ - if (till_stall_check < 3) { - WRITE_ONCE(rcu_cpu_stall_timeout, 3); - till_stall_check = 3; - } else if (till_stall_check > 300) { - WRITE_ONCE(rcu_cpu_stall_timeout, 300); - till_stall_check = 300; - } - return till_stall_check * HZ + RCU_STALL_DELAY_DELTA; -} -EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check); - -void rcu_sysrq_start(void) -{ - if (!rcu_cpu_stall_suppress) - rcu_cpu_stall_suppress = 2; -} - -void rcu_sysrq_end(void) -{ - if (rcu_cpu_stall_suppress == 2) - rcu_cpu_stall_suppress = 0; -} - -static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) -{ - rcu_cpu_stall_suppress = 1; - return NOTIFY_DONE; -} - -static struct notifier_block rcu_panic_block = { - .notifier_call = rcu_panic, -}; - -static int __init check_cpu_stall_init(void) -{ - atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block); - return 0; -} -early_initcall(check_cpu_stall_init); - #endif /* #ifdef CONFIG_RCU_STALL_COMMON */ #ifdef CONFIG_TASKS_RCU diff --git a/kernel/resource.c b/kernel/resource.c index 92190f62ebc5..8c15f846e8ef 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -520,21 +520,20 @@ EXPORT_SYMBOL_GPL(page_is_ram); int region_intersects(resource_size_t start, size_t size, unsigned long flags, unsigned long desc) { - resource_size_t end = start + size - 1; + struct resource res; int type = 0; int other = 0; struct resource *p; + res.start = start; + res.end = start + size - 1; + read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { bool is_type = (((p->flags & flags) == flags) && ((desc == IORES_DESC_NONE) || (desc == p->desc))); - if (start >= p->start && start <= p->end) - is_type ? type++ : other++; - if (end >= p->start && end <= p->end) - is_type ? type++ : other++; - if (p->start >= start && p->end <= end) + if (resource_overlaps(p, &res)) is_type ? type++ : other++; } read_unlock(&resource_lock); diff --git a/kernel/rseq.c b/kernel/rseq.c index 25e9a7b60eba..9424ee90589e 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -254,8 +254,7 @@ static int rseq_ip_fixup(struct pt_regs *regs) * - signal delivery, * and return to user-space. * - * This is how we can ensure that the entire rseq critical section, - * consisting of both the C part and the assembly instruction sequence, + * This is how we can ensure that the entire rseq critical section * will issue the commit instruction only if executed atomically with * respect to other threads scheduled on the same CPU, and with respect * to signal handlers. @@ -314,7 +313,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, /* Unregister rseq for current thread. 
*/ if (current->rseq != rseq || !current->rseq) return -EINVAL; - if (current->rseq_len != rseq_len) + if (rseq_len != sizeof(*rseq)) return -EINVAL; if (current->rseq_sig != sig) return -EPERM; @@ -322,7 +321,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, if (ret) return ret; current->rseq = NULL; - current->rseq_len = 0; current->rseq_sig = 0; return 0; } @@ -336,7 +334,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, * the provided address differs from the prior * one. */ - if (current->rseq != rseq || current->rseq_len != rseq_len) + if (current->rseq != rseq || rseq_len != sizeof(*rseq)) return -EINVAL; if (current->rseq_sig != sig) return -EPERM; @@ -354,7 +352,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, if (!access_ok(rseq, rseq_len)) return -EFAULT; current->rseq = rseq; - current->rseq_len = rseq_len; current->rseq_sig = sig; /* * If rseq was previously inactive, and has just been diff --git a/kernel/sched/core.c b/kernel/sched/core.c index cef22c5499a8..102dfcf0a29a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1155,7 +1155,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, /* Need help from migration thread: drop lock and wait. */ task_rq_unlock(rq, p, &rf); stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); - tlb_migrate_finish(p->mm); return 0; } else if (task_on_rq_queued(p)) { /* diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 5c41ea367422..3638d2377e3c 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -771,6 +771,7 @@ out: return 0; fail: + kobject_put(&tunables->attr_set.kobj); policy->governor_data = NULL; sugov_tunables_free(tunables); diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c index f8edee9c792d..27bafc1e271e 100644 --- a/kernel/stacktrace.c +++ b/kernel/stacktrace.c @@ -5,41 +5,56 @@ * * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> */ +#include <linux/sched/task_stack.h> +#include <linux/sched/debug.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> -void print_stack_trace(struct stack_trace *trace, int spaces) +/** + * stack_trace_print - Print the entries in the stack trace + * @entries: Pointer to storage array + * @nr_entries: Number of entries in the storage array + * @spaces: Number of leading spaces to print + */ +void stack_trace_print(unsigned long *entries, unsigned int nr_entries, + int spaces) { - int i; + unsigned int i; - if (WARN_ON(!trace->entries)) + if (WARN_ON(!entries)) return; - for (i = 0; i < trace->nr_entries; i++) - printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]); + for (i = 0; i < nr_entries; i++) + printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]); } -EXPORT_SYMBOL_GPL(print_stack_trace); +EXPORT_SYMBOL_GPL(stack_trace_print); -int snprint_stack_trace(char *buf, size_t size, - struct stack_trace *trace, int spaces) +/** + * stack_trace_snprint - Print the entries in the stack trace into a buffer + * @buf: Pointer to the print buffer + * @size: Size of the print buffer + * @entries: Pointer to storage array + * @nr_entries: Number of entries in the storage array + * @spaces: Number of leading spaces to print + * + * Return: Number of bytes printed. 
+ */ +int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, + unsigned int nr_entries, int spaces) { - int i; - int generated; - int total = 0; + unsigned int generated, i, total = 0; - if (WARN_ON(!trace->entries)) + if (WARN_ON(!entries)) return 0; - for (i = 0; i < trace->nr_entries; i++) { + for (i = 0; i < nr_entries && size; i++) { generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ', - (void *)trace->entries[i]); + (void *)entries[i]); total += generated; - - /* Assume that generated isn't a negative number */ if (generated >= size) { buf += size; size = 0; @@ -51,7 +66,176 @@ int snprint_stack_trace(char *buf, size_t size, return total; } -EXPORT_SYMBOL_GPL(snprint_stack_trace); +EXPORT_SYMBOL_GPL(stack_trace_snprint); + +#ifdef CONFIG_ARCH_STACKWALK + +struct stacktrace_cookie { + unsigned long *store; + unsigned int size; + unsigned int skip; + unsigned int len; +}; + +static bool stack_trace_consume_entry(void *cookie, unsigned long addr, + bool reliable) +{ + struct stacktrace_cookie *c = cookie; + + if (c->len >= c->size) + return false; + + if (c->skip > 0) { + c->skip--; + return true; + } + c->store[c->len++] = addr; + return c->len < c->size; +} + +static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr, + bool reliable) +{ + if (in_sched_functions(addr)) + return true; + return stack_trace_consume_entry(cookie, addr, reliable); +} + +/** + * stack_trace_save - Save a stack trace into a storage array + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored. + */ +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr) +{ + stack_trace_consume_fn consume_entry = stack_trace_consume_entry; + struct stacktrace_cookie c = { + .store = store, + .size = size, + .skip = skipnr + 1, + }; + + arch_stack_walk(consume_entry, &c, current, NULL); + return c.len; +} +EXPORT_SYMBOL_GPL(stack_trace_save); + +/** + * stack_trace_save_tsk - Save a task stack trace into a storage array + * @task: The task to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored. + */ +unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store, + unsigned int size, unsigned int skipnr) +{ + stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched; + struct stacktrace_cookie c = { + .store = store, + .size = size, + .skip = skipnr + 1, + }; + + if (!try_get_task_stack(tsk)) + return 0; + + arch_stack_walk(consume_entry, &c, tsk, NULL); + put_task_stack(tsk); + return c.len; +} + +/** + * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array + * @regs: Pointer to pt_regs to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored. 
+ */ +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr) +{ + stack_trace_consume_fn consume_entry = stack_trace_consume_entry; + struct stacktrace_cookie c = { + .store = store, + .size = size, + .skip = skipnr, + }; + + arch_stack_walk(consume_entry, &c, current, regs); + return c.len; +} + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +/** + * stack_trace_save_tsk_reliable - Save task stack with verification + * @tsk: Pointer to the task to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * + * Return: An error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is + * reliable and returns the number of entries stored. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. + */ +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size) +{ + stack_trace_consume_fn consume_entry = stack_trace_consume_entry; + struct stacktrace_cookie c = { + .store = store, + .size = size, + }; + int ret; + + /* + * If the task doesn't have a stack (e.g., a zombie), the stack is + * "reliably" empty. + */ + if (!try_get_task_stack(tsk)) + return 0; + + ret = arch_stack_walk_reliable(consume_entry, &c, tsk); + put_task_stack(tsk); + return ret; +} +#endif + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +/** + * stack_trace_save_user - Save a user space stack trace into a storage array + * @store: Pointer to storage array + * @size: Size of the storage array + * + * Return: Number of trace entries stored. + */ +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) +{ + stack_trace_consume_fn consume_entry = stack_trace_consume_entry; + struct stacktrace_cookie c = { + .store = store, + .size = size, + }; + + /* Trace user stack if not a kernel thread */ + if (!current->mm) + return 0; + + arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); + return c.len; +} +#endif + +#else /* CONFIG_ARCH_STACKWALK */ /* * Architectures that do not implement save_stack_trace_*() @@ -77,3 +261,118 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk, WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n"); return -ENOSYS; } + +/** + * stack_trace_save - Save a stack trace into a storage array + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored + */ +unsigned int stack_trace_save(unsigned long *store, unsigned int size, + unsigned int skipnr) +{ + struct stack_trace trace = { + .entries = store, + .max_entries = size, + .skip = skipnr + 1, + }; + + save_stack_trace(&trace); + return trace.nr_entries; +} +EXPORT_SYMBOL_GPL(stack_trace_save); + +/** + * stack_trace_save_tsk - Save a task stack trace into a storage array + * @task: The task to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored + */ +unsigned int stack_trace_save_tsk(struct task_struct *task, + unsigned long *store, unsigned int size, + unsigned int skipnr) +{ + struct stack_trace trace = { + .entries = store, + .max_entries = size, + .skip = skipnr + 1, + }; + + save_stack_trace_tsk(task, &trace); + return trace.nr_entries; +} + +/** + * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage 
array + * @regs: Pointer to pt_regs to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * @skipnr: Number of entries to skip at the start of the stack trace + * + * Return: Number of trace entries stored + */ +unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, + unsigned int size, unsigned int skipnr) +{ + struct stack_trace trace = { + .entries = store, + .max_entries = size, + .skip = skipnr, + }; + + save_stack_trace_regs(regs, &trace); + return trace.nr_entries; +} + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +/** + * stack_trace_save_tsk_reliable - Save task stack with verification + * @tsk: Pointer to the task to examine + * @store: Pointer to storage array + * @size: Size of the storage array + * + * Return: An error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is + * reliable and returns the number of entries stored. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. + */ +int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store, + unsigned int size) +{ + struct stack_trace trace = { + .entries = store, + .max_entries = size, + }; + int ret = save_stack_trace_tsk_reliable(tsk, &trace); + + return ret ? ret : trace.nr_entries; +} +#endif + +#ifdef CONFIG_USER_STACKTRACE_SUPPORT +/** + * stack_trace_save_user - Save a user space stack trace into a storage array + * @store: Pointer to storage array + * @size: Size of the storage array + * + * Return: Number of trace entries stored + */ +unsigned int stack_trace_save_user(unsigned long *store, unsigned int size) +{ + struct stack_trace trace = { + .entries = store, + .max_entries = size, + }; + + save_stack_trace_user(&trace); + return trace.nr_entries; +} +#endif /* CONFIG_USER_STACKTRACE_SUPPORT */ + +#endif /* !CONFIG_ARCH_STACKWALK */ diff --git a/kernel/torture.c b/kernel/torture.c index 8faa1a9aaeb9..17b2be9bde12 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -88,6 +88,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes, if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu)) return false; + if (num_online_cpus() <= 1) + return false; /* Can't offline the last CPU. 
*/ if (verbose > 1) pr_alert("%s" TORTURE_FLAG diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ca1ee656d6d8..ec439999f387 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -159,6 +159,8 @@ static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int tracing_set_tracer(struct trace_array *tr, const char *buf); +static void ftrace_trace_userstack(struct ring_buffer *buffer, + unsigned long flags, int pc); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; @@ -2752,12 +2754,21 @@ trace_function(struct trace_array *tr, #ifdef CONFIG_STACKTRACE -#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) +/* Allow 4 levels of nesting: normal, softirq, irq, NMI */ +#define FTRACE_KSTACK_NESTING 4 + +#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING) + struct ftrace_stack { - unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; + unsigned long calls[FTRACE_KSTACK_ENTRIES]; +}; + + +struct ftrace_stacks { + struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; }; -static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); +static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, @@ -2766,13 +2777,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, { struct trace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; + unsigned int size, nr_entries; + struct ftrace_stack *fstack; struct stack_entry *entry; - struct stack_trace trace; - int use_stack; - int size = FTRACE_STACK_ENTRIES; - - trace.nr_entries = 0; - trace.skip = skip; + int stackidx; /* * Add one, for this function and the call to save_stack_trace() @@ -2780,7 +2788,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, */ #ifndef CONFIG_UNWINDER_ORC if (!regs) - trace.skip++; + skip++; #endif /* @@ -2791,53 +2799,40 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer, */ preempt_disable_notrace(); - use_stack = __this_cpu_inc_return(ftrace_stack_reserve); + stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; + + /* This should never happen. If it does, yell once and skip */ + if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING)) + goto out; + /* - * We don't need any atomic variables, just a barrier. - * If an interrupt comes in, we don't care, because it would - * have exited and put the counter back to what we want. - * We just need a barrier to keep gcc from moving things - * around. + * The above __this_cpu_inc_return() is 'atomic' cpu local. An + * interrupt will either see the value pre increment or post + * increment. If the interrupt happens pre increment it will have + * restored the counter when it returns. We just need a barrier to + * keep gcc from moving things around. 
*/ barrier(); - if (use_stack == 1) { - trace.entries = this_cpu_ptr(ftrace_stack.calls); - trace.max_entries = FTRACE_STACK_MAX_ENTRIES; - if (regs) - save_stack_trace_regs(regs, &trace); - else - save_stack_trace(&trace); - - if (trace.nr_entries > size) - size = trace.nr_entries; - } else - /* From now on, use_stack is a boolean */ - use_stack = 0; + fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; + size = ARRAY_SIZE(fstack->calls); - size *= sizeof(unsigned long); + if (regs) { + nr_entries = stack_trace_save_regs(regs, fstack->calls, + size, skip); + } else { + nr_entries = stack_trace_save(fstack->calls, size, skip); + } + size = nr_entries * sizeof(unsigned long); event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); - memset(&entry->caller, 0, size); - - if (use_stack) - memcpy(&entry->caller, trace.entries, - trace.nr_entries * sizeof(unsigned long)); - else { - trace.max_entries = FTRACE_STACK_ENTRIES; - trace.entries = entry->caller; - if (regs) - save_stack_trace_regs(regs, &trace); - else - save_stack_trace(&trace); - } - - entry->size = trace.nr_entries; + memcpy(&entry->caller, fstack->calls, size); + entry->size = nr_entries; if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); @@ -2907,15 +2902,15 @@ void trace_dump_stack(int skip) } EXPORT_SYMBOL_GPL(trace_dump_stack); +#ifdef CONFIG_USER_STACKTRACE_SUPPORT static DEFINE_PER_CPU(int, user_stack_count); -void +static void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { struct trace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; - struct stack_trace trace; if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) return; @@ -2946,12 +2941,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); - trace.nr_entries = 0; - trace.max_entries = FTRACE_STACK_ENTRIES; - trace.skip = 0; - trace.entries = entry->caller; - - save_stack_trace_user(&trace); + stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); if (!call_filter_check_discard(call, entry, buffer, event)) __buffer_unlock_commit(buffer, event); @@ -2960,13 +2950,12 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) out: preempt_enable(); } - -#ifdef UNUSED -static void __trace_userstack(struct trace_array *tr, unsigned long flags) +#else /* CONFIG_USER_STACKTRACE_SUPPORT */ +static void ftrace_trace_userstack(struct ring_buffer *buffer, + unsigned long flags, int pc) { - ftrace_trace_userstack(tr, flags, preempt_count()); } -#endif /* UNUSED */ +#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ #endif /* CONFIG_STACKTRACE */ diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index d80cee49e0eb..639047b259d7 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -782,17 +782,9 @@ void update_max_tr_single(struct trace_array *tr, #endif /* CONFIG_TRACER_MAX_TRACE */ #ifdef CONFIG_STACKTRACE -void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, - int pc); - void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc); #else -static inline void ftrace_trace_userstack(struct ring_buffer *buffer, - unsigned long flags, int pc) -{ -} - static inline void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int pc) { diff 
--git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 4ad967453b6f..3ea65cdff30d 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect, int is_constant) { + unsigned long flags = user_access_save(); + /* A constant is always correct */ if (is_constant) { f->constant++; @@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, f->data.correct++; else f->data.incorrect++; + + user_access_restore(flags); } EXPORT_SYMBOL(ftrace_likely_update); diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 795aa2038377..a1d20421f4b0 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5186,7 +5186,6 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec, u64 var_ref_vals[TRACING_MAP_VARS_MAX]; char compound_key[HIST_KEY_SIZE_MAX]; struct tracing_map_elt *elt = NULL; - struct stack_trace stacktrace; struct hist_field *key_field; u64 field_contents; void *key = NULL; @@ -5198,14 +5197,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec, key_field = hist_data->fields[i]; if (key_field->flags & HIST_FIELD_FL_STACKTRACE) { - stacktrace.max_entries = HIST_STACKTRACE_DEPTH; - stacktrace.entries = entries; - stacktrace.nr_entries = 0; - stacktrace.skip = HIST_STACKTRACE_SKIP; - - memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE); - save_stack_trace(&stacktrace); - + memset(entries, 0, HIST_STACKTRACE_SIZE); + stack_trace_save(entries, HIST_STACKTRACE_DEPTH, + HIST_STACKTRACE_SKIP); key = entries; } else { field_contents = key_field->fn(key_field, elt, rbe, rec); @@ -5246,7 +5240,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m, unsigned int i; for (i = 0; i < max_entries; i++) { - if (stacktrace_entries[i] == ULONG_MAX) + if (!stacktrace_entries[i]) return; seq_printf(m, "%*c", 1 + spaces, ' '); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index eec648a0d673..5d16f73898db 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -18,44 +18,32 @@ #include "trace.h" -static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = - { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; -unsigned stack_trace_index[STACK_TRACE_ENTRIES]; +#define STACK_TRACE_ENTRIES 500 -/* - * Reserve one entry for the passed in ip. This will allow - * us to remove most or all of the stack size overhead - * added by the stack tracer itself. 
- */ -struct stack_trace stack_trace_max = { - .max_entries = STACK_TRACE_ENTRIES - 1, - .entries = &stack_dump_trace[0], -}; +static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES]; +static unsigned stack_trace_index[STACK_TRACE_ENTRIES]; -unsigned long stack_trace_max_size; -arch_spinlock_t stack_trace_max_lock = +static unsigned int stack_trace_nr_entries; +static unsigned long stack_trace_max_size; +static arch_spinlock_t stack_trace_max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; DEFINE_PER_CPU(int, disable_stack_tracer); static DEFINE_MUTEX(stack_sysctl_mutex); int stack_tracer_enabled; -static int last_stack_tracer_enabled; -void stack_trace_print(void) +static void print_max_stack(void) { long i; int size; pr_emerg(" Depth Size Location (%d entries)\n" " ----- ---- --------\n", - stack_trace_max.nr_entries); + stack_trace_nr_entries); - for (i = 0; i < stack_trace_max.nr_entries; i++) { - if (stack_dump_trace[i] == ULONG_MAX) - break; - if (i+1 == stack_trace_max.nr_entries || - stack_dump_trace[i+1] == ULONG_MAX) + for (i = 0; i < stack_trace_nr_entries; i++) { + if (i + 1 == stack_trace_nr_entries) size = stack_trace_index[i]; else size = stack_trace_index[i] - stack_trace_index[i+1]; @@ -65,16 +53,7 @@ void stack_trace_print(void) } } -/* - * When arch-specific code overrides this function, the following - * data should be filled up, assuming stack_trace_max_lock is held to - * prevent concurrent updates. - * stack_trace_index[] - * stack_trace_max - * stack_trace_max_size - */ -void __weak -check_stack(unsigned long ip, unsigned long *stack) +static void check_stack(unsigned long ip, unsigned long *stack) { unsigned long this_size, flags; unsigned long *p, *top, *start; static int tracer_frame; @@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack) stack_trace_max_size = this_size; - stack_trace_max.nr_entries = 0; - stack_trace_max.skip = 0; - - save_stack_trace(&stack_trace_max); + stack_trace_nr_entries = stack_trace_save(stack_dump_trace, + ARRAY_SIZE(stack_dump_trace) - 1, + 0); /* Skip over the overhead of the stack tracer itself */ - for (i = 0; i < stack_trace_max.nr_entries; i++) { + for (i = 0; i < stack_trace_nr_entries; i++) { if (stack_dump_trace[i] == ip) break; } @@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack) * Some archs may not have the passed in ip in the dump. * If that happens, we need to show everything. */ - if (i == stack_trace_max.nr_entries) + if (i == stack_trace_nr_entries) i = 0; /* @@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack) * loop will only happen once. This code only takes place * on a new max, so it is far from a fast path. */ - while (i < stack_trace_max.nr_entries) { + while (i < stack_trace_nr_entries) { int found = 0; stack_trace_index[x] = this_size; p = start; - for (; p < top && i < stack_trace_max.nr_entries; p++) { - if (stack_dump_trace[i] == ULONG_MAX) - break; + for (; p < top && i < stack_trace_nr_entries; p++) { /* * The READ_ONCE_NOCHECK is used to let KASAN know that * this is not a stack-out-of-bounds error. 
@@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack) i++; } - stack_trace_max.nr_entries = x; - for (; x < i; x++) - stack_dump_trace[x] = ULONG_MAX; + stack_trace_nr_entries = x; if (task_stack_end_corrupted(current)) { - stack_trace_print(); + print_max_stack(); BUG(); } @@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos) { long n = *pos - 1; - if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX) + if (n >= stack_trace_nr_entries) return NULL; m->private = (void *)n; @@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v) seq_printf(m, " Depth Size Location" " (%d entries)\n" " ----- ---- --------\n", - stack_trace_max.nr_entries); + stack_trace_nr_entries); if (!stack_tracer_enabled && !stack_trace_max_size) print_disabled(m); @@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v) i = *(long *)v; - if (i >= stack_trace_max.nr_entries || - stack_dump_trace[i] == ULONG_MAX) + if (i >= stack_trace_nr_entries) return 0; - if (i+1 == stack_trace_max.nr_entries || - stack_dump_trace[i+1] == ULONG_MAX) + if (i + 1 == stack_trace_nr_entries) size = stack_trace_index[i]; else size = stack_trace_index[i] - stack_trace_index[i+1]; @@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { + int was_enabled; int ret; mutex_lock(&stack_sysctl_mutex); + was_enabled = !!stack_tracer_enabled; ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret || !write || - (last_stack_tracer_enabled == !!stack_tracer_enabled)) + if (ret || !write || (was_enabled == !!stack_tracer_enabled)) goto out; - last_stack_tracer_enabled = !!stack_tracer_enabled; - if (stack_tracer_enabled) register_ftrace_function(&trace_ops); else unregister_ftrace_function(&trace_ops); - out: mutex_unlock(&stack_sysctl_mutex); return ret; @@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str) strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE); stack_tracer_enabled = 1; - last_stack_tracer_enabled = 1; return 1; } __setup("stacktrace", enable_stacktrace); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6a5787233113..7f9e7b9306fe 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -590,7 +590,7 @@ static void lockup_detector_reconfigure(void) * Create the watchdog thread infrastructure and configure the detector(s). * * The threads are not unparked as watchdog_allowed_mask is empty. When - * the threads are sucessfully initialized, take the proper locks and + * the threads are successfully initialized, take the proper locks and * unpark the threads in the watchdog_cpumask if the watchdog is enabled. */ static __init void lockup_detector_setup(void) diff --git a/lib/Kconfig b/lib/Kconfig index a9e56539bd11..e86975bfca6a 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE config ARCH_HAS_UACCESS_MCSAFE bool +# Temporary. 
Goes away when all archs are cleaned up +config ARCH_STACKWALK + bool + config STACKDEPOT bool select STACKTRACE diff --git a/lib/Makefile b/lib/Makefile index 3b08673e8881..e16e7aadc41a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -17,6 +17,17 @@ KCOV_INSTRUMENT_list_debug.o := n KCOV_INSTRUMENT_debugobjects.o := n KCOV_INSTRUMENT_dynamic_debug.o := n +# Early boot use of cmdline, don't instrument it +ifdef CONFIG_AMD_MEM_ENCRYPT +KASAN_SANITIZE_string.o := n + +ifdef CONFIG_FUNCTION_TRACER +CFLAGS_REMOVE_string.o = -pg +endif + +CFLAGS_string.o := $(call cc-option, -fno-stack-protector) +endif + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ idr.o int_sqrt.o extable.o \ @@ -268,6 +279,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n +CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) obj-$(CONFIG_SBITMAP) += sbitmap.o diff --git a/lib/fault-inject.c b/lib/fault-inject.c index cf7b129b0b2b..e26aa4f65eb9 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task) static bool fail_stacktrace(struct fault_attr *attr) { - struct stack_trace trace; int depth = attr->stacktrace_depth; unsigned long entries[MAX_STACK_TRACE_DEPTH]; - int n; + int n, nr_entries; bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX); if (depth == 0) return found; - trace.nr_entries = 0; - trace.entries = entries; - trace.max_entries = depth; - trace.skip = 1; - - save_stack_trace(&trace); - for (n = 0; n < trace.nr_entries; n++) { + nr_entries = stack_trace_save(entries, depth, 1); + for (n = 0; n < nr_entries; n++) { if (attr->reject_start <= entries[n] && entries[n] < attr->reject_end) return false; diff --git a/lib/stackdepot.c b/lib/stackdepot.c index e513459a5601..605c61f65d94 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket, return NULL; } -void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) +/** + * stack_depot_fetch - Fetch stack entries from a depot + * + * @handle: Stack depot handle which was returned from + * stack_depot_save(). + * @entries: Pointer to store the entries address + * + * Return: The number of trace entries for this depot. + */ +unsigned int stack_depot_fetch(depot_stack_handle_t handle, + unsigned long **entries) { union handle_parts parts = { .handle = handle }; void *slab = stack_slabs[parts.slabindex]; size_t offset = parts.offset << STACK_ALLOC_ALIGN; struct stack_record *stack = slab + offset; - trace->nr_entries = trace->max_entries = stack->size; - trace->entries = stack->entries; - trace->skip = 0; + *entries = stack->entries; + return stack->size; } -EXPORT_SYMBOL_GPL(depot_fetch_stack); +EXPORT_SYMBOL_GPL(stack_depot_fetch); /** - * depot_save_stack - save stack in a stack depot. - * @trace - the stacktrace to save. - * @alloc_flags - flags for allocating additional memory if required. + * stack_depot_save - Save a stack trace from an array + * + * @entries: Pointer to storage array + * @nr_entries: Size of the storage array + * @alloc_flags: Allocation gfp flags * - * Returns the handle of the stack struct stored in depot. 
+ * Return: The handle of the stack struct stored in depot */ -depot_stack_handle_t depot_save_stack(struct stack_trace *trace, - gfp_t alloc_flags) +depot_stack_handle_t stack_depot_save(unsigned long *entries, + unsigned int nr_entries, + gfp_t alloc_flags) { - u32 hash; - depot_stack_handle_t retval = 0; struct stack_record *found = NULL, **bucket; - unsigned long flags; + depot_stack_handle_t retval = 0; struct page *page = NULL; void *prealloc = NULL; + unsigned long flags; + u32 hash; - if (unlikely(trace->nr_entries == 0)) + if (unlikely(nr_entries == 0)) goto fast_exit; - hash = hash_stack(trace->entries, trace->nr_entries); + hash = hash_stack(entries, nr_entries); bucket = &stack_table[hash & STACK_HASH_MASK]; /* @@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, * The smp_load_acquire() here pairs with smp_store_release() to * |bucket| below. */ - found = find_stack(smp_load_acquire(bucket), trace->entries, - trace->nr_entries, hash); + found = find_stack(smp_load_acquire(bucket), entries, + nr_entries, hash); if (found) goto exit; @@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, spin_lock_irqsave(&depot_lock, flags); - found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); + found = find_stack(*bucket, entries, nr_entries, hash); if (!found) { struct stack_record *new = - depot_alloc_stack(trace->entries, trace->nr_entries, + depot_alloc_stack(entries, nr_entries, hash, &prealloc, alloc_flags); if (new) { new->next = *bucket; @@ -297,4 +309,4 @@ exit: fast_exit: return retval; } -EXPORT_SYMBOL_GPL(depot_save_stack); +EXPORT_SYMBOL_GPL(stack_depot_save); diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index 58eacd41526c..023ba9f3b99f 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c @@ -23,10 +23,11 @@ * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ -static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) +static inline long do_strncpy_from_user(char *dst, const char __user *src, + unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long res = 0; + unsigned long res = 0; /* * Truncate 'max' to the user-specified limit, so that diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 1c1a1b0e38a5..7f2db3fe311f 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -28,7 +28,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; - long align, res = 0; + unsigned long align, res = 0; unsigned long c; /* @@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Do everything aligned. But that means that we * need to also expand the maximum.. 
*/ - align = (sizeof(long) - 1) & (unsigned long)src; + align = (sizeof(unsigned long) - 1) & (unsigned long)src; src -= align; max += align; diff --git a/lib/ubsan.c b/lib/ubsan.c index e4162f59a81c..ecc179338094 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> +#include <linux/uaccess.h> #include "ubsan.h" @@ -86,11 +87,13 @@ static bool is_inline_int(struct type_descriptor *type) return bits <= inline_bits; } -static s_max get_signed_val(struct type_descriptor *type, unsigned long val) +static s_max get_signed_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) { unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); - return ((s_max)val) << extra_bits >> extra_bits; + unsigned long ulong_val = (unsigned long)val; + + return ((s_max)ulong_val) << extra_bits >> extra_bits; } if (type_bit_width(type) == 64) @@ -99,15 +102,15 @@ static s_max get_signed_val(struct type_descriptor *type, unsigned long val) return *(s_max *)val; } -static bool val_is_negative(struct type_descriptor *type, unsigned long val) +static bool val_is_negative(struct type_descriptor *type, void *val) { return type_is_signed(type) && get_signed_val(type, val) < 0; } -static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) +static u_max get_unsigned_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) - return val; + return (unsigned long)val; if (type_bit_width(type) == 64) return *(u64 *)val; @@ -116,7 +119,7 @@ static u_max get_unsigned_val(struct type_descriptor *type, unsigned long val) } static void val_to_string(char *str, size_t size, struct type_descriptor *type, - unsigned long value) + void *value) { if (type_is_int(type)) { if (type_bit_width(type) == 128) { @@ -163,8 +166,8 @@ static void ubsan_epilogue(unsigned long *flags) current->in_ubsan--; } -static void handle_overflow(struct overflow_data *data, unsigned long lhs, - unsigned long rhs, char op) +static void handle_overflow(struct overflow_data *data, void *lhs, + void *rhs, char op) { struct type_descriptor *type = data->type; @@ -191,8 +194,7 @@ static void handle_overflow(struct overflow_data *data, unsigned long lhs, } void __ubsan_handle_add_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '+'); @@ -200,23 +202,21 @@ void __ubsan_handle_add_overflow(struct overflow_data *data, EXPORT_SYMBOL(__ubsan_handle_add_overflow); void __ubsan_handle_sub_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '-'); } EXPORT_SYMBOL(__ubsan_handle_sub_overflow); void __ubsan_handle_mul_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { handle_overflow(data, lhs, rhs, '*'); } EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, - unsigned long old_val) + void *old_val) { unsigned long flags; char old_val_str[VALUE_LENGTH]; @@ -237,8 +237,7 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, - unsigned long lhs, - unsigned long rhs) + void *lhs, void *rhs) { unsigned long flags; char rhs_val_str[VALUE_LENGTH]; @@ -313,6 +312,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data, static void ubsan_type_mismatch_common(struct 
type_mismatch_data_common *data, unsigned long ptr) { + unsigned long flags = user_access_save(); if (!ptr) handle_null_ptr_deref(data); @@ -320,10 +320,12 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, handle_misaligned_access(data, ptr); else handle_object_size_mismatch(data, ptr); + + user_access_restore(flags); } void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, - unsigned long ptr) + void *ptr) { struct type_mismatch_data_common common_data = { .location = &data->location, @@ -332,12 +334,12 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, .type_check_kind = data->type_check_kind }; - ubsan_type_mismatch_common(&common_data, ptr); + ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch); void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, - unsigned long ptr) + void *ptr) { struct type_mismatch_data_common common_data = { @@ -347,30 +349,11 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data, .type_check_kind = data->type_check_kind }; - ubsan_type_mismatch_common(&common_data, ptr); + ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); -void __ubsan_handle_vla_bound_not_positive(struct vla_bound_data *data, - unsigned long bound) -{ - unsigned long flags; - char bound_str[VALUE_LENGTH]; - - if (suppress_report(&data->location)) - return; - - ubsan_prologue(&data->location, &flags); - - val_to_string(bound_str, sizeof(bound_str), data->type, bound); - pr_err("variable length array bound value %s <= 0\n", bound_str); - - ubsan_epilogue(&flags); -} -EXPORT_SYMBOL(__ubsan_handle_vla_bound_not_positive); - -void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, - unsigned long index) +void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index) { unsigned long flags; char index_str[VALUE_LENGTH]; @@ -388,7 +371,7 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data, - unsigned long lhs, unsigned long rhs) + void *lhs, void *rhs) { unsigned long flags; struct type_descriptor *rhs_type = data->rhs_type; @@ -439,7 +422,7 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data) EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(struct invalid_value_data *data, - unsigned long val) + void *val) { unsigned long flags; char val_str[VALUE_LENGTH]; diff --git a/lib/ubsan.h b/lib/ubsan.h index f4d8d0bd4016..b8fa83864467 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -57,11 +57,6 @@ struct nonnull_arg_data { int arg_index; }; -struct vla_bound_data { - struct source_location location; - struct type_descriptor *type; -}; - struct out_of_bounds_data { struct source_location location; struct type_descriptor *array_type; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 165ea46bf149..b6a34b32d8ac 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1677,7 +1677,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, struct mm_struct *mm = tlb->mm; bool ret = false; - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = pmd_trans_huge_lock(pmd, vma); if (!ptl) @@ -1753,7 +1753,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t orig_pmd; 
spinlock_t *ptl; - tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE); + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6cdc7b2d9100..641cedfc8c0f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, * This is a hugetlb vma, all the pte entries should point * to huge page. */ - tlb_remove_check_page_size_change(tlb, sz); + tlb_change_page_size(tlb, sz); tlb_start_vma(tlb, vma); /* diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 5d1065efbd47..613dfe681e9f 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -2,11 +2,13 @@ KASAN_SANITIZE := n UBSAN_SANITIZE_common.o := n UBSAN_SANITIZE_generic.o := n +UBSAN_SANITIZE_generic_report.o := n UBSAN_SANITIZE_tags.o := n KCOV_INSTRUMENT := n CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_generic.o = -pg +CFLAGS_REMOVE_generic_report.o = -pg CFLAGS_REMOVE_tags.o = -pg # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 @@ -14,6 +16,7 @@ CFLAGS_REMOVE_tags.o = -pg CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) obj-$(CONFIG_KASAN) := common.o init.o report.o diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 80bbe62b16cd..36afcf64e016 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -36,6 +36,7 @@ #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/bug.h> +#include <linux/uaccess.h> #include "kasan.h" #include "../slab.h" @@ -48,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr) ptr < (unsigned long)&__softirqentry_text_end); } -static inline void filter_irq_stacks(struct stack_trace *trace) +static inline unsigned int filter_irq_stacks(unsigned long *entries, + unsigned int nr_entries) { - int i; + unsigned int i; - if (!trace->nr_entries) - return; - for (i = 0; i < trace->nr_entries; i++) - if (in_irqentry_text(trace->entries[i])) { + for (i = 0; i < nr_entries; i++) { + if (in_irqentry_text(entries[i])) { /* Include the irqentry function into the stack. 
*/ - trace->nr_entries = i + 1; - break; + return i + 1; } + } + return nr_entries; } static inline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[KASAN_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = KASAN_STACK_DEPTH, - .skip = 0 - }; + unsigned int nr_entries; - save_stack_trace(&trace); - filter_irq_stacks(&trace); - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries-1] == ULONG_MAX) - trace.nr_entries--; - - return depot_save_stack(&trace, flags); + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + nr_entries = filter_irq_stacks(entries, nr_entries); + return stack_depot_save(entries, nr_entries, flags); } static inline void set_track(struct kasan_track *track, gfp_t flags) @@ -614,6 +606,15 @@ void kasan_free_shadow(const struct vm_struct *vm) vfree(kasan_mem_to_shadow(vm->addr)); } +extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); + +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) +{ + unsigned long flags = user_access_save(); + __kasan_report(addr, size, is_write, ip); + user_access_restore(flags); +} + #ifdef CONFIG_MEMORY_HOTPLUG static bool shadow_mapped(unsigned long addr) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index ca9418fe9232..03a443579386 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix) { pr_err("%s by task %u:\n", prefix, track->pid); if (track->stack) { - struct stack_trace trace; + unsigned long *entries; + unsigned int nr_entries; - depot_fetch_stack(track->stack, &trace); - print_stack_trace(&trace, 0); + nr_entries = stack_depot_fetch(track->stack, &entries); + stack_trace_print(entries, nr_entries, 0); } else { pr_err("(stack is not available)\n"); } @@ -281,8 +282,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip) end_report(&flags); } -void kasan_report(unsigned long addr, size_t size, - bool is_write, unsigned long ip) +void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { struct kasan_access_info info; void *tagged_addr; diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 2e435b8142e5..e57bf810f798 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq, */ static void dump_object_info(struct kmemleak_object *object) { - struct stack_trace trace; - - trace.nr_entries = object->trace_len; - trace.entries = object->trace; - pr_notice("Object 0x%08lx (size %zu):\n", object->pointer, object->size); pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", @@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object) pr_notice(" flags = 0x%x\n", object->flags); pr_notice(" checksum = %u\n", object->checksum); pr_notice(" backtrace:\n"); - print_stack_trace(&trace, 4); + stack_trace_print(object->trace, object->trace_len, 4); } /* @@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali */ static int __save_stack_trace(unsigned long *trace) { - struct stack_trace stack_trace; - - stack_trace.max_entries = MAX_TRACE; - stack_trace.nr_entries = 0; - stack_trace.entries = trace; - stack_trace.skip = 2; - save_stack_trace(&stack_trace); - - return stack_trace.nr_entries; + return stack_trace_save(trace, MAX_TRACE, 2); } /* @@ -2021,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config); static void __init 
print_log_trace(struct early_log *log) { - struct stack_trace trace; - - trace.nr_entries = log->trace_len; - trace.entries = log->trace; - pr_notice("Early log backtrace:\n"); - print_stack_trace(&trace, 2); + stack_trace_print(log->trace, log->trace_len, 2); } /* diff --git a/mm/madvise.c b/mm/madvise.c index 21a7881a2db4..bb3a4554d5d5 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); diff --git a/mm/memory.c b/mm/memory.c index ab650c21bccd..36aac6844662 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb, * We add page table cache pages with PAGE_SIZE, * (see pte_free_tlb()), flush the tlb if we need */ - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); @@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t *pte; swp_entry_t entry; - tlb_remove_check_page_size_change(tlb, PAGE_SIZE); + tlb_change_page_size(tlb, PAGE_SIZE); again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); @@ -1155,7 +1155,7 @@ again: */ if (force_flush) { force_flush = 0; - tlb_flush_mmu_free(tlb); + tlb_flush_mmu(tlb); if (addr != end) goto again; } diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index f2f03c655807..99740e1dd273 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -11,7 +11,7 @@ #include <asm/pgalloc.h> #include <asm/tlb.h> -#ifdef HAVE_GENERIC_MMU_GATHER +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER static bool tlb_next_batch(struct mmu_gather *tlb) { @@ -41,35 +41,10 @@ static bool tlb_next_batch(struct mmu_gather *tlb) return true; } -void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - tlb->mm = mm; - - /* Is it from 0 to ~0? */ - tlb->fullmm = !(start | (end+1)); - tlb->need_flush_all = 0; - tlb->local.next = NULL; - tlb->local.nr = 0; - tlb->local.max = ARRAY_SIZE(tlb->__pages); - tlb->active = &tlb->local; - tlb->batch_count = 0; - -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb->batch = NULL; -#endif - tlb->page_size = 0; - - __tlb_reset_range(tlb); -} - -void tlb_flush_mmu_free(struct mmu_gather *tlb) +static void tlb_batch_pages_flush(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; -#ifdef CONFIG_HAVE_RCU_TABLE_FREE - tlb_table_flush(tlb); -#endif for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; @@ -77,31 +52,10 @@ void tlb_flush_mmu_free(struct mmu_gather *tlb) tlb->active = &tlb->local; } -void tlb_flush_mmu(struct mmu_gather *tlb) -{ - tlb_flush_mmu_tlbonly(tlb); - tlb_flush_mmu_free(tlb); -} - -/* tlb_finish_mmu - * Called at the end of the shootdown operation to free up any resources - * that were required. 
- */ -void arch_tlb_finish_mmu(struct mmu_gather *tlb, - unsigned long start, unsigned long end, bool force) +static void tlb_batch_list_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch, *next; - if (force) { - __tlb_reset_range(tlb); - __tlb_adjust_range(tlb, start, end - start); - } - - tlb_flush_mmu(tlb); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - for (batch = tlb->local.next; batch; batch = next) { next = batch->next; free_pages((unsigned long)batch, 0); @@ -109,19 +63,15 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb, tlb->local.next = NULL; } -/* __tlb_remove_page - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while - * handling the additional races in SMP caused by other CPUs caching valid - * mappings in their TLBs. Returns the number of free page slots left. - * When out of page slots we must call tlb_flush_mmu(). - *returns true if the caller should flush. - */ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); + +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE VM_WARN_ON(tlb->page_size != page_size); +#endif batch = tlb->active; /* @@ -139,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ return false; } -#endif /* HAVE_GENERIC_MMU_GATHER */ +#endif /* HAVE_MMU_GATHER_NO_GATHER */ #ifdef CONFIG_HAVE_RCU_TABLE_FREE @@ -152,7 +102,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_ */ static inline void tlb_table_invalidate(struct mmu_gather *tlb) { -#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE +#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE /* * Invalidate page-table caches used by hardware walkers. Then we still * need to RCU-sched wait while freeing the pages because software @@ -193,7 +143,7 @@ static void tlb_remove_table_rcu(struct rcu_head *head) free_page((unsigned long)batch); } -void tlb_table_flush(struct mmu_gather *tlb) +static void tlb_table_flush(struct mmu_gather *tlb) { struct mmu_table_batch **batch = &tlb->batch; @@ -225,6 +175,22 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ +static void tlb_flush_mmu_free(struct mmu_gather *tlb) +{ +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); +#endif +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb_batch_pages_flush(tlb); +#endif +} + +void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush_mmu_tlbonly(tlb); + tlb_flush_mmu_free(tlb); +} + /** * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down * @tlb: the mmu_gather structure to initialize @@ -240,10 +206,40 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { - arch_tlb_gather_mmu(tlb, mm, start, end); + tlb->mm = mm; + + /* Is it from 0 to ~0? 
*/ + tlb->fullmm = !(start | (end+1)); + +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb->need_flush_all = 0; + tlb->local.next = NULL; + tlb->local.nr = 0; + tlb->local.max = ARRAY_SIZE(tlb->__pages); + tlb->active = &tlb->local; + tlb->batch_count = 0; +#endif + +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; +#endif +#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE + tlb->page_size = 0; +#endif + + __tlb_reset_range(tlb); inc_tlb_flush_pending(tlb->mm); } +/** + * tlb_finish_mmu - finish an mmu_gather structure + * @tlb: the mmu_gather structure to finish + * @start: start of the region that will be removed from the page-table + * @end: end of the region that will be removed from the page-table + * + * Called at the end of the shootdown operation to free up any resources that + * were required. + */ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { @@ -254,8 +250,17 @@ void tlb_finish_mmu(struct mmu_gather *tlb, * the TLB by observing pte_none|!pte_dirty, for example so flush TLB * forcefully if we detect parallel PTE batching threads. */ - bool force = mm_tlb_flush_nested(tlb->mm); + if (mm_tlb_flush_nested(tlb->mm)) { + __tlb_reset_range(tlb); + __tlb_adjust_range(tlb, start, end - start); + } - arch_tlb_finish_mmu(tlb, start, end, force); + tlb_flush_mmu(tlb); + + /* keep the page table cache within bounds */ + check_pgt_cache(); +#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER + tlb_batch_list_free(tlb); +#endif dec_tlb_flush_pending(tlb->mm); } diff --git a/mm/page_owner.c b/mm/page_owner.c index 925b6f44a444..addcbb2ae4e4 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -58,15 +58,10 @@ static bool need_page_owner(void) static __always_inline depot_stack_handle_t create_dummy_stack(void) { unsigned long entries[4]; - struct stack_trace dummy; + unsigned int nr_entries; - dummy.nr_entries = 0; - dummy.max_entries = ARRAY_SIZE(entries); - dummy.entries = &entries[0]; - dummy.skip = 0; - - save_stack_trace(&dummy); - return depot_save_stack(&dummy, GFP_KERNEL); + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + return stack_depot_save(entries, nr_entries, GFP_KERNEL); } static noinline void register_dummy_stack(void) @@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order) } } -static inline bool check_recursive_alloc(struct stack_trace *trace, - unsigned long ip) +static inline bool check_recursive_alloc(unsigned long *entries, + unsigned int nr_entries, + unsigned long ip) { - int i; - - if (!trace->nr_entries) - return false; + unsigned int i; - for (i = 0; i < trace->nr_entries; i++) { - if (trace->entries[i] == ip) + for (i = 0; i < nr_entries; i++) { + if (entries[i] == ip) return true; } - return false; } static noinline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 2 - }; depot_stack_handle_t handle; + unsigned int nr_entries; - save_stack_trace(&trace); - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries-1] == ULONG_MAX) - trace.nr_entries--; + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); /* - * We need to check recursion here because our request to stackdepot - * could trigger memory allocation to save new entry. New memory - * allocation would reach here and call depot_save_stack() again - * if we don't catch it. 
There is still not enough memory in stackdepot - * so it would try to allocate memory again and loop forever. + * We need to check recursion here because our request to + * stackdepot could trigger memory allocation to save new + * entry. New memory allocation would reach here and call + * stack_depot_save_entries() again if we don't catch it. There is + * still not enough memory in stackdepot so it would try to + * allocate memory again and loop forever. */ - if (check_recursive_alloc(&trace, _RET_IP_)) + if (check_recursive_alloc(entries, nr_entries, _RET_IP_)) return dummy_handle; - handle = depot_save_stack(&trace, flags); + handle = stack_depot_save(entries, nr_entries, flags); if (!handle) handle = failure_handle; @@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, struct page *page, struct page_owner *page_owner, depot_stack_handle_t handle) { - int ret; - int pageblock_mt, page_mt; + int ret, pageblock_mt, page_mt; + unsigned long *entries; + unsigned int nr_entries; char *kbuf; - unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 0 - }; count = min_t(size_t, count, PAGE_SIZE); kbuf = kmalloc(count, GFP_KERNEL); @@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, if (ret >= count) goto err; - depot_fetch_stack(handle, &trace); - ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0); + nr_entries = stack_depot_fetch(handle, &entries); + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0); if (ret >= count) goto err; @@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); struct page_owner *page_owner; - unsigned long entries[PAGE_OWNER_STACK_DEPTH]; - struct stack_trace trace = { - .nr_entries = 0, - .entries = entries, - .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 0 - }; depot_stack_handle_t handle; + unsigned long *entries; + unsigned int nr_entries; gfp_t gfp_mask; int mt; @@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page) return; } - depot_fetch_stack(handle, &trace); + nr_entries = stack_depot_fetch(handle, &entries); pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n", page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask); - print_stack_trace(&trace, 0); + stack_trace_print(entries, nr_entries, 0); if (page_owner->last_migrate_reason != -1) pr_alert("page has been migrated, last migrate reason: %s\n", diff --git a/mm/slub.c b/mm/slub.c index d30ede89f4a6..6b28cd2b5a58 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object, if (addr) { #ifdef CONFIG_STACKTRACE - struct stack_trace trace; - int i; + unsigned int nr_entries; - trace.nr_entries = 0; - trace.max_entries = TRACK_ADDRS_COUNT; - trace.entries = p->addrs; - trace.skip = 3; metadata_access_enable(); - save_stack_trace(&trace); + nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3); metadata_access_disable(); - /* See rant in lockdep.c */ - if (trace.nr_entries != 0 && - trace.entries[trace.nr_entries - 1] == ULONG_MAX) - trace.nr_entries--; - - for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++) - p->addrs[i] = 0; + if (nr_entries < TRACK_ADDRS_COUNT) + p->addrs[nr_entries] = 0; #endif p->addr = addr; p->cpu = smp_processor_id(); p->pid = current->pid; p->when = jiffies; - } else + } else { memset(p, 0, 
sizeof(struct track)); + } } static void init_tracking(struct kmem_cache *s, void *object) diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 835d50b279f5..a2a88ab07f7b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -56,7 +56,7 @@ struct clusterip_config { #endif enum clusterip_hashmode hash_mode; /* which hashing mode */ u_int32_t hash_initval; /* hash initialization */ - struct rcu_head rcu; /* for call_rcu_bh */ + struct rcu_head rcu; /* for call_rcu */ struct net *net; /* netns for pernet list */ char ifname[IFNAMSIZ]; /* device ifname */ }; diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 76ca30cc4791..0c5969fa795f 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -222,6 +222,9 @@ endif ifdef CONFIG_RETPOLINE objtool_args += --retpoline endif +ifdef CONFIG_X86_SMAP + objtool_args += --uaccess +endif # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan index 38b2b4818e8e..019771b845c5 100644 --- a/scripts/Makefile.ubsan +++ b/scripts/Makefile.ubsan @@ -3,7 +3,6 @@ ifdef CONFIG_UBSAN CFLAGS_UBSAN += $(call cc-option, -fsanitize=shift) CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero) CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable) - CFLAGS_UBSAN += $(call cc-option, -fsanitize=vla-bound) CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) CFLAGS_UBSAN += $(call cc-option, -fsanitize=bounds) CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index fefee040bf79..b9298d2e8165 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -123,17 +123,22 @@ static int aafs_show_path(struct seq_file *seq, struct dentry *dentry) return 0; } -static void aafs_evict_inode(struct inode *inode) +static void aafs_i_callback(struct rcu_head *head) { - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); + struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static void aafs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, aafs_i_callback); } static const struct super_operations aafs_super_ops = { .statfs = simple_statfs, - .evict_inode = aafs_evict_inode, + .destroy_inode = aafs_destroy_inode, .show_path = aafs_show_path, }; diff --git a/security/inode.c b/security/inode.c index b7772a9b315e..421dd72b5876 100644 --- a/security/inode.c +++ b/security/inode.c @@ -27,17 +27,22 @@ static struct vfsmount *mount; static int mount_count; -static void securityfs_evict_inode(struct inode *inode) +static void securityfs_i_callback(struct rcu_head *head) { - truncate_inode_pages_final(&inode->i_data); - clear_inode(inode); + struct inode *inode = container_of(head, struct inode, i_rcu); if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); + free_inode_nonrcu(inode); +} + +static void securityfs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, securityfs_i_callback); } static const struct super_operations securityfs_super_operations = { .statfs = simple_statfs, - .evict_inode = securityfs_evict_inode, + .destroy_inode = securityfs_destroy_inode, }; static int fill_super(struct super_block *sb, void *data, int silent) diff --git 
a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f5b510f119ed..42cd3945e0de 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5450,6 +5450,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, return; spec->gen.preferred_dacs = preferred_pairs; + spec->gen.auto_mute_via_amp = 1; + codec->power_save_node = 0; } /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */ @@ -7268,6 +7270,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x21, 0x02211020}), + SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x14, 0x90170110}, + {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x02211020}), @@ -7541,6 +7547,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60130}, {0x17, 0x90170110}, {0x21, 0x04211020}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK, + {0x12, 0x90a60130}, + {0x17, 0x90170110}, + {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x14, 0x90170110}, + {0x21, 0x04211020}), SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, ALC295_STANDARD_PINS, {0x17, 0x21014020}, diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 7afe8fae4939..b61f65bed4e4 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -351,12 +351,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, { struct usb_device *usbdev = line6->usbdev; int ret; - unsigned char len; + unsigned char *len; unsigned count; if (address > 0xffff || datalen > 0xff) return -EINVAL; + len = kmalloc(sizeof(*len), GFP_KERNEL); + if (!len) + return -ENOMEM; + /* query the serial number: */ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, @@ -365,7 +369,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, if (ret < 0) { dev_err(line6->ifcdev, "read request failed (error %d)\n", ret); - return ret; + goto exit; } /* Wait for data length. We'll get 0xff until length arrives. 
*/ @@ -375,28 +379,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0x0012, 0x0000, &len, 1, + 0x0012, 0x0000, len, 1, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, "receive length failed (error %d)\n", ret); - return ret; + goto exit; } - if (len != 0xff) + if (*len != 0xff) break; } - if (len == 0xff) { + ret = -EIO; + if (*len == 0xff) { dev_err(line6->ifcdev, "read failed after %d retries\n", count); - return -EIO; - } else if (len != datalen) { + goto exit; + } else if (*len != datalen) { /* should be equal or something went wrong */ dev_err(line6->ifcdev, "length mismatch (expected %d, got %d)\n", - (int)datalen, (int)len); - return -EIO; + (int)datalen, (int)*len); + goto exit; } /* receive the result: */ @@ -405,12 +410,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, 0x0013, 0x0000, data, datalen, LINE6_TIMEOUT * HZ); - if (ret < 0) { + if (ret < 0) dev_err(line6->ifcdev, "read failed (error %d)\n", ret); - return ret; - } - return 0; +exit: + kfree(len); + return ret; } EXPORT_SYMBOL_GPL(line6_read_data); @@ -422,12 +427,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, { struct usb_device *usbdev = line6->usbdev; int ret; - unsigned char status; + unsigned char *status; int count; if (address > 0xffff || datalen > 0xffff) return -EINVAL; + status = kmalloc(sizeof(*status), GFP_KERNEL); + if (!status) + return -ENOMEM; + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0022, address, data, datalen, @@ -436,7 +445,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, if (ret < 0) { dev_err(line6->ifcdev, "write request failed (error %d)\n", ret); - return ret; + goto exit; } for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) { @@ -447,28 +456,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0012, 0x0000, - &status, 1, LINE6_TIMEOUT * HZ); + status, 1, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(line6->ifcdev, "receiving status failed (error %d)\n", ret); - return ret; + goto exit; } - if (status != 0xff) + if (*status != 0xff) break; } - if (status == 0xff) { + if (*status == 0xff) { dev_err(line6->ifcdev, "write failed after %d retries\n", count); - return -EIO; - } else if (status != 0) { + ret = -EIO; + } else if (*status != 0) { dev_err(line6->ifcdev, "write failed (error %d)\n", ret); - return -EIO; + ret = -EIO; } - - return 0; +exit: + kfree(status); + return ret; } EXPORT_SYMBOL_GPL(line6_write_data); diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c index 36ed9c85c0eb..5f3c87264e66 100644 --- a/sound/usb/line6/podhd.c +++ b/sound/usb/line6/podhd.c @@ -225,28 +225,32 @@ static void podhd_startup_start_workqueue(struct timer_list *t) static int podhd_dev_start(struct usb_line6_podhd *pod) { int ret; - u8 init_bytes[8]; + u8 *init_bytes; int i; struct usb_device *usbdev = pod->line6.usbdev; + init_bytes = kmalloc(8, GFP_KERNEL); + if (!init_bytes) + return -ENOMEM; + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x11, 0, NULL, 0, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret); - return ret; + goto exit; } /* NOTE: looks like some 
kind of ping message */ ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x11, 0x0, - &init_bytes, 3, LINE6_TIMEOUT * HZ); + init_bytes, 3, LINE6_TIMEOUT * HZ); if (ret < 0) { dev_err(pod->line6.ifcdev, "receive length failed (error %d)\n", ret); - return ret; + goto exit; } pod->firmware_version = @@ -255,7 +259,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod) for (i = 0; i <= 16; i++) { ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8); if (ret < 0) - return ret; + goto exit; } ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), @@ -263,10 +267,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod) USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT, 1, 0, NULL, 0, LINE6_TIMEOUT * HZ); - if (ret < 0) - return ret; - - return 0; +exit: + kfree(init_bytes); + return ret; } static void podhd_startup_workqueue(struct work_struct *work) diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c index f47ba94e6f4a..19bee725de00 100644 --- a/sound/usb/line6/toneport.c +++ b/sound/usb/line6/toneport.c @@ -365,16 +365,21 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport) /* Setup Toneport device. */ -static void toneport_setup(struct usb_line6_toneport *toneport) +static int toneport_setup(struct usb_line6_toneport *toneport) { - u32 ticks; + u32 *ticks; struct usb_line6 *line6 = &toneport->line6; struct usb_device *usbdev = line6->usbdev; + ticks = kmalloc(sizeof(*ticks), GFP_KERNEL); + if (!ticks) + return -ENOMEM; + /* sync time on device with host: */ /* note: 32-bit timestamps overflow in year 2106 */ - ticks = (u32)ktime_get_real_seconds(); - line6_write_data(line6, 0x80c6, &ticks, 4); + *ticks = (u32)ktime_get_real_seconds(); + line6_write_data(line6, 0x80c6, ticks, 4); + kfree(ticks); /* enable device: */ toneport_send_cmd(usbdev, 0x0301, 0x0000); @@ -389,6 +394,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport) toneport_update_led(toneport); mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ); + return 0; } /* @@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6, return err; } - toneport_setup(toneport); + err = toneport_setup(toneport); + if (err) + return err; /* register audio system: */ return snd_card_register(line6->card); @@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6, */ static int toneport_reset_resume(struct usb_interface *interface) { - toneport_setup(usb_get_intfdata(interface)); + int err; + + err = toneport_setup(usb_get_intfdata(interface)); + if (err) + return err; return line6_resume(interface); } #endif diff --git a/tools/arch/arc/include/uapi/asm/unistd.h b/tools/arch/arc/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..5eafa1115162 --- /dev/null +++ b/tools/arch/arc/include/uapi/asm/unistd.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/******** no-legacy-syscalls-ABI *******/ + +/* + * Non-typical guard macro to enable inclusion twice in ARCH sys.c + * That is how the Generic syscall wrapper generator works + */ +#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL) +#define _UAPI_ASM_ARC_UNISTD_H + +#define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYS_EXECVE +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_TIME32_SYSCALLS + +#define sys_mmap2 sys_mmap_pgoff + +#include <asm-generic/unistd.h> + +#define NR_syscalls __NR_syscalls + +/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */ +#define __NR_sysfs (__NR_arch_specific_syscall + 3) + +/* ARC specific syscall */ +#define __NR_cacheflush (__NR_arch_specific_syscall + 0) +#define __NR_arc_settls (__NR_arch_specific_syscall + 1) +#define __NR_arc_gettls (__NR_arch_specific_syscall + 2) +#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4) + +__SYSCALL(__NR_cacheflush, sys_cacheflush) +__SYSCALL(__NR_arc_settls, sys_arc_settls) +__SYSCALL(__NR_arc_gettls, sys_arc_gettls) +__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg) +__SYSCALL(__NR_sysfs, sys_sysfs) + +#undef __SYSCALL + +#endif diff --git a/tools/arch/hexagon/include/uapi/asm/unistd.h b/tools/arch/hexagon/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..432c4db1b623 --- /dev/null +++ b/tools/arch/hexagon/include/uapi/asm/unistd.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Syscall support for Hexagon + * + * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +/* + * The kernel pulls this unistd.h in three different ways: + * 1. the "normal" way which gets all the __NR defines + * 2. with __SYSCALL defined to produce function declarations + * 3. 
with __SYSCALL defined to produce syscall table initialization + * See also: syscalltab.c + */ + +#define sys_mmap2 sys_mmap_pgoff +#define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_SYS_EXECVE +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_TIME32_SYSCALLS + +#include <asm-generic/unistd.h> diff --git a/tools/arch/riscv/include/uapi/asm/unistd.h b/tools/arch/riscv/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..0e2eeeb1fd27 --- /dev/null +++ b/tools/arch/riscv/include/uapi/asm/unistd.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifdef __LP64__ +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT +#endif /* __LP64__ */ + +#include <asm-generic/unistd.h> + +/* + * Allows the instruction cache to be flushed from userspace. Despite RISC-V + * having a direct 'fence.i' instruction available to userspace (which we + * can't trap!), that's not actually viable when running on Linux because the + * kernel might schedule a process on another hart. There is no way for + * userspace to handle this without invoking the kernel (as it doesn't know the + * thread->hart mappings), so we've defined a RISC-V specific system call to + * flush the instruction cache. + * + * __NR_riscv_flush_icache is defined to flush the instruction cache over an + * address range, with the flush applying to either all threads or just the + * caller. We don't currently do anything with the address range, that's just + * in there for forwards compatibility. + */ +#ifndef __NR_riscv_flush_icache +#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) +#endif +__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache) diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index f0b0c90dd398..d213ec5c3766 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -146,6 +146,7 @@ #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2 +#define VMX_ABORT_VMCS_CORRUPTED 3 #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4 #endif /* _UAPIVMX_H */ diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 8d3864b061f3..361207387b1b 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -67,6 +67,7 @@ FEATURE_TESTS_BASIC := \ sdt \ setns \ libaio \ + libzstd \ disassembler-four-args # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list @@ -120,6 +121,7 @@ FEATURE_DISPLAY ?= \ get_cpuid \ bpf \ libaio \ + libzstd \ disassembler-four-args # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 7ceb4441b627..4b8244ee65ce 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -62,7 +62,8 @@ FILES= \ test-clang.bin \ test-llvm.bin \ test-llvm-version.bin \ - test-libaio.bin + test-libaio.bin \ + test-libzstd.bin FILES := $(addprefix $(OUTPUT),$(FILES)) @@ -301,6 +302,9 @@ $(OUTPUT)test-clang.bin: $(OUTPUT)test-libaio.bin: $(BUILD) -lrt +$(OUTPUT)test-libzstd.bin: + $(BUILD) -lzstd + ############################### clean: diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 7853e6d91090..a59c53705093 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -182,6 +182,10 @@ # include "test-disassembler-four-args.c" #undef main +#define main main_test_zstd +# include "test-libzstd.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -224,6 +228,7 @@ int main(int argc, char *argv[]) main_test_libaio(); main_test_reallocarray(); main_test_disassembler_four_args(); + main_test_libzstd(); return 0; } diff --git a/tools/build/feature/test-libzstd.c b/tools/build/feature/test-libzstd.c new file mode 100644 index 000000000000..55268c01b84d --- /dev/null +++ b/tools/build/feature/test-libzstd.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <zstd.h> + +int main(void) +{ + ZSTD_CStream *cstream; + + cstream = ZSTD_createCStream(); + ZSTD_freeCStream(cstream); + + return 0; +} diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c index d463761a58f4..988587840c80 100644 --- a/tools/lib/traceevent/event-parse-api.c +++ b/tools/lib/traceevent/event-parse-api.c @@ -9,6 +9,22 @@ #include "event-utils.h" /** + * tep_get_event - returns the event with the given index + * @tep: a handle to the tep_handle + * @index: index of the requested event, in the range 0 .. nr_events + * + * This returns pointer to the element of the events array with the given index + * If @tep is NULL, or @index is not in the range 0 .. nr_events, NULL is returned. 
+ */ +struct tep_event *tep_get_event(struct tep_handle *tep, int index) +{ + if (tep && tep->events && index < tep->nr_events) + return tep->events[index]; + + return NULL; +} + +/** * tep_get_first_event - returns the first event in the events array * @tep: a handle to the tep_handle * @@ -17,10 +33,7 @@ */ struct tep_event *tep_get_first_event(struct tep_handle *tep) { - if (tep && tep->events) - return tep->events[0]; - - return NULL; + return tep_get_event(tep, 0); } /** @@ -32,7 +45,7 @@ struct tep_event *tep_get_first_event(struct tep_handle *tep) */ int tep_get_events_count(struct tep_handle *tep) { - if(tep) + if (tep) return tep->nr_events; return 0; } @@ -43,19 +56,47 @@ int tep_get_events_count(struct tep_handle *tep) * @flag: flag, or combination of flags to be set * can be any combination from enum tep_flag * - * This sets a flag or mbination of flags from enum tep_flag - */ + * This sets a flag or combination of flags from enum tep_flag + */ void tep_set_flag(struct tep_handle *tep, int flag) { - if(tep) + if (tep) tep->flags |= flag; } -unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data) +/** + * tep_clear_flag - clear event parser flag + * @tep: a handle to the tep_handle + * @flag: flag to be cleared + * + * This clears a tep flag + */ +void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag) +{ + if (tep) + tep->flags &= ~flag; +} + +/** + * tep_test_flag - check the state of event parser flag + * @tep: a handle to the tep_handle + * @flag: flag to be checked + * + * This returns the state of the requested tep flag. + * Returns: true if the flag is set, false otherwise. + */ +bool tep_test_flag(struct tep_handle *tep, enum tep_flag flag) +{ + if (tep) + return tep->flags & flag; + return false; +} + +unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data) { unsigned short swap; - if (!pevent || pevent->host_bigendian == pevent->file_bigendian) + if (!tep || tep->host_bigendian == tep->file_bigendian) return data; swap = ((data & 0xffULL) << 8) | @@ -64,11 +105,11 @@ unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data) return swap; } -unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data) +unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data) { unsigned int swap; - if (!pevent || pevent->host_bigendian == pevent->file_bigendian) + if (!tep || tep->host_bigendian == tep->file_bigendian) return data; swap = ((data & 0xffULL) << 24) | @@ -80,11 +121,11 @@ unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data) } unsigned long long -tep_data2host8(struct tep_handle *pevent, unsigned long long data) +tep_data2host8(struct tep_handle *tep, unsigned long long data) { unsigned long long swap; - if (!pevent || pevent->host_bigendian == pevent->file_bigendian) + if (!tep || tep->host_bigendian == tep->file_bigendian) return data; swap = ((data & 0xffULL) << 56) | @@ -101,175 +142,232 @@ tep_data2host8(struct tep_handle *pevent, unsigned long long data) /** * tep_get_header_page_size - get size of the header page - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * * This returns size of the header page - * If @pevent is NULL, 0 is returned. + * If @tep is NULL, 0 is returned. 
+ */ +int tep_get_header_page_size(struct tep_handle *tep) +{ + if (tep) + return tep->header_page_size_size; + return 0; +} + +/** + * tep_get_header_timestamp_size - get size of the timestamp in the header page + * @tep: a handle to the tep_handle + * + * This returns size of the timestamp in the header page + * If @tep is NULL, 0 is returned. */ -int tep_get_header_page_size(struct tep_handle *pevent) +int tep_get_header_timestamp_size(struct tep_handle *tep) { - if(pevent) - return pevent->header_page_size_size; + if (tep) + return tep->header_page_ts_size; return 0; } /** * tep_get_cpus - get the number of CPUs - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * * This returns the number of CPUs - * If @pevent is NULL, 0 is returned. + * If @tep is NULL, 0 is returned. */ -int tep_get_cpus(struct tep_handle *pevent) +int tep_get_cpus(struct tep_handle *tep) { - if(pevent) - return pevent->cpus; + if (tep) + return tep->cpus; return 0; } /** * tep_set_cpus - set the number of CPUs - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * * This sets the number of CPUs */ -void tep_set_cpus(struct tep_handle *pevent, int cpus) +void tep_set_cpus(struct tep_handle *tep, int cpus) { - if(pevent) - pevent->cpus = cpus; + if (tep) + tep->cpus = cpus; } /** - * tep_get_long_size - get the size of a long integer on the current machine - * @pevent: a handle to the tep_handle + * tep_get_long_size - get the size of a long integer on the traced machine + * @tep: a handle to the tep_handle * - * This returns the size of a long integer on the current machine - * If @pevent is NULL, 0 is returned. + * This returns the size of a long integer on the traced machine + * If @tep is NULL, 0 is returned. */ -int tep_get_long_size(struct tep_handle *pevent) +int tep_get_long_size(struct tep_handle *tep) { - if(pevent) - return pevent->long_size; + if (tep) + return tep->long_size; return 0; } /** - * tep_set_long_size - set the size of a long integer on the current machine - * @pevent: a handle to the tep_handle + * tep_set_long_size - set the size of a long integer on the traced machine + * @tep: a handle to the tep_handle * @size: size, in bytes, of a long integer * - * This sets the size of a long integer on the current machine + * This sets the size of a long integer on the traced machine */ -void tep_set_long_size(struct tep_handle *pevent, int long_size) +void tep_set_long_size(struct tep_handle *tep, int long_size) { - if(pevent) - pevent->long_size = long_size; + if (tep) + tep->long_size = long_size; } /** - * tep_get_page_size - get the size of a memory page on the current machine - * @pevent: a handle to the tep_handle + * tep_get_page_size - get the size of a memory page on the traced machine + * @tep: a handle to the tep_handle * - * This returns the size of a memory page on the current machine - * If @pevent is NULL, 0 is returned. + * This returns the size of a memory page on the traced machine + * If @tep is NULL, 0 is returned. 
*/ -int tep_get_page_size(struct tep_handle *pevent) +int tep_get_page_size(struct tep_handle *tep) { - if(pevent) - return pevent->page_size; + if (tep) + return tep->page_size; return 0; } /** - * tep_set_page_size - set the size of a memory page on the current machine - * @pevent: a handle to the tep_handle + * tep_set_page_size - set the size of a memory page on the traced machine + * @tep: a handle to the tep_handle * @_page_size: size of a memory page, in bytes * - * This sets the size of a memory page on the current machine + * This sets the size of a memory page on the traced machine */ -void tep_set_page_size(struct tep_handle *pevent, int _page_size) +void tep_set_page_size(struct tep_handle *tep, int _page_size) { - if(pevent) - pevent->page_size = _page_size; + if (tep) + tep->page_size = _page_size; } /** - * tep_file_bigendian - get if the file is in big endian order - * @pevent: a handle to the tep_handle + * tep_is_file_bigendian - return the endian of the file + * @tep: a handle to the tep_handle * - * This returns if the file is in big endian order - * If @pevent is NULL, 0 is returned. + * This returns true if the file is in big endian order + * If @tep is NULL, false is returned. */ -int tep_file_bigendian(struct tep_handle *pevent) +bool tep_is_file_bigendian(struct tep_handle *tep) { - if(pevent) - return pevent->file_bigendian; - return 0; + if (tep) + return (tep->file_bigendian == TEP_BIG_ENDIAN); + return false; } /** * tep_set_file_bigendian - set if the file is in big endian order - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * @endian: non zero, if the file is in big endian order * * This sets if the file is in big endian order */ -void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian) +void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian) { - if(pevent) - pevent->file_bigendian = endian; + if (tep) + tep->file_bigendian = endian; } /** - * tep_is_host_bigendian - get if the order of the current host is big endian - * @pevent: a handle to the tep_handle + * tep_is_local_bigendian - return the endian of the saved local machine + * @tep: a handle to the tep_handle * - * This gets if the order of the current host is big endian - * If @pevent is NULL, 0 is returned. + * This returns true if the saved local machine in @tep is big endian. + * If @tep is NULL, false is returned. */ -int tep_is_host_bigendian(struct tep_handle *pevent) +bool tep_is_local_bigendian(struct tep_handle *tep) { - if(pevent) - return pevent->host_bigendian; + if (tep) + return (tep->host_bigendian == TEP_BIG_ENDIAN); return 0; } /** - * tep_set_host_bigendian - set the order of the local host - * @pevent: a handle to the tep_handle + * tep_set_local_bigendian - set the stored local machine endian order + * @tep: a handle to the tep_handle * @endian: non zero, if the local host has big endian order * - * This sets the order of the local host + * This sets the endian order for the local machine. */ -void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian) +void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian) { - if(pevent) - pevent->host_bigendian = endian; + if (tep) + tep->host_bigendian = endian; } /** * tep_is_latency_format - get if the latency output format is configured - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * - * This gets if the latency output format is configured - * If @pevent is NULL, 0 is returned. 
+ * This returns true if the latency output format is configured + * If @tep is NULL, false is returned. */ -int tep_is_latency_format(struct tep_handle *pevent) +bool tep_is_latency_format(struct tep_handle *tep) { - if(pevent) - return pevent->latency_format; - return 0; + if (tep) + return (tep->latency_format); + return false; } /** * tep_set_latency_format - set the latency output format - * @pevent: a handle to the tep_handle + * @tep: a handle to the tep_handle * @lat: non zero for latency output format * * This sets the latency output format */ -void tep_set_latency_format(struct tep_handle *pevent, int lat) +void tep_set_latency_format(struct tep_handle *tep, int lat) +{ + if (tep) + tep->latency_format = lat; +} + +/** + * tep_is_old_format - get if an old kernel is used + * @tep: a handle to the tep_handle + * + * This returns true, if an old kernel is used to generate the tracing events or + * false if a new kernel is used. Old kernels did not have header page info. + * If @tep is NULL, false is returned. + */ +bool tep_is_old_format(struct tep_handle *tep) +{ + if (tep) + return tep->old_format; + return false; +} + +/** + * tep_set_print_raw - set a flag to force print in raw format + * @tep: a handle to the tep_handle + * @print_raw: the new value of the print_raw flag + * + * This sets a flag to force print in raw format + */ +void tep_set_print_raw(struct tep_handle *tep, int print_raw) +{ + if (tep) + tep->print_raw = print_raw; +} + +/** + * tep_set_test_filters - set a flag to test a filter string + * @tep: a handle to the tep_handle + * @test_filters: the new value of the test_filters flag + * + * This sets a flag to test a filter string. If this flag is set, when + * tep_filter_add_filter_str() API as called,it will print the filter string + * instead of adding it. 
+ */ +void tep_set_test_filters(struct tep_handle *tep, int test_filters) { - if(pevent) - pevent->latency_format = lat; + if (tep) + tep->test_filters = test_filters; } diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h index 35833ee32d6c..09aa142f7fdd 100644 --- a/tools/lib/traceevent/event-parse-local.h +++ b/tools/lib/traceevent/event-parse-local.h @@ -92,8 +92,8 @@ struct tep_handle { void tep_free_event(struct tep_event *event); void tep_free_format_field(struct tep_format_field *field); -unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data); -unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data); -unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data); +unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data); +unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data); +unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data); #endif /* _PARSE_EVENTS_INT_H */ diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 981c6ce2da2c..b36b536a9fcb 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -148,14 +148,14 @@ struct cmdline_list { int pid; }; -static int cmdline_init(struct tep_handle *pevent) +static int cmdline_init(struct tep_handle *tep) { - struct cmdline_list *cmdlist = pevent->cmdlist; + struct cmdline_list *cmdlist = tep->cmdlist; struct cmdline_list *item; struct tep_cmdline *cmdlines; int i; - cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); + cmdlines = malloc(sizeof(*cmdlines) * tep->cmdline_count); if (!cmdlines) return -1; @@ -169,15 +169,15 @@ static int cmdline_init(struct tep_handle *pevent) free(item); } - qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp); + qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp); - pevent->cmdlines = cmdlines; - pevent->cmdlist = NULL; + tep->cmdlines = cmdlines; + tep->cmdlist = NULL; return 0; } -static const char *find_cmdline(struct tep_handle *pevent, int pid) +static const char *find_cmdline(struct tep_handle *tep, int pid) { const struct tep_cmdline *comm; struct tep_cmdline key; @@ -185,13 +185,13 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid) if (!pid) return "<idle>"; - if (!pevent->cmdlines && cmdline_init(pevent)) + if (!tep->cmdlines && cmdline_init(tep)) return "<not enough memory for cmdlines!>"; key.pid = pid; - comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, - sizeof(*pevent->cmdlines), cmdline_cmp); + comm = bsearch(&key, tep->cmdlines, tep->cmdline_count, + sizeof(*tep->cmdlines), cmdline_cmp); if (comm) return comm->comm; @@ -199,32 +199,32 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid) } /** - * tep_pid_is_registered - return if a pid has a cmdline registered - * @pevent: handle for the pevent + * tep_is_pid_registered - return if a pid has a cmdline registered + * @tep: a handle to the trace event parser context * @pid: The pid to check if it has a cmdline registered with. * - * Returns 1 if the pid has a cmdline mapped to it - * 0 otherwise. + * Returns true if the pid has a cmdline mapped to it + * false otherwise. 
*/ -int tep_pid_is_registered(struct tep_handle *pevent, int pid) +bool tep_is_pid_registered(struct tep_handle *tep, int pid) { const struct tep_cmdline *comm; struct tep_cmdline key; if (!pid) - return 1; + return true; - if (!pevent->cmdlines && cmdline_init(pevent)) - return 0; + if (!tep->cmdlines && cmdline_init(tep)) + return false; key.pid = pid; - comm = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, - sizeof(*pevent->cmdlines), cmdline_cmp); + comm = bsearch(&key, tep->cmdlines, tep->cmdline_count, + sizeof(*tep->cmdlines), cmdline_cmp); if (comm) - return 1; - return 0; + return true; + return false; } /* @@ -232,10 +232,10 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid) * we must add this pid. This is much slower than when cmdlines * are added before the array is initialized. */ -static int add_new_comm(struct tep_handle *pevent, +static int add_new_comm(struct tep_handle *tep, const char *comm, int pid, bool override) { - struct tep_cmdline *cmdlines = pevent->cmdlines; + struct tep_cmdline *cmdlines = tep->cmdlines; struct tep_cmdline *cmdline; struct tep_cmdline key; char *new_comm; @@ -246,8 +246,8 @@ static int add_new_comm(struct tep_handle *pevent, /* avoid duplicates */ key.pid = pid; - cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, - sizeof(*pevent->cmdlines), cmdline_cmp); + cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count, + sizeof(*tep->cmdlines), cmdline_cmp); if (cmdline) { if (!override) { errno = EEXIST; @@ -264,37 +264,37 @@ static int add_new_comm(struct tep_handle *pevent, return 0; } - cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1)); + cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (tep->cmdline_count + 1)); if (!cmdlines) { errno = ENOMEM; return -1; } - cmdlines[pevent->cmdline_count].comm = strdup(comm); - if (!cmdlines[pevent->cmdline_count].comm) { + cmdlines[tep->cmdline_count].comm = strdup(comm); + if (!cmdlines[tep->cmdline_count].comm) { free(cmdlines); errno = ENOMEM; return -1; } - cmdlines[pevent->cmdline_count].pid = pid; + cmdlines[tep->cmdline_count].pid = pid; - if (cmdlines[pevent->cmdline_count].comm) - pevent->cmdline_count++; + if (cmdlines[tep->cmdline_count].comm) + tep->cmdline_count++; - qsort(cmdlines, pevent->cmdline_count, sizeof(*cmdlines), cmdline_cmp); - pevent->cmdlines = cmdlines; + qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp); + tep->cmdlines = cmdlines; return 0; } -static int _tep_register_comm(struct tep_handle *pevent, +static int _tep_register_comm(struct tep_handle *tep, const char *comm, int pid, bool override) { struct cmdline_list *item; - if (pevent->cmdlines) - return add_new_comm(pevent, comm, pid, override); + if (tep->cmdlines) + return add_new_comm(tep, comm, pid, override); item = malloc(sizeof(*item)); if (!item) @@ -309,17 +309,17 @@ static int _tep_register_comm(struct tep_handle *pevent, return -1; } item->pid = pid; - item->next = pevent->cmdlist; + item->next = tep->cmdlist; - pevent->cmdlist = item; - pevent->cmdline_count++; + tep->cmdlist = item; + tep->cmdline_count++; return 0; } /** * tep_register_comm - register a pid / comm mapping - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @comm: the command line to register * @pid: the pid to map the command line to * @@ -327,14 +327,14 @@ static int _tep_register_comm(struct tep_handle *pevent, * a given pid. The comm is duplicated. 
If a command with the same pid * already exist, -1 is returned and errno is set to EEXIST */ -int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) +int tep_register_comm(struct tep_handle *tep, const char *comm, int pid) { - return _tep_register_comm(pevent, comm, pid, false); + return _tep_register_comm(tep, comm, pid, false); } /** * tep_override_comm - register a pid / comm mapping - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @comm: the command line to register * @pid: the pid to map the command line to * @@ -342,19 +342,19 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid) * a given pid. The comm is duplicated. If a command with the same pid * already exist, the command string is udapted with the new one */ -int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid) +int tep_override_comm(struct tep_handle *tep, const char *comm, int pid) { - if (!pevent->cmdlines && cmdline_init(pevent)) { + if (!tep->cmdlines && cmdline_init(tep)) { errno = ENOMEM; return -1; } - return _tep_register_comm(pevent, comm, pid, true); + return _tep_register_comm(tep, comm, pid, true); } -int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock) +int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock) { - pevent->trace_clock = strdup(trace_clock); - if (!pevent->trace_clock) { + tep->trace_clock = strdup(trace_clock); + if (!tep->trace_clock) { errno = ENOMEM; return -1; } @@ -408,18 +408,18 @@ static int func_bcmp(const void *a, const void *b) return 1; } -static int func_map_init(struct tep_handle *pevent) +static int func_map_init(struct tep_handle *tep) { struct func_list *funclist; struct func_list *item; struct func_map *func_map; int i; - func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1)); + func_map = malloc(sizeof(*func_map) * (tep->func_count + 1)); if (!func_map) return -1; - funclist = pevent->funclist; + funclist = tep->funclist; i = 0; while (funclist) { @@ -432,34 +432,34 @@ static int func_map_init(struct tep_handle *pevent) free(item); } - qsort(func_map, pevent->func_count, sizeof(*func_map), func_cmp); + qsort(func_map, tep->func_count, sizeof(*func_map), func_cmp); /* * Add a special record at the end. */ - func_map[pevent->func_count].func = NULL; - func_map[pevent->func_count].addr = 0; - func_map[pevent->func_count].mod = NULL; + func_map[tep->func_count].func = NULL; + func_map[tep->func_count].addr = 0; + func_map[tep->func_count].mod = NULL; - pevent->func_map = func_map; - pevent->funclist = NULL; + tep->func_map = func_map; + tep->funclist = NULL; return 0; } static struct func_map * -__find_func(struct tep_handle *pevent, unsigned long long addr) +__find_func(struct tep_handle *tep, unsigned long long addr) { struct func_map *func; struct func_map key; - if (!pevent->func_map) - func_map_init(pevent); + if (!tep->func_map) + func_map_init(tep); key.addr = addr; - func = bsearch(&key, pevent->func_map, pevent->func_count, - sizeof(*pevent->func_map), func_bcmp); + func = bsearch(&key, tep->func_map, tep->func_count, + sizeof(*tep->func_map), func_bcmp); return func; } @@ -472,15 +472,14 @@ struct func_resolver { /** * tep_set_function_resolver - set an alternative function resolver - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @resolver: function to be used * @priv: resolver function private state. 
* * Some tools may have already a way to resolve kernel functions, allow them to - * keep using it instead of duplicating all the entries inside - * pevent->funclist. + * keep using it instead of duplicating all the entries inside tep->funclist. */ -int tep_set_function_resolver(struct tep_handle *pevent, +int tep_set_function_resolver(struct tep_handle *tep, tep_func_resolver_t *func, void *priv) { struct func_resolver *resolver = malloc(sizeof(*resolver)); @@ -491,38 +490,38 @@ int tep_set_function_resolver(struct tep_handle *pevent, resolver->func = func; resolver->priv = priv; - free(pevent->func_resolver); - pevent->func_resolver = resolver; + free(tep->func_resolver); + tep->func_resolver = resolver; return 0; } /** * tep_reset_function_resolver - reset alternative function resolver - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * * Stop using whatever alternative resolver was set, use the default * one instead. */ -void tep_reset_function_resolver(struct tep_handle *pevent) +void tep_reset_function_resolver(struct tep_handle *tep) { - free(pevent->func_resolver); - pevent->func_resolver = NULL; + free(tep->func_resolver); + tep->func_resolver = NULL; } static struct func_map * -find_func(struct tep_handle *pevent, unsigned long long addr) +find_func(struct tep_handle *tep, unsigned long long addr) { struct func_map *map; - if (!pevent->func_resolver) - return __find_func(pevent, addr); + if (!tep->func_resolver) + return __find_func(tep, addr); - map = &pevent->func_resolver->map; + map = &tep->func_resolver->map; map->mod = NULL; map->addr = addr; - map->func = pevent->func_resolver->func(pevent->func_resolver->priv, - &map->addr, &map->mod); + map->func = tep->func_resolver->func(tep->func_resolver->priv, + &map->addr, &map->mod); if (map->func == NULL) return NULL; @@ -531,18 +530,18 @@ find_func(struct tep_handle *pevent, unsigned long long addr) /** * tep_find_function - find a function by a given address - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @addr: the address to find the function with * * Returns a pointer to the function stored that has the given * address. Note, the address does not have to be exact, it * will select the function that would contain the address. */ -const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr) +const char *tep_find_function(struct tep_handle *tep, unsigned long long addr) { struct func_map *map; - map = find_func(pevent, addr); + map = find_func(tep, addr); if (!map) return NULL; @@ -551,7 +550,7 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr /** * tep_find_function_address - find a function address by a given address - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @addr: the address to find the function with * * Returns the address the function starts at. This can be used in @@ -559,11 +558,11 @@ const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr * name and the function offset. 
*/ unsigned long long -tep_find_function_address(struct tep_handle *pevent, unsigned long long addr) +tep_find_function_address(struct tep_handle *tep, unsigned long long addr) { struct func_map *map; - map = find_func(pevent, addr); + map = find_func(tep, addr); if (!map) return 0; @@ -572,7 +571,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr) /** * tep_register_function - register a function with a given address - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @function: the function name to register * @addr: the address the function starts at * @mod: the kernel module the function may be in (NULL for none) @@ -580,7 +579,7 @@ tep_find_function_address(struct tep_handle *pevent, unsigned long long addr) * This registers a function name with an address and module. * The @func passed in is duplicated. */ -int tep_register_function(struct tep_handle *pevent, char *func, +int tep_register_function(struct tep_handle *tep, char *func, unsigned long long addr, char *mod) { struct func_list *item = malloc(sizeof(*item)); @@ -588,7 +587,7 @@ int tep_register_function(struct tep_handle *pevent, char *func, if (!item) return -1; - item->next = pevent->funclist; + item->next = tep->funclist; item->func = strdup(func); if (!item->func) goto out_free; @@ -601,8 +600,8 @@ int tep_register_function(struct tep_handle *pevent, char *func, item->mod = NULL; item->addr = addr; - pevent->funclist = item; - pevent->func_count++; + tep->funclist = item; + tep->func_count++; return 0; @@ -617,23 +616,23 @@ out_free: /** * tep_print_funcs - print out the stored functions - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * * This prints out the stored functions. 
*/ -void tep_print_funcs(struct tep_handle *pevent) +void tep_print_funcs(struct tep_handle *tep) { int i; - if (!pevent->func_map) - func_map_init(pevent); + if (!tep->func_map) + func_map_init(tep); - for (i = 0; i < (int)pevent->func_count; i++) { + for (i = 0; i < (int)tep->func_count; i++) { printf("%016llx %s", - pevent->func_map[i].addr, - pevent->func_map[i].func); - if (pevent->func_map[i].mod) - printf(" [%s]\n", pevent->func_map[i].mod); + tep->func_map[i].addr, + tep->func_map[i].func); + if (tep->func_map[i].mod) + printf(" [%s]\n", tep->func_map[i].mod); else printf("\n"); } @@ -663,18 +662,18 @@ static int printk_cmp(const void *a, const void *b) return 0; } -static int printk_map_init(struct tep_handle *pevent) +static int printk_map_init(struct tep_handle *tep) { struct printk_list *printklist; struct printk_list *item; struct printk_map *printk_map; int i; - printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1)); + printk_map = malloc(sizeof(*printk_map) * (tep->printk_count + 1)); if (!printk_map) return -1; - printklist = pevent->printklist; + printklist = tep->printklist; i = 0; while (printklist) { @@ -686,41 +685,41 @@ static int printk_map_init(struct tep_handle *pevent) free(item); } - qsort(printk_map, pevent->printk_count, sizeof(*printk_map), printk_cmp); + qsort(printk_map, tep->printk_count, sizeof(*printk_map), printk_cmp); - pevent->printk_map = printk_map; - pevent->printklist = NULL; + tep->printk_map = printk_map; + tep->printklist = NULL; return 0; } static struct printk_map * -find_printk(struct tep_handle *pevent, unsigned long long addr) +find_printk(struct tep_handle *tep, unsigned long long addr) { struct printk_map *printk; struct printk_map key; - if (!pevent->printk_map && printk_map_init(pevent)) + if (!tep->printk_map && printk_map_init(tep)) return NULL; key.addr = addr; - printk = bsearch(&key, pevent->printk_map, pevent->printk_count, - sizeof(*pevent->printk_map), printk_cmp); + printk = bsearch(&key, tep->printk_map, tep->printk_count, + sizeof(*tep->printk_map), printk_cmp); return printk; } /** * tep_register_print_string - register a string by its address - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @fmt: the string format to register * @addr: the address the string was located at * * This registers a string by the address it was stored in the kernel. * The @fmt passed in is duplicated. */ -int tep_register_print_string(struct tep_handle *pevent, const char *fmt, +int tep_register_print_string(struct tep_handle *tep, const char *fmt, unsigned long long addr) { struct printk_list *item = malloc(sizeof(*item)); @@ -729,7 +728,7 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt, if (!item) return -1; - item->next = pevent->printklist; + item->next = tep->printklist; item->addr = addr; /* Strip off quotes and '\n' from the end */ @@ -747,8 +746,8 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt, if (strcmp(p, "\\n") == 0) *p = 0; - pevent->printklist = item; - pevent->printk_count++; + tep->printklist = item; + tep->printk_count++; return 0; @@ -760,21 +759,21 @@ out_free: /** * tep_print_printk - print out the stored strings - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * * This prints the string formats that were stored. 
*/ -void tep_print_printk(struct tep_handle *pevent) +void tep_print_printk(struct tep_handle *tep) { int i; - if (!pevent->printk_map) - printk_map_init(pevent); + if (!tep->printk_map) + printk_map_init(tep); - for (i = 0; i < (int)pevent->printk_count; i++) { + for (i = 0; i < (int)tep->printk_count; i++) { printf("%016llx %s\n", - pevent->printk_map[i].addr, - pevent->printk_map[i].printk); + tep->printk_map[i].addr, + tep->printk_map[i].printk); } } @@ -783,29 +782,29 @@ static struct tep_event *alloc_event(void) return calloc(1, sizeof(struct tep_event)); } -static int add_event(struct tep_handle *pevent, struct tep_event *event) +static int add_event(struct tep_handle *tep, struct tep_event *event) { int i; - struct tep_event **events = realloc(pevent->events, sizeof(event) * - (pevent->nr_events + 1)); + struct tep_event **events = realloc(tep->events, sizeof(event) * + (tep->nr_events + 1)); if (!events) return -1; - pevent->events = events; + tep->events = events; - for (i = 0; i < pevent->nr_events; i++) { - if (pevent->events[i]->id > event->id) + for (i = 0; i < tep->nr_events; i++) { + if (tep->events[i]->id > event->id) break; } - if (i < pevent->nr_events) - memmove(&pevent->events[i + 1], - &pevent->events[i], - sizeof(event) * (pevent->nr_events - i)); + if (i < tep->nr_events) + memmove(&tep->events[i + 1], + &tep->events[i], + sizeof(event) * (tep->nr_events - i)); - pevent->events[i] = event; - pevent->nr_events++; + tep->events[i] = event; + tep->nr_events++; - event->pevent = pevent; + event->tep = tep; return 0; } @@ -1184,7 +1183,7 @@ static enum tep_event_type read_token(char **tok) } /** - * tep_read_token - access to utilities to use the pevent parser + * tep_read_token - access to utilities to use the tep parser * @tok: The token to return * * This will parse tokens from the string given by @@ -1657,8 +1656,8 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field ** else if (field->flags & TEP_FIELD_IS_STRING) field->elementsize = 1; else if (field->flags & TEP_FIELD_IS_LONG) - field->elementsize = event->pevent ? - event->pevent->long_size : + field->elementsize = event->tep ? 
+ event->tep->long_size : sizeof(long); } else field->elementsize = field->size; @@ -2942,14 +2941,14 @@ process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *ar } static struct tep_function_handler * -find_func_handler(struct tep_handle *pevent, char *func_name) +find_func_handler(struct tep_handle *tep, char *func_name) { struct tep_function_handler *func; - if (!pevent) + if (!tep) return NULL; - for (func = pevent->func_handlers; func; func = func->next) { + for (func = tep->func_handlers; func; func = func->next) { if (strcmp(func->name, func_name) == 0) break; } @@ -2957,12 +2956,12 @@ find_func_handler(struct tep_handle *pevent, char *func_name) return func; } -static void remove_func_handler(struct tep_handle *pevent, char *func_name) +static void remove_func_handler(struct tep_handle *tep, char *func_name) { struct tep_function_handler *func; struct tep_function_handler **next; - next = &pevent->func_handlers; + next = &tep->func_handlers; while ((func = *next)) { if (strcmp(func->name, func_name) == 0) { *next = func->next; @@ -3076,7 +3075,7 @@ process_function(struct tep_event *event, struct tep_print_arg *arg, return process_dynamic_array_len(event, arg, tok); } - func = find_func_handler(event->pevent, token); + func = find_func_handler(event->tep, token); if (func) { free_token(token); return process_func_handler(event, func, arg, tok); @@ -3357,14 +3356,14 @@ tep_find_any_field(struct tep_event *event, const char *name) /** * tep_read_number - read a number from data - * @pevent: handle for the pevent + * @tep: a handle to the trace event parser context * @ptr: the raw data * @size: the size of the data that holds the number * * Returns the number (converted to host) from the * raw data. */ -unsigned long long tep_read_number(struct tep_handle *pevent, +unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size) { unsigned long long val; @@ -3373,12 +3372,12 @@ unsigned long long tep_read_number(struct tep_handle *pevent, case 1: return *(unsigned char *)ptr; case 2: - return tep_data2host2(pevent, *(unsigned short *)ptr); + return tep_data2host2(tep, *(unsigned short *)ptr); case 4: - return tep_data2host4(pevent, *(unsigned int *)ptr); + return tep_data2host4(tep, *(unsigned int *)ptr); case 8: memcpy(&val, (ptr), sizeof(unsigned long long)); - return tep_data2host8(pevent, val); + return tep_data2host8(tep, val); default: /* BUG! */ return 0; @@ -3406,7 +3405,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data, case 2: case 4: case 8: - *value = tep_read_number(field->event->pevent, + *value = tep_read_number(field->event->tep, data + field->offset, field->size); return 0; default: @@ -3414,7 +3413,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data, } } -static int get_common_info(struct tep_handle *pevent, +static int get_common_info(struct tep_handle *tep, const char *type, int *offset, int *size) { struct tep_event *event; @@ -3424,12 +3423,12 @@ static int get_common_info(struct tep_handle *pevent, * All events should have the same common elements. 
* Pick any event to find where the type is; */ - if (!pevent->events) { + if (!tep->events) { do_warning("no event_list!"); return -1; } - event = pevent->events[0]; + event = tep->events[0]; field = tep_find_common_field(event, type); if (!field) return -1; @@ -3440,58 +3439,58 @@ static int get_common_info(struct tep_handle *pevent, return 0; } -static int __parse_common(struct tep_handle *pevent, void *data, +static int __parse_common(struct tep_handle *tep, void *data, int *size, int *offset, const char *name) { int ret; if (!*size) { - ret = get_common_info(pevent, name, offset, size); + ret = get_common_info(tep, name, offset, size); if (ret < 0) return ret; } - return tep_read_number(pevent, data + *offset, *size); + return tep_read_number(tep, data + *offset, *size); } -static int trace_parse_common_type(struct tep_handle *pevent, void *data) +static int trace_parse_common_type(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->type_size, &pevent->type_offset, + return __parse_common(tep, data, + &tep->type_size, &tep->type_offset, "common_type"); } -static int parse_common_pid(struct tep_handle *pevent, void *data) +static int parse_common_pid(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->pid_size, &pevent->pid_offset, + return __parse_common(tep, data, + &tep->pid_size, &tep->pid_offset, "common_pid"); } -static int parse_common_pc(struct tep_handle *pevent, void *data) +static int parse_common_pc(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->pc_size, &pevent->pc_offset, + return __parse_common(tep, data, + &tep->pc_size, &tep->pc_offset, "common_preempt_count"); } -static int parse_common_flags(struct tep_handle *pevent, void *data) +static int parse_common_flags(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->flags_size, &pevent->flags_offset, + return __parse_common(tep, data, + &tep->flags_size, &tep->flags_offset, "common_flags"); } -static int parse_common_lock_depth(struct tep_handle *pevent, void *data) +static int parse_common_lock_depth(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->ld_size, &pevent->ld_offset, + return __parse_common(tep, data, + &tep->ld_size, &tep->ld_offset, "common_lock_depth"); } -static int parse_common_migrate_disable(struct tep_handle *pevent, void *data) +static int parse_common_migrate_disable(struct tep_handle *tep, void *data) { - return __parse_common(pevent, data, - &pevent->ld_size, &pevent->ld_offset, + return __parse_common(tep, data, + &tep->ld_size, &tep->ld_offset, "common_migrate_disable"); } @@ -3499,28 +3498,28 @@ static int events_id_cmp(const void *a, const void *b); /** * tep_find_event - find an event by given id - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @id: the id of the event * * Returns an event that has a given @id. 
*/ -struct tep_event *tep_find_event(struct tep_handle *pevent, int id) +struct tep_event *tep_find_event(struct tep_handle *tep, int id) { struct tep_event **eventptr; struct tep_event key; struct tep_event *pkey = &key; /* Check cache first */ - if (pevent->last_event && pevent->last_event->id == id) - return pevent->last_event; + if (tep->last_event && tep->last_event->id == id) + return tep->last_event; key.id = id; - eventptr = bsearch(&pkey, pevent->events, pevent->nr_events, - sizeof(*pevent->events), events_id_cmp); + eventptr = bsearch(&pkey, tep->events, tep->nr_events, + sizeof(*tep->events), events_id_cmp); if (eventptr) { - pevent->last_event = *eventptr; + tep->last_event = *eventptr; return *eventptr; } @@ -3529,7 +3528,7 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id) /** * tep_find_event_by_name - find an event by given name - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @sys: the system name to search for * @name: the name of the event to search for * @@ -3537,19 +3536,19 @@ struct tep_event *tep_find_event(struct tep_handle *pevent, int id) * @sys. If @sys is NULL the first event with @name is returned. */ struct tep_event * -tep_find_event_by_name(struct tep_handle *pevent, +tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name) { struct tep_event *event = NULL; int i; - if (pevent->last_event && - strcmp(pevent->last_event->name, name) == 0 && - (!sys || strcmp(pevent->last_event->system, sys) == 0)) - return pevent->last_event; + if (tep->last_event && + strcmp(tep->last_event->name, name) == 0 && + (!sys || strcmp(tep->last_event->system, sys) == 0)) + return tep->last_event; - for (i = 0; i < pevent->nr_events; i++) { - event = pevent->events[i]; + for (i = 0; i < tep->nr_events; i++) { + event = tep->events[i]; if (strcmp(event->name, name) == 0) { if (!sys) break; @@ -3557,17 +3556,17 @@ tep_find_event_by_name(struct tep_handle *pevent, break; } } - if (i == pevent->nr_events) + if (i == tep->nr_events) event = NULL; - pevent->last_event = event; + tep->last_event = event; return event; } static unsigned long long eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; unsigned long long val = 0; unsigned long long left, right; struct tep_print_arg *typearg = NULL; @@ -3589,7 +3588,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg } /* must be a number */ - val = tep_read_number(pevent, data + arg->field.field->offset, + val = tep_read_number(tep, data + arg->field.field->offset, arg->field.field->size); break; case TEP_PRINT_FLAGS: @@ -3629,11 +3628,11 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg } /* Default to long size */ - field_size = pevent->long_size; + field_size = tep->long_size; switch (larg->type) { case TEP_PRINT_DYNAMIC_ARRAY: - offset = tep_read_number(pevent, + offset = tep_read_number(tep, data + larg->dynarray.field->offset, larg->dynarray.field->size); if (larg->dynarray.field->elementsize) @@ -3662,7 +3661,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg default: goto default_op; /* oops, all bets off */ } - val = tep_read_number(pevent, + val = tep_read_number(tep, data + offset, field_size); if (typearg) val = eval_type(val, typearg, 1); @@ -3763,7 +3762,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct 
tep_print_arg } break; case TEP_PRINT_DYNAMIC_ARRAY_LEN: - offset = tep_read_number(pevent, + offset = tep_read_number(tep, data + arg->dynarray.field->offset, arg->dynarray.field->size); /* @@ -3775,7 +3774,7 @@ eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg break; case TEP_PRINT_DYNAMIC_ARRAY: /* Without [], we pass the address to the dynamic data */ - offset = tep_read_number(pevent, + offset = tep_read_number(tep, data + arg->dynarray.field->offset, arg->dynarray.field->size); /* @@ -3850,7 +3849,7 @@ static void print_str_to_seq(struct trace_seq *s, const char *format, trace_seq_printf(s, format, str); } -static void print_bitmask_to_seq(struct tep_handle *pevent, +static void print_bitmask_to_seq(struct tep_handle *tep, struct trace_seq *s, const char *format, int len_arg, const void *data, int size) { @@ -3882,7 +3881,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent, * In the kernel, this is an array of long words, thus * endianness is very important. */ - if (pevent->file_bigendian) + if (tep->file_bigendian) index = size - (len + 1); else index = len; @@ -3908,7 +3907,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct tep_event *event, const char *format, int len_arg, struct tep_print_arg *arg) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; struct tep_print_flag_sym *flag; struct tep_format_field *field; struct printk_map *printk; @@ -3945,7 +3944,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, * is a pointer. */ if (!(field->flags & TEP_FIELD_IS_ARRAY) && - field->size == pevent->long_size) { + field->size == tep->long_size) { /* Handle heterogeneous recording and processing * architectures @@ -3960,12 +3959,12 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, * on 32-bit devices: * In this case, 64 bits must be read. */ - addr = (pevent->long_size == 8) ? + addr = (tep->long_size == 8) ? 
*(unsigned long long *)(data + field->offset) : (unsigned long long)*(unsigned int *)(data + field->offset); /* Check if it matches a print format */ - printk = find_printk(pevent, addr); + printk = find_printk(tep, addr); if (printk) trace_seq_puts(s, printk->printk); else @@ -4022,7 +4021,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, case TEP_PRINT_HEX_STR: if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) { unsigned long offset; - offset = tep_read_number(pevent, + offset = tep_read_number(tep, data + arg->hex.field->dynarray.field->offset, arg->hex.field->dynarray.field->size); hex = data + (offset & 0xffff); @@ -4053,7 +4052,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, unsigned long offset; struct tep_format_field *field = arg->int_array.field->dynarray.field; - offset = tep_read_number(pevent, + offset = tep_read_number(tep, data + field->offset, field->size); num = data + (offset & 0xffff); @@ -4104,7 +4103,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, f = tep_find_any_field(event, arg->string.string); arg->string.offset = f->offset; } - str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset)); + str_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->string.offset)); str_offset &= 0xffff; print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset); break; @@ -4122,10 +4121,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, f = tep_find_any_field(event, arg->bitmask.bitmask); arg->bitmask.offset = f->offset; } - bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset)); + bitmask_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->bitmask.offset)); bitmask_size = bitmask_offset >> 16; bitmask_offset &= 0xffff; - print_bitmask_to_seq(pevent, s, format, len_arg, + print_bitmask_to_seq(tep, s, format, len_arg, data + bitmask_offset, bitmask_size); break; } @@ -4257,7 +4256,7 @@ static void free_args(struct tep_print_arg *args) static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; struct tep_format_field *field, *ip_field; struct tep_print_arg *args, *arg, **next; unsigned long long ip, val; @@ -4265,8 +4264,8 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s void *bptr; int vsize = 0; - field = pevent->bprint_buf_field; - ip_field = pevent->bprint_ip_field; + field = tep->bprint_buf_field; + ip_field = tep->bprint_ip_field; if (!field) { field = tep_find_field(event, "buf"); @@ -4279,11 +4278,11 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s do_warning_event(event, "can't find ip field for binary printk"); return NULL; } - pevent->bprint_buf_field = field; - pevent->bprint_ip_field = ip_field; + tep->bprint_buf_field = field; + tep->bprint_ip_field = ip_field; } - ip = tep_read_number(pevent, data + ip_field->offset, ip_field->size); + ip = tep_read_number(tep, data + ip_field->offset, ip_field->size); /* * The first arg is the IP pointer. 
@@ -4338,6 +4337,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s case 'S': case 'f': case 'F': + case 'x': break; default: /* @@ -4360,7 +4360,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s vsize = 4; break; case 1: - vsize = pevent->long_size; + vsize = tep->long_size; break; case 2: vsize = 8; @@ -4377,7 +4377,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s /* the pointers are always 4 bytes aligned */ bptr = (void *)(((unsigned long)bptr + 3) & ~3); - val = tep_read_number(pevent, bptr, vsize); + val = tep_read_number(tep, bptr, vsize); bptr += vsize; arg = alloc_arg(); if (!arg) { @@ -4434,13 +4434,13 @@ static char * get_bprint_format(void *data, int size __maybe_unused, struct tep_event *event) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; unsigned long long addr; struct tep_format_field *field; struct printk_map *printk; char *format; - field = pevent->bprint_fmt_field; + field = tep->bprint_fmt_field; if (!field) { field = tep_find_field(event, "fmt"); @@ -4448,12 +4448,12 @@ get_bprint_format(void *data, int size __maybe_unused, do_warning_event(event, "can't find format field for binary printk"); return NULL; } - pevent->bprint_fmt_field = field; + tep->bprint_fmt_field = field; } - addr = tep_read_number(pevent, data + field->offset, field->size); + addr = tep_read_number(tep, data + field->offset, field->size); - printk = find_printk(pevent, addr); + printk = find_printk(tep, addr); if (!printk) { if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0) return NULL; @@ -4835,13 +4835,13 @@ void tep_print_field(struct trace_seq *s, void *data, { unsigned long long val; unsigned int offset, len, i; - struct tep_handle *pevent = field->event->pevent; + struct tep_handle *tep = field->event->tep; if (field->flags & TEP_FIELD_IS_ARRAY) { offset = field->offset; len = field->size; if (field->flags & TEP_FIELD_IS_DYNAMIC) { - val = tep_read_number(pevent, data + offset, len); + val = tep_read_number(tep, data + offset, len); offset = val; len = offset >> 16; offset &= 0xffff; @@ -4861,7 +4861,7 @@ void tep_print_field(struct trace_seq *s, void *data, field->flags &= ~TEP_FIELD_IS_STRING; } } else { - val = tep_read_number(pevent, data + field->offset, + val = tep_read_number(tep, data + field->offset, field->size); if (field->flags & TEP_FIELD_IS_POINTER) { trace_seq_printf(s, "0x%llx", val); @@ -4910,7 +4910,7 @@ void tep_print_fields(struct trace_seq *s, void *data, static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; struct tep_print_fmt *print_fmt = &event->print_fmt; struct tep_print_arg *arg = print_fmt->args; struct tep_print_arg *args = NULL; @@ -5002,7 +5002,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e case '-': goto cont_process; case 'p': - if (pevent->long_size == 4) + if (tep->long_size == 4) ls = 1; else ls = 2; @@ -5063,7 +5063,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e arg = arg->next; if (show_func) { - func = find_func(pevent, val); + func = find_func(tep, val); if (func) { trace_seq_puts(s, func->func); if (show_func == 'F') @@ -5073,7 +5073,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e break; } } - if (pevent->long_size == 8 && ls == 1 && + if 
(tep->long_size == 8 && ls == 1 && sizeof(long) != 8) { char *p; @@ -5171,8 +5171,8 @@ out_failed: } /** - * tep_data_lat_fmt - parse the data for the latency format - * @pevent: a handle to the pevent + * tep_data_latency_format - parse the data for the latency format + * @tep: a handle to the trace event parser context * @s: the trace_seq to write to * @record: the record to read from * @@ -5180,8 +5180,8 @@ out_failed: * need rescheduling, in hard/soft interrupt, preempt count * and lock depth) and places it into the trace_seq. */ -void tep_data_lat_fmt(struct tep_handle *pevent, - struct trace_seq *s, struct tep_record *record) +void tep_data_latency_format(struct tep_handle *tep, + struct trace_seq *s, struct tep_record *record) { static int check_lock_depth = 1; static int check_migrate_disable = 1; @@ -5195,13 +5195,13 @@ void tep_data_lat_fmt(struct tep_handle *pevent, int softirq; void *data = record->data; - lat_flags = parse_common_flags(pevent, data); - pc = parse_common_pc(pevent, data); + lat_flags = parse_common_flags(tep, data); + pc = parse_common_pc(tep, data); /* lock_depth may not always exist */ if (lock_depth_exists) - lock_depth = parse_common_lock_depth(pevent, data); + lock_depth = parse_common_lock_depth(tep, data); else if (check_lock_depth) { - lock_depth = parse_common_lock_depth(pevent, data); + lock_depth = parse_common_lock_depth(tep, data); if (lock_depth < 0) check_lock_depth = 0; else @@ -5210,9 +5210,9 @@ void tep_data_lat_fmt(struct tep_handle *pevent, /* migrate_disable may not always exist */ if (migrate_disable_exists) - migrate_disable = parse_common_migrate_disable(pevent, data); + migrate_disable = parse_common_migrate_disable(tep, data); else if (check_migrate_disable) { - migrate_disable = parse_common_migrate_disable(pevent, data); + migrate_disable = parse_common_migrate_disable(tep, data); if (migrate_disable < 0) check_migrate_disable = 0; else @@ -5255,79 +5255,79 @@ void tep_data_lat_fmt(struct tep_handle *pevent, /** * tep_data_type - parse out the given event type - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @rec: the record to read from * * This returns the event id from the @rec. */ -int tep_data_type(struct tep_handle *pevent, struct tep_record *rec) +int tep_data_type(struct tep_handle *tep, struct tep_record *rec) { - return trace_parse_common_type(pevent, rec->data); + return trace_parse_common_type(tep, rec->data); } /** * tep_data_pid - parse the PID from record - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @rec: the record to parse * * This returns the PID from a record. */ -int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec) +int tep_data_pid(struct tep_handle *tep, struct tep_record *rec) { - return parse_common_pid(pevent, rec->data); + return parse_common_pid(tep, rec->data); } /** * tep_data_preempt_count - parse the preempt count from the record - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @rec: the record to parse * * This returns the preempt count from a record. 
*/ -int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec) +int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec) { - return parse_common_pc(pevent, rec->data); + return parse_common_pc(tep, rec->data); } /** * tep_data_flags - parse the latency flags from the record - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @rec: the record to parse * * This returns the latency flags from a record. * * Use trace_flag_type enum for the flags (see event-parse.h). */ -int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec) +int tep_data_flags(struct tep_handle *tep, struct tep_record *rec) { - return parse_common_flags(pevent, rec->data); + return parse_common_flags(tep, rec->data); } /** * tep_data_comm_from_pid - return the command line from PID - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @pid: the PID of the task to search for * * This returns a pointer to the command line that has the given * @pid. */ -const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid) +const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid) { const char *comm; - comm = find_cmdline(pevent, pid); + comm = find_cmdline(tep, pid); return comm; } static struct tep_cmdline * -pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next) +pid_from_cmdlist(struct tep_handle *tep, const char *comm, struct tep_cmdline *next) { struct cmdline_list *cmdlist = (struct cmdline_list *)next; if (cmdlist) cmdlist = cmdlist->next; else - cmdlist = pevent->cmdlist; + cmdlist = tep->cmdlist; while (cmdlist && strcmp(cmdlist->comm, comm) != 0) cmdlist = cmdlist->next; @@ -5337,7 +5337,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline /** * tep_data_pid_from_comm - return the pid from a given comm - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @comm: the cmdline to find the pid from * @next: the cmdline structure to find the next comm * @@ -5348,7 +5348,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline * next pid. * Also, it does a linear search, so it may be slow. */ -struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, +struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm, struct tep_cmdline *next) { struct tep_cmdline *cmdline; @@ -5357,25 +5357,25 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char * If the cmdlines have not been converted yet, then use * the list. 
*/ - if (!pevent->cmdlines) - return pid_from_cmdlist(pevent, comm, next); + if (!tep->cmdlines) + return pid_from_cmdlist(tep, comm, next); if (next) { /* * The next pointer could have been still from * a previous call before cmdlines were created */ - if (next < pevent->cmdlines || - next >= pevent->cmdlines + pevent->cmdline_count) + if (next < tep->cmdlines || + next >= tep->cmdlines + tep->cmdline_count) next = NULL; else cmdline = next++; } if (!next) - cmdline = pevent->cmdlines; + cmdline = tep->cmdlines; - while (cmdline < pevent->cmdlines + pevent->cmdline_count) { + while (cmdline < tep->cmdlines + tep->cmdline_count) { if (strcmp(cmdline->comm, comm) == 0) return cmdline; cmdline++; @@ -5385,12 +5385,13 @@ struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char /** * tep_cmdline_pid - return the pid associated to a given cmdline + * @tep: a handle to the trace event parser context * @cmdline: The cmdline structure to get the pid from * * Returns the pid for a give cmdline. If @cmdline is NULL, then * -1 is returned. */ -int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline) +int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline) { struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline; @@ -5401,9 +5402,9 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline) * If cmdlines have not been created yet, or cmdline is * not part of the array, then treat it as a cmdlist instead. */ - if (!pevent->cmdlines || - cmdline < pevent->cmdlines || - cmdline >= pevent->cmdlines + pevent->cmdline_count) + if (!tep->cmdlines || + cmdline < tep->cmdlines || + cmdline >= tep->cmdlines + tep->cmdline_count) return cmdlist->pid; return cmdline->pid; @@ -5423,7 +5424,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event *event, { int print_pretty = 1; - if (event->pevent->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW)) + if (event->tep->print_raw || (event->flags & TEP_EVENT_FL_PRINTRAW)) tep_print_fields(s, record->data, record->size, event); else { @@ -5444,7 +5445,8 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock) return true; if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global") - || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf")) + || !strcmp(trace_clock, "uptime") || !strcmp(trace_clock, "perf") + || !strncmp(trace_clock, "mono", 4)) return true; /* trace_clock is setting in tsc or counter mode */ @@ -5453,14 +5455,14 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock) /** * tep_find_event_by_record - return the event from a given record - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @record: The record to get the event from * * Returns the associated event for a given record, or NULL if non is * is found. 
*/ struct tep_event * -tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record) +tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record) { int type; @@ -5469,21 +5471,21 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record) return NULL; } - type = trace_parse_common_type(pevent, record->data); + type = trace_parse_common_type(tep, record->data); - return tep_find_event(pevent, type); + return tep_find_event(tep, type); } /** * tep_print_event_task - Write the event task comm, pid and CPU - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @s: the trace_seq to write to * @event: the handle to the record's event * @record: The record to get the event from * * Writes the tasks comm, pid and CPU to @s. */ -void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record) { @@ -5491,27 +5493,26 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s, const char *comm; int pid; - pid = parse_common_pid(pevent, data); - comm = find_cmdline(pevent, pid); + pid = parse_common_pid(tep, data); + comm = find_cmdline(tep, pid); - if (pevent->latency_format) { - trace_seq_printf(s, "%8.8s-%-5d %3d", - comm, pid, record->cpu); - } else + if (tep->latency_format) + trace_seq_printf(s, "%8.8s-%-5d %3d", comm, pid, record->cpu); + else trace_seq_printf(s, "%16s-%-5d [%03d]", comm, pid, record->cpu); } /** * tep_print_event_time - Write the event timestamp - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @s: the trace_seq to write to * @event: the handle to the record's event * @record: The record to get the event from - * @use_trace_clock: Set to parse according to the @pevent->trace_clock + * @use_trace_clock: Set to parse according to the @tep->trace_clock * * Writes the timestamp of the record into @s. */ -void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record, bool use_trace_clock) @@ -5522,19 +5523,18 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, int p; bool use_usec_format; - use_usec_format = is_timestamp_in_us(pevent->trace_clock, - use_trace_clock); + use_usec_format = is_timestamp_in_us(tep->trace_clock, use_trace_clock); if (use_usec_format) { secs = record->ts / NSEC_PER_SEC; nsecs = record->ts - secs * NSEC_PER_SEC; } - if (pevent->latency_format) { - tep_data_lat_fmt(pevent, s, record); + if (tep->latency_format) { + tep_data_latency_format(tep, s, record); } if (use_usec_format) { - if (pevent->flags & TEP_NSEC_OUTPUT) { + if (tep->flags & TEP_NSEC_OUTPUT) { usecs = nsecs; p = 9; } else { @@ -5554,14 +5554,14 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, /** * tep_print_event_data - Write the event data section - * @pevent: a handle to the pevent + * @tep: a handle to the trace event parser context * @s: the trace_seq to write to * @event: the handle to the record's event * @record: The record to get the event from * * Writes the parsing of the record's data to @s. 
*/ -void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record) { @@ -5578,15 +5578,15 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s, tep_event_info(s, event, record); } -void tep_print_event(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event(struct tep_handle *tep, struct trace_seq *s, struct tep_record *record, bool use_trace_clock) { struct tep_event *event; - event = tep_find_event_by_record(pevent, record); + event = tep_find_event_by_record(tep, record); if (!event) { int i; - int type = trace_parse_common_type(pevent, record->data); + int type = trace_parse_common_type(tep, record->data); do_warning("ug! no event found for type %d", type); trace_seq_printf(s, "[UNKNOWN TYPE %d]", type); @@ -5596,9 +5596,9 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s, return; } - tep_print_event_task(pevent, s, event, record); - tep_print_event_time(pevent, s, event, record, use_trace_clock); - tep_print_event_data(pevent, s, event, record); + tep_print_event_task(tep, s, event, record); + tep_print_event_time(tep, s, event, record, use_trace_clock); + tep_print_event_data(tep, s, event, record); } static int events_id_cmp(const void *a, const void *b) @@ -5649,32 +5649,26 @@ static int events_system_cmp(const void *a, const void *b) return events_id_cmp(a, b); } -struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type) +static struct tep_event **list_events_copy(struct tep_handle *tep) { struct tep_event **events; - int (*sort)(const void *a, const void *b); - - events = pevent->sort_events; - - if (events && pevent->last_type == sort_type) - return events; - if (!events) { - events = malloc(sizeof(*events) * (pevent->nr_events + 1)); - if (!events) - return NULL; + if (!tep) + return NULL; - memcpy(events, pevent->events, sizeof(*events) * pevent->nr_events); - events[pevent->nr_events] = NULL; + events = malloc(sizeof(*events) * (tep->nr_events + 1)); + if (!events) + return NULL; - pevent->sort_events = events; + memcpy(events, tep->events, sizeof(*events) * tep->nr_events); + events[tep->nr_events] = NULL; + return events; +} - /* the internal events are sorted by id */ - if (sort_type == TEP_EVENT_SORT_ID) { - pevent->last_type = sort_type; - return events; - } - } +static void list_events_sort(struct tep_event **events, int nr_events, + enum tep_event_sort_type sort_type) +{ + int (*sort)(const void *a, const void *b); switch (sort_type) { case TEP_EVENT_SORT_ID: @@ -5687,11 +5681,82 @@ struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sor sort = events_system_cmp; break; default: + sort = NULL; + } + + if (sort) + qsort(events, nr_events, sizeof(*events), sort); +} + +/** + * tep_list_events - Get events, sorted by given criteria. + * @tep: a handle to the tep context + * @sort_type: desired sort order of the events in the array + * + * Returns an array of pointers to all events, sorted by the given + * @sort_type criteria. The last element of the array is NULL. The returned + * memory must not be freed, it is managed by the library. + * The function is not thread safe. 
+ */ +struct tep_event **tep_list_events(struct tep_handle *tep, + enum tep_event_sort_type sort_type) +{ + struct tep_event **events; + + if (!tep) + return NULL; + + events = tep->sort_events; + if (events && tep->last_type == sort_type) return events; + + if (!events) { + events = list_events_copy(tep); + if (!events) + return NULL; + + tep->sort_events = events; + + /* the internal events are sorted by id */ + if (sort_type == TEP_EVENT_SORT_ID) { + tep->last_type = sort_type; + return events; + } } - qsort(events, pevent->nr_events, sizeof(*events), sort); - pevent->last_type = sort_type; + list_events_sort(events, tep->nr_events, sort_type); + tep->last_type = sort_type; + + return events; +} + + +/** + * tep_list_events_copy - Thread safe version of tep_list_events() + * @tep: a handle to the tep context + * @sort_type: desired sort order of the events in the array + * + * Returns an array of pointers to all events, sorted by the given + * @sort_type criteria. The last element of the array is NULL. The returned + * array is newly allocated inside the function and must be freed by the caller + */ +struct tep_event **tep_list_events_copy(struct tep_handle *tep, + enum tep_event_sort_type sort_type) +{ + struct tep_event **events; + + if (!tep) + return NULL; + + events = list_events_copy(tep); + if (!events) + return NULL; + + /* the internal events are sorted by id */ + if (sort_type == TEP_EVENT_SORT_ID) + return events; + + list_events_sort(events, tep->nr_events, sort_type); return events; } @@ -5950,7 +6015,7 @@ static void parse_header_field(const char *field, /** * tep_parse_header_page - parse the data stored in the header page - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @buf: the buffer storing the header page format string * @size: the size of @buf * @long_size: the long size to use if there is no header @@ -5960,7 +6025,7 @@ static void parse_header_field(const char *field, * * /sys/kernel/debug/tracing/events/header_page */ -int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size, +int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size, int long_size) { int ignore; @@ -5970,22 +6035,22 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si * Old kernels did not have header page info. * Sorry but we just use what we find here in user space. 
*/ - pevent->header_page_ts_size = sizeof(long long); - pevent->header_page_size_size = long_size; - pevent->header_page_data_offset = sizeof(long long) + long_size; - pevent->old_format = 1; + tep->header_page_ts_size = sizeof(long long); + tep->header_page_size_size = long_size; + tep->header_page_data_offset = sizeof(long long) + long_size; + tep->old_format = 1; return -1; } init_input_buf(buf, size); - parse_header_field("timestamp", &pevent->header_page_ts_offset, - &pevent->header_page_ts_size, 1); - parse_header_field("commit", &pevent->header_page_size_offset, - &pevent->header_page_size_size, 1); - parse_header_field("overwrite", &pevent->header_page_overwrite, + parse_header_field("timestamp", &tep->header_page_ts_offset, + &tep->header_page_ts_size, 1); + parse_header_field("commit", &tep->header_page_size_offset, + &tep->header_page_size_size, 1); + parse_header_field("overwrite", &tep->header_page_overwrite, &ignore, 0); - parse_header_field("data", &pevent->header_page_data_offset, - &pevent->header_page_data_size, 1); + parse_header_field("data", &tep->header_page_data_offset, + &tep->header_page_data_size, 1); return 0; } @@ -6013,11 +6078,11 @@ static void free_handler(struct event_handler *handle) free(handle); } -static int find_event_handle(struct tep_handle *pevent, struct tep_event *event) +static int find_event_handle(struct tep_handle *tep, struct tep_event *event) { struct event_handler *handle, **next; - for (next = &pevent->handlers; *next; + for (next = &tep->handlers; *next; next = &(*next)->next) { handle = *next; if (event_matches(event, handle->id, @@ -6055,7 +6120,7 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event *event) * /sys/kernel/debug/tracing/events/.../.../format */ enum tep_errno __tep_parse_format(struct tep_event **eventp, - struct tep_handle *pevent, const char *buf, + struct tep_handle *tep, const char *buf, unsigned long size, const char *sys) { struct tep_event *event; @@ -6097,8 +6162,8 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp, goto event_alloc_failed; } - /* Add pevent to event so that it can be referenced */ - event->pevent = pevent; + /* Add tep to event so that it can be referenced */ + event->tep = tep; ret = event_read_format(event); if (ret < 0) { @@ -6110,7 +6175,7 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp, * If the event has an override, don't print warnings if the event * print format fails to parse. 
*/ - if (pevent && find_event_handle(pevent, event)) + if (tep && find_event_handle(tep, event)) show_warning = 0; ret = event_read_print(event); @@ -6162,18 +6227,18 @@ enum tep_errno __tep_parse_format(struct tep_event **eventp, } static enum tep_errno -__parse_event(struct tep_handle *pevent, +__parse_event(struct tep_handle *tep, struct tep_event **eventp, const char *buf, unsigned long size, const char *sys) { - int ret = __tep_parse_format(eventp, pevent, buf, size, sys); + int ret = __tep_parse_format(eventp, tep, buf, size, sys); struct tep_event *event = *eventp; if (event == NULL) return ret; - if (pevent && add_event(pevent, event)) { + if (tep && add_event(tep, event)) { ret = TEP_ERRNO__MEM_ALLOC_FAILED; goto event_add_failed; } @@ -6191,7 +6256,7 @@ event_add_failed: /** * tep_parse_format - parse the event format - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @eventp: returned format * @buf: the buffer storing the event format string * @size: the size of @buf @@ -6204,17 +6269,17 @@ event_add_failed: * * /sys/kernel/debug/tracing/events/.../.../format */ -enum tep_errno tep_parse_format(struct tep_handle *pevent, +enum tep_errno tep_parse_format(struct tep_handle *tep, struct tep_event **eventp, const char *buf, unsigned long size, const char *sys) { - return __parse_event(pevent, eventp, buf, size, sys); + return __parse_event(tep, eventp, buf, size, sys); } /** * tep_parse_event - parse the event format - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @buf: the buffer storing the event format string * @size: the size of @buf * @sys: the system the event belongs to @@ -6226,11 +6291,11 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent, * * /sys/kernel/debug/tracing/events/.../.../format */ -enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf, +enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf, unsigned long size, const char *sys) { struct tep_event *event = NULL; - return __parse_event(pevent, &event, buf, size, sys); + return __parse_event(tep, &event, buf, size, sys); } int get_field_val(struct trace_seq *s, struct tep_format_field *field, @@ -6292,8 +6357,8 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event, offset = field->offset; if (field->flags & TEP_FIELD_IS_DYNAMIC) { - offset = tep_read_number(event->pevent, - data + offset, field->size); + offset = tep_read_number(event->tep, + data + offset, field->size); *len = offset >> 16; offset &= 0xffff; } else @@ -6386,7 +6451,8 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event, * @record: The record with the field name. * @err: print default error if failed. * - * Returns: 0 on success, -1 field not found, or 1 if buffer is full. + * Returns positive value on success, negative in case of an error, + * or 0 if buffer is full. */ int tep_print_num_field(struct trace_seq *s, const char *fmt, struct tep_event *event, const char *name, @@ -6418,14 +6484,15 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt, * @record: The record with the field name. * @err: print default error if failed. * - * Returns: 0 on success, -1 field not found, or 1 if buffer is full. + * Returns positive value on success, negative in case of an error, + * or 0 if buffer is full. 
*/ int tep_print_func_field(struct trace_seq *s, const char *fmt, struct tep_event *event, const char *name, struct tep_record *record, int err) { struct tep_format_field *field = tep_find_field(event, name); - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; unsigned long long val; struct func_map *func; char tmp[128]; @@ -6436,7 +6503,7 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt, if (tep_read_number_field(field, record->data, &val)) goto failed; - func = find_func(pevent, val); + func = find_func(tep, val); if (func) snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val); @@ -6468,7 +6535,7 @@ static void free_func_handle(struct tep_function_handler *func) /** * tep_register_print_function - register a helper function - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @func: the function to process the helper function * @ret_type: the return type of the helper function * @name: the name of the helper function @@ -6481,7 +6548,7 @@ static void free_func_handle(struct tep_function_handler *func) * The @parameters is a variable list of tep_func_arg_type enums that * must end with TEP_FUNC_ARG_VOID. */ -int tep_register_print_function(struct tep_handle *pevent, +int tep_register_print_function(struct tep_handle *tep, tep_func_handler func, enum tep_func_arg_type ret_type, char *name, ...) @@ -6493,7 +6560,7 @@ int tep_register_print_function(struct tep_handle *pevent, va_list ap; int ret; - func_handle = find_func_handler(pevent, name); + func_handle = find_func_handler(tep, name); if (func_handle) { /* * This is most like caused by the users own @@ -6501,7 +6568,7 @@ int tep_register_print_function(struct tep_handle *pevent, * system defaults. */ pr_stat("override of function helper '%s'", name); - remove_func_handler(pevent, name); + remove_func_handler(tep, name); } func_handle = calloc(1, sizeof(*func_handle)); @@ -6548,8 +6615,8 @@ int tep_register_print_function(struct tep_handle *pevent, } va_end(ap); - func_handle->next = pevent->func_handlers; - pevent->func_handlers = func_handle; + func_handle->next = tep->func_handlers; + tep->func_handlers = func_handle; return 0; out_free: @@ -6560,7 +6627,7 @@ int tep_register_print_function(struct tep_handle *pevent, /** * tep_unregister_print_function - unregister a helper function - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @func: the function to process the helper function * @name: the name of the helper function * @@ -6568,20 +6635,20 @@ int tep_register_print_function(struct tep_handle *pevent, * * Returns 0 if the handler was removed successully, -1 otherwise. 
*/ -int tep_unregister_print_function(struct tep_handle *pevent, +int tep_unregister_print_function(struct tep_handle *tep, tep_func_handler func, char *name) { struct tep_function_handler *func_handle; - func_handle = find_func_handler(pevent, name); + func_handle = find_func_handler(tep, name); if (func_handle && func_handle->func == func) { - remove_func_handler(pevent, name); + remove_func_handler(tep, name); return 0; } return -1; } -static struct tep_event *search_event(struct tep_handle *pevent, int id, +static struct tep_event *search_event(struct tep_handle *tep, int id, const char *sys_name, const char *event_name) { @@ -6589,7 +6656,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id, if (id >= 0) { /* search by id */ - event = tep_find_event(pevent, id); + event = tep_find_event(tep, id); if (!event) return NULL; if (event_name && (strcmp(event_name, event->name) != 0)) @@ -6597,7 +6664,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id, if (sys_name && (strcmp(sys_name, event->system) != 0)) return NULL; } else { - event = tep_find_event_by_name(pevent, sys_name, event_name); + event = tep_find_event_by_name(tep, sys_name, event_name); if (!event) return NULL; } @@ -6606,7 +6673,7 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id, /** * tep_register_event_handler - register a way to parse an event - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @id: the id of the event to register * @sys_name: the system name the event belongs to * @event_name: the name of the event @@ -6627,14 +6694,14 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id, * negative TEP_ERRNO_... in case of an error * */ -int tep_register_event_handler(struct tep_handle *pevent, int id, +int tep_register_event_handler(struct tep_handle *tep, int id, const char *sys_name, const char *event_name, tep_event_handler_func func, void *context) { struct tep_event *event; struct event_handler *handle; - event = search_event(pevent, id, sys_name, event_name); + event = search_event(tep, id, sys_name, event_name); if (event == NULL) goto not_found; @@ -6669,8 +6736,8 @@ int tep_register_event_handler(struct tep_handle *pevent, int id, } handle->func = func; - handle->next = pevent->handlers; - pevent->handlers = handle; + handle->next = tep->handlers; + tep->handlers = handle; handle->context = context; return TEP_REGISTER_SUCCESS; @@ -6697,7 +6764,7 @@ static int handle_matches(struct event_handler *handler, int id, /** * tep_unregister_event_handler - unregister an existing event handler - * @pevent: the handle to the pevent + * @tep: a handle to the trace event parser context * @id: the id of the event to unregister * @sys_name: the system name the handler belongs to * @event_name: the name of the event handler @@ -6711,7 +6778,7 @@ static int handle_matches(struct event_handler *handler, int id, * * Returns 0 if handler was removed successfully, -1 if event was not found. 
*/ -int tep_unregister_event_handler(struct tep_handle *pevent, int id, +int tep_unregister_event_handler(struct tep_handle *tep, int id, const char *sys_name, const char *event_name, tep_event_handler_func func, void *context) { @@ -6719,7 +6786,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id, struct event_handler *handle; struct event_handler **next; - event = search_event(pevent, id, sys_name, event_name); + event = search_event(tep, id, sys_name, event_name); if (event == NULL) goto not_found; @@ -6733,7 +6800,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id, } not_found: - for (next = &pevent->handlers; *next; next = &(*next)->next) { + for (next = &tep->handlers; *next; next = &(*next)->next) { handle = *next; if (handle_matches(handle, id, sys_name, event_name, func, context)) @@ -6750,23 +6817,23 @@ not_found: } /** - * tep_alloc - create a pevent handle + * tep_alloc - create a tep handle */ struct tep_handle *tep_alloc(void) { - struct tep_handle *pevent = calloc(1, sizeof(*pevent)); + struct tep_handle *tep = calloc(1, sizeof(*tep)); - if (pevent) { - pevent->ref_count = 1; - pevent->host_bigendian = tep_host_bigendian(); + if (tep) { + tep->ref_count = 1; + tep->host_bigendian = tep_is_bigendian(); } - return pevent; + return tep; } -void tep_ref(struct tep_handle *pevent) +void tep_ref(struct tep_handle *tep) { - pevent->ref_count++; + tep->ref_count++; } int tep_get_ref(struct tep_handle *tep) @@ -6816,10 +6883,10 @@ void tep_free_event(struct tep_event *event) } /** - * tep_free - free a pevent handle - * @pevent: the pevent handle to free + * tep_free - free a tep handle + * @tep: the tep handle to free */ -void tep_free(struct tep_handle *pevent) +void tep_free(struct tep_handle *tep) { struct cmdline_list *cmdlist, *cmdnext; struct func_list *funclist, *funcnext; @@ -6828,21 +6895,21 @@ void tep_free(struct tep_handle *pevent) struct event_handler *handle; int i; - if (!pevent) + if (!tep) return; - cmdlist = pevent->cmdlist; - funclist = pevent->funclist; - printklist = pevent->printklist; + cmdlist = tep->cmdlist; + funclist = tep->funclist; + printklist = tep->printklist; - pevent->ref_count--; - if (pevent->ref_count) + tep->ref_count--; + if (tep->ref_count) return; - if (pevent->cmdlines) { - for (i = 0; i < pevent->cmdline_count; i++) - free(pevent->cmdlines[i].comm); - free(pevent->cmdlines); + if (tep->cmdlines) { + for (i = 0; i < tep->cmdline_count; i++) + free(tep->cmdlines[i].comm); + free(tep->cmdlines); } while (cmdlist) { @@ -6852,12 +6919,12 @@ void tep_free(struct tep_handle *pevent) cmdlist = cmdnext; } - if (pevent->func_map) { - for (i = 0; i < (int)pevent->func_count; i++) { - free(pevent->func_map[i].func); - free(pevent->func_map[i].mod); + if (tep->func_map) { + for (i = 0; i < (int)tep->func_count; i++) { + free(tep->func_map[i].func); + free(tep->func_map[i].mod); } - free(pevent->func_map); + free(tep->func_map); } while (funclist) { @@ -6868,16 +6935,16 @@ void tep_free(struct tep_handle *pevent) funclist = funcnext; } - while (pevent->func_handlers) { - func_handler = pevent->func_handlers; - pevent->func_handlers = func_handler->next; + while (tep->func_handlers) { + func_handler = tep->func_handlers; + tep->func_handlers = func_handler->next; free_func_handle(func_handler); } - if (pevent->printk_map) { - for (i = 0; i < (int)pevent->printk_count; i++) - free(pevent->printk_map[i].printk); - free(pevent->printk_map); + if (tep->printk_map) { + for (i = 0; i < (int)tep->printk_count; i++) + 
free(tep->printk_map[i].printk); + free(tep->printk_map); } while (printklist) { @@ -6887,24 +6954,24 @@ void tep_free(struct tep_handle *pevent) printklist = printknext; } - for (i = 0; i < pevent->nr_events; i++) - tep_free_event(pevent->events[i]); + for (i = 0; i < tep->nr_events; i++) + tep_free_event(tep->events[i]); - while (pevent->handlers) { - handle = pevent->handlers; - pevent->handlers = handle->next; + while (tep->handlers) { + handle = tep->handlers; + tep->handlers = handle->next; free_handler(handle); } - free(pevent->trace_clock); - free(pevent->events); - free(pevent->sort_events); - free(pevent->func_resolver); + free(tep->trace_clock); + free(tep->events); + free(tep->sort_events); + free(tep->func_resolver); - free(pevent); + free(tep); } -void tep_unref(struct tep_handle *pevent) +void tep_unref(struct tep_handle *tep) { - tep_free(pevent); + tep_free(tep); } diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index aec48f2aea8a..642f68ab5fb2 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -64,8 +64,8 @@ typedef int (*tep_event_handler_func)(struct trace_seq *s, struct tep_event *event, void *context); -typedef int (*tep_plugin_load_func)(struct tep_handle *pevent); -typedef int (*tep_plugin_unload_func)(struct tep_handle *pevent); +typedef int (*tep_plugin_load_func)(struct tep_handle *tep); +typedef int (*tep_plugin_unload_func)(struct tep_handle *tep); struct tep_plugin_option { struct tep_plugin_option *next; @@ -85,12 +85,12 @@ struct tep_plugin_option { * TEP_PLUGIN_LOADER: (required) * The function name to initialized the plugin. * - * int TEP_PLUGIN_LOADER(struct tep_handle *pevent) + * int TEP_PLUGIN_LOADER(struct tep_handle *tep) * * TEP_PLUGIN_UNLOADER: (optional) * The function called just before unloading * - * int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) + * int TEP_PLUGIN_UNLOADER(struct tep_handle *tep) * * TEP_PLUGIN_OPTIONS: (optional) * Plugin options that can be set before loading @@ -278,7 +278,7 @@ struct tep_print_fmt { }; struct tep_event { - struct tep_handle *pevent; + struct tep_handle *tep; char *name; int id; int flags; @@ -393,9 +393,9 @@ struct tep_plugin_list; #define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1)) -struct tep_plugin_list *tep_load_plugins(struct tep_handle *pevent); +struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep); void tep_unload_plugins(struct tep_plugin_list *plugin_list, - struct tep_handle *pevent); + struct tep_handle *tep); char **tep_plugin_list_options(void); void tep_plugin_free_options_list(char **list); int tep_plugin_add_options(const char *name, @@ -409,8 +409,10 @@ void tep_print_plugins(struct trace_seq *s, typedef char *(tep_func_resolver_t)(void *priv, unsigned long long *addrp, char **modp); void tep_set_flag(struct tep_handle *tep, int flag); +void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag); +bool tep_test_flag(struct tep_handle *tep, enum tep_flag flags); -static inline int tep_host_bigendian(void) +static inline int tep_is_bigendian(void) { unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 }; unsigned int val; @@ -428,37 +430,37 @@ enum trace_flag_type { TRACE_FLAG_SOFTIRQ = 0x10, }; -int tep_set_function_resolver(struct tep_handle *pevent, +int tep_set_function_resolver(struct tep_handle *tep, tep_func_resolver_t *func, void *priv); -void tep_reset_function_resolver(struct tep_handle *pevent); -int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid); -int 
tep_override_comm(struct tep_handle *pevent, const char *comm, int pid); -int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock); -int tep_register_function(struct tep_handle *pevent, char *name, +void tep_reset_function_resolver(struct tep_handle *tep); +int tep_register_comm(struct tep_handle *tep, const char *comm, int pid); +int tep_override_comm(struct tep_handle *tep, const char *comm, int pid); +int tep_register_trace_clock(struct tep_handle *tep, const char *trace_clock); +int tep_register_function(struct tep_handle *tep, char *name, unsigned long long addr, char *mod); -int tep_register_print_string(struct tep_handle *pevent, const char *fmt, +int tep_register_print_string(struct tep_handle *tep, const char *fmt, unsigned long long addr); -int tep_pid_is_registered(struct tep_handle *pevent, int pid); +bool tep_is_pid_registered(struct tep_handle *tep, int pid); -void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_task(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record); -void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_time(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record, bool use_trace_clock); -void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event_data(struct tep_handle *tep, struct trace_seq *s, struct tep_event *event, struct tep_record *record); -void tep_print_event(struct tep_handle *pevent, struct trace_seq *s, +void tep_print_event(struct tep_handle *tep, struct trace_seq *s, struct tep_record *record, bool use_trace_clock); -int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size, +int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size, int long_size); -enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf, +enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf, unsigned long size, const char *sys); -enum tep_errno tep_parse_format(struct tep_handle *pevent, +enum tep_errno tep_parse_format(struct tep_handle *tep, struct tep_event **eventp, const char *buf, unsigned long size, const char *sys); @@ -490,50 +492,50 @@ enum tep_reg_handler { TEP_REGISTER_SUCCESS_OVERWRITE, }; -int tep_register_event_handler(struct tep_handle *pevent, int id, +int tep_register_event_handler(struct tep_handle *tep, int id, const char *sys_name, const char *event_name, tep_event_handler_func func, void *context); -int tep_unregister_event_handler(struct tep_handle *pevent, int id, +int tep_unregister_event_handler(struct tep_handle *tep, int id, const char *sys_name, const char *event_name, tep_event_handler_func func, void *context); -int tep_register_print_function(struct tep_handle *pevent, +int tep_register_print_function(struct tep_handle *tep, tep_func_handler func, enum tep_func_arg_type ret_type, char *name, ...); -int tep_unregister_print_function(struct tep_handle *pevent, +int tep_unregister_print_function(struct tep_handle *tep, tep_func_handler func, char *name); struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name); struct tep_format_field *tep_find_field(struct tep_event *event, const char *name); struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name); -const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr); +const char *tep_find_function(struct 
tep_handle *tep, unsigned long long addr); unsigned long long -tep_find_function_address(struct tep_handle *pevent, unsigned long long addr); -unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size); +tep_find_function_address(struct tep_handle *tep, unsigned long long addr); +unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size); int tep_read_number_field(struct tep_format_field *field, const void *data, unsigned long long *value); struct tep_event *tep_get_first_event(struct tep_handle *tep); int tep_get_events_count(struct tep_handle *tep); -struct tep_event *tep_find_event(struct tep_handle *pevent, int id); +struct tep_event *tep_find_event(struct tep_handle *tep, int id); struct tep_event * -tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name); +tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name); struct tep_event * -tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record); - -void tep_data_lat_fmt(struct tep_handle *pevent, - struct trace_seq *s, struct tep_record *record); -int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); -int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); -int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); -int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); -const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid); +tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record); + +void tep_data_latency_format(struct tep_handle *tep, + struct trace_seq *s, struct tep_record *record); +int tep_data_type(struct tep_handle *tep, struct tep_record *rec); +int tep_data_pid(struct tep_handle *tep, struct tep_record *rec); +int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec); +int tep_data_flags(struct tep_handle *tep, struct tep_record *rec); +const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid); struct tep_cmdline; -struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, +struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm, struct tep_cmdline *next); -int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline); +int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline); void tep_print_field(struct trace_seq *s, void *data, struct tep_format_field *field); @@ -541,10 +543,12 @@ void tep_print_fields(struct trace_seq *s, void *data, int size __maybe_unused, struct tep_event *event); void tep_event_info(struct trace_seq *s, struct tep_event *event, struct tep_record *record); -int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum, +int tep_strerror(struct tep_handle *tep, enum tep_errno errnum, char *buf, size_t buflen); -struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type); +struct tep_event **tep_list_events(struct tep_handle *tep, enum tep_event_sort_type); +struct tep_event **tep_list_events_copy(struct tep_handle *tep, + enum tep_event_sort_type); struct tep_format_field **tep_event_common_fields(struct tep_event *event); struct tep_format_field **tep_event_fields(struct tep_event *event); @@ -552,24 +556,28 @@ enum tep_endian { TEP_LITTLE_ENDIAN = 0, TEP_BIG_ENDIAN }; -int tep_get_cpus(struct tep_handle *pevent); -void tep_set_cpus(struct tep_handle *pevent, int cpus); -int tep_get_long_size(struct tep_handle *pevent); -void 
tep_set_long_size(struct tep_handle *pevent, int long_size); -int tep_get_page_size(struct tep_handle *pevent); -void tep_set_page_size(struct tep_handle *pevent, int _page_size); -int tep_file_bigendian(struct tep_handle *pevent); -void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian); -int tep_is_host_bigendian(struct tep_handle *pevent); -void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian); -int tep_is_latency_format(struct tep_handle *pevent); -void tep_set_latency_format(struct tep_handle *pevent, int lat); -int tep_get_header_page_size(struct tep_handle *pevent); +int tep_get_cpus(struct tep_handle *tep); +void tep_set_cpus(struct tep_handle *tep, int cpus); +int tep_get_long_size(struct tep_handle *tep); +void tep_set_long_size(struct tep_handle *tep, int long_size); +int tep_get_page_size(struct tep_handle *tep); +void tep_set_page_size(struct tep_handle *tep, int _page_size); +bool tep_is_file_bigendian(struct tep_handle *tep); +void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian); +bool tep_is_local_bigendian(struct tep_handle *tep); +void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian); +bool tep_is_latency_format(struct tep_handle *tep); +void tep_set_latency_format(struct tep_handle *tep, int lat); +int tep_get_header_page_size(struct tep_handle *tep); +int tep_get_header_timestamp_size(struct tep_handle *tep); +bool tep_is_old_format(struct tep_handle *tep); +void tep_set_print_raw(struct tep_handle *tep, int print_raw); +void tep_set_test_filters(struct tep_handle *tep, int test_filters); struct tep_handle *tep_alloc(void); -void tep_free(struct tep_handle *pevent); -void tep_ref(struct tep_handle *pevent); -void tep_unref(struct tep_handle *pevent); +void tep_free(struct tep_handle *tep); +void tep_ref(struct tep_handle *tep); +void tep_unref(struct tep_handle *tep); int tep_get_ref(struct tep_handle *tep); /* access to the internal parser */ @@ -581,8 +589,8 @@ const char *tep_get_input_buf(void); unsigned long long tep_get_input_buf_ptr(void); /* for debugging */ -void tep_print_funcs(struct tep_handle *pevent); -void tep_print_printk(struct tep_handle *pevent); +void tep_print_funcs(struct tep_handle *tep); +void tep_print_printk(struct tep_handle *tep); /* ----------------------- filtering ----------------------- */ @@ -709,13 +717,13 @@ struct tep_filter_type { #define TEP_FILTER_ERROR_BUFSZ 1024 struct tep_event_filter { - struct tep_handle *pevent; + struct tep_handle *tep; int filters; struct tep_filter_type *event_filters; char error_buffer[TEP_FILTER_ERROR_BUFSZ]; }; -struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent); +struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep); /* for backward compatibility */ #define FILTER_NONE TEP_ERRNO__NO_FILTER @@ -723,12 +731,6 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent); #define FILTER_MISS TEP_ERRNO__FILTER_MISS #define FILTER_MATCH TEP_ERRNO__FILTER_MATCH -enum tep_filter_trivial_type { - TEP_FILTER_TRIVIAL_FALSE, - TEP_FILTER_TRIVIAL_TRUE, - TEP_FILTER_TRIVIAL_BOTH, -}; - enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter, const char *filter_str); @@ -743,9 +745,6 @@ int tep_event_filtered(struct tep_event_filter *filter, void tep_filter_reset(struct tep_event_filter *filter); -int tep_filter_clear_trivial(struct tep_event_filter *filter, - enum tep_filter_trivial_type type); - void tep_filter_free(struct tep_event_filter *filter); char 
*tep_filter_make_string(struct tep_event_filter *filter, int event_id); @@ -753,15 +752,8 @@ char *tep_filter_make_string(struct tep_event_filter *filter, int event_id); int tep_filter_remove_event(struct tep_event_filter *filter, int event_id); -int tep_filter_event_has_trivial(struct tep_event_filter *filter, - int event_id, - enum tep_filter_trivial_type type); - int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source); -int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source, - enum tep_filter_trivial_type type); - int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2); #endif /* _PARSE_EVENTS_H */ diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c index e74f16c88398..8ca28de9337a 100644 --- a/tools/lib/traceevent/event-plugin.c +++ b/tools/lib/traceevent/event-plugin.c @@ -269,7 +269,7 @@ void tep_print_plugins(struct trace_seq *s, } static void -load_plugin(struct tep_handle *pevent, const char *path, +load_plugin(struct tep_handle *tep, const char *path, const char *file, void *data) { struct tep_plugin_list **plugin_list = data; @@ -316,7 +316,7 @@ load_plugin(struct tep_handle *pevent, const char *path, *plugin_list = list; pr_stat("registering plugin: %s", plugin); - func(pevent); + func(tep); return; out_free: @@ -324,9 +324,9 @@ load_plugin(struct tep_handle *pevent, const char *path, } static void -load_plugins_dir(struct tep_handle *pevent, const char *suffix, +load_plugins_dir(struct tep_handle *tep, const char *suffix, const char *path, - void (*load_plugin)(struct tep_handle *pevent, + void (*load_plugin)(struct tep_handle *tep, const char *path, const char *name, void *data), @@ -359,15 +359,15 @@ load_plugins_dir(struct tep_handle *pevent, const char *suffix, if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0) continue; - load_plugin(pevent, path, name, data); + load_plugin(tep, path, name, data); } closedir(dir); } static void -load_plugins(struct tep_handle *pevent, const char *suffix, - void (*load_plugin)(struct tep_handle *pevent, +load_plugins(struct tep_handle *tep, const char *suffix, + void (*load_plugin)(struct tep_handle *tep, const char *path, const char *name, void *data), @@ -378,7 +378,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix, char *envdir; int ret; - if (pevent->flags & TEP_DISABLE_PLUGINS) + if (tep->flags & TEP_DISABLE_PLUGINS) return; /* @@ -386,8 +386,8 @@ load_plugins(struct tep_handle *pevent, const char *suffix, * check that first. 
*/ #ifdef PLUGIN_DIR - if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS)) - load_plugins_dir(pevent, suffix, PLUGIN_DIR, + if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS)) + load_plugins_dir(tep, suffix, PLUGIN_DIR, load_plugin, data); #endif @@ -397,7 +397,7 @@ load_plugins(struct tep_handle *pevent, const char *suffix, */ envdir = getenv("TRACEEVENT_PLUGIN_DIR"); if (envdir) - load_plugins_dir(pevent, suffix, envdir, load_plugin, data); + load_plugins_dir(tep, suffix, envdir, load_plugin, data); /* * Now let the home directory override the environment @@ -413,22 +413,22 @@ load_plugins(struct tep_handle *pevent, const char *suffix, return; } - load_plugins_dir(pevent, suffix, path, load_plugin, data); + load_plugins_dir(tep, suffix, path, load_plugin, data); free(path); } struct tep_plugin_list* -tep_load_plugins(struct tep_handle *pevent) +tep_load_plugins(struct tep_handle *tep) { struct tep_plugin_list *list = NULL; - load_plugins(pevent, ".so", load_plugin, &list); + load_plugins(tep, ".so", load_plugin, &list); return list; } void -tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *pevent) +tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep) { tep_plugin_unload_func func; struct tep_plugin_list *list; @@ -438,7 +438,7 @@ tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *peven plugin_list = list->next; func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME); if (func) - func(pevent); + func(tep); dlclose(list->handle); free(list->name); free(list); diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c index af2a1f3b7424..b887e7437d67 100644 --- a/tools/lib/traceevent/kbuffer-parse.c +++ b/tools/lib/traceevent/kbuffer-parse.c @@ -727,3 +727,52 @@ int kbuffer_start_of_data(struct kbuffer *kbuf) { return kbuf->start; } + +/** + * kbuffer_raw_get - get raw buffer info + * @kbuf: The kbuffer + * @subbuf: Start of mapped subbuffer + * @info: Info descriptor to fill in + * + * For debugging. This can return internals of the ring buffer. + * Expects to have info->next set to what it will read. + * The type, length and timestamp delta will be filled in, and + * @info->next will be updated to the next element. + * The @subbuf is used to know if the info is passed the end of + * data and NULL will be returned if it is. 
+ */ +struct kbuffer_raw_info * +kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info) +{ + unsigned long long flags; + unsigned long long delta; + unsigned int type_len; + unsigned int size; + int start; + int length; + void *ptr = info->next; + + if (!kbuf || !subbuf) + return NULL; + + if (kbuf->flags & KBUFFER_FL_LONG_8) + start = 16; + else + start = 12; + + flags = read_long(kbuf, subbuf + 8); + size = (unsigned int)flags & COMMIT_MASK; + + if (ptr < subbuf || ptr >= subbuf + start + size) + return NULL; + + type_len = translate_data(kbuf, ptr, &ptr, &delta, &length); + + info->next = ptr + length; + + info->type = type_len; + info->delta = delta; + info->length = length; + + return info; +} diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h index 03dce757553f..ed4d697fc137 100644 --- a/tools/lib/traceevent/kbuffer.h +++ b/tools/lib/traceevent/kbuffer.h @@ -65,4 +65,17 @@ int kbuffer_subbuffer_size(struct kbuffer *kbuf); void kbuffer_set_old_format(struct kbuffer *kbuf); int kbuffer_start_of_data(struct kbuffer *kbuf); +/* Debugging */ + +struct kbuffer_raw_info { + int type; + int length; + unsigned long long delta; + void *next; +}; + +/* Read raw data */ +struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, + struct kbuffer_raw_info *info); + #endif /* _K_BUFFER_H */ diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c index cb5ce66dab6e..552592d153fb 100644 --- a/tools/lib/traceevent/parse-filter.c +++ b/tools/lib/traceevent/parse-filter.c @@ -154,7 +154,7 @@ add_filter_type(struct tep_event_filter *filter, int id) filter_type = &filter->event_filters[i]; filter_type->event_id = id; - filter_type->event = tep_find_event(filter->pevent, id); + filter_type->event = tep_find_event(filter->tep, id); filter_type->filter = NULL; filter->filters++; @@ -164,9 +164,9 @@ add_filter_type(struct tep_event_filter *filter, int id) /** * tep_filter_alloc - create a new event filter - * @pevent: The pevent that this filter is associated with + * @tep: The tep that this filter is associated with */ -struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent) +struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep) { struct tep_event_filter *filter; @@ -175,8 +175,8 @@ struct tep_event_filter *tep_filter_alloc(struct tep_handle *pevent) return NULL; memset(filter, 0, sizeof(*filter)); - filter->pevent = pevent; - tep_ref(pevent); + filter->tep = tep; + tep_ref(tep); return filter; } @@ -256,7 +256,7 @@ static int event_match(struct tep_event *event, } static enum tep_errno -find_event(struct tep_handle *pevent, struct event_list **events, +find_event(struct tep_handle *tep, struct event_list **events, char *sys_name, char *event_name) { struct tep_event *event; @@ -299,8 +299,8 @@ find_event(struct tep_handle *pevent, struct event_list **events, } } - for (i = 0; i < pevent->nr_events; i++) { - event = pevent->events[i]; + for (i = 0; i < tep->nr_events; i++) { + event = tep->events[i]; if (event_match(event, sys_name ? 
&sreg : NULL, &ereg)) { match = 1; if (add_event(events, event) < 0) { @@ -1257,7 +1257,7 @@ static void filter_init_error_buf(struct tep_event_filter *filter) enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter, const char *filter_str) { - struct tep_handle *pevent = filter->pevent; + struct tep_handle *tep = filter->tep; struct event_list *event; struct event_list *events = NULL; const char *filter_start; @@ -1313,7 +1313,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter, } /* Find this event */ - ret = find_event(pevent, &events, strim(sys_name), strim(event_name)); + ret = find_event(tep, &events, strim(sys_name), strim(event_name)); if (ret < 0) { free_events(events); free(this_event); @@ -1334,7 +1334,7 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter, if (ret < 0) rtn = ret; - if (ret >= 0 && pevent->test_filters) { + if (ret >= 0 && tep->test_filters) { char *test; test = tep_filter_make_string(filter, event->event->id); if (test) { @@ -1346,9 +1346,6 @@ enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter, free_events(events); - if (rtn >= 0 && pevent->test_filters) - exit(0); - return rtn; } @@ -1380,7 +1377,7 @@ int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err, return 0; } - return tep_strerror(filter->pevent, err, buf, buflen); + return tep_strerror(filter->tep, err, buf, buflen); } /** @@ -1443,7 +1440,7 @@ void tep_filter_reset(struct tep_event_filter *filter) void tep_filter_free(struct tep_event_filter *filter) { - tep_unref(filter->pevent); + tep_unref(filter->tep); tep_filter_reset(filter); @@ -1462,10 +1459,10 @@ static int copy_filter_type(struct tep_event_filter *filter, const char *name; char *str; - /* Can't assume that the pevent's are the same */ + /* Can't assume that the tep's are the same */ sys = filter_type->event->system; name = filter_type->event->name; - event = tep_find_event_by_name(filter->pevent, sys, name); + event = tep_find_event_by_name(filter->tep, sys, name); if (!event) return -1; @@ -1522,167 +1519,6 @@ int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *sour return ret; } - -/** - * tep_update_trivial - update the trivial filters with the given filter - * @dest - the filter to update - * @source - the filter as the source of the update - * @type - the type of trivial filter to update. - * - * Scan dest for trivial events matching @type to replace with the source. - * - * Returns 0 on success and -1 if there was a problem updating, but - * events may have still been updated on error. 
- */ -int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *source, - enum tep_filter_trivial_type type) -{ - struct tep_handle *src_pevent; - struct tep_handle *dest_pevent; - struct tep_event *event; - struct tep_filter_type *filter_type; - struct tep_filter_arg *arg; - char *str; - int i; - - src_pevent = source->pevent; - dest_pevent = dest->pevent; - - /* Do nothing if either of the filters has nothing to filter */ - if (!dest->filters || !source->filters) - return 0; - - for (i = 0; i < dest->filters; i++) { - filter_type = &dest->event_filters[i]; - arg = filter_type->filter; - if (arg->type != TEP_FILTER_ARG_BOOLEAN) - continue; - if ((arg->boolean.value && type == TEP_FILTER_TRIVIAL_FALSE) || - (!arg->boolean.value && type == TEP_FILTER_TRIVIAL_TRUE)) - continue; - - event = filter_type->event; - - if (src_pevent != dest_pevent) { - /* do a look up */ - event = tep_find_event_by_name(src_pevent, - event->system, - event->name); - if (!event) - return -1; - } - - str = tep_filter_make_string(source, event->id); - if (!str) - continue; - - /* Don't bother if the filter is trivial too */ - if (strcmp(str, "TRUE") != 0 && strcmp(str, "FALSE") != 0) - filter_event(dest, event, str, NULL); - free(str); - } - return 0; -} - -/** - * tep_filter_clear_trivial - clear TRUE and FALSE filters - * @filter: the filter to remove trivial filters from - * @type: remove only true, false, or both - * - * Removes filters that only contain a TRUE or FALES boolean arg. - * - * Returns 0 on success and -1 if there was a problem. - */ -int tep_filter_clear_trivial(struct tep_event_filter *filter, - enum tep_filter_trivial_type type) -{ - struct tep_filter_type *filter_type; - int count = 0; - int *ids = NULL; - int i; - - if (!filter->filters) - return 0; - - /* - * Two steps, first get all ids with trivial filters. - * then remove those ids. - */ - for (i = 0; i < filter->filters; i++) { - int *new_ids; - - filter_type = &filter->event_filters[i]; - if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN) - continue; - switch (type) { - case TEP_FILTER_TRIVIAL_FALSE: - if (filter_type->filter->boolean.value) - continue; - break; - case TEP_FILTER_TRIVIAL_TRUE: - if (!filter_type->filter->boolean.value) - continue; - default: - break; - } - - new_ids = realloc(ids, sizeof(*ids) * (count + 1)); - if (!new_ids) { - free(ids); - return -1; - } - - ids = new_ids; - ids[count++] = filter_type->event_id; - } - - if (!count) - return 0; - - for (i = 0; i < count; i++) - tep_filter_remove_event(filter, ids[i]); - - free(ids); - return 0; -} - -/** - * tep_filter_event_has_trivial - return true event contains trivial filter - * @filter: the filter with the information - * @event_id: the id of the event to test - * @type: trivial type to test for (TRUE, FALSE, EITHER) - * - * Returns 1 if the event contains a matching trivial type - * otherwise 0. 
- */ -int tep_filter_event_has_trivial(struct tep_event_filter *filter, - int event_id, - enum tep_filter_trivial_type type) -{ - struct tep_filter_type *filter_type; - - if (!filter->filters) - return 0; - - filter_type = find_filter_type(filter, event_id); - - if (!filter_type) - return 0; - - if (filter_type->filter->type != TEP_FILTER_ARG_BOOLEAN) - return 0; - - switch (type) { - case TEP_FILTER_TRIVIAL_FALSE: - return !filter_type->filter->boolean.value; - - case TEP_FILTER_TRIVIAL_TRUE: - return filter_type->filter->boolean.value; - default: - return 1; - } -} - static int test_filter(struct tep_event *event, struct tep_filter_arg *arg, struct tep_record *record, enum tep_errno *err); @@ -1692,8 +1528,8 @@ get_comm(struct tep_event *event, struct tep_record *record) const char *comm; int pid; - pid = tep_data_pid(event->pevent, record); - comm = tep_data_comm_from_pid(event->pevent, pid); + pid = tep_data_pid(event->tep, record); + comm = tep_data_comm_from_pid(event->tep, pid); return comm; } @@ -1861,7 +1697,7 @@ static int test_num(struct tep_event *event, struct tep_filter_arg *arg, static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record) { struct tep_event *event; - struct tep_handle *pevent; + struct tep_handle *tep; unsigned long long addr; const char *val = NULL; unsigned int size; @@ -1891,12 +1727,12 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record * } else { event = arg->str.field->event; - pevent = event->pevent; + tep = event->tep; addr = get_value(event, arg->str.field, record); if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG)) /* convert to a kernel symbol */ - val = tep_find_function(pevent, addr); + val = tep_find_function(tep, addr); if (val == NULL) { /* just use the hex of the string name */ @@ -2036,7 +1872,7 @@ int tep_event_filtered(struct tep_event_filter *filter, int event_id) enum tep_errno tep_filter_match(struct tep_event_filter *filter, struct tep_record *record) { - struct tep_handle *pevent = filter->pevent; + struct tep_handle *tep = filter->tep; struct tep_filter_type *filter_type; int event_id; int ret; @@ -2047,7 +1883,7 @@ enum tep_errno tep_filter_match(struct tep_event_filter *filter, if (!filter->filters) return TEP_ERRNO__NO_FILTER; - event_id = tep_data_type(pevent, record); + event_id = tep_data_type(tep, record); filter_type = find_filter_type(filter, event_id); if (!filter_type) @@ -2409,14 +2245,6 @@ int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter break; if (filter_type1->filter->type != filter_type2->filter->type) break; - switch (filter_type1->filter->type) { - case TEP_FILTER_TRIVIAL_FALSE: - case TEP_FILTER_TRIVIAL_TRUE: - /* trivial types just need the type compared */ - continue; - default: - break; - } /* The best way to compare complex filters is with strings */ str1 = arg_to_str(filter1, filter_type1->filter); str2 = arg_to_str(filter2, filter_type2->filter); diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c index 77e4ec6402dd..e99867111387 100644 --- a/tools/lib/traceevent/parse-utils.c +++ b/tools/lib/traceevent/parse-utils.c @@ -14,7 +14,7 @@ void __vwarning(const char *fmt, va_list ap) { if (errno) - perror("trace-cmd"); + perror("libtraceevent"); errno = 0; fprintf(stderr, " "); diff --git a/tools/lib/traceevent/plugin_cfg80211.c b/tools/lib/traceevent/plugin_cfg80211.c index a51b366f47da..3d43b56a6c98 100644 --- a/tools/lib/traceevent/plugin_cfg80211.c +++ 
b/tools/lib/traceevent/plugin_cfg80211.c @@ -25,9 +25,9 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args) return val ? (long long) le16toh(*val) : 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_print_function(pevent, + tep_register_print_function(tep, process___le16_to_cpup, TEP_FUNC_ARG_INT, "__le16_to_cpup", @@ -36,8 +36,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_print_function(pevent, process___le16_to_cpup, + tep_unregister_print_function(tep, process___le16_to_cpup, "__le16_to_cpup"); } diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c index a73eca34a8f9..7770fcb78e0f 100644 --- a/tools/lib/traceevent/plugin_function.c +++ b/tools/lib/traceevent/plugin_function.c @@ -126,7 +126,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu) static int function_handler(struct trace_seq *s, struct tep_record *record, struct tep_event *event, void *context) { - struct tep_handle *pevent = event->pevent; + struct tep_handle *tep = event->tep; unsigned long long function; unsigned long long pfunction; const char *func; @@ -136,12 +136,12 @@ static int function_handler(struct trace_seq *s, struct tep_record *record, if (tep_get_field_val(s, event, "ip", record, &function, 1)) return trace_seq_putc(s, '!'); - func = tep_find_function(pevent, function); + func = tep_find_function(tep, function); if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1)) return trace_seq_putc(s, '!'); - parent = tep_find_function(pevent, pfunction); + parent = tep_find_function(tep, pfunction); if (parent && ftrace_indent->set) index = add_and_get_index(parent, func, record->cpu); @@ -164,9 +164,9 @@ static int function_handler(struct trace_seq *s, struct tep_record *record, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_event_handler(pevent, -1, "ftrace", "function", + tep_register_event_handler(tep, -1, "ftrace", "function", function_handler, NULL); tep_plugin_add_options("ftrace", plugin_options); @@ -174,11 +174,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { int i, x; - tep_unregister_event_handler(pevent, -1, "ftrace", "function", + tep_unregister_event_handler(tep, -1, "ftrace", "function", function_handler, NULL); for (i = 0; i <= cpus; i++) { diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c index 5db5e401275f..bb434e0ed03a 100644 --- a/tools/lib/traceevent/plugin_hrtimer.c +++ b/tools/lib/traceevent/plugin_hrtimer.c @@ -67,23 +67,23 @@ static int timer_start_handler(struct trace_seq *s, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_event_handler(pevent, -1, + tep_register_event_handler(tep, -1, "timer", "hrtimer_expire_entry", timer_expire_handler, NULL); - tep_register_event_handler(pevent, -1, "timer", "hrtimer_start", + tep_register_event_handler(tep, -1, "timer", "hrtimer_start", timer_start_handler, NULL); return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_event_handler(pevent, -1, + 
tep_unregister_event_handler(tep, -1, "timer", "hrtimer_expire_entry", timer_expire_handler, NULL); - tep_unregister_event_handler(pevent, -1, "timer", "hrtimer_start", + tep_unregister_event_handler(tep, -1, "timer", "hrtimer_start", timer_start_handler, NULL); } diff --git a/tools/lib/traceevent/plugin_jbd2.c b/tools/lib/traceevent/plugin_jbd2.c index a5e34135dd6a..04fc125f38cb 100644 --- a/tools/lib/traceevent/plugin_jbd2.c +++ b/tools/lib/traceevent/plugin_jbd2.c @@ -48,16 +48,16 @@ process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args) return jiffies; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_print_function(pevent, + tep_register_print_function(tep, process_jbd2_dev_to_name, TEP_FUNC_ARG_STRING, "jbd2_dev_to_name", TEP_FUNC_ARG_INT, TEP_FUNC_ARG_VOID); - tep_register_print_function(pevent, + tep_register_print_function(tep, process_jiffies_to_msecs, TEP_FUNC_ARG_LONG, "jiffies_to_msecs", @@ -66,11 +66,11 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_print_function(pevent, process_jbd2_dev_to_name, + tep_unregister_print_function(tep, process_jbd2_dev_to_name, "jbd2_dev_to_name"); - tep_unregister_print_function(pevent, process_jiffies_to_msecs, + tep_unregister_print_function(tep, process_jiffies_to_msecs, "jiffies_to_msecs"); } diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c index 0e3c601f9ed1..edaec5d962c3 100644 --- a/tools/lib/traceevent/plugin_kmem.c +++ b/tools/lib/traceevent/plugin_kmem.c @@ -39,57 +39,57 @@ static int call_site_handler(struct trace_seq *s, struct tep_record *record, if (tep_read_number_field(field, data, &val)) return 1; - func = tep_find_function(event->pevent, val); + func = tep_find_function(event->tep, val); if (!func) return 1; - addr = tep_find_function_address(event->pevent, val); + addr = tep_find_function_address(event->tep, val); trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr)); return 1; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_event_handler(pevent, -1, "kmem", "kfree", + tep_register_event_handler(tep, -1, "kmem", "kfree", call_site_handler, NULL); - tep_register_event_handler(pevent, -1, "kmem", "kmalloc", + tep_register_event_handler(tep, -1, "kmem", "kmalloc", call_site_handler, NULL); - tep_register_event_handler(pevent, -1, "kmem", "kmalloc_node", + tep_register_event_handler(tep, -1, "kmem", "kmalloc_node", call_site_handler, NULL); - tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc", + tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc", call_site_handler, NULL); - tep_register_event_handler(pevent, -1, "kmem", + tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc_node", call_site_handler, NULL); - tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_free", + tep_register_event_handler(tep, -1, "kmem", "kmem_cache_free", call_site_handler, NULL); return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_event_handler(pevent, -1, "kmem", "kfree", + tep_unregister_event_handler(tep, -1, "kmem", "kfree", call_site_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc", + tep_unregister_event_handler(tep, -1, "kmem", "kmalloc", call_site_handler, NULL); - 
tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node", + tep_unregister_event_handler(tep, -1, "kmem", "kmalloc_node", call_site_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc", + tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc", call_site_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kmem", + tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc_node", call_site_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free", + tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_free", call_site_handler, NULL); } diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c index 64b9c25a1fd3..c8e623065a7e 100644 --- a/tools/lib/traceevent/plugin_kvm.c +++ b/tools/lib/traceevent/plugin_kvm.c @@ -389,8 +389,8 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record, * We can only use the structure if file is of the same * endianness. */ - if (tep_file_bigendian(event->pevent) == - tep_is_host_bigendian(event->pevent)) { + if (tep_is_file_bigendian(event->tep) == + tep_is_local_bigendian(event->tep)) { trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s", role.level, @@ -445,40 +445,40 @@ process_is_writable_pte(struct trace_seq *s, unsigned long long *args) return pte & PT_WRITABLE_MASK; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { init_disassembler(); - tep_register_event_handler(pevent, -1, "kvm", "kvm_exit", + tep_register_event_handler(tep, -1, "kvm", "kvm_exit", kvm_exit_handler, NULL); - tep_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn", + tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn", kvm_emulate_insn_handler, NULL); - tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit", + tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit", kvm_nested_vmexit_handler, NULL); - tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject", + tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject", kvm_nested_vmexit_inject_handler, NULL); - tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page", + tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page", kvm_mmu_get_page_handler, NULL); - tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page", + tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page", kvm_mmu_print_role, NULL); - tep_register_event_handler(pevent, -1, + tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_unsync_page", kvm_mmu_print_role, NULL); - tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page", + tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page", kvm_mmu_print_role, NULL); - tep_register_event_handler(pevent, -1, "kvmmmu", + tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_prepare_zap_page", kvm_mmu_print_role, NULL); - tep_register_print_function(pevent, + tep_register_print_function(tep, process_is_writable_pte, TEP_FUNC_ARG_INT, "is_writable_pte", @@ -487,37 +487,37 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_event_handler(pevent, -1, "kvm", "kvm_exit", + tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit", kvm_exit_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn", + tep_unregister_event_handler(tep, -1, "kvm", 
"kvm_emulate_insn", kvm_emulate_insn_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit", + tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit", kvm_nested_vmexit_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject", + tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject", kvm_nested_vmexit_inject_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page", + tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page", kvm_mmu_get_page_handler, NULL); - tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page", + tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page", kvm_mmu_print_role, NULL); - tep_unregister_event_handler(pevent, -1, + tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_unsync_page", kvm_mmu_print_role, NULL); - tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page", + tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page", kvm_mmu_print_role, NULL); - tep_unregister_event_handler(pevent, -1, "kvmmmu", + tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_prepare_zap_page", kvm_mmu_print_role, NULL); - tep_unregister_print_function(pevent, process_is_writable_pte, + tep_unregister_print_function(tep, process_is_writable_pte, "is_writable_pte"); } diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c index e38b9477aad2..884303c26b5c 100644 --- a/tools/lib/traceevent/plugin_mac80211.c +++ b/tools/lib/traceevent/plugin_mac80211.c @@ -87,17 +87,17 @@ static int drv_bss_info_changed(struct trace_seq *s, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_event_handler(pevent, -1, "mac80211", + tep_register_event_handler(tep, -1, "mac80211", "drv_bss_info_changed", drv_bss_info_changed, NULL); return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_event_handler(pevent, -1, "mac80211", + tep_unregister_event_handler(tep, -1, "mac80211", "drv_bss_info_changed", drv_bss_info_changed, NULL); } diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c index 834c9e378ff8..957389a0ff7a 100644 --- a/tools/lib/traceevent/plugin_sched_switch.c +++ b/tools/lib/traceevent/plugin_sched_switch.c @@ -62,7 +62,7 @@ static void write_and_save_comm(struct tep_format_field *field, comm = &s->buffer[len]; /* Help out the comm to ids. 
This will handle dups */ - tep_register_comm(field->event->pevent, comm, pid); + tep_register_comm(field->event->tep, comm, pid); } static int sched_wakeup_handler(struct trace_seq *s, @@ -135,27 +135,27 @@ static int sched_switch_handler(struct trace_seq *s, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_event_handler(pevent, -1, "sched", "sched_switch", + tep_register_event_handler(tep, -1, "sched", "sched_switch", sched_switch_handler, NULL); - tep_register_event_handler(pevent, -1, "sched", "sched_wakeup", + tep_register_event_handler(tep, -1, "sched", "sched_wakeup", sched_wakeup_handler, NULL); - tep_register_event_handler(pevent, -1, "sched", "sched_wakeup_new", + tep_register_event_handler(tep, -1, "sched", "sched_wakeup_new", sched_wakeup_handler, NULL); return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_event_handler(pevent, -1, "sched", "sched_switch", + tep_unregister_event_handler(tep, -1, "sched", "sched_switch", sched_switch_handler, NULL); - tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup", + tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup", sched_wakeup_handler, NULL); - tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new", + tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup_new", sched_wakeup_handler, NULL); } diff --git a/tools/lib/traceevent/plugin_scsi.c b/tools/lib/traceevent/plugin_scsi.c index 4eba25cc1431..5d0387a4b65a 100644 --- a/tools/lib/traceevent/plugin_scsi.c +++ b/tools/lib/traceevent/plugin_scsi.c @@ -414,9 +414,9 @@ unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_print_function(pevent, + tep_register_print_function(tep, process_scsi_trace_parse_cdb, TEP_FUNC_ARG_STRING, "scsi_trace_parse_cdb", @@ -427,8 +427,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_print_function(pevent, process_scsi_trace_parse_cdb, + tep_unregister_print_function(tep, process_scsi_trace_parse_cdb, "scsi_trace_parse_cdb"); } diff --git a/tools/lib/traceevent/plugin_xen.c b/tools/lib/traceevent/plugin_xen.c index bc0496e4c296..993b208d0323 100644 --- a/tools/lib/traceevent/plugin_xen.c +++ b/tools/lib/traceevent/plugin_xen.c @@ -120,9 +120,9 @@ unsigned long long process_xen_hypercall_name(struct trace_seq *s, return 0; } -int TEP_PLUGIN_LOADER(struct tep_handle *pevent) +int TEP_PLUGIN_LOADER(struct tep_handle *tep) { - tep_register_print_function(pevent, + tep_register_print_function(tep, process_xen_hypercall_name, TEP_FUNC_ARG_STRING, "xen_hypercall_name", @@ -131,8 +131,8 @@ int TEP_PLUGIN_LOADER(struct tep_handle *pevent) return 0; } -void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent) +void TEP_PLUGIN_UNLOADER(struct tep_handle *tep) { - tep_unregister_print_function(pevent, process_xen_hypercall_name, + tep_unregister_print_function(tep, process_xen_hypercall_name, "xen_hypercall_name"); } diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt index 35bff92cc773..68caa9a976d0 100644 --- a/tools/memory-model/Documentation/explanation.txt +++ b/tools/memory-model/Documentation/explanation.txt @@ -27,7 +27,7 @@ Explanation 
of the Linux-Kernel Memory Consistency Model 19. AND THEN THERE WAS ALPHA 20. THE HAPPENS-BEFORE RELATION: hb 21. THE PROPAGATES-BEFORE RELATION: pb - 22. RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb + 22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb 23. LOCKING 24. ODDS AND ENDS @@ -1430,8 +1430,8 @@ they execute means that it cannot have cycles. This requirement is the content of the LKMM's "propagation" axiom. -RCU RELATIONS: rcu-link, gp, rscs, rcu-fence, and rb ----------------------------------------------------- +RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-fence, and rb +------------------------------------------------------------- RCU (Read-Copy-Update) is a powerful synchronization mechanism. It rests on two concepts: grace periods and read-side critical sections. @@ -1446,17 +1446,19 @@ As far as memory models are concerned, RCU's main feature is its Grace-Period Guarantee, which states that a critical section can never span a full grace period. In more detail, the Guarantee says: - If a critical section starts before a grace period then it - must end before the grace period does. In addition, every - store that propagates to the critical section's CPU before the - end of the critical section must propagate to every CPU before - the end of the grace period. + For any critical section C and any grace period G, at least + one of the following statements must hold: - If a critical section ends after a grace period ends then it - must start after the grace period does. In addition, every - store that propagates to the grace period's CPU before the - start of the grace period must propagate to every CPU before - the start of the critical section. +(1) C ends before G does, and in addition, every store that + propagates to C's CPU before the end of C must propagate to + every CPU before G ends. + +(2) G starts before C does, and in addition, every store that + propagates to G's CPU before the start of G must propagate + to every CPU before C starts. + +In particular, it is not possible for a critical section to both start +before and end after a grace period. Here is a simple example of RCU in action: @@ -1483,10 +1485,11 @@ The Grace Period Guarantee tells us that when this code runs, it will never end with r1 = 1 and r2 = 0. The reasoning is as follows. r1 = 1 means that P0's store to x propagated to P1 before P1 called synchronize_rcu(), so P0's critical section must have started before -P1's grace period. On the other hand, r2 = 0 means that P0's store to -y, which occurs before the end of the critical section, did not -propagate to P1 before the end of the grace period, violating the -Guarantee. +P1's grace period, contrary to part (2) of the Guarantee. On the +other hand, r2 = 0 means that P0's store to y, which occurs before the +end of the critical section, did not propagate to P1 before the end of +the grace period, contrary to part (1). Together the results violate +the Guarantee. In the kernel's implementations of RCU, the requirements for stores to propagate to every CPU are fulfilled by placing strong fences at @@ -1504,11 +1507,11 @@ before" or "ends after" a grace period? Some aspects of the meaning are pretty obvious, as in the example above, but the details aren't entirely clear. The LKMM formalizes this notion by means of the rcu-link relation. 
rcu-link encompasses a very general notion of -"before": Among other things, X ->rcu-link Z includes cases where X -happens-before or is equal to some event Y which is equal to or comes -before Z in the coherence order. When Y = Z this says that X ->rfe Z -implies X ->rcu-link Z. In addition, when Y = X it says that X ->fr Z -and X ->co Z each imply X ->rcu-link Z. +"before": If E and F are RCU fence events (i.e., rcu_read_lock(), +rcu_read_unlock(), or synchronize_rcu()) then among other things, +E ->rcu-link F includes cases where E is po-before some memory-access +event X, F is po-after some memory-access event Y, and we have any of +X ->rfe Y, X ->co Y, or X ->fr Y. The formal definition of the rcu-link relation is more than a little obscure, and we won't give it here. It is closely related to the pb @@ -1516,171 +1519,173 @@ relation, and the details don't matter unless you want to comb through a somewhat lengthy formal proof. Pretty much all you need to know about rcu-link is the information in the preceding paragraph. -The LKMM also defines the gp and rscs relations. They bring grace -periods and read-side critical sections into the picture, in the +The LKMM also defines the rcu-gp and rcu-rscsi relations. They bring +grace periods and read-side critical sections into the picture, in the following way: - E ->gp F means there is a synchronize_rcu() fence event S such - that E ->po S and either S ->po F or S = F. In simple terms, - there is a grace period po-between E and F. + E ->rcu-gp F means that E and F are in fact the same event, + and that event is a synchronize_rcu() fence (i.e., a grace + period). - E ->rscs F means there is a critical section delimited by an - rcu_read_lock() fence L and an rcu_read_unlock() fence U, such - that E ->po U and either L ->po F or L = F. You can think of - this as saying that E and F are in the same critical section - (in fact, it also allows E to be po-before the start of the - critical section and F to be po-after the end). + E ->rcu-rscsi F means that E and F are the rcu_read_unlock() + and rcu_read_lock() fence events delimiting some read-side + critical section. (The 'i' at the end of the name emphasizes + that this relation is "inverted": It links the end of the + critical section to the start.) If we think of the rcu-link relation as standing for an extended -"before", then X ->gp Y ->rcu-link Z says that X executes before a -grace period which ends before Z executes. (In fact it covers more -than this, because it also includes cases where X executes before a -grace period and some store propagates to Z's CPU before Z executes -but doesn't propagate to some other CPU until after the grace period -ends.) Similarly, X ->rscs Y ->rcu-link Z says that X is part of (or -before the start of) a critical section which starts before Z -executes. - -The LKMM goes on to define the rcu-fence relation as a sequence of gp -and rscs links separated by rcu-link links, in which the number of gp -links is >= the number of rscs links. For example: +"before", then X ->rcu-gp Y ->rcu-link Z roughly says that X is a +grace period which ends before Z begins. (In fact it covers more than +this, because it also includes cases where some store propagates to +Z's CPU before Z begins but doesn't propagate to some other CPU until +after X ends.) Similarly, X ->rcu-rscsi Y ->rcu-link Z says that X is +the end of a critical section which starts before Z begins. 
+ +The LKMM goes on to define the rcu-fence relation as a sequence of +rcu-gp and rcu-rscsi links separated by rcu-link links, in which the +number of rcu-gp links is >= the number of rcu-rscsi links. For +example: - X ->gp Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V + X ->rcu-gp Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V would imply that X ->rcu-fence V, because this sequence contains two -gp links and only one rscs link. (It also implies that X ->rcu-fence T -and Z ->rcu-fence V.) On the other hand: +rcu-gp links and one rcu-rscsi link. (It also implies that +X ->rcu-fence T and Z ->rcu-fence V.) On the other hand: - X ->rscs Y ->rcu-link Z ->rscs T ->rcu-link U ->gp V + X ->rcu-rscsi Y ->rcu-link Z ->rcu-rscsi T ->rcu-link U ->rcu-gp V does not imply X ->rcu-fence V, because the sequence contains only -one gp link but two rscs links. +one rcu-gp link but two rcu-rscsi links. The rcu-fence relation is important because the Grace Period Guarantee means that rcu-fence acts kind of like a strong fence. In particular, -if W is a write and we have W ->rcu-fence Z, the Guarantee says that W -will propagate to every CPU before Z executes. +E ->rcu-fence F implies not only that E begins before F ends, but also +that any write po-before E will propagate to every CPU before any +instruction po-after F can execute. (However, it does not imply that +E must execute before F; in fact, each synchronize_rcu() fence event +is linked to itself by rcu-fence as a degenerate case.) To prove this in full generality requires some intellectual effort. We'll consider just a very simple case: - W ->gp X ->rcu-link Y ->rscs Z. + G ->rcu-gp W ->rcu-link Z ->rcu-rscsi F. -This formula means that there is a grace period G and a critical -section C such that: +This formula means that G and W are the same event (a grace period), +and there are events X, Y and a read-side critical section C such that: - 1. W is po-before G; + 1. G = W is po-before or equal to X; - 2. X is equal to or po-after G; + 2. X comes "before" Y in some sense (including rfe, co and fr); - 3. X comes "before" Y in some sense; + 2. Y is po-before Z; - 4. Y is po-before the end of C; + 4. Z is the rcu_read_unlock() event marking the end of C; - 5. Z is equal to or po-after the start of C. + 5. F is the rcu_read_lock() event marking the start of C. -From 2 - 4 we deduce that the grace period G ends before the critical -section C. Then the second part of the Grace Period Guarantee says -not only that G starts before C does, but also that W (which executes -on G's CPU before G starts) must propagate to every CPU before C -starts. In particular, W propagates to every CPU before Z executes -(or finishes executing, in the case where Z is equal to the -rcu_read_lock() fence event which starts C.) This sort of reasoning -can be expanded to handle all the situations covered by rcu-fence. +From 1 - 4 we deduce that the grace period G ends before the critical +section C. Then part (2) of the Grace Period Guarantee says not only +that G starts before C does, but also that any write which executes on +G's CPU before G starts must propagate to every CPU before C starts. +In particular, the write propagates to every CPU before F finishes +executing and hence before any instruction po-after F can execute. +This sort of reasoning can be extended to handle all the situations +covered by rcu-fence. Finally, the LKMM defines the RCU-before (rb) relation in terms of rcu-fence. 
This is done in essentially the same way as the pb relation was defined in terms of strong-fence. We will omit the -details; the end result is that E ->rb F implies E must execute before -F, just as E ->pb F does (and for much the same reasons). +details; the end result is that E ->rb F implies E must execute +before F, just as E ->pb F does (and for much the same reasons). Putting this all together, the LKMM expresses the Grace Period Guarantee by requiring that the rb relation does not contain a cycle. -Equivalently, this "rcu" axiom requires that there are no events E and -F with E ->rcu-link F ->rcu-fence E. Or to put it a third way, the -axiom requires that there are no cycles consisting of gp and rscs -alternating with rcu-link, where the number of gp links is >= the -number of rscs links. +Equivalently, this "rcu" axiom requires that there are no events E +and F with E ->rcu-link F ->rcu-fence E. Or to put it a third way, +the axiom requires that there are no cycles consisting of rcu-gp and +rcu-rscsi alternating with rcu-link, where the number of rcu-gp links +is >= the number of rcu-rscsi links. Justifying the axiom isn't easy, but it is in fact a valid formalization of the Grace Period Guarantee. We won't attempt to go through the detailed argument, but the following analysis gives a -taste of what is involved. Suppose we have a violation of the first -part of the Guarantee: A critical section starts before a grace -period, and some store propagates to the critical section's CPU before -the end of the critical section but doesn't propagate to some other -CPU until after the end of the grace period. +taste of what is involved. Suppose both parts of the Guarantee are +violated: A critical section starts before a grace period, and some +store propagates to the critical section's CPU before the end of the +critical section but doesn't propagate to some other CPU until after +the end of the grace period. Putting symbols to these ideas, let L and U be the rcu_read_lock() and rcu_read_unlock() fence events delimiting the critical section in question, and let S be the synchronize_rcu() fence event for the grace period. Saying that the critical section starts before S means there -are events E and F where E is po-after L (which marks the start of the -critical section), E is "before" F in the sense of the rcu-link -relation, and F is po-before the grace period S: +are events Q and R where Q is po-after L (which marks the start of the +critical section), Q is "before" R in the sense used by the rcu-link +relation, and R is po-before the grace period S. Thus we have: - L ->po E ->rcu-link F ->po S. + L ->rcu-link S. -Let W be the store mentioned above, let Z come before the end of the +Let W be the store mentioned above, let Y come before the end of the critical section and witness that W propagates to the critical -section's CPU by reading from W, and let Y on some arbitrary CPU be a -witness that W has not propagated to that CPU, where Y happens after +section's CPU by reading from W, and let Z on some arbitrary CPU be a +witness that W has not propagated to that CPU, where Z happens after some event X which is po-after S. Symbolically, this amounts to: - S ->po X ->hb* Y ->fr W ->rf Z ->po U. + S ->po X ->hb* Z ->fr W ->rf Y ->po U. -The fr link from Y to W indicates that W has not propagated to Y's CPU -at the time that Y executes. 
From this, it can be shown (see the -discussion of the rcu-link relation earlier) that X and Z are related -by rcu-link, yielding: +The fr link from Z to W indicates that W has not propagated to Z's CPU +at the time that Z executes. From this, it can be shown (see the +discussion of the rcu-link relation earlier) that S and U are related +by rcu-link: - S ->po X ->rcu-link Z ->po U. + S ->rcu-link U. -The formulas say that S is po-between F and X, hence F ->gp X. They -also say that Z comes before the end of the critical section and E -comes after its start, hence Z ->rscs E. From all this we obtain: +Since S is a grace period we have S ->rcu-gp S, and since L and U are +the start and end of the critical section C we have U ->rcu-rscsi L. +From this we obtain: - F ->gp X ->rcu-link Z ->rscs E ->rcu-link F, + S ->rcu-gp S ->rcu-link U ->rcu-rscsi L ->rcu-link S, a forbidden cycle. Thus the "rcu" axiom rules out this violation of the Grace Period Guarantee. For something a little more down-to-earth, let's see how the axiom works out in practice. Consider the RCU code example from above, this -time with statement labels added to the memory access instructions: +time with statement labels added: int x, y; P0() { - rcu_read_lock(); - W: WRITE_ONCE(x, 1); - X: WRITE_ONCE(y, 1); - rcu_read_unlock(); + L: rcu_read_lock(); + X: WRITE_ONCE(x, 1); + Y: WRITE_ONCE(y, 1); + U: rcu_read_unlock(); } P1() { int r1, r2; - Y: r1 = READ_ONCE(x); - synchronize_rcu(); - Z: r2 = READ_ONCE(y); + Z: r1 = READ_ONCE(x); + S: synchronize_rcu(); + W: r2 = READ_ONCE(y); } -If r2 = 0 at the end then P0's store at X overwrites the value that -P1's load at Z reads from, so we have Z ->fre X and thus Z ->rcu-link X. -In addition, there is a synchronize_rcu() between Y and Z, so therefore -we have Y ->gp Z. +If r2 = 0 at the end then P0's store at Y overwrites the value that +P1's load at W reads from, so we have W ->fre Y. Since S ->po W and +also Y ->po U, we get S ->rcu-link U. In addition, S ->rcu-gp S +because S is a grace period. -If r1 = 1 at the end then P1's load at Y reads from P0's store at W, -so we have W ->rcu-link Y. In addition, W and X are in the same critical -section, so therefore we have X ->rscs W. +If r1 = 1 at the end then P1's load at Z reads from P0's store at X, +so we have X ->rfe Z. Together with L ->po X and Z ->po S, this +yields L ->rcu-link S. And since L and U are the start and end of a +critical section, we have U ->rcu-rscsi L. -Then X ->rscs W ->rcu-link Y ->gp Z ->rcu-link X is a forbidden cycle, -violating the "rcu" axiom. Hence the outcome is not allowed by the -LKMM, as we would expect. +Then U ->rcu-rscsi L ->rcu-link S ->rcu-gp S ->rcu-link U is a +forbidden cycle, violating the "rcu" axiom. Hence the outcome is not +allowed by the LKMM, as we would expect. 
For contrast, let's see what can happen in a more complicated example: @@ -1690,51 +1695,52 @@ For contrast, let's see what can happen in a more complicated example: { int r0; - rcu_read_lock(); - W: r0 = READ_ONCE(x); - X: WRITE_ONCE(y, 1); - rcu_read_unlock(); + L0: rcu_read_lock(); + r0 = READ_ONCE(x); + WRITE_ONCE(y, 1); + U0: rcu_read_unlock(); } P1() { int r1; - Y: r1 = READ_ONCE(y); - synchronize_rcu(); - Z: WRITE_ONCE(z, 1); + r1 = READ_ONCE(y); + S1: synchronize_rcu(); + WRITE_ONCE(z, 1); } P2() { int r2; - rcu_read_lock(); - U: r2 = READ_ONCE(z); - V: WRITE_ONCE(x, 1); - rcu_read_unlock(); + L2: rcu_read_lock(); + r2 = READ_ONCE(z); + WRITE_ONCE(x, 1); + U2: rcu_read_unlock(); } If r0 = r1 = r2 = 1 at the end, then similar reasoning to before shows -that W ->rscs X ->rcu-link Y ->gp Z ->rcu-link U ->rscs V ->rcu-link W. -However this cycle is not forbidden, because the sequence of relations -contains fewer instances of gp (one) than of rscs (two). Consequently -the outcome is allowed by the LKMM. The following instruction timing -diagram shows how it might actually occur: +that U0 ->rcu-rscsi L0 ->rcu-link S1 ->rcu-gp S1 ->rcu-link U2 ->rcu-rscsi +L2 ->rcu-link U0. However this cycle is not forbidden, because the +sequence of relations contains fewer instances of rcu-gp (one) than of +rcu-rscsi (two). Consequently the outcome is allowed by the LKMM. +The following instruction timing diagram shows how it might actually +occur: P0 P1 P2 -------------------- -------------------- -------------------- rcu_read_lock() -X: WRITE_ONCE(y, 1) - Y: r1 = READ_ONCE(y) +WRITE_ONCE(y, 1) + r1 = READ_ONCE(y) synchronize_rcu() starts . rcu_read_lock() - . V: WRITE_ONCE(x, 1) -W: r0 = READ_ONCE(x) . + . WRITE_ONCE(x, 1) +r0 = READ_ONCE(x) . rcu_read_unlock() . synchronize_rcu() ends - Z: WRITE_ONCE(z, 1) - U: r2 = READ_ONCE(z) + WRITE_ONCE(z, 1) + r2 = READ_ONCE(z) rcu_read_unlock() This requires P0 and P2 to execute their loads and stores out of @@ -1744,6 +1750,15 @@ section in P0 both starts before P1's grace period does and ends before it does, and the critical section in P2 both starts after P1's grace period does and ends after it does. +Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in +addition to normal RCU. The ideas involved are much the same as +above, with new relations srcu-gp and srcu-rscsi added to represent +SRCU grace periods and read-side critical sections. There is a +restriction on the srcu-gp and srcu-rscsi links that can appear in an +rcu-fence sequence (the srcu-rscsi links must be paired with srcu-gp +links having the same SRCU domain with proper nesting); the details +are relatively unimportant. + LOCKING ------- diff --git a/tools/memory-model/README b/tools/memory-model/README index 0f2c366518c6..2b87f3971548 100644 --- a/tools/memory-model/README +++ b/tools/memory-model/README @@ -20,13 +20,17 @@ that litmus test to be exercised within the Linux kernel. REQUIREMENTS ============ -Version 7.49 of the "herd7" and "klitmus7" tools must be downloaded -separately: +Version 7.52 or higher of the "herd7" and "klitmus7" tools must be +downloaded separately: https://github.com/herd/herdtools7 See "herdtools7/INSTALL.md" for installation instructions. +Note that although these tools usually provide backwards compatibility, +this is not absolutely guaranteed. Therefore, if a later version does +not work, please try using the exact version called out above. 
+ ================== BASIC USAGE: HERD7 @@ -221,8 +225,29 @@ The Linux-kernel memory model has the following limitations: additional call_rcu() process to the site of the emulated rcu-barrier(). - e. Sleepable RCU (SRCU) is not modeled. It can be - emulated, but perhaps not simply. + e. Although sleepable RCU (SRCU) is now modeled, there + are some subtle differences between its semantics and + those in the Linux kernel. For example, the kernel + might interpret the following sequence as two partially + overlapping SRCU read-side critical sections: + + 1 r1 = srcu_read_lock(&my_srcu); + 2 do_something_1(); + 3 r2 = srcu_read_lock(&my_srcu); + 4 do_something_2(); + 5 srcu_read_unlock(&my_srcu, r1); + 6 do_something_3(); + 7 srcu_read_unlock(&my_srcu, r2); + + In contrast, LKMM will interpret this as a nested pair of + SRCU read-side critical sections, with the outer critical + section spanning lines 1-7 and the inner critical section + spanning lines 3-5. + + This difference would be more of a concern had anyone + identified a reasonable use case for partially overlapping + SRCU read-side critical sections. For more information, + please see: https://paulmck.livejournal.com/40593.html f. Reader-writer locking is not modeled. It can be emulated in litmus tests using atomic read-modify-write diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell index 796513362c05..def9131d3d8e 100644 --- a/tools/memory-model/linux-kernel.bell +++ b/tools/memory-model/linux-kernel.bell @@ -33,8 +33,14 @@ enum Barriers = 'wmb (*smp_wmb*) || 'after-unlock-lock (*smp_mb__after_unlock_lock*) instructions F[Barriers] +(* SRCU *) +enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu +instructions SRCU[SRCU] +(* All srcu events *) +let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu + (* Compute matching pairs of nested Rcu-lock and Rcu-unlock *) -let matched = let rec +let rcu-rscs = let rec unmatched-locks = Rcu-lock \ domain(matched) and unmatched-unlocks = Rcu-unlock \ range(matched) and unmatched = unmatched-locks | unmatched-unlocks @@ -46,8 +52,27 @@ let matched = let rec in matched (* Validate nesting *) -flag ~empty Rcu-lock \ domain(matched) as unbalanced-rcu-locking -flag ~empty Rcu-unlock \ range(matched) as unbalanced-rcu-locking +flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking +flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking + +(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *) +let srcu-rscs = let rec + unmatched-locks = Srcu-lock \ domain(matched) + and unmatched-unlocks = Srcu-unlock \ range(matched) + and unmatched = unmatched-locks | unmatched-unlocks + and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc + and unmatched-locks-to-unlocks = + ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc + and matched = matched | (unmatched-locks-to-unlocks \ + (unmatched-po ; unmatched-po)) + in matched + +(* Validate nesting *) +flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking +flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking + +(* Check for use of synchronize_srcu() inside an RCU critical section *) +flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep -(* Outermost level of nesting only *) -let crit = matched \ (po^-1 ; matched ; po^-1) +(* Validate SRCU dynamic match *) +flag ~empty different-values(srcu-rscs) as srcu-bad-nesting diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat index 8f23c74a96fd..8dcb37835b61 100644 --- 
a/tools/memory-model/linux-kernel.cat +++ b/tools/memory-model/linux-kernel.cat @@ -33,7 +33,7 @@ let mb = ([M] ; fencerel(Mb) ; [M]) | ([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) | ([M] ; po ; [UL] ; (co | po) ; [LKW] ; fencerel(After-unlock-lock) ; [M]) -let gp = po ; [Sync-rcu] ; po? +let gp = po ; [Sync-rcu | Sync-srcu] ; po? let strong-fence = mb | gp @@ -91,32 +91,47 @@ acyclic pb as propagation (*******) (* - * Effect of read-side critical section proceeds from the rcu_read_lock() - * onward on the one hand and from the rcu_read_unlock() backwards on the - * other hand. + * Effects of read-side critical sections proceed from the rcu_read_unlock() + * or srcu_read_unlock() backwards on the one hand, and from the + * rcu_read_lock() or srcu_read_lock() forwards on the other hand. + * + * In the definition of rcu-fence below, the po term at the left-hand side + * of each disjunct and the po? term at the right-hand end have been factored + * out. They have been moved into the definitions of rcu-link and rb. + * This was necessary in order to apply the "& loc" tests correctly. *) -let rscs = po ; crit^-1 ; po? +let rcu-gp = [Sync-rcu] (* Compare with gp *) +let srcu-gp = [Sync-srcu] +let rcu-rscsi = rcu-rscs^-1 +let srcu-rscsi = srcu-rscs^-1 (* * The synchronize_rcu() strong fence is special in that it can order not * one but two non-rf relations, but only in conjunction with an RCU * read-side critical section. *) -let rcu-link = hb* ; pb* ; prop +let rcu-link = po? ; hb* ; pb* ; prop ; po (* * Any sequence containing at least as many grace periods as RCU read-side * critical sections (joined by rcu-link) acts as a generalized strong fence. + * Likewise for SRCU grace periods and read-side critical sections, provided + * the synchronize_srcu() and srcu_read_[un]lock() calls refer to the same + * struct srcu_struct location. *) -let rec rcu-fence = gp | - (gp ; rcu-link ; rscs) | - (rscs ; rcu-link ; gp) | - (gp ; rcu-link ; rcu-fence ; rcu-link ; rscs) | - (rscs ; rcu-link ; rcu-fence ; rcu-link ; gp) | +let rec rcu-fence = rcu-gp | srcu-gp | + (rcu-gp ; rcu-link ; rcu-rscsi) | + ((srcu-gp ; rcu-link ; srcu-rscsi) & loc) | + (rcu-rscsi ; rcu-link ; rcu-gp) | + ((srcu-rscsi ; rcu-link ; srcu-gp) & loc) | + (rcu-gp ; rcu-link ; rcu-fence ; rcu-link ; rcu-rscsi) | + ((srcu-gp ; rcu-link ; rcu-fence ; rcu-link ; srcu-rscsi) & loc) | + (rcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; rcu-gp) | + ((srcu-rscsi ; rcu-link ; rcu-fence ; rcu-link ; srcu-gp) & loc) | (rcu-fence ; rcu-link ; rcu-fence) (* rb orders instructions just as pb does *) -let rb = prop ; rcu-fence ; hb* ; pb* +let rb = prop ; po ; rcu-fence ; po? 
; hb* ; pb* irreflexive rb as rcu diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def index b27911cc087d..551eeaa389d4 100644 --- a/tools/memory-model/linux-kernel.def +++ b/tools/memory-model/linux-kernel.def @@ -47,6 +47,12 @@ rcu_read_unlock() { __fence{rcu-unlock}; } synchronize_rcu() { __fence{sync-rcu}; } synchronize_rcu_expedited() { __fence{sync-rcu}; } +// SRCU +srcu_read_lock(X) __srcu{srcu-lock}(X) +srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); } +synchronize_srcu(X) { __srcu{sync-srcu}(X); } +synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); } + // Atomic atomic_read(X) READ_ONCE(*X) atomic_set(X,V) { WRITE_ONCE(*X,V); } diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat index 305ded17e741..a059d1a6d8a2 100644 --- a/tools/memory-model/lock.cat +++ b/tools/memory-model/lock.cat @@ -6,9 +6,6 @@ (* * Generate coherence orders and handle lock operations - * - * Warning: spin_is_locked() crashes herd7 versions strictly before 7.48. - * spin_is_locked() is functional from herd7 version 7.49. *) include "cross.cat" diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h index b0d7dc3d71b5..7a111a77b7aa 100644 --- a/tools/objtool/arch.h +++ b/tools/objtool/arch.h @@ -33,7 +33,11 @@ #define INSN_STACK 8 #define INSN_BUG 9 #define INSN_NOP 10 -#define INSN_OTHER 11 +#define INSN_STAC 11 +#define INSN_CLAC 12 +#define INSN_STD 13 +#define INSN_CLD 14 +#define INSN_OTHER 15 #define INSN_LAST INSN_OTHER enum op_dest_type { @@ -41,6 +45,7 @@ enum op_dest_type { OP_DEST_REG_INDIRECT, OP_DEST_MEM, OP_DEST_PUSH, + OP_DEST_PUSHF, OP_DEST_LEAVE, }; @@ -55,6 +60,7 @@ enum op_src_type { OP_SRC_REG_INDIRECT, OP_SRC_CONST, OP_SRC_POP, + OP_SRC_POPF, OP_SRC_ADD, OP_SRC_AND, }; diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 540a209b78ab..472e991f6512 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -357,19 +357,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, /* pushf */ *type = INSN_STACK; op->src.type = OP_SRC_CONST; - op->dest.type = OP_DEST_PUSH; + op->dest.type = OP_DEST_PUSHF; break; case 0x9d: /* popf */ *type = INSN_STACK; - op->src.type = OP_SRC_POP; + op->src.type = OP_SRC_POPF; op->dest.type = OP_DEST_MEM; break; case 0x0f: - if (op2 >= 0x80 && op2 <= 0x8f) { + if (op2 == 0x01) { + + if (modrm == 0xca) + *type = INSN_CLAC; + else if (modrm == 0xcb) + *type = INSN_STAC; + + } else if (op2 >= 0x80 && op2 <= 0x8f) { *type = INSN_JUMP_CONDITIONAL; @@ -444,6 +451,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, *type = INSN_CALL; break; + case 0xfc: + *type = INSN_CLD; + break; + + case 0xfd: + *type = INSN_STD; + break; + case 0xff: if (modrm_reg == 2 || modrm_reg == 3) diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 694abc628e9b..f3b378126011 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -29,7 +29,7 @@ #include "builtin.h" #include "check.h" -bool no_fp, no_unreachable, retpoline, module; +bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess; static const char * const check_usage[] = { "objtool check [<options>] file.o", @@ -41,6 +41,8 @@ const struct option check_options[] = { OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"), OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel 
module"), + OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"), + OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"), OPT_END(), }; diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h index 28ff40e19a14..69762f9c5602 100644 --- a/tools/objtool/builtin.h +++ b/tools/objtool/builtin.h @@ -20,7 +20,7 @@ #include <subcmd/parse-options.h> extern const struct option check_options[]; -extern bool no_fp, no_unreachable, retpoline, module; +extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess; extern int cmd_check(int argc, const char **argv); extern int cmd_orc(int argc, const char **argv); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 479196aeb409..ac743a1d53ab 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -31,6 +31,7 @@ struct alternative { struct list_head list; struct instruction *insn; + bool skip_orig; }; const char *objname; @@ -105,29 +106,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file, insn = next_insn_same_sec(file, insn)) /* - * Check if the function has been manually whitelisted with the - * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted - * due to its use of a context switching instruction. - */ -static bool ignore_func(struct objtool_file *file, struct symbol *func) -{ - struct rela *rela; - - /* check for STACK_FRAME_NON_STANDARD */ - if (file->whitelist && file->whitelist->rela) - list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) { - if (rela->sym->type == STT_SECTION && - rela->sym->sec == func->sec && - rela->addend == func->offset) - return true; - if (rela->sym->type == STT_FUNC && rela->sym == func) - return true; - } - - return false; -} - -/* * This checks to see if the given function is a "noreturn" function. * * For global functions which are outside the scope of this object file, we @@ -437,18 +415,107 @@ static void add_ignores(struct objtool_file *file) struct instruction *insn; struct section *sec; struct symbol *func; + struct rela *rela; - for_each_sec(file, sec) { - list_for_each_entry(func, &sec->symbol_list, list) { - if (func->type != STT_FUNC) - continue; + sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); + if (!sec) + return; - if (!ignore_func(file, func)) + list_for_each_entry(rela, &sec->rela_list, list) { + switch (rela->sym->type) { + case STT_FUNC: + func = rela->sym; + break; + + case STT_SECTION: + func = find_symbol_by_offset(rela->sym->sec, rela->addend); + if (!func || func->type != STT_FUNC) continue; + break; - func_for_each_insn_all(file, func, insn) - insn->ignore = true; + default: + WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type); + continue; } + + func_for_each_insn_all(file, func, insn) + insn->ignore = true; + } +} + +/* + * This is a whitelist of functions that is allowed to be called with AC set. + * The list is meant to be minimal and only contains compiler instrumentation + * ABI and a few functions used to implement *_{to,from}_user() functions. + * + * These functions must not directly change AC, but may PUSHF/POPF. 
+ */ +static const char *uaccess_safe_builtin[] = { + /* KASAN */ + "kasan_report", + "check_memory_region", + /* KASAN out-of-line */ + "__asan_loadN_noabort", + "__asan_load1_noabort", + "__asan_load2_noabort", + "__asan_load4_noabort", + "__asan_load8_noabort", + "__asan_load16_noabort", + "__asan_storeN_noabort", + "__asan_store1_noabort", + "__asan_store2_noabort", + "__asan_store4_noabort", + "__asan_store8_noabort", + "__asan_store16_noabort", + /* KASAN in-line */ + "__asan_report_load_n_noabort", + "__asan_report_load1_noabort", + "__asan_report_load2_noabort", + "__asan_report_load4_noabort", + "__asan_report_load8_noabort", + "__asan_report_load16_noabort", + "__asan_report_store_n_noabort", + "__asan_report_store1_noabort", + "__asan_report_store2_noabort", + "__asan_report_store4_noabort", + "__asan_report_store8_noabort", + "__asan_report_store16_noabort", + /* KCOV */ + "write_comp_data", + "__sanitizer_cov_trace_pc", + "__sanitizer_cov_trace_const_cmp1", + "__sanitizer_cov_trace_const_cmp2", + "__sanitizer_cov_trace_const_cmp4", + "__sanitizer_cov_trace_const_cmp8", + "__sanitizer_cov_trace_cmp1", + "__sanitizer_cov_trace_cmp2", + "__sanitizer_cov_trace_cmp4", + "__sanitizer_cov_trace_cmp8", + /* UBSAN */ + "ubsan_type_mismatch_common", + "__ubsan_handle_type_mismatch", + "__ubsan_handle_type_mismatch_v1", + /* misc */ + "csum_partial_copy_generic", + "__memcpy_mcsafe", + "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ + NULL +}; + +static void add_uaccess_safe(struct objtool_file *file) +{ + struct symbol *func; + const char **name; + + if (!uaccess) + return; + + for (name = uaccess_safe_builtin; *name; name++) { + func = find_symbol_by_name(file->elf, *name); + if (!func) + continue; + + func->alias->uaccess_safe = true; } } @@ -458,13 +525,13 @@ static void add_ignores(struct objtool_file *file) * But it at least allows objtool to understand the control flow *around* the * retpoline. */ -static int add_nospec_ignores(struct objtool_file *file) +static int add_ignore_alternatives(struct objtool_file *file) { struct section *sec; struct rela *rela; struct instruction *insn; - sec = find_section_by_name(file->elf, ".rela.discard.nospec"); + sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); if (!sec) return 0; @@ -476,7 +543,7 @@ static int add_nospec_ignores(struct objtool_file *file) insn = find_insn(file, rela->sym->sec, rela->addend); if (!insn) { - WARN("bad .discard.nospec entry"); + WARN("bad .discard.ignore_alts entry"); return -1; } @@ -525,7 +592,8 @@ static int add_jump_destinations(struct objtool_file *file) continue; } else { /* sibling call */ - insn->jump_dest = 0; + insn->call_dest = rela->sym; + insn->jump_dest = NULL; continue; } @@ -547,25 +615,38 @@ static int add_jump_destinations(struct objtool_file *file) } /* - * For GCC 8+, create parent/child links for any cold - * subfunctions. This is _mostly_ redundant with a similar - * initialization in read_symbols(). - * - * If a function has aliases, we want the *first* such function - * in the symbol table to be the subfunction's parent. In that - * case we overwrite the initialization done in read_symbols(). - * - * However this code can't completely replace the - * read_symbols() code because this doesn't detect the case - * where the parent function's only reference to a subfunction - * is through a switch table. + * Cross-function jump. 
*/ if (insn->func && insn->jump_dest->func && - insn->func != insn->jump_dest->func && - !strstr(insn->func->name, ".cold.") && - strstr(insn->jump_dest->func->name, ".cold.")) { - insn->func->cfunc = insn->jump_dest->func; - insn->jump_dest->func->pfunc = insn->func; + insn->func != insn->jump_dest->func) { + + /* + * For GCC 8+, create parent/child links for any cold + * subfunctions. This is _mostly_ redundant with a + * similar initialization in read_symbols(). + * + * If a function has aliases, we want the *first* such + * function in the symbol table to be the subfunction's + * parent. In that case we overwrite the + * initialization done in read_symbols(). + * + * However this code can't completely replace the + * read_symbols() code because this doesn't detect the + * case where the parent function's only reference to a + * subfunction is through a switch table. + */ + if (!strstr(insn->func->name, ".cold.") && + strstr(insn->jump_dest->func->name, ".cold.")) { + insn->func->cfunc = insn->jump_dest->func; + insn->jump_dest->func->pfunc = insn->func; + + } else if (insn->jump_dest->func->pfunc != insn->func->pfunc && + insn->jump_dest->offset == insn->jump_dest->func->offset) { + + /* sibling class */ + insn->call_dest = insn->jump_dest->func; + insn->jump_dest = NULL; + } } } @@ -634,9 +715,6 @@ static int add_call_destinations(struct objtool_file *file) * conditionally jumps to the _end_ of the entry. We have to modify these * jumps' destinations to point back to .text rather than the end of the * entry in .altinstr_replacement. - * - * 4. It has been requested that we don't validate the !POPCNT feature path - * which is a "very very small percentage of machines". */ static int handle_group_alt(struct objtool_file *file, struct special_alt *special_alt, @@ -652,9 +730,6 @@ static int handle_group_alt(struct objtool_file *file, if (insn->offset >= special_alt->orig_off + special_alt->orig_len) break; - if (special_alt->skip_orig) - insn->type = INSN_NOP; - insn->alt_group = true; last_orig_insn = insn; } @@ -696,6 +771,7 @@ static int handle_group_alt(struct objtool_file *file, last_new_insn = insn; insn->ignore = orig_insn->ignore_alts; + insn->func = orig_insn->func; if (insn->type != INSN_JUMP_CONDITIONAL && insn->type != INSN_JUMP_UNCONDITIONAL) @@ -818,6 +894,8 @@ static int add_special_section_alts(struct objtool_file *file) } alt->insn = new_insn; + alt->skip_orig = special_alt->skip_orig; + orig_insn->ignore_alts |= special_alt->skip_alt; list_add_tail(&alt->list, &orig_insn->alts); list_del(&special_alt->list); @@ -1239,8 +1317,9 @@ static int decode_sections(struct objtool_file *file) return ret; add_ignores(file); + add_uaccess_safe(file); - ret = add_nospec_ignores(file); + ret = add_ignore_alternatives(file); if (ret) return ret; @@ -1320,11 +1399,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s return 0; /* push */ - if (op->dest.type == OP_DEST_PUSH) + if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) cfa->offset += 8; /* pop */ - if (op->src.type == OP_SRC_POP) + if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) cfa->offset -= 8; /* add immediate to sp */ @@ -1581,6 +1660,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state) break; case OP_SRC_POP: + case OP_SRC_POPF: if (!state->drap && op->dest.type == OP_DEST_REG && op->dest.reg == cfa->base) { @@ -1645,6 +1725,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state) break; case 
OP_DEST_PUSH: + case OP_DEST_PUSHF: state->stack_size += 8; if (cfa->base == CFI_SP) cfa->offset += 8; @@ -1735,7 +1816,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state) break; case OP_DEST_MEM: - if (op->src.type != OP_SRC_POP) { + if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { WARN_FUNC("unknown stack-related memory operation", insn->sec, insn->offset); return -1; @@ -1799,6 +1880,50 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state) return false; } +static inline bool func_uaccess_safe(struct symbol *func) +{ + if (func) + return func->alias->uaccess_safe; + + return false; +} + +static inline const char *insn_dest_name(struct instruction *insn) +{ + if (insn->call_dest) + return insn->call_dest->name; + + return "{dynamic}"; +} + +static int validate_call(struct instruction *insn, struct insn_state *state) +{ + if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { + WARN_FUNC("call to %s() with UACCESS enabled", + insn->sec, insn->offset, insn_dest_name(insn)); + return 1; + } + + if (state->df) { + WARN_FUNC("call to %s() with DF set", + insn->sec, insn->offset, insn_dest_name(insn)); + return 1; + } + + return 0; +} + +static int validate_sibling_call(struct instruction *insn, struct insn_state *state) +{ + if (has_modified_stack_frame(state)) { + WARN_FUNC("sibling call from callable instruction with modified stack frame", + insn->sec, insn->offset); + return 1; + } + + return validate_call(insn, state); +} + /* * Follow the branch starting at the given instruction, and recursively follow * any other branches (jumps). Meanwhile, track the frame pointer state at @@ -1844,7 +1969,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, if (!insn->hint && !insn_state_match(insn, &state)) return 1; - return 0; + /* If we were here with AC=0, but now have AC=1, go again */ + if (insn->state.uaccess || !state.uaccess) + return 0; } if (insn->hint) { @@ -1893,16 +2020,42 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, insn->visited = true; if (!insn->ignore_alts) { + bool skip_orig = false; + list_for_each_entry(alt, &insn->alts, list) { + if (alt->skip_orig) + skip_orig = true; + ret = validate_branch(file, alt->insn, state); - if (ret) - return 1; + if (ret) { + if (backtrace) + BT_FUNC("(alt)", insn); + return ret; + } } + + if (skip_orig) + return 0; } switch (insn->type) { case INSN_RETURN: + if (state.uaccess && !func_uaccess_safe(func)) { + WARN_FUNC("return with UACCESS enabled", sec, insn->offset); + return 1; + } + + if (!state.uaccess && func_uaccess_safe(func)) { + WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset); + return 1; + } + + if (state.df) { + WARN_FUNC("return with DF set", sec, insn->offset); + return 1; + } + if (func && has_modified_stack_frame(&state)) { WARN_FUNC("return with modified stack frame", sec, insn->offset); @@ -1918,17 +2071,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, return 0; case INSN_CALL: - if (is_fentry_call(insn)) - break; + case INSN_CALL_DYNAMIC: + ret = validate_call(insn, &state); + if (ret) + return ret; - ret = dead_end_function(file, insn->call_dest); - if (ret == 1) - return 0; - if (ret == -1) - return 1; + if (insn->type == INSN_CALL) { + if (is_fentry_call(insn)) + break; + + ret = dead_end_function(file, insn->call_dest); + if (ret == 1) + return 0; + if (ret == -1) + return 1; + } - /* 
fallthrough */ - case INSN_CALL_DYNAMIC: if (!no_fp && func && !has_valid_stack_frame(&state)) { WARN_FUNC("call without frame pointer save/setup", sec, insn->offset); @@ -1938,18 +2096,21 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, case INSN_JUMP_CONDITIONAL: case INSN_JUMP_UNCONDITIONAL: - if (insn->jump_dest && - (!func || !insn->jump_dest->func || - insn->jump_dest->func->pfunc == func)) { - ret = validate_branch(file, insn->jump_dest, - state); + if (func && !insn->jump_dest) { + ret = validate_sibling_call(insn, &state); if (ret) - return 1; + return ret; - } else if (func && has_modified_stack_frame(&state)) { - WARN_FUNC("sibling call from callable instruction with modified stack frame", - sec, insn->offset); - return 1; + } else if (insn->jump_dest && + (!func || !insn->jump_dest->func || + insn->jump_dest->func->pfunc == func)) { + ret = validate_branch(file, insn->jump_dest, + state); + if (ret) { + if (backtrace) + BT_FUNC("(branch)", insn); + return ret; + } } if (insn->type == INSN_JUMP_UNCONDITIONAL) @@ -1958,11 +2119,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, break; case INSN_JUMP_DYNAMIC: - if (func && list_empty(&insn->alts) && - has_modified_stack_frame(&state)) { - WARN_FUNC("sibling call from callable instruction with modified stack frame", - sec, insn->offset); - return 1; + if (func && list_empty(&insn->alts)) { + ret = validate_sibling_call(insn, &state); + if (ret) + return ret; } return 0; @@ -1979,6 +2139,63 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, if (update_insn_state(insn, &state)) return 1; + if (insn->stack_op.dest.type == OP_DEST_PUSHF) { + if (!state.uaccess_stack) { + state.uaccess_stack = 1; + } else if (state.uaccess_stack >> 31) { + WARN_FUNC("PUSHF stack exhausted", sec, insn->offset); + return 1; + } + state.uaccess_stack <<= 1; + state.uaccess_stack |= state.uaccess; + } + + if (insn->stack_op.src.type == OP_SRC_POPF) { + if (state.uaccess_stack) { + state.uaccess = state.uaccess_stack & 1; + state.uaccess_stack >>= 1; + if (state.uaccess_stack == 1) + state.uaccess_stack = 0; + } + } + + break; + + case INSN_STAC: + if (state.uaccess) { + WARN_FUNC("recursive UACCESS enable", sec, insn->offset); + return 1; + } + + state.uaccess = true; + break; + + case INSN_CLAC: + if (!state.uaccess && insn->func) { + WARN_FUNC("redundant UACCESS disable", sec, insn->offset); + return 1; + } + + if (func_uaccess_safe(func) && !state.uaccess_stack) { + WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); + return 1; + } + + state.uaccess = false; + break; + + case INSN_STD: + if (state.df) + WARN_FUNC("recursive STD", sec, insn->offset); + + state.df = true; + break; + + case INSN_CLD: + if (!state.df && insn->func) + WARN_FUNC("redundant CLD", sec, insn->offset); + + state.df = false; break; default: @@ -2015,6 +2232,8 @@ static int validate_unwind_hints(struct objtool_file *file) for_each_insn(file, insn) { if (insn->hint && !insn->visited) { ret = validate_branch(file, insn, state); + if (ret && backtrace) + BT_FUNC("<=== (hint)", insn); warnings += ret; } } @@ -2142,7 +2361,11 @@ static int validate_functions(struct objtool_file *file) if (!insn || insn->ignore) continue; + state.uaccess = func->alias->uaccess_safe; + ret = validate_branch(file, insn, state); + if (ret && backtrace) + BT_FUNC("<=== (func)", insn); warnings += ret; } } @@ -2199,7 +2422,6 @@ int check(const char *_objname, bool orc) 
INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); - file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index e6e8a655b556..71e54f97dbcd 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -31,7 +31,8 @@ struct insn_state { int stack_size; unsigned char type; bool bp_scratch; - bool drap, end; + bool drap, end, uaccess, df; + unsigned int uaccess_stack; int drap_reg, drap_offset; struct cfi_reg vals[CFI_NUM_REGS]; }; @@ -60,7 +61,6 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *whitelist; bool ignore_unreachables, c_file, hints, rodata; }; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index b8f3cca8e58b..dd198d53387d 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -219,7 +219,7 @@ static int read_sections(struct elf *elf) static int read_symbols(struct elf *elf) { struct section *symtab, *sec; - struct symbol *sym, *pfunc; + struct symbol *sym, *pfunc, *alias; struct list_head *entry, *tmp; int symbols_nr, i; char *coldstr; @@ -239,6 +239,7 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); + alias = sym; sym->idx = i; @@ -288,11 +289,17 @@ static int read_symbols(struct elf *elf) break; } - if (sym->offset == s->offset && sym->len >= s->len) { - entry = tmp; - break; + if (sym->offset == s->offset) { + if (sym->len == s->len && alias == sym) + alias = s; + + if (sym->len >= s->len) { + entry = tmp; + break; + } } } + sym->alias = alias; list_add(&sym->list, entry); hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); } diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index bc97ed86b9cd..2cc2ed49322d 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -61,7 +61,8 @@ struct symbol { unsigned char bind, type; unsigned long offset; unsigned int len; - struct symbol *pfunc, *cfunc; + struct symbol *pfunc, *cfunc, *alias; + bool uaccess_safe; }; struct rela { diff --git a/tools/objtool/special.c b/tools/objtool/special.c index 50af4e1274b3..4e50563d87c6 100644 --- a/tools/objtool/special.c +++ b/tools/objtool/special.c @@ -23,6 +23,7 @@ #include <stdlib.h> #include <string.h> +#include "builtin.h" #include "special.h" #include "warn.h" @@ -42,6 +43,7 @@ #define ALT_NEW_LEN_OFFSET 11 #define X86_FEATURE_POPCNT (4*32+23) +#define X86_FEATURE_SMAP (9*32+20) struct special_entry { const char *sec; @@ -110,6 +112,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry, */ if (feature == X86_FEATURE_POPCNT) alt->skip_orig = true; + + /* + * If UACCESS validation is enabled; force that alternative; + * otherwise force it the other way. + * + * What we want to avoid is having both the original and the + * alternative code flow at the same time, in that case we can + * find paths that see the STAC but take the NOP instead of + * CLAC and the other way around. 
+ */ + if (feature == X86_FEATURE_SMAP) { + if (uaccess) + alt->skip_orig = true; + else + alt->skip_alt = true; + } } orig_rela = find_rela_by_dest(sec, offset + entry->orig); diff --git a/tools/objtool/special.h b/tools/objtool/special.h index fad1d092f679..d5c062e718ef 100644 --- a/tools/objtool/special.h +++ b/tools/objtool/special.h @@ -26,6 +26,7 @@ struct special_alt { bool group; bool skip_orig; + bool skip_alt; bool jump_or_nop; struct section *orig_sec; diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h index afd9f7a05f6d..f4fbb972b611 100644 --- a/tools/objtool/warn.h +++ b/tools/objtool/warn.h @@ -64,6 +64,14 @@ static inline char *offstr(struct section *sec, unsigned long offset) free(_str); \ }) +#define BT_FUNC(format, insn, ...) \ +({ \ + struct instruction *_insn = (insn); \ + char *_str = offstr(_insn->sec, _insn->offset); \ + WARN(" %s: " format, _str, ##__VA_ARGS__); \ + free(_str); \ +}) + #define WARN_ELF(format, ...) \ WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1)) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 8fe4dffcadd0..58986f4cc190 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -459,6 +459,25 @@ Set affinity mask of trace reading thread according to the policy defined by 'mo node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer cpu - thread affinity mask is set to cpu of the processed mmap buffer +--mmap-flush=number:: + +Specify minimal number of bytes that is extracted from mmap data pages and +processed for output. One can specify the number using B/K/M/G suffixes. + +The maximal allowed value is a quarter of the size of mmaped data pages. + +The default option value is 1 byte which means that every time that the output +writing thread finds some new data in the mmaped buffer the data is extracted, +possibly compressed (-z) and written to the output, perf.data or pipe. + +Larger data chunks are compressed more effectively in comparison to smaller +chunks so extraction of larger chunks from the mmap data pages is preferable +from the perspective of output size reduction. + +Also at some cases executing less output write syscalls with bigger data size +can take less time than executing more output write syscalls with smaller data +size thus lowering runtime profiling overhead. + --all-kernel:: Configure all used events to run in kernel space. diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index fe3f97e342fa..0c52a01dc759 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -152,6 +152,13 @@ endif FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf +ifdef LIBZSTD_DIR + LIBZSTD_CFLAGS := -I$(LIBZSTD_DIR)/lib + LIBZSTD_LDFLAGS := -L$(LIBZSTD_DIR)/lib +endif +FEATURE_CHECK_CFLAGS-libzstd := $(LIBZSTD_CFLAGS) +FEATURE_CHECK_LDFLAGS-libzstd := $(LIBZSTD_LDFLAGS) + FEATURE_CHECK_CFLAGS-bpf = -I. 
-I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi # include ARCH specific config -include $(src-perf)/arch/$(SRCARCH)/Makefile @@ -227,7 +234,7 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_LDFLAGS-libaio = -lrt -FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes +FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl CFLAGS += -fno-omit-frame-pointer CFLAGS += -ggdb3 @@ -787,6 +794,19 @@ ifndef NO_LZMA endif endif +ifndef NO_LIBZSTD + ifeq ($(feature-libzstd), 1) + CFLAGS += -DHAVE_ZSTD_SUPPORT + CFLAGS += $(LIBZSTD_CFLAGS) + LDFLAGS += $(LIBZSTD_LDFLAGS) + EXTLIBS += -lzstd + $(call detected,CONFIG_ZSTD) + else + msg := $(warning No libzstd found, disables trace compression, please install libzstd-dev[el] and/or set LIBZSTD_DIR); + NO_LIBZSTD := 1 + endif +endif + ifndef NO_BACKTRACE ifeq ($(feature-backtrace), 1) CFLAGS += -DHAVE_BACKTRACE_SUPPORT diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index e8c9f77e9010..c706548d5b10 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -108,6 +108,9 @@ include ../scripts/utilities.mak # streaming for record mode. Currently Posix AIO trace streaming is # supported only when linking with glibc. # +# Define NO_LIBZSTD if you do not want support of Zstandard based runtime +# trace compression in record mode. +# # As per kernel Makefile, avoid funny character set dependencies unexport LC_ALL diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 98ad783efc69..a7784554a80d 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -39,6 +39,10 @@ #include <numa.h> #include <numaif.h> +#ifndef RUSAGE_THREAD +# define RUSAGE_THREAD 1 +#endif + /* * Regular printout to the terminal, supressed if -q is specified: */ diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index fa520f4b8095..b80eee455111 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -1975,7 +1975,7 @@ int cmd_kmem(int argc, const char **argv) goto out_delete; } - kmem_page_size = tep_get_page_size(evsel->tp_format->pevent); + kmem_page_size = tep_get_page_size(evsel->tp_format->tep); symbol_conf.use_callchain = true; } diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index a8394b4f1167..e0312a1c4792 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c @@ -70,10 +70,11 @@ int cmd_list(int argc, const char **argv) print_symbol_events(NULL, PERF_TYPE_HARDWARE, event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump); else if (strcmp(argv[i], "sw") == 0 || - strcmp(argv[i], "software") == 0) + strcmp(argv[i], "software") == 0) { print_symbol_events(NULL, PERF_TYPE_SOFTWARE, event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump); - else if (strcmp(argv[i], "cache") == 0 || + print_tool_events(NULL, raw_dump); + } else if (strcmp(argv[i], "cache") == 0 || strcmp(argv[i], "hwcache") == 0) print_hwcache_events(NULL, raw_dump); else if (strcmp(argv[i], "pmu") == 0) @@ -113,6 +114,7 @@ int cmd_list(int argc, const char **argv) event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump); print_symbol_events(s, PERF_TYPE_SOFTWARE, event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump); + print_tool_events(s, raw_dump); print_hwcache_events(s, raw_dump); print_pmu_events(s, raw_dump, !desc_flag, long_desc_flag, diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 4e2d953d4bc5..c5e10552776a 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c 
@@ -337,6 +337,41 @@ static int record__aio_enabled(struct record *rec) return rec->opts.nr_cblocks > 0; } +#define MMAP_FLUSH_DEFAULT 1 +static int record__mmap_flush_parse(const struct option *opt, + const char *str, + int unset) +{ + int flush_max; + struct record_opts *opts = (struct record_opts *)opt->value; + static struct parse_tag tags[] = { + { .tag = 'B', .mult = 1 }, + { .tag = 'K', .mult = 1 << 10 }, + { .tag = 'M', .mult = 1 << 20 }, + { .tag = 'G', .mult = 1 << 30 }, + { .tag = 0 }, + }; + + if (unset) + return 0; + + if (str) { + opts->mmap_flush = parse_tag_value(str, tags); + if (opts->mmap_flush == (int)-1) + opts->mmap_flush = strtol(str, NULL, 0); + } + + if (!opts->mmap_flush) + opts->mmap_flush = MMAP_FLUSH_DEFAULT; + + flush_max = perf_evlist__mmap_size(opts->mmap_pages); + flush_max /= 4; + if (opts->mmap_flush > flush_max) + opts->mmap_flush = flush_max; + + return 0; +} + static int process_synthesized_event(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample __maybe_unused, @@ -546,7 +581,8 @@ static int record__mmap_evlist(struct record *rec, if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, opts->auxtrace_mmap_pages, opts->auxtrace_snapshot_mode, - opts->nr_cblocks, opts->affinity) < 0) { + opts->nr_cblocks, opts->affinity, + opts->mmap_flush) < 0) { if (errno == EPERM) { pr_err("Permission error mapping pages.\n" "Consider increasing " @@ -736,7 +772,7 @@ static void record__adjust_affinity(struct record *rec, struct perf_mmap *map) } static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist, - bool overwrite) + bool overwrite, bool synch) { u64 bytes_written = rec->bytes_written; int i; @@ -759,12 +795,19 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli off = record__aio_get_pos(trace_fd); for (i = 0; i < evlist->nr_mmaps; i++) { + u64 flush = 0; struct perf_mmap *map = &maps[i]; if (map->base) { record__adjust_affinity(rec, map); + if (synch) { + flush = map->flush; + map->flush = 1; + } if (!record__aio_enabled(rec)) { if (perf_mmap__push(map, rec, record__pushfn) != 0) { + if (synch) + map->flush = flush; rc = -1; goto out; } @@ -777,10 +820,14 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli idx = record__aio_sync(map, false); if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) { record__aio_set_pos(trace_fd, off); + if (synch) + map->flush = flush; rc = -1; goto out; } } + if (synch) + map->flush = flush; } if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && @@ -806,15 +853,15 @@ out: return rc; } -static int record__mmap_read_all(struct record *rec) +static int record__mmap_read_all(struct record *rec, bool synch) { int err; - err = record__mmap_read_evlist(rec, rec->evlist, false); + err = record__mmap_read_evlist(rec, rec->evlist, false, synch); if (err) return err; - return record__mmap_read_evlist(rec, rec->evlist, true); + return record__mmap_read_evlist(rec, rec->evlist, true, synch); } static void record__init_features(struct record *rec) @@ -1340,7 +1387,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (trigger_is_hit(&switch_output_trigger) || done || draining) perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); - if (record__mmap_read_all(rec) < 0) { + if (record__mmap_read_all(rec, false) < 0) { trigger_error(&auxtrace_snapshot_trigger); trigger_error(&switch_output_trigger); err = -1; @@ -1441,6 +1488,7 @@ static int __cmd_record(struct record 
*rec, int argc, const char **argv) record__synthesize_workload(rec, true); out_child: + record__mmap_read_all(rec, true); record__aio_mmap_read_sync(rec); if (forks) { @@ -1846,6 +1894,7 @@ static struct record record = { .uses_mmap = true, .default_per_cpu = true, }, + .mmap_flush = MMAP_FLUSH_DEFAULT, }, .tool = { .sample = process_sample_event, @@ -1912,6 +1961,9 @@ static struct option __record_options[] = { OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]", "number of mmap data pages and AUX area tracing mmap pages", record__parse_mmap_pages), + OPT_CALLBACK(0, "mmap-flush", &record.opts, "number", + "Minimal number of bytes that is extracted from mmap data pages (default: 1)", + record__mmap_flush_parse), OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), OPT_CALLBACK_NOOPT('g', NULL, &callchain_param, @@ -2224,6 +2276,7 @@ int cmd_record(int argc, const char **argv) pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks); pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]); + pr_debug("mmap flush: %d\n", rec->opts.mmap_flush); err = __cmd_record(&record, argc, argv); out: diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index c3625ec374e0..a3c060878faa 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -244,11 +244,25 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread, process_synthesized_event, NULL); } +static int read_single_counter(struct perf_evsel *counter, int cpu, + int thread, struct timespec *rs) +{ + if (counter->tool_event == PERF_TOOL_DURATION_TIME) { + u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; + struct perf_counts_values *count = + perf_counts(counter->counts, cpu, thread); + count->ena = count->run = val; + count->val = val; + return 0; + } + return perf_evsel__read_counter(counter, cpu, thread); +} + /* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter(struct perf_evsel *counter) +static int read_counter(struct perf_evsel *counter, struct timespec *rs) { int nthreads = thread_map__nr(evsel_list->threads); int ncpus, cpu, thread; @@ -275,7 +289,7 @@ static int read_counter(struct perf_evsel *counter) * (via perf_evsel__read_counter) and sets threir count->loaded. */ if (!count->loaded && - perf_evsel__read_counter(counter, cpu, thread)) { + read_single_counter(counter, cpu, thread, rs)) { counter->counts->scaled = -1; perf_counts(counter->counts, cpu, thread)->ena = 0; perf_counts(counter->counts, cpu, thread)->run = 0; @@ -304,13 +318,13 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static void read_counters(void) +static void read_counters(struct timespec *rs) { struct perf_evsel *counter; int ret; evlist__for_each_entry(evsel_list, counter) { - ret = read_counter(counter); + ret = read_counter(counter, rs); if (ret) pr_debug("failed to read counter %s\n", counter->name); @@ -323,11 +337,11 @@ static void process_interval(void) { struct timespec ts, rs; - read_counters(); - clock_gettime(CLOCK_MONOTONIC, &ts); diff_timespec(&rs, &ts, &ref_time); + read_counters(&rs); + if (STAT_RECORD) { if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) pr_err("failed to write stat round event\n"); @@ -593,7 +607,7 @@ try_again: * avoid arbitrary skew, we must read all counters before closing any * group leaders. 
*/ - read_counters(); + read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); perf_evlist__close(evsel_list); return WEXITSTATUS(status); diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c index 50df168be326..f470144d1a70 100644 --- a/tools/perf/builtin-version.c +++ b/tools/perf/builtin-version.c @@ -78,6 +78,8 @@ static void library_status(void) STATUS(HAVE_LZMA_SUPPORT, lzma); STATUS(HAVE_AUXTRACE_SUPPORT, get_cpuid); STATUS(HAVE_LIBBPF_SUPPORT, bpf); + STATUS(HAVE_AIO_SUPPORT, aio); + STATUS(HAVE_ZSTD_SUPPORT, zstd); } int cmd_version(int argc, const char **argv) diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c index f9b2161e1ca4..2422894a8194 100644 --- a/tools/perf/examples/bpf/augmented_raw_syscalls.c +++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c @@ -15,6 +15,7 @@ */ #include <unistd.h> +#include <linux/limits.h> #include <pid_filter.h> /* bpf-output associated map */ @@ -41,32 +42,110 @@ struct syscall_exit_args { struct augmented_filename { unsigned int size; int reserved; - char value[256]; + char value[PATH_MAX]; }; -#define SYS_OPEN 2 -#define SYS_ACCESS 21 -#define SYS_OPENAT 257 +/* syscalls where the first arg is a string */ +#define SYS_OPEN 2 +#define SYS_STAT 4 +#define SYS_LSTAT 6 +#define SYS_ACCESS 21 +#define SYS_EXECVE 59 +#define SYS_TRUNCATE 76 +#define SYS_CHDIR 80 +#define SYS_RENAME 82 +#define SYS_MKDIR 83 +#define SYS_RMDIR 84 +#define SYS_CREAT 85 +#define SYS_LINK 86 +#define SYS_UNLINK 87 +#define SYS_SYMLINK 88 +#define SYS_READLINK 89 +#define SYS_CHMOD 90 +#define SYS_CHOWN 92 +#define SYS_LCHOWN 94 +#define SYS_MKNOD 133 +#define SYS_STATFS 137 +#define SYS_PIVOT_ROOT 155 +#define SYS_CHROOT 161 +#define SYS_ACCT 163 +#define SYS_SWAPON 167 +#define SYS_SWAPOFF 168 +#define SYS_DELETE_MODULE 176 +#define SYS_SETXATTR 188 +#define SYS_LSETXATTR 189 +#define SYS_GETXATTR 191 +#define SYS_LGETXATTR 192 +#define SYS_LISTXATTR 194 +#define SYS_LLISTXATTR 195 +#define SYS_REMOVEXATTR 197 +#define SYS_LREMOVEXATTR 198 +#define SYS_MQ_OPEN 240 +#define SYS_MQ_UNLINK 241 +#define SYS_ADD_KEY 248 +#define SYS_REQUEST_KEY 249 +#define SYS_SYMLINKAT 266 +#define SYS_MEMFD_CREATE 319 + +/* syscalls where the first arg is a string */ + +#define SYS_PWRITE64 18 +#define SYS_EXECVE 59 +#define SYS_RENAME 82 +#define SYS_QUOTACTL 179 +#define SYS_FSETXATTR 190 +#define SYS_FGETXATTR 193 +#define SYS_FREMOVEXATTR 199 +#define SYS_MQ_TIMEDSEND 242 +#define SYS_REQUEST_KEY 249 +#define SYS_INOTIFY_ADD_WATCH 254 +#define SYS_OPENAT 257 +#define SYS_MKDIRAT 258 +#define SYS_MKNODAT 259 +#define SYS_FCHOWNAT 260 +#define SYS_FUTIMESAT 261 +#define SYS_NEWFSTATAT 262 +#define SYS_UNLINKAT 263 +#define SYS_RENAMEAT 264 +#define SYS_LINKAT 265 +#define SYS_READLINKAT 267 +#define SYS_FCHMODAT 268 +#define SYS_FACCESSAT 269 +#define SYS_UTIMENSAT 280 +#define SYS_NAME_TO_HANDLE_AT 303 +#define SYS_FINIT_MODULE 313 +#define SYS_RENAMEAT2 316 +#define SYS_EXECVEAT 322 +#define SYS_STATX 332 pid_filter(pids_filtered); +struct augmented_args_filename { + struct syscall_enter_args args; + struct augmented_filename filename; +}; + +bpf_map(augmented_filename_map, PERCPU_ARRAY, int, struct augmented_args_filename, 1); + SEC("raw_syscalls:sys_enter") int sys_enter(struct syscall_enter_args *args) { - struct { - struct syscall_enter_args args; - struct augmented_filename filename; - } augmented_args; - struct syscall *syscall; - unsigned int len = sizeof(augmented_args); + struct 
augmented_args_filename *augmented_args; + unsigned int len = sizeof(*augmented_args); const void *filename_arg = NULL; + struct syscall *syscall; + int key = 0; + + augmented_args = bpf_map_lookup_elem(&augmented_filename_map, &key); + if (augmented_args == NULL) + return 1; if (pid_filter__has(&pids_filtered, getpid())) return 0; - probe_read(&augmented_args.args, sizeof(augmented_args.args), args); + probe_read(&augmented_args->args, sizeof(augmented_args->args), args); - syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr); + syscall = bpf_map_lookup_elem(&syscalls, &augmented_args->args.syscall_nr); if (syscall == NULL || !syscall->enabled) return 0; /* @@ -109,30 +188,105 @@ int sys_enter(struct syscall_enter_args *args) * * after the ctx memory access to prevent their down stream merging. */ - switch (augmented_args.args.syscall_nr) { + /* + * This table of what args are strings will be provided by userspace, + * in the syscalls map, i.e. we will already have to do the lookup to + * see if this specific syscall is filtered, so we can as well get more + * info about what syscall args are strings or pointers, and how many + * bytes to copy, per arg, etc. + * + * For now hard code it, till we have all the basic mechanisms in place + * to automate everything and make the kernel part be completely driven + * by information obtained in userspace for each kernel version and + * processor architecture, making the kernel part the same no matter what + * kernel version or processor architecture it runs on. + */ + switch (augmented_args->args.syscall_nr) { + case SYS_ACCT: + case SYS_ADD_KEY: + case SYS_CHDIR: + case SYS_CHMOD: + case SYS_CHOWN: + case SYS_CHROOT: + case SYS_CREAT: + case SYS_DELETE_MODULE: + case SYS_EXECVE: + case SYS_GETXATTR: + case SYS_LCHOWN: + case SYS_LGETXATTR: + case SYS_LINK: + case SYS_LISTXATTR: + case SYS_LLISTXATTR: + case SYS_LREMOVEXATTR: + case SYS_LSETXATTR: + case SYS_LSTAT: + case SYS_MEMFD_CREATE: + case SYS_MKDIR: + case SYS_MKNOD: + case SYS_MQ_OPEN: + case SYS_MQ_UNLINK: + case SYS_PIVOT_ROOT: + case SYS_READLINK: + case SYS_REMOVEXATTR: + case SYS_RENAME: + case SYS_REQUEST_KEY: + case SYS_RMDIR: + case SYS_SETXATTR: + case SYS_STAT: + case SYS_STATFS: + case SYS_SWAPOFF: + case SYS_SWAPON: + case SYS_SYMLINK: + case SYS_SYMLINKAT: + case SYS_TRUNCATE: + case SYS_UNLINK: case SYS_ACCESS: case SYS_OPEN: filename_arg = (const void *)args->args[0]; __asm__ __volatile__("": : :"memory"); break; + case SYS_EXECVEAT: + case SYS_FACCESSAT: + case SYS_FCHMODAT: + case SYS_FCHOWNAT: + case SYS_FGETXATTR: + case SYS_FINIT_MODULE: + case SYS_FREMOVEXATTR: + case SYS_FSETXATTR: + case SYS_FUTIMESAT: + case SYS_INOTIFY_ADD_WATCH: + case SYS_LINKAT: + case SYS_MKDIRAT: + case SYS_MKNODAT: + case SYS_MQ_TIMEDSEND: + case SYS_NAME_TO_HANDLE_AT: + case SYS_NEWFSTATAT: + case SYS_PWRITE64: + case SYS_QUOTACTL: + case SYS_READLINKAT: + case SYS_RENAMEAT: + case SYS_RENAMEAT2: + case SYS_STATX: + case SYS_UNLINKAT: + case SYS_UTIMENSAT: case SYS_OPENAT: filename_arg = (const void *)args->args[1]; break; } if (filename_arg != NULL) { - augmented_args.filename.reserved = 0; - augmented_args.filename.size = probe_read_str(&augmented_args.filename.value, - sizeof(augmented_args.filename.value), + augmented_args->filename.reserved = 0; + augmented_args->filename.size = probe_read_str(&augmented_args->filename.value, + sizeof(augmented_args->filename.value), filename_arg); - if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) { - len 
-= sizeof(augmented_args.filename.value) - augmented_args.filename.size; - len &= sizeof(augmented_args.filename.value) - 1; + if (augmented_args->filename.size < sizeof(augmented_args->filename.value)) { + len -= sizeof(augmented_args->filename.value) - augmented_args->filename.size; + len &= sizeof(augmented_args->filename.value) - 1; } } else { - len = sizeof(augmented_args.args); + len = sizeof(augmented_args->args); } /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ - return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len); + return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, augmented_args, len); } SEC("raw_syscalls:sys_exit") diff --git a/tools/perf/perf.h b/tools/perf/perf.h index c59743def8d3..369eae61068d 100644 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@ -85,6 +85,7 @@ struct record_opts { u64 clockid_res_ns; int nr_cblocks; int affinity; + int mmap_flush; }; enum perf_affinity { diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json index e7a3524b748f..68618152ea2c 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json @@ -4,7 +4,7 @@ "EventCode": "128", "EventName": "L1D_RO_EXCL_WRITES", "BriefDescription": "L1D Read-only Exclusive Writes", - "PublicDescription": "Counter:128 Name:L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" + "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" }, { "Unit": "CPU-M-CF", diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json index 935b7dcf067d..ef69540ab61d 100644 --- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json +++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json @@ -77,7 +77,7 @@ "UMask": "0x1", "EventName": "UOPS.MS_CYCLES", "SampleAfterValue": "2000000", - "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ. ", + "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.", "CounterMask": "1" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json index b2e681c78466..09c6de13de20 100644 --- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json @@ -189,7 +189,7 @@ "UMask": "0x8", "EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL", "SampleAfterValue": "200000", - "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect. " + "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect." }, { "EventCode": "0x89", diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json index 00bfdb5c5acb..212b117a8ffb 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json @@ -1,164 +1,352 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", - "MetricGroup": "Frontend", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 
uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "Branch_Misprediction_Cost" }, { + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts_SMT", + "MetricName": "Branch_Misprediction_Cost_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" + }, + { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - 
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * 
MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" 
}, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json index 0b080b0352d8..7938bf5689ab 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json @@ -56,10 +56,10 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x41", + "UMask": "0xc1", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", "SampleAfterValue": "200003", "BriefDescription": "Demand Data Read requests that hit L2 cache", @@ -68,7 +68,7 @@ { "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x42", + "UMask": "0xc2", "EventName": "L2_RQSTS.RFO_HIT", "SampleAfterValue": "200003", "BriefDescription": "RFO requests that hit L2 cache.", @@ -77,7 +77,7 @@ { "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x44", + "UMask": "0xc4", "EventName": "L2_RQSTS.CODE_RD_HIT", "SampleAfterValue": "200003", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", @@ -87,7 +87,7 @@ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x50", + "UMask": "0xd0", "EventName": "L2_RQSTS.L2_PF_HIT", "SampleAfterValue": "200003", "BriefDescription": "L2 prefetch requests that hit L2 cache", @@ -433,7 +433,7 @@ }, { "PEBS": "1", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "EventCode": "0xD0", "Counter": "0,1,2,3", "UMask": "0x41", @@ -445,7 +445,7 @@ }, { "PEBS": "1", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "EventCode": "0xD0", "Counter": "0,1,2,3", "UMask": "0x42", @@ -771,2628 +771,2628 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts demand data reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010001 ", + "MSRValue": "0x0000010001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that have any response type.", + "BriefDescription": "Counts demand data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020001 ", + "MSRValue": "0x0080020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020001 ", + "MSRValue": "0x0100020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020001 ", + "MSRValue": "0x0200020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020001 ", + "MSRValue": "0x0400020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020001 ", + "MSRValue": "0x1000020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020001 ", + "MSRValue": "0x3F80020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0001 ", + "MSRValue": "0x00803C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0001 ", + "MSRValue": "0x01003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0001 ", + "MSRValue": "0x02003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0001 ", + "MSRValue": "0x04003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0001 ", + "MSRValue": "0x10003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0001 ", + "MSRValue": "0x3F803C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010002 ", + "MSRValue": "0x0000010002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.", + "BriefDescription": "Counts all demand data writes (RFOs) have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0002 ", + "MSRValue": "0x00803C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0002 ", + "MSRValue": "0x01003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0002 ", + "MSRValue": "0x02003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0002 ", + "MSRValue": "0x04003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0002 ", + "MSRValue": "0x10003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0002 ", + "MSRValue": "0x3F803C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010004 ", + "MSRValue": "0x0000010004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that have any response type.", + "BriefDescription": "Counts all demand code reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020004 ", + "MSRValue": "0x0080020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020004 ", + "MSRValue": "0x0100020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020004 ", + "MSRValue": "0x0200020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020004 ", + "MSRValue": "0x0400020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020004 ", + "MSRValue": "0x1000020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020004 ", + "MSRValue": "0x3F80020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0004 ", + "MSRValue": "0x00803C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0004 ", + "MSRValue": "0x01003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0004 ", + "MSRValue": "0x02003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0004 ", + "MSRValue": "0x04003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0004 ", + "MSRValue": "0x10003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0004 ", + "MSRValue": "0x3F803C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive) have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010008 ", + "MSRValue": "0x0000010008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that have any response type.", + "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020008 ", + "MSRValue": "0x0080020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020008 ", + "MSRValue": "0x0100020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 
0xBB", - "MSRValue": "0x0200020008 ", + "MSRValue": "0x0200020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020008 ", + "MSRValue": "0x0400020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020008 ", + "MSRValue": "0x1000020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020008 ", + "MSRValue": "0x3F80020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0008 ", + "MSRValue": "0x00803C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0008 ", + "MSRValue": "0x01003C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0008 ", + "MSRValue": "0x02003C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0008 ", + "MSRValue": "0x04003C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0008 ", + "MSRValue": "0x10003C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0008 ", + "MSRValue": "0x3F803C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010010 ", + "MSRValue": "0x0000010010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020010 ", + "MSRValue": "0x0080020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020010 ", + "MSRValue": "0x0100020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020010 ", + "MSRValue": "0x0200020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + 
"PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020010 ", + "MSRValue": "0x0400020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020010 ", + "MSRValue": "0x1000020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020010 ", + "MSRValue": "0x3F80020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0010 ", + "MSRValue": "0x00803C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0010 ", + "MSRValue": "0x01003C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0010 ", + "MSRValue": "0x02003C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0010 ", + "MSRValue": "0x04003C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0010 ", + "MSRValue": "0x10003C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0010 ", + "MSRValue": "0x3F803C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010020 ", + "MSRValue": "0x0000010020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020020 ", + "MSRValue": "0x0080020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020020 ", + "MSRValue": "0x0100020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020020 ", + "MSRValue": "0x0200020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to 
L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020020 ", + "MSRValue": "0x0400020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020020 ", + "MSRValue": "0x1000020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020020 ", + "MSRValue": "0x3F80020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0020 ", + "MSRValue": "0x00803C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0020 ", + "MSRValue": "0x01003C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0020 ", + "MSRValue": "0x02003C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0020 ", + "MSRValue": "0x04003C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0020 ", + "MSRValue": "0x10003C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0020 ", + "MSRValue": "0x3F803C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010040 ", + "MSRValue": "0x0000010040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020040 ", + "MSRValue": "0x0080020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020040 ", + "MSRValue": "0x0100020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020040 ", + "MSRValue": "0x0200020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit 
value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020040 ", + "MSRValue": "0x0400020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020040 ", + "MSRValue": "0x1000020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020040 ", + "MSRValue": "0x3F80020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0040 ", + "MSRValue": "0x00803C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0040 ", + "MSRValue": "0x01003C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0040 ", + "MSRValue": "0x02003C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0040 ", + "MSRValue": "0x04003C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0040 ", + "MSRValue": "0x10003C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0040 ", + "MSRValue": "0x3F803C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010080 ", + "MSRValue": "0x0000010080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020080 ", + "MSRValue": "0x0080020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020080 ", + "MSRValue": "0x0100020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020080 ", + "MSRValue": "0x0200020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit 
value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020080 ", + "MSRValue": "0x0400020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020080 ", + "MSRValue": "0x1000020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020080 ", + "MSRValue": "0x3F80020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0080 ", + "MSRValue": "0x00803C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0080 ", + "MSRValue": "0x01003C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0080 ", + "MSRValue": "0x02003C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0080 ", + "MSRValue": "0x04003C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0080 ", + "MSRValue": "0x10003C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0080 ", + "MSRValue": "0x3F803C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010100 ", + "MSRValue": "0x0000010100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020100 ", + "MSRValue": "0x0080020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020100 ", + "MSRValue": "0x0100020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020100 ", + "MSRValue": "0x0200020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + 
"PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020100 ", + "MSRValue": "0x0400020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020100 ", + "MSRValue": "0x1000020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020100 ", + "MSRValue": "0x3F80020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0100 ", + "MSRValue": "0x00803C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0100 ", + "MSRValue": "0x01003C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0100 ", + "MSRValue": "0x02003C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0100 ", + "MSRValue": "0x04003C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0100 ", + "MSRValue": "0x10003C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0100 ", + "MSRValue": "0x3F803C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010200 ", + "MSRValue": "0x0000010200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020200 ", + "MSRValue": "0x0080020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020200 ", + "MSRValue": "0x0100020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020200 ", + "MSRValue": "0x0200020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify 
attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020200 ", + "MSRValue": "0x0400020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020200 ", + "MSRValue": "0x1000020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020200 ", + "MSRValue": "0x3F80020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0200 ", + "MSRValue": "0x00803C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0200 ", + "MSRValue": "0x01003C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0200 ", + "MSRValue": "0x02003C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0200 ", + "MSRValue": "0x04003C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0200 ", + "MSRValue": "0x10003C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0200 ", + "MSRValue": "0x3F803C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000018000 ", + "MSRValue": "0x0000018000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that have any response type.", + "BriefDescription": "Counts any other requests have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080028000 ", + "MSRValue": "0x0080028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100028000 ", + "MSRValue": "0x0100028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200028000 ", + "MSRValue": "0x0200028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400028000 ", + "MSRValue": "0x0400028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 
0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000028000 ", + "MSRValue": "0x1000028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80028000 ", + "MSRValue": "0x3F80028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c8000 ", + "MSRValue": "0x00803C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c8000 ", + "MSRValue": "0x01003C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c8000 ", + "MSRValue": "0x02003C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c8000 ", + "MSRValue": "0x04003C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c8000 ", + "MSRValue": "0x10003C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c8000 ", + "MSRValue": "0x3F803C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010090 ", + "MSRValue": "0x0000010090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that have any response type.", + "BriefDescription": "Counts all prefetch data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020090 ", + "MSRValue": "0x0080020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020090 ", + "MSRValue": "0x0100020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020090 ", + "MSRValue": "0x0200020090", "Counter": 
"0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020090 ", + "MSRValue": "0x0400020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020090 ", + "MSRValue": "0x1000020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020090 ", + "MSRValue": "0x3F80020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0090 ", + "MSRValue": "0x00803C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0090 ", + "MSRValue": "0x01003C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0090 ", + "MSRValue": "0x02003C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0090 ", + "MSRValue": "0x04003C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0090 ", + "MSRValue": "0x10003C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0090 ", + "MSRValue": "0x3F803C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010120 ", + "MSRValue": "0x0000010120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that have any response type.", + "BriefDescription": "Counts prefetch RFOs have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020120 ", + "MSRValue": "0x0080020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020120 ", + "MSRValue": "0x0100020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020120 ", + "MSRValue": "0x0200020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020120 ", + "MSRValue": "0x0400020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", 
"SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020120 ", + "MSRValue": "0x1000020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020120 ", + "MSRValue": "0x3F80020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0120 ", + "MSRValue": "0x00803C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0120 ", + "MSRValue": "0x01003C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0120 ", + "MSRValue": "0x02003C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0120 ", + "MSRValue": "0x04003C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0120 ", + "MSRValue": "0x10003C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0120 ", + "MSRValue": "0x3F803C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010240 ", + "MSRValue": "0x0000010240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that have any response type.", + "BriefDescription": "Counts all prefetch code reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020240 ", + "MSRValue": "0x0080020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020240 ", + "MSRValue": "0x0100020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020240 ", + "MSRValue": "0x0200020240", "Counter": "0,1,2,3", 
"UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020240 ", + "MSRValue": "0x0400020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020240 ", + "MSRValue": "0x1000020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020240 ", + "MSRValue": "0x3F80020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0240 ", + "MSRValue": "0x00803C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0240 ", + "MSRValue": "0x01003C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0240 ", + "MSRValue": "0x02003C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0240 ", + "MSRValue": "0x04003C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0240 ", + "MSRValue": "0x10003C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0240 ", + "MSRValue": "0x3F803C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010091 ", + "MSRValue": "0x0000010091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that have any response type.", + "BriefDescription": "Counts all demand & prefetch data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020091 ", + "MSRValue": "0x0080020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020091 ", + "MSRValue": "0x0100020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020091 ", + "MSRValue": "0x0200020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020091 ", + 
"MSRValue": "0x0400020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020091 ", + "MSRValue": "0x1000020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020091 ", + "MSRValue": "0x3F80020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0091 ", + "MSRValue": "0x00803C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0091 ", + "MSRValue": "0x01003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0091 ", + "MSRValue": "0x02003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0091 ", + "MSRValue": "0x04003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0091 ", + "MSRValue": "0x10003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0091 ", + "MSRValue": "0x3F803C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010122 ", + "MSRValue": "0x0000010122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.", + "BriefDescription": "Counts all demand & prefetch RFOs have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020122 ", + "MSRValue": "0x0080020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020122 ", + "MSRValue": "0x0100020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020122 ", + "MSRValue": "0x0200020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020122 ", + "MSRValue": "0x0400020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020122 ", + "MSRValue": "0x1000020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f80020122 ", + "MSRValue": "0x3F80020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00803c0122 ", + "MSRValue": "0x00803C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01003c0122 ", + "MSRValue": "0x01003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02003c0122 ", + "MSRValue": "0x02003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0122 ", + "MSRValue": "0x04003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0122 ", + "MSRValue": "0x10003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0122 ", + "MSRValue": "0x3F803C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json index 689d478dae93..15291239c128 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json @@ -1,24 +1,26 @@ [ { - "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x8", "Errata": "BDM30", "EventName": "OTHER_ASSISTS.AVX_TO_SSE", "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.", + "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable (Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when 
penalty is applicable.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x10", "Errata": "BDM30", "EventName": "OTHER_ASSISTS.SSE_TO_AVX", "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.", + "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable (Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -45,7 +47,7 @@ "UMask": "0x3", "EventName": "FP_ARITH_INST_RETIRED.SCALAR", "SampleAfterValue": "2000003", - "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)", "CounterHTOff": "0,1,2,3" }, { @@ -54,7 +56,7 @@ "UMask": "0x4", "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", "SampleAfterValue": "2000003", - "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -63,7 +65,7 @@ "UMask": "0x8", "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", "SampleAfterValue": "2000003", - "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -72,7 +74,7 @@ "UMask": "0x10", "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", "SampleAfterValue": "2000003", - "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -81,7 +83,7 @@ "UMask": "0x15", "EventName": "FP_ARITH_INST_RETIRED.DOUBLE", "SampleAfterValue": "2000006", - "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.", + "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -90,7 +92,7 @@ "UMask": "0x20", "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", "SampleAfterValue": "2000003", - "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -99,7 +101,7 @@ "UMask": "0x2a", "EventName": "FP_ARITH_INST_RETIRED.SINGLE", "SampleAfterValue": "2000005", - "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.", + "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "CounterHTOff": "0,1,2,3" }, { @@ -108,57 +110,62 @@ "UMask": "0x3c", "EventName": "FP_ARITH_INST_RETIRED.PACKED", "SampleAfterValue": "2000004", - "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x2", "EventName": "FP_ASSIST.X87_OUTPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to output value.", + "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result (Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x4", "EventName": "FP_ASSIST.X87_INPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to input value.", + "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand (Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x8", "EventName": "FP_ASSIST.SIMD_OUTPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of SIMD FP assists due to Output values", + "BriefDescription": "SSE* FP micro-code assist when output value is invalid. 
(Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts any input SSE* floating-point (FP) assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x10", "EventName": "FP_ASSIST.SIMD_INPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of SIMD FP assists due to input values", + "BriefDescription": "Any input SSE* FP Assist - (Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.", + "PEBS": "1", + "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1. Uses PEBS.", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x1e", "EventName": "FP_ASSIST.ANY", "SampleAfterValue": "100003", - "BriefDescription": "Cycles with any input/output SSE or FP assist", + "BriefDescription": "Counts any FP_ASSIST umask was incrementing (Precise Event)", "CounterMask": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json index 7142c76d7f11..aa4a5d762f21 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json @@ -211,7 +211,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.", + "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.", "EventCode": "0x9C", "Counter": "0,1,2,3", "UMask": "0x1", @@ -274,7 +274,7 @@ "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. 
These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.", + "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.", "EventCode": "0xAB", "Counter": "0,1,2,3", "UMask": "0x2", diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json index c9154cebbdf0..b6b5247d3d5a 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json @@ -311,7 +311,7 @@ }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above four.", + "PublicDescription": "Counts randomly selected loads with latency value being above four.", "EventCode": "0xCD", "MSRValue": "0x4", "Counter": "3", @@ -320,13 +320,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", "MSRIndex": "0x3F6", "SampleAfterValue": "100003", - "BriefDescription": "Loads with latency value being above 4", + "BriefDescription": "Randomly selected loads with latency value being above 4", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above eight.", + "PublicDescription": "Counts randomly selected loads with latency value being above eight.", "EventCode": "0xCD", "MSRValue": "0x8", "Counter": "3", @@ -335,13 +335,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", "MSRIndex": "0x3F6", "SampleAfterValue": "50021", - "BriefDescription": "Loads with latency value being above 8", + "BriefDescription": "Randomly selected loads with latency value being above 8", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 16.", + "PublicDescription": "Counts randomly selected loads with latency value being above 16.", "EventCode": "0xCD", 
"MSRValue": "0x10", "Counter": "3", @@ -350,13 +350,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", "MSRIndex": "0x3F6", "SampleAfterValue": "20011", - "BriefDescription": "Loads with latency value being above 16", + "BriefDescription": "Randomly selected loads with latency value being above 16", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 32.", + "PublicDescription": "Counts randomly selected loads with latency value being above 32.", "EventCode": "0xCD", "MSRValue": "0x20", "Counter": "3", @@ -365,13 +365,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", "MSRIndex": "0x3F6", "SampleAfterValue": "100007", - "BriefDescription": "Loads with latency value being above 32", + "BriefDescription": "Randomly selected loads with latency value being above 32", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 64.", + "PublicDescription": "Counts randomly selected loads with latency value being above 64.", "EventCode": "0xCD", "MSRValue": "0x40", "Counter": "3", @@ -380,13 +380,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", "MSRIndex": "0x3F6", "SampleAfterValue": "2003", - "BriefDescription": "Loads with latency value being above 64", + "BriefDescription": "Randomly selected loads with latency value being above 64", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 128.", + "PublicDescription": "Counts randomly selected loads with latency value being above 128.", "EventCode": "0xCD", "MSRValue": "0x80", "Counter": "3", @@ -395,13 +395,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", "MSRIndex": "0x3F6", "SampleAfterValue": "1009", - "BriefDescription": "Loads with latency value being above 128", + "BriefDescription": "Randomly selected loads with latency value being above 128", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 256.", + "PublicDescription": "Counts randomly selected loads with latency value being above 256.", "EventCode": "0xCD", "MSRValue": "0x100", "Counter": "3", @@ -410,13 +410,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", "MSRIndex": "0x3F6", "SampleAfterValue": "503", - "BriefDescription": "Loads with latency value being above 256", + "BriefDescription": "Randomly selected loads with latency value being above 256", "TakenAlone": "1", "CounterHTOff": "3" }, { "PEBS": "2", - "PublicDescription": "This event counts loads with latency value being above 512.", + "PublicDescription": "Counts randomly selected loads with latency value being above 512.", "EventCode": "0xCD", "MSRValue": "0x200", "Counter": "3", @@ -425,2620 +425,2620 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", "MSRIndex": "0x3F6", "SampleAfterValue": "101", - "BriefDescription": "Loads with latency value being above 512", + "BriefDescription": "Randomly selected loads with latency value being above 512", "TakenAlone": "1", "CounterHTOff": "3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020001 ", + 
"MSRValue": "0x2000020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0001 ", + "MSRValue": "0x20003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000001 ", + "MSRValue": "0x0084000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000001 ", + "MSRValue": "0x0104000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000001 ", + "MSRValue": "0x0204000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", 
"Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000001 ", + "MSRValue": "0x0404000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000001 ", + "MSRValue": "0x1004000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000001 ", + "MSRValue": "0x2004000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000001 ", + "MSRValue": "0x3F84000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000001 ", + "MSRValue": "0x00BC000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000001 ", + "MSRValue": "0x013C000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000001 ", + "MSRValue": "0x023C000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000001 ", + "MSRValue": "0x043C000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0002 ", + "MSRValue": "0x20003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000002 ", + "MSRValue": "0x3F84000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000002 ", + "MSRValue": "0x00BC000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000002 ", + "MSRValue": "0x013C000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000002 ", + "MSRValue": "0x023C000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000002 ", + "MSRValue": "0x043C000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand data writes (RFOs)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020004 ", + "MSRValue": "0x2000020004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0004 ", + "MSRValue": "0x20003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000004 ", + "MSRValue": "0x0084000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000004 ", + "MSRValue": "0x0104000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000004 ", + "MSRValue": "0x0204000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000004 ", + "MSRValue": "0x0404000004", "Counter": "0,1,2,3", "UMask": "0x1", 
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000004 ", + "MSRValue": "0x1004000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000004 ", + "MSRValue": "0x2004000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000004 ", + "MSRValue": "0x3F84000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000004 ", + "MSRValue": "0x00BC000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000004 ", + "MSRValue": "0x013C000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000004 ", + "MSRValue": "0x023C000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000004 ", + "MSRValue": "0x043C000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020008 ", + 
"MSRValue": "0x2000020008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0008 ", + "MSRValue": "0x20003C0008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000008 ", + "MSRValue": "0x0084000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000008 ", + "MSRValue": "0x0104000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000008 ", + "MSRValue": "0x0204000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": 
"COREWB & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000008 ", + "MSRValue": "0x0404000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000008 ", + "MSRValue": "0x1004000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000008 ", + "MSRValue": "0x2004000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000008 ", + "MSRValue": "0x3F84000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000008 ", + "MSRValue": "0x00BC000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000008 ", + "MSRValue": "0x013C000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000008 ", + "MSRValue": "0x023C000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts writebacks (modified to exclusive)", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000008 ", + "MSRValue": "0x043C000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "COREWB & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts writebacks (modified to exclusive)", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch 
(that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020010 ", + "MSRValue": "0x2000020010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0010 ", + "MSRValue": "0x20003C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000010 ", + "MSRValue": "0x0084000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000010 ", + "MSRValue": "0x0104000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000010 ", + "MSRValue": 
"0x0204000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000010 ", + "MSRValue": "0x0404000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000010 ", + "MSRValue": "0x1004000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000010 ", + "MSRValue": "0x2004000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000010 ", + "MSRValue": "0x3F84000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & 
L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000010 ", + "MSRValue": "0x00BC000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000010 ", + "MSRValue": "0x013C000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000010 ", + "MSRValue": "0x023C000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000010 ", + "MSRValue": "0x043C000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020020 ", + "MSRValue": "0x2000020020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0020 ", + "MSRValue": "0x20003C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000020 ", + "MSRValue": "0x0084000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000020 ", + "MSRValue": "0x0104000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000020 ", + "MSRValue": "0x0204000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all 
prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000020 ", + "MSRValue": "0x0404000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000020 ", + "MSRValue": "0x1004000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000020 ", + "MSRValue": "0x2004000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000020 ", + "MSRValue": "0x3F84000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000020 ", + "MSRValue": "0x00BC000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000020 ", + "MSRValue": "0x013C000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000020 ", + "MSRValue": "0x023C000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000020 ", + "MSRValue": "0x043C000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated 
MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020040 ", + "MSRValue": "0x2000020040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0040 ", + "MSRValue": "0x20003C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000040 ", + "MSRValue": "0x0084000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000040 ", + "MSRValue": "0x0104000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to 
specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000040 ", + "MSRValue": "0x0204000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000040 ", + "MSRValue": "0x0404000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000040 ", + "MSRValue": "0x1004000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000040 ", + "MSRValue": "0x2004000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": 
"0x3f84000040 ", + "MSRValue": "0x3F84000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000040 ", + "MSRValue": "0x00BC000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000040 ", + "MSRValue": "0x013C000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000040 ", + "MSRValue": "0x023C000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000040 ", + "MSRValue": "0x043C000040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020080 ", + "MSRValue": "0x2000020080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0080 ", + "MSRValue": "0x20003C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000080 ", + "MSRValue": "0x0084000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000080 ", + "MSRValue": "0x0104000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000080 ", + "MSRValue": "0x0204000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes 
and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000080 ", + "MSRValue": "0x0404000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000080 ", + "MSRValue": "0x1004000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000080 ", + "MSRValue": "0x2004000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000080 ", + "MSRValue": "0x3F84000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000080 ", + "MSRValue": "0x00BC000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000080 ", + "MSRValue": "0x013C000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000080 ", + "MSRValue": "0x023C000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000080 ", + "MSRValue": "0x043C000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020100 ", + "MSRValue": "0x2000020100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0100 ", + "MSRValue": "0x20003C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000100 ", + "MSRValue": "0x0084000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000100 ", + "MSRValue": "0x0104000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000100 ", + "MSRValue": "0x0204000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore 
transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000100 ", + "MSRValue": "0x0404000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000100 ", + "MSRValue": "0x1004000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000100 ", + "MSRValue": "0x2004000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000100 ", + "MSRValue": "0x3F84000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000100 ", + "MSRValue": "0x00BC000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000100 ", + "MSRValue": "0x013C000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000100 ", + "MSRValue": "0x023C000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000100 ", + "MSRValue": "0x043C000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020200 ", + "MSRValue": "0x2000020200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0200 ", + "MSRValue": "0x20003C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000200 ", + "MSRValue": "0x0084000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000200 ", + "MSRValue": "0x0104000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000200 ", + "MSRValue": "0x0204000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a 
dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000200 ", + "MSRValue": "0x0404000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000200 ", + "MSRValue": "0x1004000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000200 ", + "MSRValue": "0x2004000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000200 ", + "MSRValue": "0x3F84000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000200 ", + "MSRValue": "0x00BC000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000200 ", + "MSRValue": "0x013C000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000200 ", + "MSRValue": "0x023C000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000200 ", + "MSRValue": "0x043C000200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000028000 ", + "MSRValue": "0x2000028000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c8000 ", + "MSRValue": "0x20003C8000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084008000 ", + "MSRValue": "0x0084008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104008000 ", + "MSRValue": "0x0104008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204008000 ", + "MSRValue": "0x0204008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404008000 ", + "MSRValue": "0x0404008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": 
"0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004008000 ", + "MSRValue": "0x1004008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004008000 ", + "MSRValue": "0x2004008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84008000 ", + "MSRValue": "0x3F84008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc008000 ", + "MSRValue": "0x00BC008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c008000 ", + "MSRValue": "0x013C008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c008000 ", + "MSRValue": "0x023C008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts any other requests that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c008000 ", + "MSRValue": "0x043C008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts any other requests", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020090 ", + "MSRValue": "0x2000020090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0090 ", + "MSRValue": "0x20003C0090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000090 ", + "MSRValue": "0x0084000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000090 ", + "MSRValue": "0x0104000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000090 ", + "MSRValue": "0x0204000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", 
"CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000090 ", + "MSRValue": "0x0404000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000090 ", + "MSRValue": "0x1004000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000090 ", + "MSRValue": "0x2004000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000090 ", + "MSRValue": "0x3F84000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000090 ", + "MSRValue": "0x00BC000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000090 ", + "MSRValue": "0x013C000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000090 ", + "MSRValue": "0x023C000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000090 ", + "MSRValue": "0x043C000090", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020120 ", + 
"MSRValue": "0x2000020120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0120 ", + "MSRValue": "0x20003C0120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000120 ", + "MSRValue": "0x0084000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000120 ", + "MSRValue": "0x0104000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000120 ", + "MSRValue": "0x0204000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore 
response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000120 ", + "MSRValue": "0x0404000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000120 ", + "MSRValue": "0x1004000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000120 ", + "MSRValue": "0x2004000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000120 ", + "MSRValue": "0x3F84000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000120 ", + "MSRValue": "0x00BC000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000120 ", + "MSRValue": "0x013C000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000120 ", + "MSRValue": "0x023C000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000120 ", + "MSRValue": "0x043C000120", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020240 ", + "MSRValue": "0x2000020240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0240 ", + "MSRValue": "0x20003C0240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000240 ", + "MSRValue": "0x0084000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000240 ", + "MSRValue": "0x0104000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000240 ", + "MSRValue": "0x0204000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", 
"CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000240 ", + "MSRValue": "0x0404000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000240 ", + "MSRValue": "0x1004000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000240 ", + "MSRValue": "0x2004000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000240 ", + "MSRValue": "0x3F84000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000240 ", + "MSRValue": "0x00BC000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000240 ", + "MSRValue": "0x013C000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000240 ", + "MSRValue": "0x023C000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch code reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000240 ", + "MSRValue": "0x043C000240", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all prefetch code reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": 
"0x2000020091 ", + "MSRValue": "0x2000020091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0091 ", + "MSRValue": "0x20003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000091 ", + "MSRValue": "0x0084000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000091 ", + "MSRValue": "0x0104000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000091 ", + "MSRValue": "0x0204000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", 
"SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000091 ", + "MSRValue": "0x0404000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000091 ", + "MSRValue": "0x1004000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000091 ", + "MSRValue": "0x2004000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000091 ", + "MSRValue": "0x3F84000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000091 ", + "MSRValue": "0x00BC000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000091 ", + "MSRValue": "0x013C000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000091 ", + "MSRValue": "0x023C000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000091 ", + "MSRValue": "0x043C000091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts 
all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2000020122 ", + "MSRValue": "0x2000020122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x20003c0122 ", + "MSRValue": "0x20003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000122 ", + "MSRValue": "0x0084000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000122 ", + "MSRValue": "0x0104000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000122 ", + "MSRValue": "0x0204000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - 
"BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000122 ", + "MSRValue": "0x0404000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000122 ", + "MSRValue": "0x1004000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x2004000122 ", + "MSRValue": "0x2004000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f84000122 ", + "MSRValue": "0x3F84000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000122 ", + "MSRValue": "0x00BC000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000122 ", + "MSRValue": "0x013C000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000122 ", + "MSRValue": "0x023C000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response.", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000122 ", + "MSRValue": "0x043C000122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts all demand & prefetch RFOs", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json index 999cf3066363..bb25574b8d21 100644 --- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json @@ -1,7 +1,6 @@ [ { "PublicDescription": "This event counts the number of instructions 
retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.", - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -11,7 +10,6 @@ }, { "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -20,7 +18,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "AnyThread": "1", @@ -31,7 +28,6 @@ }, { "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -317,7 +313,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). 
Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", + "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", "EventCode": "0x87", "Counter": "0,1,2,3", "UMask": "0x1", @@ -786,8 +782,8 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.", - "EventCode": "0xA2", + "PublicDescription": "This event counts resource-related stall cycles.", + "EventCode": "0xa2", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "RESOURCE_STALLS.ANY", @@ -973,6 +969,7 @@ "CounterHTOff": "2" }, { + "PublicDescription": "Number of Uops delivered by the LSD.", "EventCode": "0xA8", "Counter": "0,1,2,3", "UMask": "0x1", @@ -1147,7 +1144,8 @@ "CounterHTOff": "1" }, { - "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. 
For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.", "EventCode": "0xC0", "Counter": "0,1,2,3", "UMask": "0x2", @@ -1157,12 +1155,12 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PEBS": "1", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x40", "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST", "SampleAfterValue": "100003", - "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -1178,26 +1176,28 @@ "Data_LA": "1" }, { - "PublicDescription": "This event counts cycles without actually retired uops.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts cycles without actually retired uops.", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "UOPS_RETIRED.STALL_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles without actually retired uops.", + "BriefDescription": "Cycles no executable uops retired (Precise Event)", "CounterMask": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.", + "PEBS": "1", + "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to PEBS uops retired event.", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "UOPS_RETIRED.TOTAL_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with less than 10 actually retired uops.", + "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.", "CounterMask": "10", "CounterHTOff": "0,1,2,3" }, @@ -1320,13 +1320,14 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts not taken branch instructions retired.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.", "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x10", "EventName": "BR_INST_RETIRED.NOT_TAKEN", "SampleAfterValue": "400009", - "BriefDescription": "Not taken branch instructions retired.", + "BriefDescription": "Counts all not taken macro branch instructions retired. 
(Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -1341,14 +1342,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts far branch instructions retired.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.", "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x40", "Errata": "BDW98", "EventName": "BR_INST_RETIRED.FAR_BRANCH", "SampleAfterValue": "100007", - "BriefDescription": "Far branch instructions retired.", + "BriefDescription": "Counts the number of far branch instructions retired.(Precise Event)", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json index 4ad425312bdc..bf243fe2a0ec 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json @@ -439,7 +439,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -451,7 +451,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. 
A line split is across 64B cache-line which includes a page split (4K).", "SampleAfterValue": "100003", "L1_Hit_Indication": "1", "CounterHTOff": "0,1,2,3" diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json index 0d04bf9db000..e2f0540625a2 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "UMask": "0x1", "BriefDescription": "Instructions retired from execution.", "Counter": "Fixed counter 0", @@ -10,7 +9,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when the thread is not in halt state", "Counter": "Fixed counter 1", @@ -20,7 +18,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", "Counter": "Fixed counter 1", @@ -30,7 +27,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x3", "BriefDescription": "Reference cycles when the core is not in halt state.", "Counter": "Fixed counter 2", @@ -322,7 +318,7 @@ "BriefDescription": "Stalls caused by changing prefix length of the instruction.", "Counter": "0,1,2,3", "EventName": "ILD_STALL.LCP", - "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", + "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json index 5a7f1ec24200..c6f9762f32c0 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json @@ -1,164 +1,370 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. 
For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. 
issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", - "MetricGroup": "Frontend", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 
uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED )) ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "Branch_Misprediction_Cost" + }, + { + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts_SMT", + "MetricName": "Branch_Misprediction_Cost_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - 
"MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED) ) / (2*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles))", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired 
demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )", + "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "DRAM_Read_Latency" + }, + { + "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@", + "BriefDescription": "Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_Parallel_Reads" + }, + { + "MetricExpr": "cbox_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json index 141b1080429d..75a3098d5775 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json @@ -57,17 +57,17 @@ }, { "EventCode": "0x24", - "UMask": "0x41", + "UMask": "0xc1", "BriefDescription": "Demand Data Read requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", - "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x24", - "UMask": "0x42", + "UMask": "0xc2", "BriefDescription": "RFO requests that hit L2 cache.", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.RFO_HIT", @@ -76,7 +76,7 @@ }, { "EventCode": "0x24", - "UMask": "0x44", + "UMask": "0xc4", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.CODE_RD_HIT", @@ -85,7 +85,7 @@ }, { "EventCode": "0x24", - "UMask": "0x50", + "UMask": "0xd0", "BriefDescription": "L2 prefetch requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.L2_PF_HIT", @@ -396,24 +396,24 @@ { "EventCode": "0xD0", "UMask": "0x11", - "BriefDescription": "Retired load uops that miss the STLB. 
(Precise Event - PEBS)", + "BriefDescription": "Retired load uops that miss the STLB.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.", + "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x12", - "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)", + "BriefDescription": "Retired store uops that miss the STLB.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.", + "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.", "SampleAfterValue": "100003", "L1_Hit_Indication": "1", "CounterHTOff": "0,1,2,3" @@ -421,37 +421,37 @@ { "EventCode": "0xD0", "UMask": "0x21", - "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops with locked access.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", "Errata": "BDM35", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.", + "PublicDescription": "This event counts load uops with locked access retired to the architected path.", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x41", - "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)", + "BriefDescription": "Retired load uops that split across a cacheline boundary.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x42", - "BriefDescription": "Retired store uops that split across a cacheline boundary. 
(Precise Event - PEBS)", + "BriefDescription": "Retired store uops that split across a cacheline boundary.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "SampleAfterValue": "100003", "L1_Hit_Indication": "1", "CounterHTOff": "0,1,2,3" @@ -459,24 +459,24 @@ { "EventCode": "0xD0", "UMask": "0x81", - "BriefDescription": "All retired load uops. (Precise Event - PEBS)", + "BriefDescription": "All retired load uops.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.", + "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x82", - "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)", + "BriefDescription": "All retired store uops.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL_STORES", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.", + "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.", "SampleAfterValue": "2000003", "L1_Hit_Indication": "1", "CounterHTOff": "0,1,2,3" @@ -484,69 +484,69 @@ { "EventCode": "0xD1", "UMask": "0x1", - "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops with L1 cache hits as data sources.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. 
This event also counts SW prefetches independent of the actual data source.", + "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x2", - "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops with L2 cache hits as data sources.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", "Errata": "BDM35", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.", + "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x4", - "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT", "Errata": "BDM100", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.", + "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.", "SampleAfterValue": "50021", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x8", - "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.", + "BriefDescription": "Retired load uops misses in L1 cache as data sources.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.", + "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x10", - "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.", + "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.", + "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. 
Counting excludes unknown and UC data source.", "SampleAfterValue": "50021", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x20", - "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).", + "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -558,83 +558,84 @@ { "EventCode": "0xD1", "UMask": "0x40", - "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.", + "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD2", "UMask": "0x1", - "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS", "Errata": "BDM100", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.", + "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.", "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD2", "UMask": "0x2", - "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. 
(Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", "Errata": "BDM100", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.", + "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.", "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD2", "UMask": "0x4", - "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", "Errata": "BDM100", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).", + "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).", "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD2", "UMask": "0x8", - "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)", + "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE", "Errata": "BDM100", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.", + "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD3", "UMask": "0x1", + "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", "Errata": "BDE70, BDM100", - "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. 
This is a precise event.", + "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD3", "UMask": "0x4", - "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -646,7 +647,7 @@ { "EventCode": "0xD3", "UMask": "0x10", - "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -658,7 +659,7 @@ { "EventCode": "0xD3", "UMask": "0x20", - "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -810,12 +811,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all requests that hit in the L3", - "MSRValue": "0x3f803c8fff", + "BriefDescription": "Counts all requests hit in the L3", + "MSRValue": "0x3F803C8FFF", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -823,12 +824,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c07f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C07F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -836,12 +837,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c07f7", + "BriefDescription": "Counts 
all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C07F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -849,12 +850,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0244", + "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -862,12 +863,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0122", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -875,12 +876,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling 
cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0122", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -888,12 +889,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0091", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -901,12 +902,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0091", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -914,12 +915,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - 
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3", - "MSRValue": "0x3f803c0200", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", + "MSRValue": "0x3F803C0200", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -927,12 +928,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3", - "MSRValue": "0x3f803c0100", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", + "MSRValue": "0x3F803C0100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -940,12 +941,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0002", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -953,12 +954,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3", - "MSRValue": "0x3f803c0002", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3", + "MSRValue": "0x3F803C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific 
pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json index d7b9d9c9c518..ba0e0c4e74eb 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json @@ -42,7 +42,7 @@ { "EventCode": "0xC7", "UMask": "0x3", - "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.SCALAR", "SampleAfterValue": "2000003", @@ -51,7 +51,7 @@ { "EventCode": "0xC7", "UMask": "0x4", - "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE", "SampleAfterValue": "2000003", @@ -60,7 +60,7 @@ { "EventCode": "0xC7", "UMask": "0x8", - "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE", "SampleAfterValue": "2000003", @@ -69,7 +69,7 @@ { "EventCode": "0xC7", "UMask": "0x10", - "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. 
Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE", "SampleAfterValue": "2000003", @@ -78,7 +78,7 @@ { "EventCode": "0xC7", "UMask": "0x15", - "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.", + "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.DOUBLE", "SampleAfterValue": "2000006", @@ -87,7 +87,7 @@ { "EventCode": "0xc7", "UMask": "0x20", - "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE", "SampleAfterValue": "2000003", @@ -96,7 +96,7 @@ { "EventCode": "0xC7", "UMask": "0x2a", - "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.", + "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.SINGLE", "SampleAfterValue": "2000005", @@ -105,7 +105,7 @@ { "EventCode": "0xC7", "UMask": "0x3c", - "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.", + "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.PACKED", "SampleAfterValue": "2000004", diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json index d79a5cfea44b..ecb413bb67ca 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json @@ -170,11 +170,11 @@ { "EventCode": "0xc8", "UMask": "0x4", - "BriefDescription": "Number of times HLE abort was triggered (PEBS)", + "BriefDescription": "Number of times HLE abort was triggered", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "HLE_RETIRED.ABORTED", - "PublicDescription": "Number of times HLE abort was triggered (PEBS).", + "PublicDescription": "Number of times HLE abort was triggered.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -251,11 +251,11 @@ { "EventCode": "0xc9", "UMask": "0x4", - "BriefDescription": "Number of times RTM abort was triggered (PEBS)", + "BriefDescription": "Number of times RTM abort was triggered", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "RTM_RETIRED.ABORTED", - "PublicDescription": "Number of times RTM abort was triggered (PEBS).", + "PublicDescription": "Number of times RTM abort was triggered .", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3" }, @@ -312,14 +312,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 4", + "BriefDescription": "Randomly selected loads with latency value being above 4", "PEBS": "2", "MSRValue": "0x4", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above four.", + "PublicDescription": "Counts randomly selected loads with latency value being above four.", "TakenAlone": "1", "SampleAfterValue": "100003", "CounterHTOff": "3" @@ -327,14 +327,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 8", + "BriefDescription": "Randomly selected loads with latency value being above 8", "PEBS": "2", "MSRValue": "0x8", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above eight.", + "PublicDescription": "Counts randomly selected loads with latency value being above eight.", "TakenAlone": "1", "SampleAfterValue": "50021", "CounterHTOff": "3" @@ -342,14 +342,14 @@ { "EventCode": "0xCD", "UMask": 
"0x1", - "BriefDescription": "Loads with latency value being above 16", + "BriefDescription": "Randomly selected loads with latency value being above 16", "PEBS": "2", "MSRValue": "0x10", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 16.", + "PublicDescription": "Counts randomly selected loads with latency value being above 16.", "TakenAlone": "1", "SampleAfterValue": "20011", "CounterHTOff": "3" @@ -357,14 +357,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 32", + "BriefDescription": "Randomly selected loads with latency value being above 32", "PEBS": "2", "MSRValue": "0x20", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 32.", + "PublicDescription": "Counts randomly selected loads with latency value being above 32.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "3" @@ -372,14 +372,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 64", + "BriefDescription": "Randomly selected loads with latency value being above 64", "PEBS": "2", "MSRValue": "0x40", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 64.", + "PublicDescription": "Counts randomly selected loads with latency value being above 64.", "TakenAlone": "1", "SampleAfterValue": "2003", "CounterHTOff": "3" @@ -387,14 +387,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 128", + "BriefDescription": "Randomly selected loads with latency value being above 128", "PEBS": "2", "MSRValue": "0x80", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 128.", + "PublicDescription": "Counts randomly selected loads with latency value being above 128.", "TakenAlone": "1", "SampleAfterValue": "1009", "CounterHTOff": "3" @@ -402,14 +402,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 256", + "BriefDescription": "Randomly selected loads with latency value being above 256", "PEBS": "2", "MSRValue": "0x100", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 256.", + "PublicDescription": "Counts randomly selected loads with latency value being above 256.", "TakenAlone": "1", "SampleAfterValue": "503", "CounterHTOff": "3" @@ -417,14 +417,14 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 512", + "BriefDescription": "Randomly selected loads with latency value being above 512", "PEBS": "2", "MSRValue": "0x200", "Counter": "3", "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", "MSRIndex": "0x3F6", "Errata": "BDM100, BDM35", - "PublicDescription": "This event counts loads with latency value being above 512.", + "PublicDescription": "Counts randomly selected loads with latency value being above 512.", "TakenAlone": "1", "SampleAfterValue": 
"101", "CounterHTOff": "3" @@ -433,12 +433,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all requests that miss in the L3", - "MSRValue": "0x3fbfc08fff", + "BriefDescription": "Counts all requests miss in the L3", + "MSRValue": "0x3FBFC08FFF", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -446,12 +446,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache", - "MSRValue": "0x087fc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache", + "MSRValue": "0x087FC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -459,12 +459,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache", + "MSRValue": "0x103FC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -472,12 +472,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram", - "MSRValue": "0x063bc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram", + "MSRValue": "0x063BC007F7", "Counter": "0,1,2,3", "EventName": 
"OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -485,12 +485,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram", - "MSRValue": "0x06040007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram", + "MSRValue": "0x06040007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -498,12 +498,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3", - "MSRValue": "0x3fbfc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3", + "MSRValue": "0x3FBFC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -511,12 +511,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0604000244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram", "SampleAfterValue": 
"100003", "CounterHTOff": "0,1,2,3" }, @@ -524,12 +524,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3", - "MSRValue": "0x3fbfc00244", + "BriefDescription": "Counts all demand & prefetch code reads miss in the L3", + "MSRValue": "0x3FBFC00244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -537,12 +537,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "MSRValue": "0x0604000122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -550,12 +550,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3", - "MSRValue": "0x3fbfc00122", + "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3", + "MSRValue": "0x3FBFC00122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -563,12 +563,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache", - "MSRValue": "0x087fc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache", + "MSRValue": "0x087FC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a 
dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -576,12 +576,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache", + "MSRValue": "0x103FC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -589,12 +589,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram", - "MSRValue": "0x063bc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram", + "MSRValue": "0x063BC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -602,12 +602,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0604000091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -615,12 +615,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3", - "MSRValue": "0x3fbfc00091", + 
"BriefDescription": "Counts all demand & prefetch data reads miss in the L3", + "MSRValue": "0x3FBFC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -628,12 +628,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3", - "MSRValue": "0x3fbfc00200", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", + "MSRValue": "0x3FBFC00200", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -641,12 +641,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3", - "MSRValue": "0x3fbfc00100", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", + "MSRValue": "0x3FBFC00100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -654,12 +654,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc00002", + "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache", + "MSRValue": "0x103FC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache", 
"SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -667,12 +667,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3", - "MSRValue": "0x3fbfc00002", + "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3", + "MSRValue": "0x3FBFC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json index 0d04bf9db000..c2f6932a5817 100644 --- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "UMask": "0x1", "BriefDescription": "Instructions retired from execution.", "Counter": "Fixed counter 0", @@ -10,7 +9,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when the thread is not in halt state", "Counter": "Fixed counter 1", @@ -20,7 +18,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", "Counter": "Fixed counter 1", @@ -30,7 +27,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x3", "BriefDescription": "Reference cycles when the core is not in halt state.", "Counter": "Fixed counter 2", @@ -322,7 +318,7 @@ "BriefDescription": "Stalls caused by changing prefix length of the instruction.", "Counter": "0,1,2,3", "EventName": "ILD_STALL.LCP", - "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", + "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -786,12 +782,12 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0xa2", "UMask": "0x1", "BriefDescription": "Resource-related stall cycles", "Counter": "0,1,2,3", "EventName": "RESOURCE_STALLS.ANY", - "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. 
This counts cycles that the pipeline backend blocked uop delivery from the front end.", + "PublicDescription": "This event counts resource-related stall cycles.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1168,12 +1164,12 @@ { "EventCode": "0xC2", "UMask": "0x1", - "BriefDescription": "Actually retired uops. (Precise Event - PEBS)", + "BriefDescription": "Actually retired uops.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "UOPS_RETIRED.ALL", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.", + "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1204,11 +1200,11 @@ { "EventCode": "0xC2", "UMask": "0x2", - "BriefDescription": "Retirement slots used. (Precise Event - PEBS)", + "BriefDescription": "Retirement slots used.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "UOPS_RETIRED.RETIRE_SLOTS", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.", + "PublicDescription": "This event counts the number of retirement slots used.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1266,33 +1262,33 @@ { "EventCode": "0xC4", "UMask": "0x1", - "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)", + "BriefDescription": "Conditional branch instructions retired.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.CONDITIONAL", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.", + "PublicDescription": "This event counts conditional branch instructions retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xC4", "UMask": "0x2", - "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)", + "BriefDescription": "Direct and indirect near call instructions retired.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_CALL", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.", + "PublicDescription": "This event counts both direct and indirect near call instructions retired.", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xC4", "UMask": "0x2", - "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). 
(Precise Event - PEBS)", + "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_CALL_R3", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).", + "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1311,11 +1307,11 @@ { "EventCode": "0xC4", "UMask": "0x8", - "BriefDescription": "Return instructions retired. (Precise Event - PEBS)", + "BriefDescription": "Return instructions retired.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_RETURN", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.", + "PublicDescription": "This event counts return instructions retired.", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1332,11 +1328,11 @@ { "EventCode": "0xC4", "UMask": "0x20", - "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)", + "BriefDescription": "Taken branch instructions retired.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_TAKEN", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.", + "PublicDescription": "This event counts taken branch instructions retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1364,11 +1360,11 @@ { "EventCode": "0xC5", "UMask": "0x1", - "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)", + "BriefDescription": "Mispredicted conditional branch instructions retired.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_MISP_RETIRED.CONDITIONAL", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.", + "PublicDescription": "This event counts mispredicted conditional branch instructions retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1386,22 +1382,22 @@ { "EventCode": "0xC5", "UMask": "0x8", - "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)", + "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_MISP_RETIRED.RET", - "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.", + "PublicDescription": "This event counts mispredicted return instructions retired.", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xC5", "UMask": "0x20", - "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).", + "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", - "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. 
(Precise Event - PEBS).", + "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index 71e9737f4614..1a1a3501180a 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -1,164 +1,394 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )", - "MetricGroup": "Frontend", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. 
", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical 
core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "Branch_Misprediction_Cost" + }, + { + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": 
"Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts_SMT", + "MetricName": "Branch_Misprediction_Cost_SMT" }, { + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" + }, + { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time", + 
"BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Access_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for 
reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )", + "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "DRAM_Read_Latency" + }, + { + "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@", + "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_Parallel_Reads" + }, + { + "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 1 == 1 else 0 else 0", + "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "MEM_PMM_Read_Latency" + }, + { + "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0", + "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "PMM_Read_BW" + }, + { + "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 1 == 1 else 0 else 0", + "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "PMM_Write_BW" + }, + { + "MetricExpr": "cha_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git 
a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json index f8bbe087b0f8..52a105666afc 100644 --- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json +++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json @@ -77,7 +77,8 @@ "UMask": "0x21", "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Locked load uops retired (Precise event capable)" + "BriefDescription": "Locked load uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -88,7 +89,8 @@ "UMask": "0x41", "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -99,7 +101,8 @@ "UMask": "0x42", "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -110,7 +113,8 @@ "UMask": "0x43", "EventName": "MEM_UOPS_RETIRED.SPLIT", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -121,7 +125,8 @@ "UMask": "0x81", "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired (Precise event capable)" + "BriefDescription": "Load uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -132,7 +137,8 @@ "UMask": "0x82", "EventName": "MEM_UOPS_RETIRED.ALL_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Store uops retired (Precise event capable)" + "BriefDescription": "Store uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -143,7 +149,8 @@ "UMask": "0x83", "EventName": "MEM_UOPS_RETIRED.ALL", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired (Precise event capable)" + "BriefDescription": "Memory uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -154,7 +161,8 @@ "UMask": "0x1", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)" + "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -165,7 +173,8 @@ "UMask": "0x2", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that hit L2 (Precise event capable)" + "BriefDescription": "Load uops retired that hit L2 (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -176,7 +185,8 @@ "UMask": "0x8", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)" + "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -187,7 +197,8 @@ "UMask": "0x10", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed L2 (Precise event capable)" + "BriefDescription": "Load 
uops retired that missed L2 (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -198,7 +209,8 @@ "UMask": "0x20", "EventName": "MEM_LOAD_UOPS_RETIRED.HITM", "SampleAfterValue": "200003", - "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)" + "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -209,7 +221,8 @@ "UMask": "0x40", "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Loads retired that hit WCB (Precise event capable)" + "BriefDescription": "Loads retired that hit WCB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -220,26 +233,14 @@ "UMask": "0x80", "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Loads retired that came from DRAM (Precise event capable)" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x40000032b7 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" + "BriefDescription": "Loads retired that came from DRAM (Precise event capable)", + "Data_LA": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x36000032b7 ", + "MSRValue": "0x36000032b7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY", @@ -252,7 +253,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x10000032b7 ", + "MSRValue": "0x10000032b7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE", @@ -265,7 +266,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x04000032b7 ", + "MSRValue": "0x04000032b7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -278,20 +279,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x02000032b7 ", + "MSRValue": "0x02000032b7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x00000432b7 ", + "MSRValue": "0x00000432b7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT", @@ -302,35 +303,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x00000132b7 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000022 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000022 ", + "MSRValue": "0x3600000022", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY", @@ -343,7 +318,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000022 ", + "MSRValue": "0x1000000022", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE", @@ -356,7 +331,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000022 ", + "MSRValue": "0x0400000022", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -369,20 +344,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000022 ", + "MSRValue": "0x0200000022", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040022 ", + "MSRValue": "0x0000040022", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT", @@ -393,32 +368,6 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010022 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000003091", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", "MSRValue": "0x3600003091", @@ -466,7 +415,7 @@ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -484,35 +433,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000013091", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000003010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600003010 ", + "MSRValue": "0x3600003010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY", @@ -525,7 +448,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000003010 ", + "MSRValue": "0x1000003010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE", @@ -538,7 +461,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400003010 ", + "MSRValue": "0x0400003010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -551,20 +474,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200003010 ", + "MSRValue": "0x0200003010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000043010 ", + "MSRValue": "0x0000043010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT", @@ -575,48 +498,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000013010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000008000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x3600008000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000008000 ", + "MSRValue": "0x1000008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE", @@ -629,7 +513,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400008000 ", + "MSRValue": "0x0400008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -642,20 +526,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200008000 ", + "MSRValue": "0x0200008000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000048000 ", + "MSRValue": "0x0000048000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT", @@ -668,7 +552,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000018000 ", + "MSRValue": "0x0000018000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE", @@ -679,22 +563,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000004800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600004800 ", + "MSRValue": "0x3600004800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY", @@ -705,48 +576,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000004800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0400004800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0200004800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000044800 ", + "MSRValue": "0x0000044800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT", @@ -757,35 +589,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000014800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000004000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600004000 ", + "MSRValue": "0x3600004000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY", @@ -798,7 +604,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000004000 ", + "MSRValue": "0x1000004000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE", @@ -811,7 +617,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400004000 ", + "MSRValue": "0x0400004000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -824,20 +630,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200004000 ", + "MSRValue": "0x0200004000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000044000 ", + "MSRValue": "0x0000044000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT", @@ -848,35 +654,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000014000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000002000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600002000 ", + "MSRValue": "0x3600002000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY", @@ -889,7 +669,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000002000 ", + "MSRValue": "0x1000002000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE", @@ -902,7 +682,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400002000 ", + "MSRValue": "0x0400002000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -915,20 +695,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200002000 ", + "MSRValue": "0x0200002000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000042000 ", + "MSRValue": "0x0000042000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT", @@ -939,35 +719,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000012000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000001000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600001000 ", + "MSRValue": "0x3600001000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY", @@ -980,7 +734,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000001000 ", + "MSRValue": "0x1000001000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE", @@ -993,7 +747,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400001000 ", + "MSRValue": "0x0400001000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1006,20 +760,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200001000 ", + "MSRValue": "0x0200001000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000041000 ", + "MSRValue": "0x0000041000", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT", @@ -1030,35 +784,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000011000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000800 ", + "MSRValue": "0x3600000800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY", @@ -1071,7 +799,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000800 ", + "MSRValue": "0x1000000800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE", @@ -1084,7 +812,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000800 ", + "MSRValue": "0x0400000800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1097,20 +825,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000800 ", + "MSRValue": "0x0200000800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040800 ", + "MSRValue": "0x0000040800", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT", @@ -1121,100 +849,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x3600000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. 
Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0400000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0200000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000040400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000010400 ", + "MSRValue": "0x0000010400", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE", @@ -1225,113 +862,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x3600000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0400000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0200000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000040200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000100 ", + "MSRValue": "0x3600000100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY", @@ -1342,87 +875,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000000100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0400000100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0200000100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000040100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000080 ", + "MSRValue": "0x3600000080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY", @@ -1433,87 +888,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000000080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0400000080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0200000080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000040080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000020 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000020 ", + "MSRValue": "0x3600000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY", @@ -1526,7 +903,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000020 ", + "MSRValue": "0x1000000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE", @@ -1539,7 +916,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000020 ", + "MSRValue": "0x0400000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1552,20 +929,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000020 ", + "MSRValue": "0x0200000020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. 
", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040020 ", + "MSRValue": "0x0000040020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT", @@ -1576,35 +953,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010020 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000010 ", + "MSRValue": "0x3600000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY", @@ -1617,7 +968,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000010 ", + "MSRValue": "0x1000000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE", @@ -1630,7 +981,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000010 ", + "MSRValue": "0x0400000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1643,20 +994,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000010 ", + "MSRValue": "0x0200000010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040010 ", + "MSRValue": "0x0000040010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT", @@ -1667,35 +1018,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x4000000008 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000008 ", + "MSRValue": "0x3600000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY", @@ -1708,7 +1033,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000008 ", + "MSRValue": "0x1000000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE", @@ -1721,7 +1046,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000008 ", + "MSRValue": "0x0400000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1734,20 +1059,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000008 ", + "MSRValue": "0x0200000008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040008 ", + "MSRValue": "0x0000040008", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT", @@ -1758,22 +1083,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010008 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x4000000004 ", + "MSRValue": "0x4000000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING", @@ -1786,7 +1098,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000004 ", + "MSRValue": "0x3600000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY", @@ -1797,22 +1109,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x1000000004 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000004 ", + "MSRValue": "0x0400000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1825,20 +1124,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000004 ", + "MSRValue": "0x0200000004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040004 ", + "MSRValue": "0x0000040004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT", @@ -1849,22 +1148,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010004 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x4000000002 ", + "MSRValue": "0x4000000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING", @@ -1877,7 +1163,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000002 ", + "MSRValue": "0x3600000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY", @@ -1890,7 +1176,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000002 ", + "MSRValue": "0x1000000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE", @@ -1903,7 +1189,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000002 ", + "MSRValue": "0x0400000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -1916,20 +1202,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000002 ", + "MSRValue": "0x0200000002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040002 ", + "MSRValue": "0x0000040002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT", @@ -1940,22 +1226,9 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010002 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x4000000001 ", + "MSRValue": "0x4000000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING", @@ -1968,7 +1241,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x3600000001 ", + "MSRValue": "0x3600000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY", @@ -1981,7 +1254,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x1000000001 ", + "MSRValue": "0x1000000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE", @@ -1994,7 +1267,7 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0400000001 ", + "MSRValue": "0x0400000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD", @@ -2007,20 +1280,20 @@ "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0200000001 ", + "MSRValue": "0x0200000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { "CollectPEBSRecord": "1", "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", "EventCode": "0xB7", - "MSRValue": "0x0000040001 ", + "MSRValue": "0x0000040001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT", @@ -2028,18 +1301,5 @@ "SampleAfterValue": "100007", "BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.", "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x0000010001 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.", - "Offcore": "1" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json index 690cebd12a94..197dc76d49dd 100644 --- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json +++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json @@ -30,265 +30,5 @@ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING", "SampleAfterValue": "200003", "BriefDescription": "Machine clears due to memory ordering issue" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x20000032b7 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000022 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000003091", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000003010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000008000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000004800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000004000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000002000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000001000 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000800 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000400 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000200 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000100 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000080 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000020 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000010 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000008 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6", - "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000004 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. 
(duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000002 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" - }, - { - "CollectPEBSRecord": "1", - "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)", - "EventCode": "0xB7", - "MSRValue": "0x2000000001 ", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM", - "MSRIndex": "0x1a6,0x1a7", - "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and targets non-DRAM system address.", - "Offcore": "1" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json index 254788af8ab6..6342368accf8 100644 --- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json @@ -1,7 +1,6 @@ [ { "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -10,7 +9,6 @@ }, { "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.CORE", @@ -19,7 +17,6 @@ }, { "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -188,7 +185,7 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.", + "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. 
Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.", "EventCode": "0xC3", "Counter": "0,1,2,3", "UMask": "0x1", diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json index 9805198d3f5f..343d66bbd777 100644 --- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json @@ -48,7 +48,8 @@ "UMask": "0x11", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -59,7 +60,8 @@ "UMask": "0x12", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -70,6 +72,7 @@ "UMask": "0x13", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json index b4791b443a66..5a6ac8285ad4 100644 --- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json @@ -92,7 +92,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Locked load uops retired (Precise event capable)" + "BriefDescription": "Locked load uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -104,7 +105,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -116,7 +118,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -128,7 +131,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)" + "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -140,7 +144,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired (Precise event capable)" + "BriefDescription": "Load uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -152,7 +157,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Store uops retired (Precise event capable)" + "BriefDescription": "Store uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -164,7 +170,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired (Precise event capable)" + "BriefDescription": "Memory uops retired (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -176,7 +183,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)" + "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -188,7 +196,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that hit L2 (Precise event capable)" + "BriefDescription": "Load uops retired that hit L2 (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -200,7 +209,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)" + "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -212,7 +222,8 @@ "PEBScounters": "0,1,2,3", "EventName": 
"MEM_LOAD_UOPS_RETIRED.L2_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed L2 (Precise event capable)" + "BriefDescription": "Load uops retired that missed L2 (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -224,7 +235,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.HITM", "SampleAfterValue": "200003", - "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)" + "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -236,7 +248,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Loads retired that hit WCB (Precise event capable)" + "BriefDescription": "Loads retired that hit WCB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -248,7 +261,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT", "SampleAfterValue": "200003", - "BriefDescription": "Loads retired that came from DRAM (Precise event capable)" + "BriefDescription": "Loads retired that came from DRAM (Precise event capable)", + "Data_LA": "1" }, { "CollectPEBSRecord": "1", @@ -292,7 +306,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -367,7 +381,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -442,7 +456,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -517,7 +531,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -592,7 +606,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. 
", + "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -667,7 +681,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -742,7 +756,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -817,7 +831,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -892,7 +906,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -967,7 +981,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1042,7 +1056,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1117,7 +1131,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. 
", + "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1192,7 +1206,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1267,7 +1281,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1342,7 +1356,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { @@ -1417,7 +1431,7 @@ "PDIR_COUNTER": "na", "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. ", + "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.", "Offcore": "1" }, { diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json index ccf1aed69197..e3fa1a0ba71b 100644 --- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json @@ -3,7 +3,6 @@ "PEBS": "2", "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "PEBScounters": "32", @@ -15,7 +14,6 @@ { "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "PEBScounters": "33", @@ -27,7 +25,6 @@ { "CollectPEBSRecord": "1", "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. 
In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "PEBScounters": "34", @@ -231,7 +228,7 @@ }, { "CollectPEBSRecord": "1", - "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.", + "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.", "EventCode": "0xC3", "Counter": "0,1,2,3", "UMask": "0x1", diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json index 0b53a3b0dfb8..0d32fd26ded1 100644 --- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json @@ -189,7 +189,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS", "SampleAfterValue": "200003", - "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -201,7 +202,8 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES", "SampleAfterValue": "200003", - "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" }, { "PEBS": "2", @@ -213,6 +215,7 @@ "PEBScounters": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.DTLB_MISS", "SampleAfterValue": "200003", - "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)" + "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)", + "Data_LA": "1" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json index da4d6ddd4f92..7fb0ad8d8ca1 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/cache.json +++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json @@ -63,10 +63,10 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Demand data read requests that hit L2 cache.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x41", + "UMask": "0xc1", "Errata": "HSD78", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", "SampleAfterValue": "200003", @@ -77,7 +77,7 @@ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x42", + "UMask": "0xc2", "EventName": "L2_RQSTS.RFO_HIT", "SampleAfterValue": "200003", "BriefDescription": "RFO requests that hit L2 cache", @@ -87,7 +87,7 @@ "PublicDescription": "Number of instruction fetches that hit the L2 cache.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x44", + "UMask": "0xc4", "EventName": "L2_RQSTS.CODE_RD_HIT", "SampleAfterValue": "200003", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", @@ -97,7 +97,7 @@ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x50", + "UMask": "0xd0", "EventName": "L2_RQSTS.L2_PF_HIT", "SampleAfterValue": "200003", "BriefDescription": "L2 prefetch requests that hit L2 cache", @@ -610,7 +610,7 @@ "Errata": "HSD29, HSD25, HSM26, HSM30", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", "SampleAfterValue": "20011", - "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ", + "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.", "CounterHTOff": "0,1,2,3", "Data_LA": "1" }, @@ -623,7 +623,7 @@ "Errata": "HSD29, HSD25, HSM26, HSM30", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", "SampleAfterValue": "20011", - "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. 
", + "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.", "CounterHTOff": "0,1,2,3", "Data_LA": "1" }, @@ -792,7 +792,6 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "", "EventCode": "0xf4", "Counter": "0,1,2,3", "UMask": "0x10", @@ -802,262 +801,262 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c8fff", + "MSRValue": "0x3F803C8FFF", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all requests that hit in the L3", + "BriefDescription": "Counts all requests hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c07f7", + "MSRValue": "0x10003C07F7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c07f7", + "MSRValue": "0x04003C07F7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand 
& prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0244", + "MSRValue": "0x04003C0244", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0122", + "MSRValue": "0x10003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0122", + "MSRValue": "0x04003C0122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not 
forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0091", + "MSRValue": "0x10003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0091", + "MSRValue": "0x04003C0091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0200", + "MSRValue": "0x3F803C0200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC 
only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0100", + "MSRValue": "0x3F803C0100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0080", + "MSRValue": "0x3F803C0080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0040", + "MSRValue": "0x3F803C0040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0020", + "MSRValue": "0x3F803C0020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit 
in the L3", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3f803c0010", + "MSRValue": "0x3F803C0010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0004", + "MSRValue": "0x10003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0004", + "MSRValue": "0x04003C0004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the 
snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0002", + "MSRValue": "0x10003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0002", + "MSRValue": "0x04003C0002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10003c0001", + "MSRValue": "0x10003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" 
}, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04003c0001", + "MSRValue": "0x04003C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json index f9843e5a9b42..f5a3beaa19fc 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json @@ -1,22 +1,26 @@ [ { + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x8", "Errata": "HSD56, HSM57", "EventName": "OTHER_ASSISTS.AVX_TO_SSE", "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.", + "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x10", "Errata": "HSD56, HSM57", "EventName": "OTHER_ASSISTS.SSE_TO_AVX", "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.", + "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -30,53 +34,58 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of X87 FP assists due to output values.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x2", "EventName": "FP_ASSIST.X87_OUTPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to output value.", + "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of X87 FP assists due to input values.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x4", "EventName": "FP_ASSIST.X87_INPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to input value.", + "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of SIMD FP assists due to output values.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x8", "EventName": "FP_ASSIST.SIMD_OUTPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number 
of SIMD FP assists due to Output values", + "BriefDescription": "SSE* FP micro-code assist when output value is invalid.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of SIMD FP assists due to input values.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x10", "EventName": "FP_ASSIST.SIMD_INPUT", "SampleAfterValue": "100003", - "BriefDescription": "Number of SIMD FP assists due to input values", + "BriefDescription": "Any input SSE* FP Assist", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Cycles with any input/output SSE* or FP assists.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x1e", "EventName": "FP_ASSIST.ANY", "SampleAfterValue": "100003", - "BriefDescription": "Cycles with any input/output SSE or FP assist", + "BriefDescription": "Counts any FP_ASSIST umask was incrementing", "CounterMask": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json index 5ab5c78fe580..21b27488b621 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json @@ -1,158 +1,322 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. 
Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", - "MetricGroup": "Frontend", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch 
Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": 
"1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - 
"BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json index e5f9fa6655b3..ef13ed88e2ea 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/memory.json +++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json @@ -298,7 +298,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", "MSRIndex": "0x3F6", "SampleAfterValue": "100003", - "BriefDescription": "Loads with latency value being above 4.", + "BriefDescription": "Randomly selected loads with latency value being above 4.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -312,7 +312,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", "MSRIndex": "0x3F6", "SampleAfterValue": "50021", - "BriefDescription": "Loads with latency value being above 8.", + "BriefDescription": "Randomly selected loads with latency value being above 8.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -326,7 +326,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", "MSRIndex": "0x3F6", "SampleAfterValue": "20011", - "BriefDescription": "Loads with latency value being above 16.", + "BriefDescription": "Randomly selected loads with latency value being above 16.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -340,7 +340,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", "MSRIndex": "0x3F6", "SampleAfterValue": "100003", - "BriefDescription": "Loads with latency value being above 32.", + "BriefDescription": "Randomly selected loads with latency value being above 32.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -354,7 +354,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", "MSRIndex": "0x3F6", "SampleAfterValue": "2003", - "BriefDescription": "Loads with latency value being above 64.", + "BriefDescription": "Randomly selected loads with latency value being above 64.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -368,7 +368,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", "MSRIndex": "0x3F6", "SampleAfterValue": "1009", - "BriefDescription": "Loads with latency value being above 128.", + "BriefDescription": "Randomly selected loads with latency value being above 128.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -382,7 +382,7 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", "MSRIndex": "0x3F6", "SampleAfterValue": "503", - "BriefDescription": "Loads with latency value being above 256.", + "BriefDescription": "Randomly selected loads with latency value being above 256.", "TakenAlone": "1", "CounterHTOff": "3" }, @@ -396,280 +396,280 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", "MSRIndex": "0x3F6", "SampleAfterValue": "101", - "BriefDescription": "Loads with latency value being above 512.", + "BriefDescription": "Randomly selected loads with latency value being above 512.", "TakenAlone": "1", "CounterHTOff": "3" }, { - "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc08fff", + "MSRValue": "0x3FFFC08FFF", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all requests that miss in the L3", + "BriefDescription": "Counts all requests miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01004007f7", + "MSRValue": "0x01004007F7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram", + "BriefDescription": "miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc007f7", + "MSRValue": "0x3FFFC007F7", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3", + "BriefDescription": "miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400244", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00244", + 
"MSRValue": "0x3FFFC00244", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3", + "BriefDescription": "Counts all demand & prefetch code reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00122", + "MSRValue": "0x3FFFC00122", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3", + "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore 
transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00091", + "MSRValue": "0x3FFFC00091", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3", + "BriefDescription": "Counts all demand & prefetch data reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00200", + "MSRValue": "0x3FFFC00200", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00100", + "MSRValue": "0x3FFFC00100", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00080", + "MSRValue": "0x3FFFC00080", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00040", + "MSRValue": "0x3FFFC00040", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00020", + "MSRValue": "0x3FFFC00020", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00010", + "MSRValue": "0x3FFFC00010", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand code 
reads miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00004", + "MSRValue": "0x3FFFC00004", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand code reads that miss in the L3", + "BriefDescription": "Counts all demand code reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00002", + "MSRValue": "0x3FFFC00002", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3", + "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram", "EventCode": "0xB7, 0xBB", "MSRValue": "0x0100400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that miss the L3 and 
the data is returned from local dram", + "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads miss in the L3", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fffc00001", + "MSRValue": "0x3FFFC00001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that miss in the L3", + "BriefDescription": "Counts demand data reads miss in the L3", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json index a4dcfce4a512..734d3873729e 100644 --- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json @@ -1,7 +1,6 @@ [ { "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.", - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "Errata": "HSD140, HSD143", @@ -12,7 +11,6 @@ }, { "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -21,7 +19,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "AnyThread": "1", @@ -32,7 +29,6 @@ }, { "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -1071,7 +1067,8 @@ "CounterHTOff": "1" }, { - "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. 
For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.", "EventCode": "0xC0", "Counter": "0,1,2,3", "UMask": "0x2", @@ -1081,13 +1078,13 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC1", "Counter": "0,1,2,3", "UMask": "0x40", "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST", "SampleAfterValue": "100003", - "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -1102,28 +1099,34 @@ "Data_LA": "1" }, { + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "UOPS_RETIRED.STALL_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles without actually retired uops.", + "BriefDescription": "Cycles no executable uops retired", "CounterMask": "1", "CounterHTOff": "0,1,2,3" }, { + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "UOPS_RETIRED.TOTAL_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with less than 10 actually retired uops.", + "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.", "CounterMask": "10", "CounterHTOff": "0,1,2,3" }, { + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", @@ -1131,7 +1134,7 @@ "AnyThread": "1", "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles without actually retired uops.", + "BriefDescription": "Cycles no executable uops retired on core", "CounterMask": "1", "CounterHTOff": "0,1,2,3" }, @@ -1245,13 +1248,14 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts the number of not taken branch instructions retired.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x10", "EventName": "BR_INST_RETIRED.NOT_TAKEN", "SampleAfterValue": "400009", - "BriefDescription": "Not taken branch instructions retired.", + "BriefDescription": "Counts all not taken macro branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -1265,13 +1269,14 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Number of far branches retired.", + "PEBS": "1", + "PublicDescription": "", "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x40", "EventName": "BR_INST_RETIRED.FAR_BRANCH", "SampleAfterValue": "100003", - "BriefDescription": "Far branch instructions retired.", + "BriefDescription": "Counts the number of far branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json index b2fbd617306a..a9e62d4357af 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json @@ -64,18 +64,18 @@ }, { "EventCode": "0x24", - "UMask": "0x41", + "UMask": "0xc1", "BriefDescription": "Demand Data Read requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", "Errata": "HSD78", - "PublicDescription": "Demand data read requests that hit L2 cache.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load 
instructions, that hit L2 cache", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x24", - "UMask": "0x42", + "UMask": "0xc2", "BriefDescription": "RFO requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.RFO_HIT", @@ -85,7 +85,7 @@ }, { "EventCode": "0x24", - "UMask": "0x44", + "UMask": "0xc4", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.CODE_RD_HIT", @@ -95,7 +95,7 @@ }, { "EventCode": "0x24", - "UMask": "0x50", + "UMask": "0xd0", "BriefDescription": "L2 prefetch requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.L2_PF_HIT", @@ -416,7 +416,7 @@ { "EventCode": "0xD0", "UMask": "0x11", - "BriefDescription": "Retired load uops that miss the STLB. (precise Event)", + "BriefDescription": "Retired load uops that miss the STLB.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -428,7 +428,7 @@ { "EventCode": "0xD0", "UMask": "0x12", - "BriefDescription": "Retired store uops that miss the STLB. (precise Event)", + "BriefDescription": "Retired store uops that miss the STLB.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -441,7 +441,7 @@ { "EventCode": "0xD0", "UMask": "0x21", - "BriefDescription": "Retired load uops with locked access. (precise Event)", + "BriefDescription": "Retired load uops with locked access.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -453,34 +453,32 @@ { "EventCode": "0xD0", "UMask": "0x41", - "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)", + "BriefDescription": "Retired load uops that split across a cacheline boundary.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", "Errata": "HSD29, HSM30", - "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x42", - "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)", + "BriefDescription": "Retired store uops that split across a cacheline boundary.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", "Errata": "HSD29, HSM30", "L1_Hit_Indication": "1", - "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD0", "UMask": "0x81", - "BriefDescription": "All retired load uops. (precise Event)", + "BriefDescription": "All retired load uops.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -492,14 +490,13 @@ { "EventCode": "0xD0", "UMask": "0x82", - "BriefDescription": "All retired store uops. (precise Event)", + "BriefDescription": "All retired store uops.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_UOPS_RETIRED.ALL_STORES", "Errata": "HSD29, HSM30", "L1_Hit_Indication": "1", - "PublicDescription": "This event counts all store uops retired. This is a precise event.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3" }, @@ -530,13 +527,13 @@ { "EventCode": "0xD1", "UMask": "0x4", - "BriefDescription": "Miss in last-level (L3) cache. 
Excludes Unknown data-source.", + "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT", "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30", - "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.", + "PublicDescription": "Retired load uops with L3 cache hits as data sources.", "SampleAfterValue": "50021", "CounterHTOff": "0,1,2,3" }, @@ -549,19 +546,20 @@ "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS", "Errata": "HSM30", - "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.", + "PublicDescription": "Retired load uops missed L1 cache as data sources.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD1", "UMask": "0x10", - "BriefDescription": "Retired load uops with L2 cache misses as data sources.", + "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS", "Errata": "HSD29, HSM30", + "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.", "SampleAfterValue": "50021", "CounterHTOff": "0,1,2,3" }, @@ -574,6 +572,7 @@ "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS", "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30", + "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -604,26 +603,24 @@ { "EventCode": "0xD2", "UMask": "0x2", - "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ", + "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT", "Errata": "HSD29, HSD25, HSM26, HSM30", - "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.", "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD2", "UMask": "0x4", - "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ", + "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM", "Errata": "HSD29, HSD25, HSM26, HSM30", - "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. 
This is a precise event.", "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, @@ -642,19 +639,20 @@ { "EventCode": "0xD3", "UMask": "0x1", + "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM", "Errata": "HSD74, HSD29, HSD25, HSM30", - "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.", + "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xD3", "UMask": "0x4", - "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -666,7 +664,7 @@ { "EventCode": "0xD3", "UMask": "0x10", - "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -678,7 +676,7 @@ { "EventCode": "0xD3", "UMask": "0x20", - "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)", + "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache", "Data_LA": "1", "PEBS": "1", "Counter": "0,1,2,3", @@ -833,7 +831,6 @@ "BriefDescription": "Split locks in SQ", "Counter": "0,1,2,3", "EventName": "SQ_MISC.SPLIT_LOCK", - "PublicDescription": "", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -841,12 +838,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0001", + "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -854,12 +851,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0001", + "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0001", "Counter": 
"0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -867,12 +864,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0002", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -880,12 +877,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0002", + "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -893,12 +890,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0004", + "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is 
not forwarded", + "MSRValue": "0x04003C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -906,12 +903,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0004", + "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -919,12 +916,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3", - "MSRValue": "0x3f803c0010", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3", + "MSRValue": "0x3F803C0010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -932,12 +929,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3", - "MSRValue": "0x3f803c0020", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3", + "MSRValue": "0x3F803C0020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit 
value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -945,12 +942,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3", - "MSRValue": "0x3f803c0040", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3", + "MSRValue": "0x3F803C0040", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -958,12 +955,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3", - "MSRValue": "0x3f803c0080", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3", + "MSRValue": "0x3F803C0080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -971,12 +968,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3", - "MSRValue": "0x3f803c0100", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", + "MSRValue": "0x3F803C0100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -984,12 +981,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3", - "MSRValue": "0x3f803c0200", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", + "MSRValue": "0x3F803C0200", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that 
bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -997,12 +994,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0091", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1010,12 +1007,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0091", + "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1023,12 +1020,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0122", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the 
L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1036,12 +1033,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c0122", + "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C0122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1049,12 +1046,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c0244", + "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C0244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1062,12 +1059,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", - "MSRValue": "0x04003c07f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", + "MSRValue": "0x04003C07F7", "Counter": "0,1,2,3", "EventName": 
"OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1075,12 +1072,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", - "MSRValue": "0x10003c07f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", + "MSRValue": "0x10003C07F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1088,12 +1085,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all requests that hit in the L3", - "MSRValue": "0x3f803c8fff", + "BriefDescription": "Counts all requests hit in the L3", + "MSRValue": "0x3F803C8FFF", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests hit in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json index 5ab5c78fe580..e5aac148c941 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json @@ -1,158 +1,340 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. 
Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. 
They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )", - "MetricGroup": "Frontend", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - (( 14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION )) ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch 
Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": 
"1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )", + "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "DRAM_Read_Latency" + }, + { + "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@", + "BriefDescription": "Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_Parallel_Reads" + }, + { + "MetricExpr": "cbox_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json index 56b0f24b8029..a42d5ce86b6f 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json @@ -291,7 +291,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 4.", + "BriefDescription": "Randomly selected loads with latency value being above 4.", "PEBS": "2", "MSRValue": "0x4", "Counter": "3", @@ -305,7 +305,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 8.", + "BriefDescription": "Randomly selected loads with latency value being above 8.", "PEBS": "2", "MSRValue": "0x8", "Counter": "3", @@ -319,7 +319,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 16.", + "BriefDescription": "Randomly selected loads with latency value being above 16.", "PEBS": "2", "MSRValue": "0x10", "Counter": "3", @@ -333,7 +333,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 32.", + "BriefDescription": "Randomly selected loads with latency value being above 32.", "PEBS": "2", "MSRValue": "0x20", "Counter": "3", @@ -347,7 +347,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 64.", + "BriefDescription": "Randomly selected loads with latency value being above 64.", "PEBS": "2", "MSRValue": "0x40", "Counter": "3", @@ -361,7 +361,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - 
"BriefDescription": "Loads with latency value being above 128.", + "BriefDescription": "Randomly selected loads with latency value being above 128.", "PEBS": "2", "MSRValue": "0x80", "Counter": "3", @@ -375,7 +375,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 256.", + "BriefDescription": "Randomly selected loads with latency value being above 256.", "PEBS": "2", "MSRValue": "0x100", "Counter": "3", @@ -389,7 +389,7 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Loads with latency value being above 512.", + "BriefDescription": "Randomly selected loads with latency value being above 512.", "PEBS": "2", "MSRValue": "0x200", "Counter": "3", @@ -404,12 +404,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss in the L3", - "MSRValue": "0x3fbfc00001", + "BriefDescription": "Counts demand data reads miss in the L3", + "MSRValue": "0x3FBFC00001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -417,12 +417,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -430,12 +430,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3", - "MSRValue": "0x3fbfc00002", + "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3", + "MSRValue": "0x3FBFC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -443,12 +443,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts 
all demand data writes (RFOs) miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -456,12 +456,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc00002", + "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache", + "MSRValue": "0x103FC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -469,12 +469,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss in the L3", - "MSRValue": "0x3fbfc00004", + "BriefDescription": "Counts all demand code reads miss in the L3", + "MSRValue": "0x3FBFC00004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -482,12 +482,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", 
"CounterHTOff": "0,1,2,3" }, @@ -495,12 +495,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3", - "MSRValue": "0x3fbfc00010", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3", + "MSRValue": "0x3FBFC00010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -508,12 +508,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3", - "MSRValue": "0x3fbfc00020", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3", + "MSRValue": "0x3FBFC00020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -521,12 +521,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3", - "MSRValue": "0x3fbfc00040", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3", + "MSRValue": "0x3FBFC00040", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -534,12 +534,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3", - "MSRValue": "0x3fbfc00080", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3", + "MSRValue": "0x3FBFC00080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to 
specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -547,12 +547,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3", - "MSRValue": "0x3fbfc00100", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", + "MSRValue": "0x3FBFC00100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -560,12 +560,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3", - "MSRValue": "0x3fbfc00200", + "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", + "MSRValue": "0x3FBFC00200", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -573,12 +573,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3", - "MSRValue": "0x3fbfc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss in the L3", + "MSRValue": "0x3FBFC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -586,12 +586,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response 
can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -599,12 +599,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram", - "MSRValue": "0x063f800091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram", + "MSRValue": "0x063F800091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -612,12 +612,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache", + "MSRValue": "0x103FC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -625,12 +625,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache", - "MSRValue": "0x083fc00091", + "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache", + "MSRValue": "0x083FC00091", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache", "SampleAfterValue": "100003", 
"CounterHTOff": "0,1,2,3" }, @@ -638,12 +638,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3", - "MSRValue": "0x3fbfc00122", + "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3", + "MSRValue": "0x3FBFC00122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -651,12 +651,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -664,12 +664,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3", - "MSRValue": "0x3fbfc00244", + "BriefDescription": "Counts all demand & prefetch code reads miss in the L3", + "MSRValue": "0x3FBFC00244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & prefetch code reads miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -677,12 +677,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram", + "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram", "MSRValue": "0x0600400244", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all demand & 
prefetch code reads miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -690,12 +690,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3", - "MSRValue": "0x3fbfc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3", + "MSRValue": "0x3FBFC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -703,12 +703,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram", - "MSRValue": "0x06004007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram", + "MSRValue": "0x06004007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -716,12 +716,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram", - "MSRValue": "0x063f8007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram", + "MSRValue": "0x063F8007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -729,12 +729,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache", - "MSRValue": "0x103fc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is 
transferred from remote cache", + "MSRValue": "0x103FC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -742,12 +742,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache", - "MSRValue": "0x083fc007f7", + "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache", + "MSRValue": "0x083FC007F7", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -755,12 +755,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all requests that miss in the L3", - "MSRValue": "0x3fbfc08fff", + "BriefDescription": "Counts all requests miss in the L3", + "MSRValue": "0x3FBFC08FFF", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts all requests miss in the L3", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json index 8a18bfe9e3e4..26f2888341ee 100644 --- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "UMask": "0x1", "BriefDescription": "Instructions retired from execution.", "Counter": "Fixed counter 0", @@ -11,7 +10,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when the thread is not in halt state.", "Counter": "Fixed counter 1", @@ -21,7 +19,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", "Counter": "Fixed counter 1", @@ -31,7 +28,6 @@ 
"CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x3", "BriefDescription": "Reference cycles when the core is not in halt state.", "Counter": "Fixed counter 2", @@ -1098,6 +1094,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "UOPS_RETIRED.ALL", + "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1142,6 +1139,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "UOPS_RETIRED.RETIRE_SLOTS", + "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1201,6 +1199,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.CONDITIONAL", + "PublicDescription": "Counts the number of conditional branch instructions retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1241,6 +1240,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "PublicDescription": "Counts the number of near return instructions retired.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1261,6 +1261,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NEAR_TAKEN", + "PublicDescription": "Number of near taken branches retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -1312,6 +1313,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", + "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json index 999a01bc6467..5f6cb2abc384 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json @@ -1012,7 +1012,7 @@ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address ", + "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address", "CounterHTOff": "0,1,2,3" }, { @@ -1036,7 +1036,7 @@ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand data reads ", + "BriefDescription": "Counts all demand data reads", "CounterHTOff": "0,1,2,3" }, { @@ -1048,7 +1048,7 @@ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand rfo's ", + "BriefDescription": "Counts all demand rfo's", "CounterHTOff": "0,1,2,3" }, { @@ -1084,7 +1084,7 @@ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all demand & prefetch prefetch RFOs ", + "BriefDescription": "Counts all demand & prefetch prefetch RFOs", "CounterHTOff": "0,1,2,3" }, { @@ -1096,7 +1096,7 @@ "EventName": 
"OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) ", + "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)", "CounterHTOff": "0,1,2,3" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json index 7c2679514efb..bc4d5fc284a0 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json @@ -1,164 +1,340 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", - "MetricGroup": "Frontend", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. 
", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": 
"SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + 
FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json index 0afbfd95ea30..2a0aad91d83d 100644 --- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": 
"INST_RETIRED.ANY", @@ -9,7 +8,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -19,7 +17,6 @@ }, { "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "AnyThread": "1", @@ -29,7 +26,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json index 7c2679514efb..f3874b5f9995 100644 --- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json @@ -1,164 +1,346 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. 
This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. 
Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", - "MetricGroup": "Frontend", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 
2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFETCH_STALL ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. 
Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + 
FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "cbox_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json index 0afbfd95ea30..2a0aad91d83d 100644 --- 
a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -9,7 +8,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -19,7 +17,6 @@ }, { "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "AnyThread": "1", @@ -29,7 +26,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json index ee22e4a5e30d..52dc6ef40e63 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json @@ -31,7 +31,7 @@ }, { "PEBS": "1", - "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "EventCode": "0xD0", "Counter": "0,1,2,3", "UMask": "0x41", @@ -42,7 +42,7 @@ }, { "PEBS": "1", - "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", + "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", "EventCode": "0xD0", "Counter": "0,1,2,3", "UMask": "0x42", @@ -179,7 +179,7 @@ "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ", + "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.", "EventCode": "0x51", "Counter": "0,1,2,3", "UMask": "0x1", diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json index fd7d7c438226..98c73e430b05 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json @@ -1,140 +1,232 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. 
For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. 
issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", - "MetricGroup": "Frontend", + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Average CPU Utilization", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + 
FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "cbox_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 
100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json index 34a519d9bfa0..783a5b4a67b1 100644 --- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json @@ -1,7 +1,6 @@ [ { - "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ", - "EventCode": "0x00", + "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.", "Counter": "Fixed counter 1", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -10,8 +9,7 @@ "CounterHTOff": "Fixed counter 1" }, { - "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ", - "EventCode": "0x00", + "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", "Counter": "Fixed counter 2", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -20,8 +18,7 @@ "CounterHTOff": "Fixed counter 2" }, { - "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. 
", - "EventCode": "0x00", + "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", "Counter": "Fixed counter 3", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -778,7 +775,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", + "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", "EventCode": "0x03", "Counter": "0,1,2,3", "UMask": "0x2", @@ -1098,7 +1095,6 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x2", "AnyThread": "1", diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json index e434ec723001..e847b0fd696d 100644 --- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json +++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json @@ -32,16 +32,16 @@ "BriefDescription": "Counts the number of L2 cache misses" }, { - "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses. ", + "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.", "EventCode": "0x86", "Counter": "0,1", "UMask": "0x4", "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES", "SampleAfterValue": "200003", - "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses. " + "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses." 
}, { - "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted. ", + "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.", "EventCode": "0x04", "Counter": "0,1", "UMask": "0x1", @@ -115,29 +115,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000070 ", + "MSRValue": "0x4000000070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400070 ", + "MSRValue": "0x1000400070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400070 ", + "MSRValue": "0x0800400070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F", @@ -148,29 +148,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080070 ", + "MSRValue": "0x1000080070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080070 ", + "MSRValue": "0x0800080070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010070 ", + "MSRValue": "0x0000010070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE", @@ -181,29 +181,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x40000032f7 ", + "MSRValue": "0x40000032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x10004032f7 ", + "MSRValue": "0x10004032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x08004032f7 ", + "MSRValue": "0x08004032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F", @@ -214,29 +214,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x10000832f7 ", + "MSRValue": "0x10000832f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x08000832f7 ", + "MSRValue": "0x08000832f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00000132f7 ", + "MSRValue": "0x00000132f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE", @@ -247,29 +247,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000044 ", + "MSRValue": "0x4000000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400044 ", + "MSRValue": "0x1000400044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400044 ", + "MSRValue": "0x0800400044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F", @@ -280,29 +280,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080044 ", + "MSRValue": "0x1000080044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080044 ", + "MSRValue": "0x0800080044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010044 ", + "MSRValue": "0x0000010044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE", @@ -313,29 +313,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000022 ", + "MSRValue": "0x4000000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400022 ", + "MSRValue": "0x1000400022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400022 ", + "MSRValue": "0x0800400022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F", @@ -346,29 +346,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080022 ", + "MSRValue": "0x1000080022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080022 ", + "MSRValue": "0x0800080022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010022 ", + "MSRValue": "0x0000010022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE", @@ -379,29 +379,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000003091 ", + "MSRValue": "0x4000003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000403091 ", + "MSRValue": "0x1000403091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800403091 ", + "MSRValue": "0x0800403091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F", @@ -412,29 +412,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000083091 ", + "MSRValue": "0x1000083091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800083091 ", + "MSRValue": "0x0800083091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000013091 ", + "MSRValue": "0x0000013091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE", @@ -445,29 +445,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000008000 ", + "MSRValue": "0x4000008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000408000 ", + "MSRValue": "0x1000408000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800408000 ", + "MSRValue": "0x0800408000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F", @@ -478,29 +478,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000088000 ", + "MSRValue": "0x1000088000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800088000 ", + "MSRValue": "0x0800088000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000018000 ", + "MSRValue": "0x0000018000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE", @@ -511,7 +511,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0000014800 ", + "MSRValue": "0x0000014800", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE", @@ -522,7 +522,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0000014000 ", + "MSRValue": "0x0000014000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE", @@ -533,29 +533,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000002000 ", + "MSRValue": "0x4000002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000402000 ", + "MSRValue": "0x1000402000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800402000 ", + "MSRValue": "0x0800402000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F", @@ -566,29 +566,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000082000 ", + "MSRValue": "0x1000082000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800082000 ", + "MSRValue": "0x0800082000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000012000 ", + "MSRValue": "0x0000012000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE", @@ -599,29 +599,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000001000 ", + "MSRValue": "0x4000001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000401000 ", + "MSRValue": "0x1000401000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800401000 ", + "MSRValue": "0x0800401000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F", @@ -632,29 +632,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000081000 ", + "MSRValue": "0x1000081000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800081000 ", + "MSRValue": "0x0800081000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000011000 ", + "MSRValue": "0x0000011000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE", @@ -665,7 +665,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0000010800 ", + "MSRValue": "0x0000010800", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE", @@ -676,29 +676,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000400 ", + "MSRValue": "0x4000000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400400 ", + "MSRValue": "0x1000400400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400400 ", + "MSRValue": "0x0800400400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F", @@ -709,29 +709,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080400 ", + "MSRValue": "0x1000080400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080400 ", + "MSRValue": "0x0800080400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010400 ", + "MSRValue": "0x0000010400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE", @@ -742,29 +742,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000200 ", + "MSRValue": "0x4000000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400200 ", + "MSRValue": "0x1000400200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400200 ", + "MSRValue": "0x0800400200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F", @@ -775,29 +775,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080200 ", + "MSRValue": "0x1000080200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080200 ", + "MSRValue": "0x0800080200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. 
", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010200 ", + "MSRValue": "0x0000010200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE", @@ -808,18 +808,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000400100 ", + "MSRValue": "0x1000400100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400100 ", + "MSRValue": "0x0800400100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F", @@ -830,29 +830,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080100 ", + "MSRValue": "0x1000080100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080100 ", + "MSRValue": "0x0800080100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010100 ", + "MSRValue": "0x0000010100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE", @@ -863,29 +863,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000080 ", + "MSRValue": "0x4000000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. 
", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400080 ", + "MSRValue": "0x1000400080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400080 ", + "MSRValue": "0x0800400080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F", @@ -896,29 +896,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080080 ", + "MSRValue": "0x1000080080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080080 ", + "MSRValue": "0x0800080080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010080 ", + "MSRValue": "0x0000010080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE", @@ -929,29 +929,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000040 ", + "MSRValue": "0x4000000040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. 
The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400040 ", + "MSRValue": "0x1000400040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400040 ", + "MSRValue": "0x0800400040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F", @@ -962,29 +962,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080040 ", + "MSRValue": "0x1000080040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080040 ", + "MSRValue": "0x0800080040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010040 ", + "MSRValue": "0x0000010040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE", @@ -995,18 +995,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000400020 ", + "MSRValue": "0x1000400020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. 
", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400020 ", + "MSRValue": "0x0800400020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F", @@ -1017,29 +1017,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080020 ", + "MSRValue": "0x1000080020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080020 ", + "MSRValue": "0x0800080020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000020020 ", + "MSRValue": "0x0000020020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE", @@ -1050,7 +1050,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0000010020 ", + "MSRValue": "0x0000010020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE", @@ -1061,29 +1061,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000004 ", + "MSRValue": "0x4000000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400004 ", + "MSRValue": "0x1000400004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. 
", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400004 ", + "MSRValue": "0x0800400004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F", @@ -1094,29 +1094,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080004 ", + "MSRValue": "0x1000080004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080004 ", + "MSRValue": "0x0800080004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010004 ", + "MSRValue": "0x0000010004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", @@ -1127,29 +1127,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000002 ", + "MSRValue": "0x4000000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400002 ", + "MSRValue": "0x1000400002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. 
", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400002 ", + "MSRValue": "0x0800400002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F", @@ -1160,29 +1160,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080002 ", + "MSRValue": "0x1000080002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080002 ", + "MSRValue": "0x0800080002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010002 ", + "MSRValue": "0x0000010002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", @@ -1193,29 +1193,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x4000000001 ", + "MSRValue": "0x4000000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING", "MSRIndex": "0x1a6", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1000400001 ", + "MSRValue": "0x1000400001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. 
", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800400001 ", + "MSRValue": "0x0800400001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F", @@ -1226,29 +1226,29 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1000080001 ", + "MSRValue": "0x1000080001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0800080001 ", + "MSRValue": "0x0800080001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0000010001 ", + "MSRValue": "0x0000010001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", @@ -1259,722 +1259,722 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0002000001 ", + "MSRValue": "0x0002000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000002 ", + "MSRValue": "0x0002000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000004 ", + "MSRValue": "0x0002000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts demand code reads and prefetch 
code reads that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000020 ", + "MSRValue": "0x0002000020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000080 ", + "MSRValue": "0x0002000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000100 ", + "MSRValue": "0x0002000100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000200 ", + "MSRValue": "0x0002000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000400 ", + "MSRValue": "0x0002000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002001000 ", + "MSRValue": "0x0002001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state", 
"Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002002000 ", + "MSRValue": "0x0002002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002008000 ", + "MSRValue": "0x0002008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002003091 ", + "MSRValue": "0x0002003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000022 ", + "MSRValue": "0x0002000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000044 ", + "MSRValue": "0x0002000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00020032f7 ", + "MSRValue": "0x00020032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0002000070 ", + "MSRValue": "0x0002000070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that 
accounts for responses which hit its own tile's L2 with data in M state ", + "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000001 ", + "MSRValue": "0x0004000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000002 ", + "MSRValue": "0x0004000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000004 ", + "MSRValue": "0x0004000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000020 ", + "MSRValue": "0x0004000020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000040 ", + "MSRValue": "0x0004000040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000080 ", + "MSRValue": "0x0004000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). 
that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000100 ", + "MSRValue": "0x0004000100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000200 ", + "MSRValue": "0x0004000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000400 ", + "MSRValue": "0x0004000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004001000 ", + "MSRValue": "0x0004001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004002000 ", + "MSRValue": "0x0004002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004008000 ", + "MSRValue": "0x0004008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004003091 ", + "MSRValue": "0x0004003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E", 
"MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000022 ", + "MSRValue": "0x0004000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000044 ", + "MSRValue": "0x0004000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00040032f7 ", + "MSRValue": "0x00040032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0004000070 ", + "MSRValue": "0x0004000070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state ", + "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000001 ", + "MSRValue": "0x0008000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000002 ", + "MSRValue": "0x0008000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Demand 
cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000004 ", + "MSRValue": "0x0008000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000020 ", + "MSRValue": "0x0008000020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000080 ", + "MSRValue": "0x0008000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). 
that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000100 ", + "MSRValue": "0x0008000100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000200 ", + "MSRValue": "0x0008000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000400 ", + "MSRValue": "0x0008000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008001000 ", + "MSRValue": "0x0008001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008002000 ", + "MSRValue": "0x0008002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008008000 ", + "MSRValue": "0x0008008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008003091 ", + "MSRValue": "0x0008003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S", 
"MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000022 ", + "MSRValue": "0x0008000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0008000044 ", + "MSRValue": "0x0008000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00080032f7 ", + "MSRValue": "0x00080032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state ", + "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000001 ", + "MSRValue": "0x0010000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000002 ", + "MSRValue": "0x0010000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000004 ", + "MSRValue": "0x0010000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state ", + 
"BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000020 ", + "MSRValue": "0x0010000020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000040 ", + "MSRValue": "0x0010000040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000080 ", + "MSRValue": "0x0010000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). 
that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000100 ", + "MSRValue": "0x0010000100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000200 ", + "MSRValue": "0x0010000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000400 ", + "MSRValue": "0x0010000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010001000 ", + "MSRValue": "0x0010001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010002000 ", + "MSRValue": "0x0010002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010008000 ", + "MSRValue": "0x0010008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010003091 ", + "MSRValue": "0x0010003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F", 
"MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000022 ", + "MSRValue": "0x0010000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000044 ", + "MSRValue": "0x0010000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00100032f7 ", + "MSRValue": "0x00100032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0010000070 ", + "MSRValue": "0x0010000070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state ", + "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x1800180002 ", + "MSRValue": "0x1800180002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE", @@ -1985,7 +1985,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180004 ", + "MSRValue": "0x1800180004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE", @@ -1996,7 +1996,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180020 ", + "MSRValue": "0x1800180020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE", @@ -2007,7 +2007,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180040 ", + "MSRValue": "0x1800180040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE", @@ -2018,7 +2018,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180080 ", + "MSRValue": "0x1800180080", "Counter": "0,1", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE", @@ -2029,7 +2029,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180100 ", + "MSRValue": "0x1800180100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE", @@ -2040,7 +2040,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180200 ", + "MSRValue": "0x1800180200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE", @@ -2051,7 +2051,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180400 ", + "MSRValue": "0x1800180400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE", @@ -2062,7 +2062,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800181000 ", + "MSRValue": "0x1800181000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE", @@ -2073,7 +2073,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800182000 ", + "MSRValue": "0x1800182000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE", @@ -2084,7 +2084,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800188000 ", + "MSRValue": "0x1800188000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE", @@ -2095,7 +2095,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800183091 ", + "MSRValue": "0x1800183091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE", @@ -2106,7 +2106,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180022 ", + "MSRValue": "0x1800180022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE", @@ -2117,7 +2117,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180044 ", + "MSRValue": "0x1800180044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE", @@ -2128,7 +2128,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x18001832f7 ", + "MSRValue": "0x18001832f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE", @@ -2139,7 +2139,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800180070 ", + "MSRValue": "0x1800180070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE", @@ -2150,7 +2150,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400002 ", + "MSRValue": "0x1800400002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE", @@ -2161,7 +2161,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400004 ", + "MSRValue": "0x1800400004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE", @@ -2172,7 +2172,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400040 ", + "MSRValue": "0x1800400040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE", @@ -2183,7 +2183,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400080 ", + "MSRValue": "0x1800400080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE", @@ -2194,7 +2194,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400100 ", + "MSRValue": "0x1800400100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE", @@ -2205,7 +2205,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400400 ", + "MSRValue": "0x1800400400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE", @@ -2216,7 +2216,7 @@ 
}, { "EventCode": "0xB7", - "MSRValue": "0x1800401000 ", + "MSRValue": "0x1800401000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE", @@ -2227,7 +2227,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800402000 ", + "MSRValue": "0x1800402000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE", @@ -2238,7 +2238,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800408000 ", + "MSRValue": "0x1800408000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE", @@ -2249,7 +2249,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800403091 ", + "MSRValue": "0x1800403091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE", @@ -2260,7 +2260,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400022 ", + "MSRValue": "0x1800400022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE", @@ -2271,7 +2271,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400044 ", + "MSRValue": "0x1800400044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE", @@ -2282,7 +2282,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x18004032f7 ", + "MSRValue": "0x18004032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE", @@ -2293,7 +2293,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x1800400070 ", + "MSRValue": "0x1800400070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE", diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json index 700652566200..c6bb16ba0f86 100644 --- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json +++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json @@ -9,18 +9,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400070 ", + "MSRValue": "0x0100400070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200070 ", + "MSRValue": "0x0080200070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR", @@ -31,18 +31,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000070 ", + "MSRValue": "0x0101000070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far. 
", + "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800070 ", + "MSRValue": "0x0080800070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR", @@ -53,18 +53,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x01004032f7 ", + "MSRValue": "0x01004032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00802032f7 ", + "MSRValue": "0x00802032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR", @@ -75,18 +75,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x01010032f7 ", + "MSRValue": "0x01010032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x00808032f7 ", + "MSRValue": "0x00808032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR", @@ -97,18 +97,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400044 ", + "MSRValue": "0x0100400044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200044 ", + "MSRValue": "0x0080200044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR", @@ -119,18 +119,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000044 ", + "MSRValue": "0x0101000044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800044 ", + "MSRValue": "0x0080800044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR", @@ -141,18 +141,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400022 ", + "MSRValue": "0x0100400022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200022 ", + "MSRValue": "0x0080200022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR", @@ -163,18 +163,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000022 ", + "MSRValue": "0x0101000022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800022 ", + "MSRValue": "0x0080800022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR", @@ -185,18 +185,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100403091 ", + "MSRValue": "0x0100403091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080203091 ", + "MSRValue": "0x0080203091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR", @@ -207,18 +207,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101003091 ", + "MSRValue": "0x0101003091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080803091 ", + "MSRValue": "0x0080803091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR", @@ -229,18 +229,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100408000 ", + "MSRValue": "0x0100408000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080208000 ", + "MSRValue": "0x0080208000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR", @@ -251,18 +251,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101008000 ", + "MSRValue": "0x0101008000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts any request that accounts for data responses from DRAM Far. 
", + "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080808000 ", + "MSRValue": "0x0080808000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR", @@ -273,18 +273,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100402000 ", + "MSRValue": "0x0100402000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080202000 ", + "MSRValue": "0x0080202000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR", @@ -295,18 +295,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101002000 ", + "MSRValue": "0x0101002000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080802000 ", + "MSRValue": "0x0080802000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR", @@ -317,18 +317,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100401000 ", + "MSRValue": "0x0100401000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080201000 ", + "MSRValue": "0x0080201000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR", @@ -339,18 +339,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101001000 ", + "MSRValue": "0x0101001000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080801000 ", + "MSRValue": "0x0080801000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR", @@ -361,18 +361,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400400 ", + "MSRValue": "0x0100400400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200400 ", + "MSRValue": "0x0080200400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR", @@ -383,18 +383,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000400 ", + "MSRValue": "0x0101000400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800400 ", + "MSRValue": "0x0080800400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR", @@ -405,18 +405,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400200 ", + "MSRValue": "0x0100400200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200200 ", + "MSRValue": "0x0080200200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR", @@ -427,18 +427,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000200 ", + "MSRValue": "0x0101000200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800200 ", + "MSRValue": "0x0080800200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR", @@ -449,18 +449,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400100 ", + "MSRValue": "0x0100400100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200100 ", + "MSRValue": "0x0080200100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR", @@ -471,18 +471,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000100 ", + "MSRValue": "0x0101000100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR", "MSRIndex": "0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800100 ", + "MSRValue": "0x0080800100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR", @@ -493,7 +493,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x2000020080 ", + "MSRValue": "0x2000020080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM", @@ -504,18 +504,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400080 ", + "MSRValue": "0x0100400080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200080 ", + "MSRValue": "0x0080200080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR", @@ -526,18 +526,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000080 ", + "MSRValue": "0x0101000080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800080 ", + "MSRValue": "0x0080800080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR", @@ -548,18 +548,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400040 ", + "MSRValue": "0x0100400040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200040 ", + "MSRValue": "0x0080200040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR", @@ -570,18 +570,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000040 ", + "MSRValue": "0x0101000040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800040 ", + "MSRValue": "0x0080800040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR", @@ -592,7 +592,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x2000020020 ", + "MSRValue": "0x2000020020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM", @@ -603,18 +603,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400020 ", + "MSRValue": "0x0100400020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200020 ", + "MSRValue": "0x0080200020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR", @@ -625,18 +625,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000020 ", + "MSRValue": "0x0101000020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800020 ", + "MSRValue": "0x0080800020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR", @@ -647,18 +647,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400004 ", + "MSRValue": "0x0100400004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200004 ", + "MSRValue": "0x0080200004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR", @@ -669,18 +669,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000004 ", + "MSRValue": "0x0101000004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800004 ", + "MSRValue": "0x0080800004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR", @@ -691,18 +691,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400002 ", + "MSRValue": "0x0100400002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200002 ", + "MSRValue": "0x0080200002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR", @@ -713,18 +713,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000002 ", + "MSRValue": "0x0101000002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800002 ", + "MSRValue": "0x0080800002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR", @@ -735,18 +735,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0100400001 ", + "MSRValue": "0x0100400001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. 
", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080200001 ", + "MSRValue": "0x0080200001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR", @@ -757,18 +757,18 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0101000001 ", + "MSRValue": "0x0101000001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100007", - "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far. ", + "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.", "Offcore": "1" }, { "EventCode": "0xB7", - "MSRValue": "0x0080800001 ", + "MSRValue": "0x0080800001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR", @@ -779,7 +779,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600001 ", + "MSRValue": "0x0180600001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM", @@ -790,7 +790,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600002 ", + "MSRValue": "0x0180600002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM", @@ -801,7 +801,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600004 ", + "MSRValue": "0x0180600004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM", @@ -812,7 +812,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600020 ", + "MSRValue": "0x0180600020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM", @@ -823,7 +823,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600080 ", + "MSRValue": "0x0180600080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM", @@ -834,7 +834,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600100 ", + "MSRValue": "0x0180600100", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM", @@ -845,7 +845,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600200 ", + "MSRValue": "0x0180600200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM", @@ -856,7 +856,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600400 ", + "MSRValue": "0x0180600400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM", @@ -867,7 +867,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180601000 ", + "MSRValue": "0x0180601000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM", @@ -878,7 +878,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180608000 ", + "MSRValue": "0x0180608000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM", @@ -889,7 +889,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180603091 ", + "MSRValue": "0x0180603091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM", @@ -900,7 +900,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600022 ", + "MSRValue": "0x0180600022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM", @@ -911,7 +911,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600044 ", + "MSRValue": "0x0180600044", "Counter": "0,1", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM", @@ -922,7 +922,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x01806032f7 ", + "MSRValue": "0x01806032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM", @@ -933,7 +933,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0180600070 ", + "MSRValue": "0x0180600070", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM", @@ -944,7 +944,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800001 ", + "MSRValue": "0x0181800001", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR", @@ -955,7 +955,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800002 ", + "MSRValue": "0x0181800002", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR", @@ -966,7 +966,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800004 ", + "MSRValue": "0x0181800004", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR", @@ -977,7 +977,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800020 ", + "MSRValue": "0x0181800020", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR", @@ -988,7 +988,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800040 ", + "MSRValue": "0x0181800040", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR", @@ -999,7 +999,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800080 ", + "MSRValue": "0x0181800080", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR", @@ -1010,7 +1010,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800200 ", + "MSRValue": "0x0181800200", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR", @@ -1021,7 +1021,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800400 ", + "MSRValue": "0x0181800400", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR", @@ -1032,7 +1032,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181801000 ", + "MSRValue": "0x0181801000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR", @@ -1043,7 +1043,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181802000 ", + "MSRValue": "0x0181802000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR", @@ -1054,7 +1054,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181808000 ", + "MSRValue": "0x0181808000", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR", @@ -1065,7 +1065,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181803091 ", + "MSRValue": "0x0181803091", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR", @@ -1076,7 +1076,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800022 ", + "MSRValue": "0x0181800022", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR", @@ -1087,7 +1087,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x0181800044 ", + "MSRValue": "0x0181800044", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR", @@ -1098,7 +1098,7 @@ }, { "EventCode": "0xB7", - "MSRValue": "0x01818032f7 ", + "MSRValue": "0x01818032f7", "Counter": "0,1", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR", diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json index bb5494cfb5ae..92e4ef2e22c6 100644 --- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json +++ 
b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json @@ -144,7 +144,7 @@ "BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)." }, { - "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. ", + "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.", "EventCode": "0xC2", "Counter": "0,1", "UMask": "0x10", @@ -218,7 +218,7 @@ "UMask": "0x20", "EventName": "NO_ALLOC_CYCLES.RAT_STALL", "SampleAfterValue": "200003", - "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted. " + "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted." }, { "PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.", @@ -251,7 +251,7 @@ "UMask": "0x1f", "EventName": "RS_FULL_STALL.ALL", "SampleAfterValue": "200003", - "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full. " + "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full." }, { "EventCode": "0xC0", @@ -268,11 +268,10 @@ "UMask": "0x1", "EventName": "CYCLES_DIV_BUSY.ALL", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider. " + "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider." }, { "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -296,8 +295,7 @@ "BriefDescription": "Counts the number of unhalted reference clock cycles" }, { - "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. 
It is counted on a dedicated fixed counter\r\n", - "EventCode": "0x00", + "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter", "Counter": "Fixed counter 2", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -305,7 +303,6 @@ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles" }, { - "EventCode": "0x00", "Counter": "Fixed counter 3", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -343,7 +340,7 @@ "UMask": "0x1", "EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD", "SampleAfterValue": "200003", - "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store ", + "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store", "Data_LA": "1" }, { diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json index f31594507f8c..9e493977771f 100644 --- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json @@ -36,7 +36,7 @@ "EdgeDetect": "1" }, { - "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. 
", + "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.", "EventCode": "0x05", "Counter": "0,1", "UMask": "0x2", diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json index 16b04a20bc12..bb79e89c2049 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json @@ -1,207 +1,200 @@ [ { - "PEBS": "1", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x11", - "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS", - "SampleAfterValue": "100003", - "BriefDescription": "Retired load uops that miss the STLB.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x1", + "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Demand Data Read requests that hit L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x12", - "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES", - "SampleAfterValue": "100003", - "BriefDescription": "Retired store uops that miss the STLB.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x3", + "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD", + "SampleAfterValue": "200003", + "BriefDescription": "Demand Data Read requests.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x21", - "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", - "SampleAfterValue": "100007", - "BriefDescription": "Retired load uops with locked access.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x4", + "EventName": "L2_RQSTS.RFO_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "RFO requests that hit L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x41", - "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", - "SampleAfterValue": "100003", - "BriefDescription": "Retired load uops that split across a cacheline boundary.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x8", + "EventName": "L2_RQSTS.RFO_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "RFO requests that miss L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts line-split store uops retired to the architected path. 
A line split is across 64B cache-line which includes a page split (4K).", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x42", - "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", - "SampleAfterValue": "100003", - "BriefDescription": "Retired store uops that split across a cacheline boundary.", - "CounterHTOff": "0,1,2,3" + "UMask": "0xc", + "EventName": "L2_RQSTS.ALL_RFO", + "SampleAfterValue": "200003", + "BriefDescription": "RFO requests to L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts the number of load uops retired", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x81", - "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", - "SampleAfterValue": "2000003", - "BriefDescription": "All retired load uops.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x10", + "EventName": "L2_RQSTS.CODE_RD_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "L2 cache hits when fetching instructions, code reads.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts the number of store uops retired.", - "EventCode": "0xD0", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x82", - "EventName": "MEM_UOPS_RETIRED.ALL_STORES", - "SampleAfterValue": "2000003", - "BriefDescription": "All retired store uops.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x20", + "EventName": "L2_RQSTS.CODE_RD_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "L2 cache misses when fetching instructions.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD1", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", - "SampleAfterValue": "2000003", - "BriefDescription": "Retired load uops with L1 cache hits as data sources.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x30", + "EventName": "L2_RQSTS.ALL_CODE_RD", + "SampleAfterValue": "200003", + "BriefDescription": "L2 code requests.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD1", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", - "SampleAfterValue": "100003", - "BriefDescription": "Retired load uops with L2 cache hits as data sources.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x40", + "EventName": "L2_RQSTS.PF_HIT", + "SampleAfterValue": "200003", + "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.", - "EventCode": "0xD1", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT", - "SampleAfterValue": "50021", - "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.", - "CounterHTOff": "0,1,2,3" + "UMask": "0x80", + "EventName": "L2_RQSTS.PF_MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD1", + "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x40", - "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB", - "SampleAfterValue": "100003", - "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with 
data not ready.", - "CounterHTOff": "0,1,2,3" + "UMask": "0xc0", + "EventName": "L2_RQSTS.ALL_PF", + "SampleAfterValue": "200003", + "BriefDescription": "Requests from L2 hardware prefetchers.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD2", + "EventCode": "0x27", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", - "SampleAfterValue": "20011", - "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.", - "CounterHTOff": "0,1,2,3" - }, - { - "PEBS": "1", - "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.", - "EventCode": "0xD2", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", - "SampleAfterValue": "20011", - "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.", - "CounterHTOff": "0,1,2,3" + "EventName": "L2_STORE_LOCK_RQSTS.MISS", + "SampleAfterValue": "200003", + "BriefDescription": "RFOs that miss cache lines.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.", - "EventCode": "0xD2", + "EventCode": "0x27", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", - "SampleAfterValue": "20011", - "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.", - "CounterHTOff": "0,1,2,3" + "EventName": "L2_STORE_LOCK_RQSTS.HIT_E", + "SampleAfterValue": "200003", + "BriefDescription": "RFOs that hit cache lines in E state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xD2", + "EventCode": "0x27", "Counter": "0,1,2,3", "UMask": "0x8", - "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE", - "SampleAfterValue": "100003", - "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.", - "CounterHTOff": "0,1,2,3" + "EventName": "L2_STORE_LOCK_RQSTS.HIT_M", + "SampleAfterValue": "200003", + "BriefDescription": "RFOs that hit cache lines in M state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. 
Demand loads are non speculative load uops.", - "EventCode": "0xD4", + "EventCode": "0x27", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS", - "SampleAfterValue": "100007", - "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load.", - "CounterHTOff": "0,1,2,3" + "UMask": "0xf", + "EventName": "L2_STORE_LOCK_RQSTS.ALL", + "SampleAfterValue": "200003", + "BriefDescription": "RFOs that access cache lines in any state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ", - "EventCode": "0x51", + "EventCode": "0x28", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "L1D.REPLACEMENT", - "SampleAfterValue": "2000003", - "BriefDescription": "L1D data line replacements.", + "EventName": "L2_L1D_WB_RQSTS.MISS", + "SampleAfterValue": "200003", + "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x51", + "EventCode": "0x28", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "L1D.ALLOCATED_IN_M", - "SampleAfterValue": "2000003", - "BriefDescription": "Allocated L1D data cache lines in M state.", + "EventName": "L2_L1D_WB_RQSTS.HIT_S", + "SampleAfterValue": "200003", + "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x51", + "EventCode": "0x28", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "L1D.EVICTION", - "SampleAfterValue": "2000003", - "BriefDescription": "L1D data cache lines in M state evicted due to replacement.", + "EventName": "L2_L1D_WB_RQSTS.HIT_E", + "SampleAfterValue": "200003", + "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x51", + "EventCode": "0x28", "Counter": "0,1,2,3", "UMask": "0x8", - "EventName": "L1D.ALL_M_REPLACEMENT", - "SampleAfterValue": "2000003", - "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.", + "EventName": "L2_L1D_WB_RQSTS.HIT_M", + "SampleAfterValue": "200003", + "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x28", + "Counter": "0,1,2,3", + "UMask": "0xf", + "EventName": "L2_L1D_WB_RQSTS.ALL", + "SampleAfterValue": "200003", + "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x2E", + "Counter": "0,1,2,3", + "UMask": "0x41", + "EventName": "LONGEST_LAT_CACHE.MISS", + "SampleAfterValue": "100003", + "BriefDescription": "Core-originated cacheable demand requests missed LLC.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x2E", + "Counter": "0,1,2,3", + "UMask": "0x4f", + "EventName": "LONGEST_LAT_CACHE.REFERENCE", + "SampleAfterValue": "100003", + "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -224,12 +217,61 @@ "CounterHTOff": "2" }, { - "EventCode": "0x63", + "EventCode": "0x48", + "Counter": "2", + "UMask": "0x1", + "AnyThread": "1", + "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY", + "SampleAfterValue": "2000003", + 
"BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.", + "CounterMask": "1", + "CounterHTOff": "2" + }, + { + "EventCode": "0x48", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION", + "EventName": "L1D_PEND_MISS.FB_FULL", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when L1D is locked.", + "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.", + "EventCode": "0x51", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "L1D.REPLACEMENT", + "SampleAfterValue": "2000003", + "BriefDescription": "L1D data line replacements.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x51", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "L1D.ALLOCATED_IN_M", + "SampleAfterValue": "2000003", + "BriefDescription": "Allocated L1D data cache lines in M state.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x51", + "Counter": "0,1,2,3", + "UMask": "0x4", + "EventName": "L1D.EVICTION", + "SampleAfterValue": "2000003", + "BriefDescription": "L1D data cache lines in M state evicted due to replacement.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x51", + "Counter": "0,1,2,3", + "UMask": "0x8", + "EventName": "L1D.ALL_M_REPLACEMENT", + "SampleAfterValue": "2000003", + "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -254,6 +296,16 @@ { "EventCode": "0x60", "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.", + "CounterMask": "6", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x60", + "Counter": "0,1,2,3", "UMask": "0x4", "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO", "SampleAfterValue": "2000003", @@ -263,6 +315,16 @@ { "EventCode": "0x60", "Counter": "0,1,2,3", + "UMask": "0x4", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", + "SampleAfterValue": "2000003", + "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x60", + "Counter": "0,1,2,3", "UMask": "0x8", "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", "SampleAfterValue": "2000003", @@ -280,6 +342,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0x63", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles when L1D is locked.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "EventCode": "0xB0", "Counter": "0,1,2,3", "UMask": "0x1", @@ -325,148 +396,182 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x24", + "EventCode": "0xBF", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", - "SampleAfterValue": "200003", - "BriefDescription": "Demand Data Read requests that hit L2 cache.", + "UMask": "0x5", + "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES", + "SampleAfterValue": 
"100003", + "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x24", + "PEBS": "1", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "L2_RQSTS.RFO_HIT", - "SampleAfterValue": "200003", - "BriefDescription": "RFO requests that hit L2 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x11", + "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS", + "SampleAfterValue": "100003", + "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x24", + "PEBS": "1", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "L2_RQSTS.RFO_MISS", - "SampleAfterValue": "200003", - "BriefDescription": "RFO requests that miss L2 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x12", + "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES", + "SampleAfterValue": "100003", + "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x24", + "PEBS": "1", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "L2_RQSTS.CODE_RD_HIT", - "SampleAfterValue": "200003", - "BriefDescription": "L2 cache hits when fetching instructions, code reads.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x21", + "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS", + "SampleAfterValue": "100007", + "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x24", + "PEBS": "1", + "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "L2_RQSTS.CODE_RD_MISS", - "SampleAfterValue": "200003", - "BriefDescription": "L2 cache misses when fetching instructions.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x41", + "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS", + "SampleAfterValue": "100003", + "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x24", + "PEBS": "1", + "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x40", - "EventName": "L2_RQSTS.PF_HIT", - "SampleAfterValue": "200003", - "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x42", + "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES", + "SampleAfterValue": "100003", + "BriefDescription": "Retired store uops that split across a cacheline boundary. 
(Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x24", + "PEBS": "1", + "PublicDescription": "This event counts the number of load uops retired (Precise Event)", + "EventCode": "0xD0", "Counter": "0,1,2,3", - "UMask": "0x80", - "EventName": "L2_RQSTS.PF_MISS", - "SampleAfterValue": "200003", - "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x81", + "EventName": "MEM_UOPS_RETIRED.ALL_LOADS", + "SampleAfterValue": "2000003", + "BriefDescription": "All retired load uops. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x27", + "PEBS": "1", + "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)", + "EventCode": "0xD0", + "Counter": "0,1,2,3", + "UMask": "0x82", + "EventName": "MEM_UOPS_RETIRED.ALL_STORES", + "SampleAfterValue": "2000003", + "BriefDescription": "All retired store uops. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" + }, + { + "PEBS": "1", + "EventCode": "0xD1", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "L2_STORE_LOCK_RQSTS.MISS", - "SampleAfterValue": "200003", - "BriefDescription": "RFOs that miss cache lines.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT", + "SampleAfterValue": "2000003", + "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x27", + "PEBS": "1", + "EventCode": "0xD1", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "L2_STORE_LOCK_RQSTS.HIT_E", - "SampleAfterValue": "200003", - "BriefDescription": "RFOs that hit cache lines in E state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x2", + "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT", + "SampleAfterValue": "100003", + "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x27", + "PEBS": "1", + "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)", + "EventCode": "0xD1", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "L2_STORE_LOCK_RQSTS.HIT_M", - "SampleAfterValue": "200003", - "BriefDescription": "RFOs that hit cache lines in M state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x4", + "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT", + "SampleAfterValue": "50021", + "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x27", + "PEBS": "1", + "EventCode": "0xD1", "Counter": "0,1,2,3", - "UMask": "0xf", - "EventName": "L2_STORE_LOCK_RQSTS.ALL", - "SampleAfterValue": "200003", - "BriefDescription": "RFOs that access cache lines in any state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x40", + "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB", + "SampleAfterValue": "100003", + "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x28", + "PEBS": "1", + "EventCode": "0xD2", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "L2_L1D_WB_RQSTS.MISS", - "SampleAfterValue": "200003", - "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. 
(Non-rejected WBs from the DCU.).", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS", + "SampleAfterValue": "20011", + "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x28", + "PEBS": "1", + "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)", + "EventCode": "0xD2", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "L2_L1D_WB_RQSTS.HIT_S", - "SampleAfterValue": "200003", - "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT", + "SampleAfterValue": "20011", + "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x28", + "PEBS": "1", + "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)", + "EventCode": "0xD2", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "L2_L1D_WB_RQSTS.HIT_E", - "SampleAfterValue": "200003", - "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM", + "SampleAfterValue": "20011", + "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x28", + "PEBS": "1", + "EventCode": "0xD2", "Counter": "0,1,2,3", "UMask": "0x8", - "EventName": "L2_L1D_WB_RQSTS.HIT_M", - "SampleAfterValue": "200003", - "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE", + "SampleAfterValue": "100003", + "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x28", + "PEBS": "1", + "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. 
(Precise Event - PEBS)", + "EventCode": "0xD4", "Counter": "0,1,2,3", - "UMask": "0xf", - "EventName": "L2_L1D_WB_RQSTS.ALL", - "SampleAfterValue": "200003", - "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x2", + "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS", + "SampleAfterValue": "100007", + "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xF0", @@ -623,24 +728,6 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x2E", - "Counter": "0,1,2,3", - "UMask": "0x41", - "EventName": "LONGEST_LAT_CACHE.MISS", - "SampleAfterValue": "100003", - "BriefDescription": "Core-originated cacheable demand requests missed LLC.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x2E", - "Counter": "0,1,2,3", - "UMask": "0x4f", - "EventName": "LONGEST_LAT_CACHE.REFERENCE", - "SampleAfterValue": "100003", - "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0xF4", "Counter": "0,1,2,3", "UMask": "0x10", @@ -650,93 +737,6 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x24", - "Counter": "0,1,2,3", - "UMask": "0x3", - "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD", - "SampleAfterValue": "200003", - "BriefDescription": "Demand Data Read requests.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x24", - "Counter": "0,1,2,3", - "UMask": "0xc", - "EventName": "L2_RQSTS.ALL_RFO", - "SampleAfterValue": "200003", - "BriefDescription": "RFO requests to L2 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x24", - "Counter": "0,1,2,3", - "UMask": "0x30", - "EventName": "L2_RQSTS.ALL_CODE_RD", - "SampleAfterValue": "200003", - "BriefDescription": "L2 code requests.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x24", - "Counter": "0,1,2,3", - "UMask": "0xc0", - "EventName": "L2_RQSTS.ALL_PF", - "SampleAfterValue": "200003", - "BriefDescription": "Requests from L2 hardware prefetchers.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xBF", - "Counter": "0,1,2,3", - "UMask": "0x5", - "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES", - "SampleAfterValue": "100003", - "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x60", - "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO", - "SampleAfterValue": "2000003", - "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x60", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.", - "CounterMask": "6", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x48", - "Counter": "2", - "UMask": "0x1", - "AnyThread": "1", - "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.", - "CounterMask": "1", - 
"CounterHTOff": "2" - }, - { - "EventCode": "0x48", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "L1D_PEND_MISS.FB_FULL", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0xB7, 0xBB", "MSRValue": "0x10003c0244", "Counter": "0,1,2,3", @@ -1825,7 +1825,7 @@ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE", + "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE", "CounterHTOff": "0,1,2,3" }, { @@ -1837,7 +1837,7 @@ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM", + "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM", "CounterHTOff": "0,1,2,3" }, { @@ -1849,7 +1849,7 @@ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE", + "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE", "CounterHTOff": "0,1,2,3" }, { @@ -1861,7 +1861,7 @@ "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE", + "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE", "CounterHTOff": "0,1,2,3" }, { @@ -1873,7 +1873,7 @@ "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE", + "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE", "CounterHTOff": "0,1,2,3" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json index 982eda48785e..ce26537c7d47 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json @@ -1,68 +1,5 @@ [ { - "EventCode": "0xC1", - "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "OTHER_ASSISTS.AVX_STORE", - "SampleAfterValue": "100003", - "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xC1", - "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "OTHER_ASSISTS.AVX_TO_SSE", - "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xC1", - "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "OTHER_ASSISTS.SSE_TO_AVX", - "SampleAfterValue": "100003", - "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xCA", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "FP_ASSIST.X87_OUTPUT", - "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to output value.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xCA", - "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "FP_ASSIST.X87_INPUT", - "SampleAfterValue": "100003", - "BriefDescription": "Number of X87 assists due to input value.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xCA", - "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "FP_ASSIST.SIMD_OUTPUT", - "SampleAfterValue": "100003", - "BriefDescription": "Number of SIMD FP assists due to Output values.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xCA", - "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "FP_ASSIST.SIMD_INPUT", - "SampleAfterValue": "100003", - "BriefDescription": "Number of SIMD FP assists due to input values.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0x10", "Counter": "0,1,2,3", "UMask": "0x1", @@ -126,6 +63,69 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0xC1", + "Counter": "0,1,2,3", + "UMask": "0x8", + "EventName": "OTHER_ASSISTS.AVX_STORE", + "SampleAfterValue": "100003", + "BriefDescription": "Number of GSSE memory assist for stores. 
GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xC1", + "Counter": "0,1,2,3", + "UMask": "0x10", + "EventName": "OTHER_ASSISTS.AVX_TO_SSE", + "SampleAfterValue": "100003", + "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xC1", + "Counter": "0,1,2,3", + "UMask": "0x20", + "EventName": "OTHER_ASSISTS.SSE_TO_AVX", + "SampleAfterValue": "100003", + "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "FP_ASSIST.X87_OUTPUT", + "SampleAfterValue": "100003", + "BriefDescription": "Number of X87 assists due to output value.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x4", + "EventName": "FP_ASSIST.X87_INPUT", + "SampleAfterValue": "100003", + "BriefDescription": "Number of X87 assists due to input value.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x8", + "EventName": "FP_ASSIST.SIMD_OUTPUT", + "SampleAfterValue": "100003", + "BriefDescription": "Number of SIMD FP assists due to Output values.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xCA", + "Counter": "0,1,2,3", + "UMask": "0x10", + "EventName": "FP_ASSIST.SIMD_INPUT", + "SampleAfterValue": "100003", + "BriefDescription": "Number of SIMD FP assists due to input values.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "EventCode": "0xCA", "Counter": "0,1,2,3", "UMask": "0x1e", diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json index 1b7b1dd36c68..e58ed14a204c 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json @@ -1,24 +1,5 @@ [ { - "EventCode": "0x80", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "ICACHE.HIT", - "SampleAfterValue": "2000003", - "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. 
Counting includes unchacheable accesses.", - "EventCode": "0x80", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "ICACHE.MISSES", - "SampleAfterValue": "200003", - "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0x79", "Counter": "0,1,2,3", "UMask": "0x2", @@ -39,159 +20,201 @@ { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "IDQ.DSB_UOPS", + "UMask": "0x4", + "EventName": "IDQ.MITE_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.", + "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "IDQ.MS_DSB_UOPS", + "UMask": "0x8", + "EventName": "IDQ.DSB_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "IDQ.MS_MITE_UOPS", + "UMask": "0x8", + "EventName": "IDQ.DSB_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", + "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x30", - "EventName": "IDQ.MS_UOPS", + "UMask": "0x10", + "EventName": "IDQ.MS_DSB_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", + "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.", "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x30", - "EventName": "IDQ.MS_CYCLES", + "UMask": "0x10", + "EventName": "IDQ.MS_DSB_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", + "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. 
The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.", - "EventCode": "0x9C", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", + "UMask": "0x10", + "EdgeDetect": "1", + "EventName": "IDQ.MS_DSB_OCCUR", "SampleAfterValue": "2000003", - "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x9C", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", + "UMask": "0x18", + "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.", "CounterMask": "4", - "CounterHTOff": "0,1,2,3" + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x9C", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE", + "UMask": "0x18", + "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.", - "CounterMask": "3", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xAB", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "DSB2MITE_SWITCHES.COUNT", + "UMask": "0x20", + "EventName": "IDQ.MS_MITE_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.", + "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. 
The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.", - "EventCode": "0xAB", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES", + "UMask": "0x24", + "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.", + "BriefDescription": "Cycles MITE is delivering 4 Uops.", + "CounterMask": "4", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xAC", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "DSB_FILL.OTHER_CANCEL", + "UMask": "0x24", + "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.", + "BriefDescription": "Cycles MITE is delivering any Uop.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xAC", + "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "DSB_FILL.EXCEED_DSB_LINES", + "UMask": "0x30", + "EventName": "IDQ.MS_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. 
See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more information.", "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "IDQ.MITE_CYCLES", + "UMask": "0x30", + "EventName": "IDQ.MS_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.", + "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "IDQ.DSB_CYCLES", + "UMask": "0x30", + "EdgeDetect": "1", + "EventName": "IDQ.MS_SWITCHES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.", + "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "IDQ.MS_DSB_CYCLES", + "UMask": "0x3c", + "EventName": "IDQ.MITE_ALL_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.", - "CounterMask": "1", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x79", + "EventCode": "0x80", "Counter": "0,1,2,3", - "UMask": "0x10", - "EdgeDetect": "1", - "EventName": "IDQ.MS_DSB_OCCUR", + "UMask": "0x1", + "EventName": "ICACHE.HIT", "SampleAfterValue": "2000003", - "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.", - "CounterMask": "1", + "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.", + "EventCode": "0x80", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "ICACHE.MISSES", + "SampleAfterValue": "200003", + "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. 
This event is used in determining the front-end bound category of the top-down pipeline slots characterization.", + "EventCode": "0x9C", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", + "SampleAfterValue": "2000003", + "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .", + "CounterHTOff": "0,1,2,3" + }, + { + "EventCode": "0x9C", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.", + "CounterMask": "4", + "CounterHTOff": "0,1,2,3" + }, + { + "EventCode": "0x9C", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.", + "CounterMask": "3", + "CounterHTOff": "0,1,2,3" + }, + { "EventCode": "0x9C", "Counter": "0,1,2,3", "UMask": "0x1", @@ -223,83 +246,60 @@ "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x79", - "Counter": "0,1,2,3", - "UMask": "0x18", - "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.", - "CounterMask": "4", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x79", + "EventCode": "0x9C", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0x18", - "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS", + "UMask": "0x1", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.", + "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.", "CounterMask": "1", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x79", + "EventCode": "0xAB", "Counter": "0,1,2,3", - "UMask": "0x24", - "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS", + "UMask": "0x1", + "EventName": "DSB2MITE_SWITCHES.COUNT", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles MITE is delivering 4 Uops.", - "CounterMask": "4", + "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x79", + "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. 
The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.", + "EventCode": "0xAB", "Counter": "0,1,2,3", - "UMask": "0x24", - "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS", + "UMask": "0x2", + "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles MITE is delivering any Uop.", - "CounterMask": "1", + "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xAC", "Counter": "0,1,2,3", - "UMask": "0xa", - "EventName": "DSB_FILL.ALL_CANCEL", + "UMask": "0x2", + "EventName": "DSB_FILL.OTHER_CANCEL", "SampleAfterValue": "2000003", - "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.", + "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x9C", - "Invert": "1", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", - "SampleAfterValue": "2000003", - "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3" - }, - { - "EventCode": "0x79", + "EventCode": "0xAC", "Counter": "0,1,2,3", - "UMask": "0x3c", - "EventName": "IDQ.MITE_ALL_UOPS", + "UMask": "0x8", + "EventName": "DSB_FILL.EXCEED_DSB_LINES", "SampleAfterValue": "2000003", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.", + "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x79", + "EventCode": "0xAC", "Counter": "0,1,2,3", - "UMask": "0x30", - "EdgeDetect": "1", - "EventName": "IDQ.MS_SWITCHES", + "UMask": "0xa", + "EventName": "DSB_FILL.ALL_CANCEL", "SampleAfterValue": "2000003", - "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", - "CounterMask": "1", + "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.", "CounterHTOff": "0,1,2,3,4,5,6,7" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json index e6dfa89d00f3..78c1a987f9a2 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json @@ -1,5 +1,32 @@ [ { + "EventCode": "0x05", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "MISALIGN_MEM_REF.LOADS", + "SampleAfterValue": "2000003", + "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x05", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "MISALIGN_MEM_REF.STORES", + "SampleAfterValue": "2000003", + "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xBE", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "PAGE_WALKS.LLC_MISS", + "SampleAfterValue": "100003", + "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.", "EventCode": "0xC3", "Counter": "0,1,2,3", @@ -126,33 +153,6 @@ "CounterHTOff": "3" }, { - "EventCode": "0xBE", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "PAGE_WALKS.LLC_MISS", - "SampleAfterValue": "100003", - "BriefDescription": "Number of any page walk that had a miss in LLC. 
Does not necessary cause a SUSPEND.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x05", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "MISALIGN_MEM_REF.LOADS", - "SampleAfterValue": "2000003", - "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x05", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "MISALIGN_MEM_REF.STORES", - "SampleAfterValue": "2000003", - "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0xB7, 0xBB", "MSRValue": "0x300400244", "Counter": "0,1,2,3", @@ -367,7 +367,7 @@ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" }, { @@ -379,7 +379,7 @@ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT", + "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT", "CounterHTOff": "0,1,2,3" }, { @@ -391,7 +391,7 @@ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" }, { @@ -403,7 +403,7 @@ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" }, { @@ -415,7 +415,7 @@ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" }, { @@ -427,7 +427,7 @@ "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" }, { @@ -439,7 +439,7 @@ "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM", "MSRIndex": "0x1a6,0x1a7", "SampleAfterValue": "100003", - "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", + "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM", "CounterHTOff": "0,1,2,3" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json index 64b195b82c50..874eb40a2e0f 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json @@ -9,6 +9,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0x4E", + "Counter": "0,1,2,3", + "UMask": "0x2", + "EventName": "HW_PRE_REQ.DL1_MISS", + "SampleAfterValue": "2000003", + "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "EventCode": "0x5C", "Counter": "0,1,2,3", "UMask": "0x1", @@ -38,15 +47,6 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x4E", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "HW_PRE_REQ.DL1_MISS", - "SampleAfterValue": "2000003", - "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { "EventCode": "0x63", "Counter": "0,1,2,3", "UMask": "0x1", diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json index 34a519d9bfa0..b7150f65f16d 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json @@ -1,289 +1,307 @@ [ { - "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ", - "EventCode": "0x00", - "Counter": "Fixed counter 1", + "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", + "Counter": "Fixed counter 2", + "UMask": "0x3", + "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when the core is not in halt state.", + "CounterHTOff": "Fixed counter 2" + }, + { + "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. 
Counting continues during hardware interrupts, traps, and inside interrupt handlers.", + "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", "SampleAfterValue": "2000003", "BriefDescription": "Instructions retired from execution.", - "CounterHTOff": "Fixed counter 1" + "CounterHTOff": "Fixed counter 0" }, { - "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ", - "EventCode": "0x00", - "Counter": "Fixed counter 2", + "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", + "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", "SampleAfterValue": "2000003", "BriefDescription": "Core cycles when the thread is not in halt state.", - "CounterHTOff": "Fixed counter 2" + "CounterHTOff": "Fixed counter 1" }, { - "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. 
", - "EventCode": "0x00", - "Counter": "Fixed counter 3", - "UMask": "0x3", - "EventName": "CPU_CLK_UNHALTED.REF_TSC", + "Counter": "Fixed counter 1", + "UMask": "0x2", + "AnyThread": "1", + "EventName": "CPU_CLK_UNHALTED.THREAD_ANY", "SampleAfterValue": "2000003", - "BriefDescription": "Reference cycles when the core is not in halt state.", - "CounterHTOff": "Fixed counter 3" - }, - { - "EventCode": "0x88", - "Counter": "0,1,2,3", - "UMask": "0x41", - "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Not taken macro-conditional branches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", + "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x88", + "EventCode": "0x03", "Counter": "0,1,2,3", - "UMask": "0x81", - "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired macro-conditional branches.", + "UMask": "0x1", + "EventName": "LD_BLOCKS.DATA_UNKNOWN", + "SampleAfterValue": "100003", + "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", + "EventCode": "0x03", "Counter": "0,1,2,3", - "UMask": "0x82", - "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.", + "UMask": "0x2", + "EventName": "LD_BLOCKS.STORE_FORWARD", + "SampleAfterValue": "100003", + "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x03", "Counter": "0,1,2,3", - "UMask": "0x84", - "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.", + "UMask": "0x8", + "EventName": "LD_BLOCKS.NO_SR", + "SampleAfterValue": "100003", + "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x03", "Counter": "0,1,2,3", - "UMask": "0x88", - "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.", + "UMask": "0x10", + "EventName": "LD_BLOCKS.ALL_BLOCK", + "SampleAfterValue": "100003", + "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU 
miss).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.", + "EventCode": "0x07", "Counter": "0,1,2,3", - "UMask": "0x90", - "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired direct near calls.", + "UMask": "0x1", + "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", + "SampleAfterValue": "100003", + "BriefDescription": "False dependencies in MOB due to partial compare.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x07", "Counter": "0,1,2,3", - "UMask": "0xa0", - "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired indirect calls.", + "UMask": "0x8", + "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK", + "SampleAfterValue": "100003", + "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x0D", "Counter": "0,1,2,3", - "UMask": "0xc1", - "EventName": "BR_INST_EXEC.ALL_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired macro-conditional branches.", + "UMask": "0x3", + "EventName": "INT_MISC.RECOVERY_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x0D", "Counter": "0,1,2,3", - "UMask": "0xc2", - "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.", + "UMask": "0x3", + "EdgeDetect": "1", + "EventName": "INT_MISC.RECOVERY_STALLS_COUNT", + "SampleAfterValue": "2000003", + "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x0D", "Counter": "0,1,2,3", - "UMask": "0xc4", - "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.", + "UMask": "0x3", + "AnyThread": "1", + "EventName": "INT_MISC.RECOVERY_CYCLES_ANY", + "SampleAfterValue": "2000003", + "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. 
misprediction or memory nuke).", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0x0D", "Counter": "0,1,2,3", - "UMask": "0xc8", - "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired indirect return branches.", + "UMask": "0x40", + "EventName": "INT_MISC.RAT_STALL_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.", + "EventCode": "0x0E", "Counter": "0,1,2,3", - "UMask": "0xd0", - "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired direct near calls.", + "UMask": "0x1", + "EventName": "UOPS_ISSUED.ANY", + "SampleAfterValue": "2000003", + "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "EventCode": "0x0E", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0x41", - "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x1", + "EventName": "UOPS_ISSUED.STALL_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x89", + "EventCode": "0x0E", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0x81", - "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x1", + "AnyThread": "1", + "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x89", + "EventCode": "0x14", "Counter": "0,1,2,3", - "UMask": "0x84", - "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.", + "UMask": "0x1", + "EventName": "ARITH.FPU_DIV_ACTIVE", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles when divider is busy executing divide operations.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "PublicDescription": "This event counts the number of the divide operations executed.", + "EventCode": "0x14", "Counter": "0,1,2,3", - "UMask": "0x88", - "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.", + "UMask": "0x1", + "EdgeDetect": "1", + "EventName": "ARITH.FPU_DIV", + "SampleAfterValue": "100003", + "BriefDescription": "Divide operations executed.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + 
"EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0x90", - "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired mispredicted direct near calls.", + "UMask": "0x0", + "EventName": "CPU_CLK_UNHALTED.THREAD_P", + "SampleAfterValue": "2000003", + "BriefDescription": "Thread cycles when thread is not in halt state.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0xa0", - "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Taken speculative and retired mispredicted indirect calls.", + "UMask": "0x0", + "AnyThread": "1", + "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY", + "SampleAfterValue": "2000003", + "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0xc1", - "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired mispredicted macro conditional branches.", + "UMask": "0x1", + "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0xc4", - "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET", - "SampleAfterValue": "200003", - "BriefDescription": "Mispredicted indirect branches excluding calls and returns.", + "UMask": "0x1", + "AnyThread": "1", + "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x89", + "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)", + "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0xd0", - "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired mispredicted direct near calls.", + "UMask": "0x1", + "EventName": "CPU_CLK_UNHALTED.REF_XCLK", + "SampleAfterValue": "2000003", + "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0x0", - "EventName": "CPU_CLK_UNHALTED.THREAD_P", + "UMask": "0x1", + "AnyThread": "1", + "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY", "SampleAfterValue": "2000003", - "BriefDescription": "Thread cycles when thread is not in halt state.", + "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA8", + "EventCode": "0x3C", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "LSD.UOPS", + "UMask": "0x2", + "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE", "SampleAfterValue": "2000003", - "BriefDescription": "Number of Uops delivered by the LSD.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0xA8", + "EventCode": 
"0x3C", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "LSD.CYCLES_ACTIVE", + "UMask": "0x2", + "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.", - "CounterMask": "1", + "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x87", + "EventCode": "0x4C", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "ILD_STALL.LCP", - "SampleAfterValue": "2000003", - "BriefDescription": "Stalls caused by changing prefix length of the instruction.", + "EventName": "LOAD_HIT_PRE.SW_PF", + "SampleAfterValue": "100003", + "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x87", + "EventCode": "0x4C", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "ILD_STALL.IQ_FULL", - "SampleAfterValue": "2000003", - "BriefDescription": "Stall cycles because IQ is full.", + "UMask": "0x2", + "EventName": "LOAD_HIT_PRE.HW_PF", + "SampleAfterValue": "100003", + "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x0D", + "EventCode": "0x59", "Counter": "0,1,2,3", - "UMask": "0x40", - "EventName": "INT_MISC.RAT_STALL_CYCLES", + "UMask": "0x20", + "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.", + "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.", "EventCode": "0x59", "Counter": "0,1,2,3", "UMask": "0x20", - "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP", + "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.", + "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.", + "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. 
Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.", "EventCode": "0x59", "Counter": "0,1,2,3", "UMask": "0x40", @@ -302,48 +320,21 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "RESOURCE_STALLS.ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Resource-related stall cycles.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xA2", - "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "RESOURCE_STALLS.LB", - "SampleAfterValue": "2000003", - "BriefDescription": "Counts the cycles of stall due to lack of load buffers.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xA2", - "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "RESOURCE_STALLS.RS", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles stalled due to no eligible RS entry available.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xA2", + "EventCode": "0x5B", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "RESOURCE_STALLS.SB", + "UMask": "0xc", + "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).", + "BriefDescription": "Cycles with either free list is empty.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0x5B", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "RESOURCE_STALLS.ROB", + "UMask": "0xf", + "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles stalled due to re-order buffer full.", + "BriefDescription": "Resource stalls2 control structures full for physical registers.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -356,702 +347,663 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.", - "EventCode": "0x0E", + "EventCode": "0x5B", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "UOPS_ISSUED.ANY", + "UMask": "0x4f", + "EventName": "RESOURCE_STALLS2.OOO_RSRC", "SampleAfterValue": "2000003", - "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).", + "BriefDescription": "Resource stalls out of order resources full.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x0E", - "Invert": "1", + "EventCode": "0x5E", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "UOPS_ISSUED.STALL_CYCLES", + "EventName": "RS_EVENTS.EMPTY_CYCLES", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x0E", + "EventCode": "0x5E", "Invert": "1", "Counter": "0,1,2,3", "UMask": "0x1", - "AnyThread": "1", - "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES", + "EdgeDetect": "1", + "EventName": "RS_EVENTS.EMPTY_END", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station 
(RS) for all threads.", + "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.", "CounterMask": "1", - "CounterHTOff": "0,1,2,3" + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x5E", + "EventCode": "0x87", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "RS_EVENTS.EMPTY_CYCLES", + "EventName": "ILD_STALL.LCP", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.", + "BriefDescription": "Stalls caused by changing prefix length of the instruction.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xCC", + "EventCode": "0x87", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "ROB_MISC_EVENTS.LBR_INSERTS", + "UMask": "0x4", + "EventName": "ILD_STALL.IQ_FULL", "SampleAfterValue": "2000003", - "BriefDescription": "Count cases of saving new LBR.", + "BriefDescription": "Stall cycles because IQ is full.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.", - "EventCode": "0xC3", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "MACHINE_CLEARS.SMC", - "SampleAfterValue": "100003", - "BriefDescription": "Self-modifying code (SMC) detected.", + "UMask": "0x41", + "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Not taken macro-conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.", - "EventCode": "0xC3", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "MACHINE_CLEARS.MASKMOV", - "SampleAfterValue": "100003", - "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.", + "UMask": "0x81", + "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired macro-conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC0", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x0", - "EventName": "INST_RETIRED.ANY_P", - "SampleAfterValue": "2000003", - "BriefDescription": "Number of instructions retired. 
General Counter - architectural event.", + "UMask": "0x82", + "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts the number of micro-ops retired.", - "EventCode": "0xC2", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "UOPS_RETIRED.ALL", - "SampleAfterValue": "2000003", - "BriefDescription": "Actually retired uops.", + "UMask": "0x84", + "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.", - "EventCode": "0xC2", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_RETIRED.RETIRE_SLOTS", - "SampleAfterValue": "2000003", - "BriefDescription": "Retirement slots used.", + "UMask": "0x88", + "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC2", - "Invert": "1", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "UOPS_RETIRED.STALL_CYCLES", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles without actually retired uops.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3" + "UMask": "0x90", + "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired direct near calls.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC2", - "Invert": "1", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "UOPS_RETIRED.TOTAL_CYCLES", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with less than 10 actually retired uops.", - "CounterMask": "10", - "CounterHTOff": "0,1,2,3" + "UMask": "0xa0", + "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired indirect calls.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "BR_INST_RETIRED.CONDITIONAL", - "SampleAfterValue": "400009", - "BriefDescription": "Conditional branch instructions retired.", + "UMask": "0xc1", + "EventName": "BR_INST_EXEC.ALL_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired macro-conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "BR_INST_RETIRED.NEAR_CALL", - "SampleAfterValue": "100007", - "BriefDescription": "Direct and indirect near call instructions retired.", + "UMask": "0xc2", + "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired 
macro-unconditional branches excluding calls and indirects.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x0", - "EventName": "BR_INST_RETIRED.ALL_BRANCHES", - "SampleAfterValue": "400009", - "BriefDescription": "All (macro) branch instructions retired.", + "UMask": "0xc4", + "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "BR_INST_RETIRED.NEAR_RETURN", - "SampleAfterValue": "100007", - "BriefDescription": "Return instructions retired.", + "UMask": "0xc8", + "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired indirect return branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "BR_INST_RETIRED.NOT_TAKEN", - "SampleAfterValue": "400009", - "BriefDescription": "Not taken branch instructions retired.", + "UMask": "0xd0", + "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired direct near calls.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC4", + "EventCode": "0x88", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "BR_INST_RETIRED.NEAR_TAKEN", - "SampleAfterValue": "400009", - "BriefDescription": "Taken branch instructions retired.", + "UMask": "0xff", + "EventName": "BR_INST_EXEC.ALL_BRANCHES", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC4", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x40", - "EventName": "BR_INST_RETIRED.FAR_BRANCH", - "SampleAfterValue": "100007", - "BriefDescription": "Far branch instructions retired.", + "UMask": "0x41", + "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "2", - "EventCode": "0xC4", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS", - "SampleAfterValue": "400009", - "BriefDescription": "All (macro) branch instructions retired. 
(Precise Event - PEBS).", - "CounterHTOff": "0,1,2,3" + "UMask": "0x81", + "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "BR_MISP_RETIRED.CONDITIONAL", - "SampleAfterValue": "400009", - "BriefDescription": "Mispredicted conditional branch instructions retired.", + "UMask": "0x84", + "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "BR_MISP_RETIRED.NEAR_CALL", - "SampleAfterValue": "100007", - "BriefDescription": "Direct and indirect mispredicted near call instructions retired.", + "UMask": "0x88", + "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x0", - "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", - "SampleAfterValue": "400009", - "BriefDescription": "All mispredicted macro branch instructions retired.", + "UMask": "0x90", + "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired mispredicted direct near calls.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "BR_MISP_RETIRED.NOT_TAKEN", - "SampleAfterValue": "400009", - "BriefDescription": "Mispredicted not taken branch instructions retired.", + "UMask": "0xa0", + "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Taken speculative and retired mispredicted indirect calls.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "1", - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "BR_MISP_RETIRED.TAKEN", - "SampleAfterValue": "400009", - "BriefDescription": "Mispredicted taken branch instructions retired.", + "UMask": "0xc1", + "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired mispredicted macro conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "2", - "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)", - "EventCode": "0xC5", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS", - "SampleAfterValue": "400009", - "BriefDescription": "Mispredicted macro branch instructions retired. 
(Precise Event - PEBS).", - "CounterHTOff": "0,1,2,3" + "UMask": "0xc4", + "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET", + "SampleAfterValue": "200003", + "BriefDescription": "Mispredicted indirect branches excluding calls and returns.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xC1", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED", - "SampleAfterValue": "100003", - "BriefDescription": "Retired instructions experiencing ITLB misses.", + "UMask": "0xd0", + "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired mispredicted direct near calls.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x14", + "EventCode": "0x89", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "ARITH.FPU_DIV_ACTIVE", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles when divider is busy executing divide operations.", + "UMask": "0xff", + "EventName": "BR_MISP_EXEC.ALL_BRANCHES", + "SampleAfterValue": "200003", + "BriefDescription": "Speculative and retired mispredicted macro conditional branches.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of the divide operations executed.", - "EventCode": "0x14", + "EventCode": "0xA1", "Counter": "0,1,2,3", "UMask": "0x1", - "EdgeDetect": "1", - "EventName": "ARITH.FPU_DIV", - "SampleAfterValue": "100003", - "BriefDescription": "Divide operations executed.", - "CounterMask": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_0", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles per thread when uops are dispatched to port 0.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "EventCode": "0xA1", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "UOPS_DISPATCHED.THREAD", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Uops dispatched per thread.", + "BriefDescription": "Cycles per core when uops are dispatched to port 0.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "EventCode": "0xA1", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "UOPS_DISPATCHED.CORE", + "EventName": "UOPS_DISPATCHED_PORT.PORT_1", "SampleAfterValue": "2000003", - "BriefDescription": "Uops dispatched from any thread.", + "BriefDescription": "Cycles per thread when uops are dispatched to port 1.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_0", + "UMask": "0x2", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when uops are dispatched to port 0.", + "BriefDescription": "Cycles per core when uops are dispatched to port 1.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_DISPATCHED_PORT.PORT_1", + "UMask": "0xc", + "EventName": "UOPS_DISPATCHED_PORT.PORT_2", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when uops are dispatched to port 1.", + "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x40", - "EventName": "UOPS_DISPATCHED_PORT.PORT_4", + "UMask": "0xc", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE", 
"SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when uops are dispatched to port 4.", + "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x80", - "EventName": "UOPS_DISPATCHED_PORT.PORT_5", + "UMask": "0x30", + "EventName": "UOPS_DISPATCHED_PORT.PORT_3", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when uops are dispatched to port 5.", + "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA3", + "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x4", - "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH", + "UMask": "0x30", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.", - "CounterMask": "4", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA3", - "Counter": "2", - "UMask": "0x2", - "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING", + "EventCode": "0xA1", + "Counter": "0,1,2,3", + "UMask": "0x40", + "EventName": "UOPS_DISPATCHED_PORT.PORT_4", "SampleAfterValue": "2000003", - "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.", - "CounterMask": "2", - "CounterHTOff": "2" + "BriefDescription": "Cycles per thread when uops are dispatched to port 4.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA3", + "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING", + "UMask": "0x40", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.", - "CounterMask": "1", + "BriefDescription": "Cycles per core when uops are dispatched to port 4.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA3", - "Counter": "2", - "UMask": "0x6", - "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING", + "EventCode": "0xA1", + "Counter": "0,1,2,3", + "UMask": "0x80", + "EventName": "UOPS_DISPATCHED_PORT.PORT_5", "SampleAfterValue": "2000003", - "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. 
Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.", - "CounterMask": "6", - "CounterHTOff": "2" + "BriefDescription": "Cycles per thread when uops are dispatched to port 5.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA3", + "EventCode": "0xA1", "Counter": "0,1,2,3", - "UMask": "0x5", - "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING", + "UMask": "0x80", + "AnyThread": "1", + "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.", - "CounterMask": "5", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Cycles per core when uops are dispatched to port 5.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x4C", + "EventCode": "0xA2", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "LOAD_HIT_PRE.SW_PF", - "SampleAfterValue": "100003", - "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.", + "EventName": "RESOURCE_STALLS.ANY", + "SampleAfterValue": "2000003", + "BriefDescription": "Resource-related stall cycles.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x4C", + "EventCode": "0xA2", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "LOAD_HIT_PRE.HW_PF", - "SampleAfterValue": "100003", - "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x03", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "LD_BLOCKS.DATA_UNKNOWN", - "SampleAfterValue": "100003", - "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.", + "EventName": "RESOURCE_STALLS.LB", + "SampleAfterValue": "2000003", + "BriefDescription": "Counts the cycles of stall due to lack of load buffers.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. 
The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.", - "EventCode": "0x03", + "EventCode": "0xA2", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "LD_BLOCKS.STORE_FORWARD", - "SampleAfterValue": "100003", - "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.", + "UMask": "0x4", + "EventName": "RESOURCE_STALLS.RS", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles stalled due to no eligible RS entry available.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x03", + "EventCode": "0xA2", "Counter": "0,1,2,3", "UMask": "0x8", - "EventName": "LD_BLOCKS.NO_SR", - "SampleAfterValue": "100003", - "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.", + "EventName": "RESOURCE_STALLS.SB", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x03", + "EventCode": "0xA2", "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "LD_BLOCKS.ALL_BLOCK", - "SampleAfterValue": "100003", - "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).", + "UMask": "0xa", + "EventName": "RESOURCE_STALLS.LB_SB", + "SampleAfterValue": "2000003", + "BriefDescription": "Resource stalls due to load or store buffers all being in use.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.", - "EventCode": "0x07", + "EventCode": "0xA2", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", - "SampleAfterValue": "100003", - "BriefDescription": "False dependencies in MOB due to partial compare.", + "UMask": "0xe", + "EventName": "RESOURCE_STALLS.MEM_RS", + "SampleAfterValue": "2000003", + "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x07", + "EventCode": "0xA2", "Counter": "0,1,2,3", - "UMask": "0x8", - "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK", - "SampleAfterValue": "100003", - "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.", + "UMask": "0x10", + "EventName": "RESOURCE_STALLS.ROB", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles stalled due to re-order buffer full.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB6", + "EventCode": "0xA2", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "AGU_BYPASS_CANCEL.COUNT", - "SampleAfterValue": "100003", - "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. 
the address specified in the base register is in one page and the address [base+offset] is in an.", + "UMask": "0xf0", + "EventName": "RESOURCE_STALLS.OOO_RSRC", + "SampleAfterValue": "2000003", + "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", + "EventCode": "0xA3", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK", + "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING", "SampleAfterValue": "2000003", - "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).", + "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", - "Counter": "0,1,2,3", + "EventCode": "0xA3", + "Counter": "2", "UMask": "0x2", - "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE", + "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING", "SampleAfterValue": "2000003", - "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.", - "CounterHTOff": "0,1,2,3" + "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.", + "CounterMask": "2", + "CounterHTOff": "2" }, { - "EventCode": "0xA1", + "EventCode": "0xA3", "Counter": "0,1,2,3", - "UMask": "0x1", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE", + "UMask": "0x4", + "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when uops are dispatched to port 0.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.", + "CounterMask": "4", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0xA1", + "EventCode": "0xA3", "Counter": "0,1,2,3", - "UMask": "0x2", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE", + "UMask": "0x5", + "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when uops are dispatched to port 1.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.", + "CounterMask": "5", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0xA1", - "Counter": "0,1,2,3", - "UMask": "0x40", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE", + "EventCode": "0xA3", + "Counter": "2", + "UMask": "0x6", + "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when uops are dispatched to port 4.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. 
Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.", + "CounterMask": "6", + "CounterHTOff": "2" }, { - "EventCode": "0xA1", + "EventCode": "0xA8", "Counter": "0,1,2,3", - "UMask": "0x80", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE", + "UMask": "0x1", + "EventName": "LSD.UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when uops are dispatched to port 5.", + "BriefDescription": "Number of Uops delivered by the LSD.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA1", + "EventCode": "0xA8", "Counter": "0,1,2,3", - "UMask": "0xc", - "EventName": "UOPS_DISPATCHED_PORT.PORT_2", + "UMask": "0x1", + "EventName": "LSD.CYCLES_ACTIVE", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.", + "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA1", + "EventCode": "0xA8", "Counter": "0,1,2,3", - "UMask": "0x30", - "EventName": "UOPS_DISPATCHED_PORT.PORT_3", + "UMask": "0x1", + "EventName": "LSD.CYCLES_4_UOPS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.", + "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.", + "CounterMask": "4", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA1", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0xc", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE", + "UMask": "0x1", + "EventName": "UOPS_DISPATCHED.THREAD", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.", + "BriefDescription": "Uops dispatched per thread.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA1", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0x30", - "AnyThread": "1", - "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE", + "UMask": "0x2", + "EventName": "UOPS_DISPATCHED.CORE", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.", + "BriefDescription": "Uops dispatched from any thread.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PEBS": "2", - "EventCode": "0xC0", - "Counter": "1", - "UMask": "0x1", - "EventName": "INST_RETIRED.PREC_DIST", - "SampleAfterValue": "2000003", - "BriefDescription": "Instructions retired. 
(Precise Event - PEBS).", - "TakenAlone": "1", - "CounterHTOff": "1" - }, - { - "EventCode": "0x5B", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0xf", - "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL", + "UMask": "0x2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", "SampleAfterValue": "2000003", - "BriefDescription": "Resource stalls2 control structures full for physical registers.", + "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", + "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x5B", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0xc", - "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY", + "UMask": "0x2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with either free list is empty.", + "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", + "CounterMask": "2", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0xe", - "EventName": "RESOURCE_STALLS.MEM_RS", + "UMask": "0x2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3", "SampleAfterValue": "2000003", - "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.", + "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.", + "CounterMask": "3", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0xB1", "Counter": "0,1,2,3", - "UMask": "0xf0", - "EventName": "RESOURCE_STALLS.OOO_RSRC", + "UMask": "0x2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", "SampleAfterValue": "2000003", - "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.", + "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", + "CounterMask": "4", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x5B", + "EventCode": "0xB1", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0x4f", - "EventName": "RESOURCE_STALLS2.OOO_RSRC", + "UMask": "0x2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE", "SampleAfterValue": "2000003", - "BriefDescription": "Resource stalls out of order resources full.", + "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0xB6", "Counter": "0,1,2,3", - "UMask": "0xa", - "EventName": "RESOURCE_STALLS.LB_SB", - "SampleAfterValue": "2000003", - "BriefDescription": "Resource stalls due to load or store buffers all being in use.", + "UMask": "0x1", + "EventName": "AGU_BYPASS_CANCEL.COUNT", + "SampleAfterValue": "100003", + "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x0D", + "EventCode": "0xC0", "Counter": "0,1,2,3", - "UMask": "0x3", - "EventName": "INT_MISC.RECOVERY_CYCLES", + "UMask": "0x0", + "EventName": "INST_RETIRED.ANY_P", "SampleAfterValue": "2000003", - "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. 
whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).", - "CounterMask": "1", + "BriefDescription": "Number of instructions retired. General Counter - architectural event.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.", - "EventCode": "0x59", - "Counter": "0,1,2,3", - "UMask": "0x20", - "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES", + "PEBS": "2", + "EventCode": "0xC0", + "Counter": "1", + "UMask": "0x1", + "EventName": "INST_RETIRED.PREC_DIST", "SampleAfterValue": "2000003", - "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.", - "CounterMask": "1", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "BriefDescription": "Instructions retired. (Precise Event - PEBS).", + "TakenAlone": "1", + "CounterHTOff": "1" }, { - "EventCode": "0x0D", + "EventCode": "0xC1", "Counter": "0,1,2,3", - "UMask": "0x3", - "EdgeDetect": "1", - "EventName": "INT_MISC.RECOVERY_STALLS_COUNT", - "SampleAfterValue": "2000003", - "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).", - "CounterMask": "1", + "UMask": "0x2", + "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED", + "SampleAfterValue": "100003", + "BriefDescription": "Retired instructions experiencing ITLB misses.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xE6", + "PEBS": "1", + "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)", + "EventCode": "0xC2", "Counter": "0,1,2,3", - "UMask": "0x1f", - "EventName": "BACLEARS.ANY", - "SampleAfterValue": "100003", - "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.", + "UMask": "0x1", + "EventName": "UOPS_RETIRED.ALL", + "SampleAfterValue": "2000003", + "BriefDescription": "Actually retired uops. 
(Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x88", + "EventCode": "0xC2", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0xff", - "EventName": "BR_INST_EXEC.ALL_BRANCHES", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired branches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x1", + "EventName": "UOPS_RETIRED.STALL_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles without actually retired uops.", + "CounterMask": "1", + "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0x89", + "EventCode": "0xC2", + "Invert": "1", "Counter": "0,1,2,3", - "UMask": "0xff", - "EventName": "BR_MISP_EXEC.ALL_BRANCHES", - "SampleAfterValue": "200003", - "BriefDescription": "Speculative and retired mispredicted macro conditional branches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" + "UMask": "0x1", + "EventName": "UOPS_RETIRED.TOTAL_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles with less than 10 actually retired uops.", + "CounterMask": "10", + "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xC2", @@ -1065,13 +1017,14 @@ "CounterHTOff": "0,1,2,3" }, { - "EventCode": "0xA8", + "PEBS": "1", + "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)", + "EventCode": "0xC2", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "LSD.CYCLES_4_UOPS", + "UMask": "0x2", + "EventName": "UOPS_RETIRED.RETIRE_SLOTS", "SampleAfterValue": "2000003", - "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.", - "CounterMask": "4", + "BriefDescription": "Retirement slots used. (Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -1086,135 +1039,188 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x5E", - "Invert": "1", + "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.", + "EventCode": "0xC3", "Counter": "0,1,2,3", - "UMask": "0x1", - "EdgeDetect": "1", - "EventName": "RS_EVENTS.EMPTY_END", - "SampleAfterValue": "2000003", - "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. 
Could be useful to precisely locate Frontend Latency Bound issues.", - "CounterMask": "1", + "UMask": "0x4", + "EventName": "MACHINE_CLEARS.SMC", + "SampleAfterValue": "100003", + "BriefDescription": "Self-modifying code (SMC) detected.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x00", - "Counter": "Fixed counter 2", - "UMask": "0x2", - "AnyThread": "1", - "EventName": "CPU_CLK_UNHALTED.THREAD_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", - "CounterHTOff": "Fixed counter 2" + "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.", + "EventCode": "0xC3", + "Counter": "0,1,2,3", + "UMask": "0x20", + "EventName": "MACHINE_CLEARS.MASKMOV", + "SampleAfterValue": "100003", + "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.", + "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", + "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x0", - "AnyThread": "1", - "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES", + "SampleAfterValue": "400009", + "BriefDescription": "All (macro) branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", + "PEBS": "1", + "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x1", - "AnyThread": "1", - "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).", + "EventName": "BR_INST_RETIRED.CONDITIONAL", + "SampleAfterValue": "400009", + "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x0D", + "PEBS": "1", + "EventCode": "0xC4", "Counter": "0,1,2,3", - "UMask": "0x3", - "AnyThread": "1", - "EventName": "INT_MISC.RECOVERY_CYCLES_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).", - "CounterMask": "1", + "UMask": "0x2", + "EventName": "BR_INST_RETIRED.NEAR_CALL", + "SampleAfterValue": "100007", + "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "PEBS": "1", + "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", - "CounterMask": "1", + "EventName": "BR_INST_RETIRED.NEAR_CALL_R3", + "SampleAfterValue": "100007", + "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). 
(Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "PEBS": "2", + "EventCode": "0xC4", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", - "CounterMask": "2", + "UMask": "0x4", + "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS", + "SampleAfterValue": "400009", + "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" + }, + { + "PEBS": "1", + "EventCode": "0xC4", + "Counter": "0,1,2,3", + "UMask": "0x8", + "EventName": "BR_INST_RETIRED.NEAR_RETURN", + "SampleAfterValue": "100007", + "BriefDescription": "Return instructions retired. (Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "EventCode": "0xC4", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.", - "CounterMask": "3", + "UMask": "0x10", + "EventName": "BR_INST_RETIRED.NOT_TAKEN", + "SampleAfterValue": "400009", + "BriefDescription": "Not taken branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", + "PEBS": "1", + "EventCode": "0xC4", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", - "CounterMask": "4", + "UMask": "0x20", + "EventName": "BR_INST_RETIRED.NEAR_TAKEN", + "SampleAfterValue": "400009", + "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xB1", - "Invert": "1", + "EventCode": "0xC4", "Counter": "0,1,2,3", - "UMask": "0x2", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.", + "UMask": "0x40", + "EventName": "BR_INST_RETIRED.FAR_BRANCH", + "SampleAfterValue": "100007", + "BriefDescription": "Far branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)", - "EventCode": "0x3C", + "EventCode": "0xC5", "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "CPU_CLK_UNHALTED.REF_XCLK", - "SampleAfterValue": "2000003", - "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).", + "UMask": "0x0", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES", + "SampleAfterValue": "400009", + "BriefDescription": "All mispredicted macro branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", + "PEBS": "1", + "EventCode": "0xC5", "Counter": "0,1,2,3", "UMask": "0x1", - "AnyThread": "1", - "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY", - "SampleAfterValue": "2000003", - "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).", + "EventName": "BR_MISP_RETIRED.CONDITIONAL", + "SampleAfterValue": "400009", + "BriefDescription": "Mispredicted conditional branch instructions retired. 
(Precise Event - PEBS).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x3C", + "PEBS": "1", + "EventCode": "0xC5", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE", + "EventName": "BR_MISP_RETIRED.NEAR_CALL", + "SampleAfterValue": "100007", + "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "PEBS": "2", + "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0x4", + "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS", + "SampleAfterValue": "400009", + "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3" + }, + { + "PEBS": "1", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0x10", + "EventName": "BR_MISP_RETIRED.NOT_TAKEN", + "SampleAfterValue": "400009", + "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "PEBS": "1", + "EventCode": "0xC5", + "Counter": "0,1,2,3", + "UMask": "0x20", + "EventName": "BR_MISP_RETIRED.TAKEN", + "SampleAfterValue": "400009", + "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xCC", + "Counter": "0,1,2,3", + "UMask": "0x20", + "EventName": "ROB_MISC_EVENTS.LBR_INSERTS", "SampleAfterValue": "2000003", - "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.", + "BriefDescription": "Count cases of saving new LBR.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xE6", + "Counter": "0,1,2,3", + "UMask": "0x1f", + "EventName": "BACLEARS.ANY", + "SampleAfterValue": "100003", + "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.", "CounterHTOff": "0,1,2,3,4,5,6,7" } ]
\ No newline at end of file diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json index fd7d7c438226..cfeba5067bab 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json @@ -1,140 +1,226 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This includes slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to mispredicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessarily mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4) )", - "MetricGroup": "Frontend", + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculative fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Average CPU Utilization", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + 
FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2* FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4*( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8* SIMD_FP_256.PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git 
a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json index a654ab771fce..b8eccce5d75d 100644 --- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json +++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json @@ -1,131 +1,131 @@ [ { - "EventCode": "0xAE", - "Counter": "0,1,2,3", - "UMask": "0x1", - "EventName": "ITLB.ITLB_FLUSH", - "SampleAfterValue": "100007", - "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x4F", - "Counter": "0,1,2,3", - "UMask": "0x10", - "EventName": "EPT.WALK_CYCLES", - "SampleAfterValue": "2000003", - "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x85", + "EventCode": "0x08", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK", + "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", "SampleAfterValue": "100003", - "BriefDescription": "Misses at all ITLB levels that cause page walks.", + "BriefDescription": "Load misses in all DTLB levels that cause page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x85", + "EventCode": "0x08", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "ITLB_MISSES.WALK_COMPLETED", + "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", "SampleAfterValue": "100003", - "BriefDescription": "Misses in all ITLB levels that cause completed page walks.", + "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.", - "EventCode": "0x85", + "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.", + "EventCode": "0x08", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "ITLB_MISSES.WALK_DURATION", + "EventName": "DTLB_LOAD_MISSES.WALK_DURATION", "SampleAfterValue": "2000003", "BriefDescription": "Cycles when PMH is busy with page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x85", + "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. 
The penalty in this case is approximately 7 cycles.", + "EventCode": "0x08", "Counter": "0,1,2,3", "UMask": "0x10", - "EventName": "ITLB_MISSES.STLB_HIT", + "EventName": "DTLB_LOAD_MISSES.STLB_HIT", "SampleAfterValue": "100003", - "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.", + "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x08", + "EventCode": "0x49", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", + "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", "SampleAfterValue": "100003", - "BriefDescription": "Load misses in all DTLB levels that cause page walks.", + "BriefDescription": "Store misses in all DTLB levels that cause page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x08", + "EventCode": "0x49", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED", + "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", "SampleAfterValue": "100003", - "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.", + "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.", - "EventCode": "0x08", + "EventCode": "0x49", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "DTLB_LOAD_MISSES.WALK_DURATION", + "EventName": "DTLB_STORE_MISSES.WALK_DURATION", "SampleAfterValue": "2000003", "BriefDescription": "Cycles when PMH is busy with page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.", - "EventCode": "0x08", + "EventCode": "0x49", "Counter": "0,1,2,3", "UMask": "0x10", - "EventName": "DTLB_LOAD_MISSES.STLB_HIT", + "EventName": "DTLB_STORE_MISSES.STLB_HIT", "SampleAfterValue": "100003", - "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.", + "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x49", + "EventCode": "0x4F", + "Counter": "0,1,2,3", + "UMask": "0x10", + "EventName": "EPT.WALK_CYCLES", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycle count for an Extended Page table walk. 
The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x85", "Counter": "0,1,2,3", "UMask": "0x1", - "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK", + "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK", "SampleAfterValue": "100003", - "BriefDescription": "Store misses in all DTLB levels that cause page walks.", + "BriefDescription": "Misses at all ITLB levels that cause page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x49", + "EventCode": "0x85", "Counter": "0,1,2,3", "UMask": "0x2", - "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED", + "EventName": "ITLB_MISSES.WALK_COMPLETED", "SampleAfterValue": "100003", - "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.", + "BriefDescription": "Misses in all ITLB levels that cause completed page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x49", + "PublicDescription": "This event counts cycles when the Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.", + "EventCode": "0x85", "Counter": "0,1,2,3", "UMask": "0x4", - "EventName": "DTLB_STORE_MISSES.WALK_DURATION", + "EventName": "ITLB_MISSES.WALK_DURATION", "SampleAfterValue": "2000003", "BriefDescription": "Cycles when PMH is busy with page walks.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x49", + "EventCode": "0x85", "Counter": "0,1,2,3", "UMask": "0x10", - "EventName": "DTLB_STORE_MISSES.STLB_HIT", + "EventName": "ITLB_MISSES.STLB_HIT", "SampleAfterValue": "100003", - "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.", + "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xAE", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "ITLB.ITLB_FLUSH", + "SampleAfterValue": "100007", + "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json index 82be7d1b8b81..805ef1436539 100644 --- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json +++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json @@ -36,7 +36,7 @@ "BriefDescription": "L2 cache request misses" }, { - "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n", + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. 
That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.", "EventCode": "0x86", "Counter": "0,1", "UMask": "0x4", diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json new file mode 100644 index 000000000000..47814046fa9d --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json @@ -0,0 +1,20 @@ +[ + { + "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.", + "EventCode": "0x86", + "Counter": "0,1", + "UMask": "0x2", + "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss." + }, + { + "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.", + "EventCode": "0x86", + "Counter": "0,1", + "UMask": "0x3f", + "EventName": "FETCH_STALL.ALL", + "SampleAfterValue": "200003", + "BriefDescription": "Cycles code-fetch stalled due to any reason." + } +]
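\ No newline at end of file

The two FETCH_STALL events added in the new silvermont other.json above are most useful as ratios against unhalted core cycles. A minimal sketch of that use, assuming the counts come from a prior measurement; the function and parameter names are illustrative, not part of the patch:

    # Fraction of unhalted cycles the code fetch was stalled.
    # FETCH_STALL.ALL (umask 0x3f) counts cycles where the decoder queue
    # could accept bytes but the fetch unit supplied none, for any reason;
    # FETCH_STALL.ITLB_FILL_PENDING_CYCLES (umask 0x2) is the subset of
    # those cycles spent waiting on an outstanding ITLB miss.
    def fetch_stall_ratios(fetch_stall_all, fetch_stall_itlb,
                           cpu_clk_unhalted_core):
        return (fetch_stall_all / cpu_clk_unhalted_core,
                fetch_stall_itlb / cpu_clk_unhalted_core)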
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json index 7468af99190a..1ed62ad4cf77 100644 --- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json @@ -210,7 +210,7 @@ "UMask": "0x4", "EventName": "NO_ALLOC_CYCLES.MISPREDICTS", "SampleAfterValue": "200003", - "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted " + "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted" }, { "EventCode": "0xCA", @@ -275,7 +275,6 @@ }, { "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -284,7 +283,6 @@ }, { "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTED.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.CORE", @@ -293,7 +291,6 @@ }, { "PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. 
This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTED.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.", - "EventCode": "0x00", "Counter": "Fixed counter 3", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json index 54bfe9e4045c..720458139049 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/cache.json +++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json @@ -60,10 +60,10 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x41", + "UMask": "0xc1", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", "SampleAfterValue": "200003", "BriefDescription": "Demand Data Read requests that hit L2 cache", @@ -73,7 +73,7 @@ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x42", + "UMask": "0xc2", "EventName": "L2_RQSTS.RFO_HIT", "SampleAfterValue": "200003", "BriefDescription": "RFO requests that hit L2 cache", @@ -83,7 +83,7 @@ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.", "EventCode": "0x24", "Counter": "0,1,2,3", - "UMask": "0x44", + "UMask": "0xc4", "EventName": "L2_RQSTS.CODE_RD_HIT", "SampleAfterValue": "200003", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", @@ -482,7 +482,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.", "EventCode": "0xD1", "Counter": "0,1,2,3", "UMask": "0x1", @@ -554,7 +554,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. 
\r\n", + "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.", "EventCode": "0xD1", "Counter": "0,1,2,3", "UMask": "0x40", @@ -661,13 +661,13 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.", + "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF", "EventCode": "0xF2", "Counter": "0,1,2,3", "UMask": "0x4", "EventName": "L2_LINES_OUT.USELESS_PREF", "SampleAfterValue": "200003", - "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache", + "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -690,249 +690,2238 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + 
"Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC01C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x10001C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x04001C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x02001C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x01001C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00801C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00401C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": 
"0xB7, 0xBB", + "MSRValue": "0x1000108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": 
"OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": 
"100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fc0400001 ", + "MSRValue": "0x0200028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests have any response type.", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0000018000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + 
"BriefDescription": "Counts any other requests have any response type.", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + 
"Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC01C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x10001C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x04001C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x02001C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x01001C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00801C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00401C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache 
prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + 
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": 
"100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + 
"Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0000010004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + 
{ + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC01C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x10001C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": 
"OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x04001C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x02001C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x01001C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00801C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00401C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data 
writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": 
"Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": 
"OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs) have any response type.", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0000010002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs) have any response type.", + "Offcore": "1", + 
"CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000400001 ", + "MSRValue": "0x1000400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400400001 ", + "MSRValue": "0x0400400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200400001 ", + "MSRValue": "0x0200400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100400001 ", + "MSRValue": "0x0100400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", 
"CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080400001 ", + "MSRValue": "0x0080400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040400001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fc01c0001 ", + "MSRValue": "0x3FC01C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_HIT & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x10001c0001 ", + "MSRValue": "0x10001C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x04001c0001 ", + "MSRValue": "0x04001C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x02001c0001 ", + "MSRValue": "0x02001C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x01001c0001 ", + "MSRValue": "0x01001C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00801c0001 ", + "MSRValue": "0x00801C0001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00401C0001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + 
"EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fc0020001 ", + "MSRValue": "0x0080100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080080001", + "Counter": "0,1,2,3", + "UMask": 
"0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1000040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0400040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0200040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0100040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0080040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC0020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1000020001 ", + "MSRValue": "0x1000020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0400020001 ", + "MSRValue": "0x0400020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0200020001 ", + "MSRValue": "0x0200020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0100020001 ", + "MSRValue": "0x0100020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a 
dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0080020001 ", + "MSRValue": "0x0080020001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0040020001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads have any response type.", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0000010001 ", + "MSRValue": "0x0000010001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "Counts demand data reads that have any response type.", + "BriefDescription": "Counts demand data reads have any response type.", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json index 578dff5bd823..7fa95a35e3ca 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json +++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json @@ -177,7 +177,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.", + "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.", "EventCode": "0x9C", "Counter": "0,1,2,3", "UMask": "0x1", @@ -242,7 +242,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. 
These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.", + "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.", "EventCode": "0xAB", "Counter": "0,1,2,3", "UMask": "0x2", @@ -253,7 +253,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n", + "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.", "EventCode": "0xC6", "MSRValue": "0x11", "Counter": "0,1,2,3", @@ -360,7 +360,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.", "EventCode": "0xC6", "MSRValue": "0x400806", "Counter": "0,1,2,3", @@ -374,7 +374,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.", "EventCode": "0xC6", "MSRValue": "0x401006", "Counter": "0,1,2,3", @@ -388,7 +388,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. 
During this period the front-end delivered no uops.\r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.", "EventCode": "0xC6", "MSRValue": "0x402006", "Counter": "0,1,2,3", @@ -454,7 +454,7 @@ }, { "PEBS": "1", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.", "EventCode": "0xC6", "MSRValue": "0x100206", "Counter": "0,1,2,3", diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json index 3bd8b712c889..f197b4c7695b 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/memory.json +++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json @@ -215,7 +215,7 @@ "UMask": "0x4", "EventName": "HLE_RETIRED.ABORTED", "SampleAfterValue": "2000003", - "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ", + "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -237,6 +237,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).", "EventCode": "0xC8", "Counter": "0,1,2,3", "UMask": "0x20", @@ -292,7 +293,7 @@ "UMask": "0x4", "EventName": "RTM_RETIRED.ABORTED", "SampleAfterValue": "2000003", - "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ", + "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -346,7 +347,7 @@ }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x4", "Counter": "0,1,2,3", @@ -354,13 +355,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", "MSRIndex": "0x3F6", "SampleAfterValue": "100003", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. 
Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x8", "Counter": "0,1,2,3", @@ -368,13 +369,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", "MSRIndex": "0x3F6", "SampleAfterValue": "50021", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x10", "Counter": "0,1,2,3", @@ -382,13 +383,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", "MSRIndex": "0x3F6", "SampleAfterValue": "20011", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x20", "Counter": "0,1,2,3", @@ -396,13 +397,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", "MSRIndex": "0x3F6", "SampleAfterValue": "100007", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x40", "Counter": "0,1,2,3", @@ -410,13 +411,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", "MSRIndex": "0x3F6", "SampleAfterValue": "2003", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. 
Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x80", "Counter": "0,1,2,3", @@ -424,13 +425,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", "MSRIndex": "0x3F6", "SampleAfterValue": "1009", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x100", "Counter": "0,1,2,3", @@ -438,13 +439,13 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", "MSRIndex": "0x3F6", "SampleAfterValue": "503", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { "PEBS": "2", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.", "EventCode": "0xCD", "MSRValue": "0x200", "Counter": "0,1,2,3", @@ -452,163 +453,1151 @@ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", "MSRIndex": "0x3F6", "SampleAfterValue": "101", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.", "TakenAlone": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts any other requests", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3ffc000001 ", + "MSRValue": "0x3FFC408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x203C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x103C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": 
"OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x043C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x023C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x013C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00BC408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x007C408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC4008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2004008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1004008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0404008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", + "MSRIndex": 
"0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0204008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0104008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0084008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0044008000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000408000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x20001C8000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000108000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000088000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000048000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", 
+ "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts any other requests", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000028000", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts any other requests", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FFC400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x203C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x103C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x043C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x023C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x013C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + 
"EventCode": "0xB7, 0xBB", + "MSRValue": "0x00BC400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x007C400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC4000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2004000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1004000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0404000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0204000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0104000004", + "Counter": 
"0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0084000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0044000004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000400004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x20001C0004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000100004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000080004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000040004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": 
"OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000020004", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FFC400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x203C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x103C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x043C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x023C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x013C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x00BC400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": 
"Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x007C400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC4000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2004000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x1004000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0404000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0204000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0104000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0084000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": 
"0x0044000002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000400002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x20001C0002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000100002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000080002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000040002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts all demand data writes (RFOs)", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000020002", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts all demand data writes (RFOs)", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FFC400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x203C400001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & ANY_SNOOP", 
+ "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x103c000001 ", + "MSRValue": "0x103C400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x043c000001 ", + "MSRValue": "0x043C400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x023c000001 ", + "MSRValue": "0x023C400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x013c000001 ", + "MSRValue": "0x013C400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x00bc000001 ", + "MSRValue": "0x00BC400001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": 
"OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x3fc4000001 ", + "MSRValue": "0x007C400001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x3FC4000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2004000001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x1004000001 ", + "MSRValue": "0x1004000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0404000001 ", + "MSRValue": "0x0404000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes 
and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0204000001 ", + "MSRValue": "0x0204000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0104000001 ", + "MSRValue": "0x0104000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" }, { - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "PublicDescription": "Counts demand data reads", "EventCode": "0xB7, 0xBB", - "MSRValue": "0x0084000001 ", + "MSRValue": "0x0084000001", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE", - "MSRIndex": "0x1a6,0x1a7", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x0044000001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000400001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x20001C0001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000100001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + 
"SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000080001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000040001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", + "SampleAfterValue": "100003", + "BriefDescription": "Counts demand data reads", + "Offcore": "1", + "CounterHTOff": "0,1,2,3" + }, + { + "PublicDescription": "Counts demand data reads", + "EventCode": "0xB7, 0xBB", + "MSRValue": "0x2000020001", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM", + "MSRIndex": "0x1a6, 0x1a7", "SampleAfterValue": "100003", - "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE", + "BriefDescription": "Counts demand data reads", "Offcore": "1", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json index bc6d2afbcd8a..4a891fbbc4bb 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json @@ -1,7 +1,6 @@ [ { "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.", - "EventCode": "0x00", "Counter": "Fixed counter 0", "UMask": "0x1", "EventName": "INST_RETIRED.ANY", @@ -11,7 +10,6 @@ }, { "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.", - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "EventName": "CPU_CLK_UNHALTED.THREAD", @@ -20,7 +18,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "Counter": "Fixed counter 1", "UMask": "0x2", "AnyThread": "1", @@ -31,7 +28,6 @@ }, { "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. 
The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.", - "EventCode": "0x00", "Counter": "Fixed counter 2", "UMask": "0x3", "EventName": "CPU_CLK_UNHALTED.REF_TSC", @@ -121,7 +117,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.", + "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.", "EventCode": "0x0E", "Counter": "0,1,2,3", "UMask": "0x2", @@ -248,6 +244,16 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.", + "EventCode": "0x59", + "Counter": "0,1,2,3", + "UMask": "0x1", + "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD", + "SampleAfterValue": "2000003", + "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.", "EventCode": "0x5E", "Counter": "0,1,2,3", @@ -361,8 +367,8 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. 
*any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.", - "EventCode": "0xA2", + "PublicDescription": "Counts resource-related stall cycles.", + "EventCode": "0xa2", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "RESOURCE_STALLS.ANY", @@ -735,7 +741,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.", + "PublicDescription": "This event counts cycles without actually retired uops.", "EventCode": "0xC2", "Invert": "1", "Counter": "0,1,2,3", @@ -759,6 +765,7 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "PublicDescription": "Number of machine clears (nukes) of any type.", "EventCode": "0xC3", "Counter": "0,1,2,3", "UMask": "0x1", @@ -839,14 +846,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.", + "PEBS": "1", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.", "EventCode": "0xC4", "Counter": "0,1,2,3", "UMask": "0x10", "Errata": "SKL091", "EventName": "BR_INST_RETIRED.NOT_TAKEN", "SampleAfterValue": "400009", - "BriefDescription": "Not taken branch instructions retired.", + "BriefDescription": "Counts all not taken macro branch instructions retired.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -924,7 +932,7 @@ "UMask": "0x20", "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", "SampleAfterValue": "400009", - "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ", + "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { @@ -938,6 +946,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0xCC", + "Counter": "0,1,2,3", + "UMask": "0x40", + "EventName": "ROB_MISC_EVENTS.PAUSE_INST", + "SampleAfterValue": "2000003", + "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.", "EventCode": "0xE6", "Counter": "0,1,2,3", diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json index 71e9737f4614..2c95417a4dae 100644 --- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json @@ -1,164 +1,364 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. 
Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. 
Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. 
For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )", - "MetricGroup": "Frontend", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. 
", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + 
"MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "Branch_Misprediction_Cost" }, { + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts_SMT", + "MetricName": "Branch_Misprediction_Cost_SMT" + 
}, + { + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" + }, + { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Access_BW" + }, + { + "MetricExpr": "1000 * 
MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@", + "BriefDescription": "Average number of parallel data read requests to 
external memory. Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_Parallel_Reads" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json index 5c9940866acd..24df183693fa 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json @@ -61,17 +61,17 @@ }, { "EventCode": "0x24", - "UMask": "0x41", + "UMask": "0xc1", "BriefDescription": "Demand Data Read requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT", - "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.", + "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x24", - "UMask": "0x42", + "UMask": "0xc2", "BriefDescription": "RFO requests that hit L2 cache", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.RFO_HIT", @@ -81,7 +81,7 @@ }, { "EventCode": "0x24", - "UMask": "0x44", + "UMask": "0xc4", "BriefDescription": "L2 cache hits when fetching instructions, code reads.", "Counter": "0,1,2,3", "EventName": "L2_RQSTS.CODE_RD_HIT", @@ -165,6 +165,7 @@ "BriefDescription": "Core-originated cacheable demand requests missed L3", "Counter": "0,1,2,3", "EventName": "LONGEST_LAT_CACHE.MISS", + "Errata": "SKL057", "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. 
It does not include all misses to the L3.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3,4,5,6,7" @@ -175,28 +176,29 @@ "BriefDescription": "Core-originated cacheable demand requests that refer to L3", "Counter": "0,1,2,3", "EventName": "LONGEST_LAT_CACHE.REFERENCE", - "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.", + "Errata": "SKL057", + "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x48", "UMask": "0x1", - "BriefDescription": "L1D miss outstandings duration in cycles", + "BriefDescription": "Cycles with L1D load Misses outstanding.", "Counter": "0,1,2,3", - "EventName": "L1D_PEND_MISS.PENDING", - "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.", + "EventName": "L1D_PEND_MISS.PENDING_CYCLES", + "CounterMask": "1", + "PublicDescription": "Counts duration of L1D miss outstanding in cycles.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x48", "UMask": "0x1", - "BriefDescription": "Cycles with L1D load Misses outstanding.", + "BriefDescription": "L1D miss outstandings duration in cycles", "Counter": "0,1,2,3", - "EventName": "L1D_PEND_MISS.PENDING_CYCLES", - "CounterMask": "1", - "PublicDescription": "Counts duration of L1D miss outstanding in cycles.", + "EventName": "L1D_PEND_MISS.PENDING", + "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. 
The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -234,21 +236,21 @@ { "EventCode": "0x60", "UMask": "0x1", - "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.", + "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", - "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", + "CounterMask": "1", + "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x60", "UMask": "0x1", - "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore", + "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD", - "CounterMask": "1", - "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", + "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -307,21 +309,21 @@ { "EventCode": "0x60", "UMask": "0x8", - "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore", + "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", - "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). 
See corresponding Umask under OFFCORE_REQUESTS.", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", + "CounterMask": "1", + "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x60", "UMask": "0x8", - "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.", + "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD", - "CounterMask": "1", - "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD", + "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -486,7 +488,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_RETIRED.L1_HIT", - "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n", + "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3" }, @@ -558,7 +560,7 @@ "PEBS": "1", "Counter": "0,1,2,3", "EventName": "MEM_LOAD_RETIRED.FB_HIT", - "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n", + "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" }, @@ -690,6 +692,7 @@ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.", "Counter": "0,1,2,3", "EventName": "L2_LINES_OUT.SILENT", + "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -699,17 +702,18 @@ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. 
Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped", "Counter": "0,1,2,3", "EventName": "L2_LINES_OUT.NON_SILENT", - "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.", + "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xF2", "UMask": "0x4", - "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache", + "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF", + "Deprecated": "1", "Counter": "0,1,2,3", "EventName": "L2_LINES_OUT.USELESS_PREF", - "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.", + "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF", "SampleAfterValue": "200003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -736,12 +740,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that have any response type.", - "MSRValue": "0x0000010001 ", + "BriefDescription": "Counts demand data reads have any response type.", + "MSRValue": "0x0000010001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -749,12 +753,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0001 ", + "BriefDescription": "Counts demand data reads TBD TBD", + "MSRValue": "0x01003C0001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -762,25 +766,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0001 ", + "BriefDescription": "Counts demand data reads TBD TBD", + "MSRValue": "0x04003C0001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0001 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -788,12 +779,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0001 ", + "BriefDescription": "Counts demand data reads TBD TBD", + "MSRValue": "0x10003C0001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -801,12 +792,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that hit in the L3.", - "MSRValue": "0x3f803c0001 ", + "BriefDescription": "Counts demand data reads TBD TBD", + "MSRValue": "0x3F803C0001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -814,12 +805,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.", - "MSRValue": "0x0000010002 ", + "BriefDescription": "Counts all demand data writes (RFOs) have any response type.", + "MSRValue": "0x0000010002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -827,12 +818,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD", + "MSRValue": "0x01003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -840,12 +831,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD", + "MSRValue": "0x04003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -853,25 +844,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0002 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD", + "MSRValue": "0x10003C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -879,12 +857,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.", - "MSRValue": "0x3f803c0002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD", + "MSRValue": "0x3F803C0002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -892,12 +870,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that have any response type.", - "MSRValue": "0x0000010004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.", + "MSRValue": "0x0000010004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that have any response type. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -905,12 +883,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", + "MSRValue": "0x01003C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -918,12 +896,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", + "MSRValue": "0x04003C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -931,25 +909,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0004 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", + "MSRValue": "0x10003C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -957,12 +922,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that hit in the L3.", - "MSRValue": "0x3f803c0004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", + "MSRValue": "0x3F803C0004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -970,12 +935,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.", - "MSRValue": "0x0000010010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.", + "MSRValue": "0x0000010010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -983,12 +948,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", + "MSRValue": "0x01003C0010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -996,12 +961,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", + "MSRValue": "0x04003C0010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1009,25 +974,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0010 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", + "MSRValue": "0x10003C0010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1035,12 +987,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.", - "MSRValue": "0x3f803c0010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", + "MSRValue": "0x3F803C0010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1048,12 +1000,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.", - "MSRValue": "0x0000010020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.", + "MSRValue": "0x0000010020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1061,12 +1013,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", + "MSRValue": "0x01003C0020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1074,12 +1026,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", + "MSRValue": "0x04003C0020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1087,25 +1039,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0020 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", + "MSRValue": "0x10003C0020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1113,12 +1052,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.", - "MSRValue": "0x3f803c0020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", + "MSRValue": "0x3F803C0020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1126,12 +1065,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.", - "MSRValue": "0x0000010080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.", + "MSRValue": "0x0000010080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1139,12 +1078,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", + "MSRValue": "0x01003C0080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1152,25 +1091,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", + "MSRValue": "0x04003C0080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0080 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1178,12 +1104,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", + "MSRValue": "0x10003C0080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1191,12 +1117,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.", - "MSRValue": "0x3f803c0080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", + "MSRValue": "0x3F803C0080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1204,12 +1130,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.", - "MSRValue": "0x0000010100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.", + "MSRValue": "0x0000010100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1217,12 +1143,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", + "MSRValue": "0x01003C0100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1230,12 +1156,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", + "MSRValue": "0x04003C0100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1243,12 +1169,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", + "MSRValue": "0x10003C0100", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1256,12 +1182,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", + "MSRValue": "0x3F803C0100", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1269,12 +1195,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.", - "MSRValue": "0x3f803c0100 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.", + "MSRValue": "0x0000010400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1282,12 +1208,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.", - "MSRValue": "0x0000010400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", + "MSRValue": "0x01003C0400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1295,12 +1221,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", + "MSRValue": "0x04003C0400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1308,12 +1234,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", + "MSRValue": "0x10003C0400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1321,12 +1247,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "PF_L1D_AND_SW & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", + "MSRValue": "0x3F803C0400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1334,12 +1260,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0400 ", + "BriefDescription": "TBD have any response type.", + "MSRValue": "0x0000010490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1347,12 +1273,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.", - "MSRValue": "0x3f803c0400 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x01003C0490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1360,12 +1286,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts any other requests that have any response type.", - "MSRValue": "0x0000018000 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x04003C0490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1373,12 +1299,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c8000 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x10003C0490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1386,12 +1312,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c8000 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3F803C0490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1399,12 +1325,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c8000 ", + "BriefDescription": "TBD have any response type.", + "MSRValue": "0x0000010120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1412,12 +1338,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c8000 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x01003C0120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1425,12 +1351,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts any other requests that hit in the L3.", - "MSRValue": "0x3f803c8000 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x04003C0120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1438,12 +1364,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that have any response type.", - "MSRValue": "0x0000010490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x10003C0120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1451,12 +1377,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3F803C0120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1464,12 +1390,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0490 ", + "BriefDescription": "TBD have any response type.", + "MSRValue": "0x0000010491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1477,12 +1403,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x01003C0491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1490,12 +1416,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x04003C0491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1503,12 +1429,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that hit in the L3.", - "MSRValue": "0x3f803c0490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x10003C0491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1516,12 +1442,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that have any response type.", - "MSRValue": "0x0000010120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3F803C0491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1529,12 +1455,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0120 ", + "BriefDescription": "TBD have any response type.", + "MSRValue": "0x0000010122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD have any response type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1542,12 +1468,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x01003C0122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1555,12 +1481,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x04003C0122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1568,12 +1494,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x10003C0122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1581,12 +1507,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that hit in the L3.", - "MSRValue": "0x3f803c0120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3F803C0122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1594,12 +1520,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that have any response type.", - "MSRValue": "0x0000010491 ", + "BriefDescription": "Counts demand data reads", + "MSRValue": "0x08007C0001", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts demand data reads", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1607,12 +1532,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0491 ", + "BriefDescription": "Counts all demand data writes (RFOs)", + "MSRValue": "0x08007C0002", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts all demand data writes (RFOs)", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1620,12 +1544,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0491 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", + "MSRValue": "0x08007C0004", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1633,12 +1556,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0491 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads", + "MSRValue": "0x08007C0010", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1646,12 +1568,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0491 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs", + "MSRValue": "0x08007C0020", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1659,12 +1580,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.", - "MSRValue": "0x3f803c0491 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads", + "MSRValue": "0x08007C0080", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1672,12 +1592,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.", - "MSRValue": "0x0000010122 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs", + "MSRValue": "0x08007C0100", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1685,12 +1604,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.", - "MSRValue": "0x01003c0122 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests", + "MSRValue": "0x08007C0400", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1698,12 +1616,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x04003c0122 ", + "BriefDescription": "TBD", + "MSRValue": "0x08007C0490", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1711,12 +1628,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD", - "MSRValue": "0x08003c0122 ", + "BriefDescription": "TBD", + "MSRValue": "0x08007C0120", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1724,12 +1640,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.", - "MSRValue": "0x10003c0122 ", + "BriefDescription": "TBD", + "MSRValue": "0x08007C0491", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1737,12 +1652,11 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.", - "MSRValue": "0x3f803c0122 ", + "BriefDescription": "TBD", + "MSRValue": "0x08007C0122", "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD", + "PublicDescription": "TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json index 286ed1a37ec9..c5d0babe89fc 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json @@ -59,7 +59,6 @@ "BriefDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8)", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE", - "PublicDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -69,7 +68,6 @@ "BriefDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16)", "Counter": "0,1,2,3", "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE", - "PublicDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json index 403a4f89e9b2..4dc583cfb545 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json @@ -2,16 +2,6 @@ { "EventCode": "0x79", "UMask": "0x4", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path", - "Counter": "0,1,2,3", - "EventName": "IDQ.MITE_UOPS", - "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. 
This also means that uops are not being delivered from the Decode Stream Buffer (DSB).", - "SampleAfterValue": "2000003", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0x79", - "UMask": "0x4", "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path", "Counter": "0,1,2,3", "EventName": "IDQ.MITE_CYCLES", @@ -22,11 +12,11 @@ }, { "EventCode": "0x79", - "UMask": "0x8", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path", + "UMask": "0x4", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path", "Counter": "0,1,2,3", - "EventName": "IDQ.DSB_UOPS", - "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.", + "EventName": "IDQ.MITE_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -43,6 +33,16 @@ }, { "EventCode": "0x79", + "UMask": "0x8", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path", + "Counter": "0,1,2,3", + "EventName": "IDQ.DSB_UOPS", + "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.", + "SampleAfterValue": "2000003", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0x79", "UMask": "0x10", "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy", "Counter": "0,1,2,3", @@ -55,22 +55,22 @@ { "EventCode": "0x79", "UMask": "0x18", - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop", "Counter": "0,1,2,3", - "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", - "CounterMask": "4", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.", + "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS", + "CounterMask": "1", + "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "UMask": "0x18", - "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop", + "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops", "Counter": "0,1,2,3", - "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS", - "CounterMask": "1", - "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.", + "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS", + "CounterMask": "4", + "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. 
Count includes uops that may 'bypass' the IDQ.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -87,22 +87,22 @@ { "EventCode": "0x79", "UMask": "0x24", - "BriefDescription": "Cycles MITE is delivering 4 Uops", + "BriefDescription": "Cycles MITE is delivering any Uop", "Counter": "0,1,2,3", - "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS", - "CounterMask": "4", - "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS", + "CounterMask": "1", + "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x79", "UMask": "0x24", - "BriefDescription": "Cycles MITE is delivering any Uop", + "BriefDescription": "Cycles MITE is delivering 4 Uops", "Counter": "0,1,2,3", - "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS", - "CounterMask": "1", - "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", + "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS", + "CounterMask": "4", + "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -118,24 +118,24 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EdgeDetect": "1", "EventCode": "0x79", "UMask": "0x30", - "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer", + "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy", "Counter": "0,1,2,3", - "EventName": "IDQ.MS_SWITCHES", - "CounterMask": "1", - "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", + "EventName": "IDQ.MS_UOPS", + "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EdgeDetect": "1", "EventCode": "0x79", "UMask": "0x30", - "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy", + "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer", "Counter": "0,1,2,3", - "EventName": "IDQ.MS_UOPS", - "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. 
Some instructions such as transcendentals may additionally generate uops from the MS.", + "EventName": "IDQ.MS_SWITCHES", + "CounterMask": "1", + "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -177,67 +177,67 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "Invert": "1", "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled", + "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", - "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", + "CounterMask": "1", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled", + "BriefDescription": "Cycles with less than 3 uops delivered by the front end.", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", - "CounterMask": "4", - "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE", + "CounterMask": "1", + "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled", + "BriefDescription": "Cycles with less than 2 uops delivered by the front end.", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE", - "CounterMask": "3", - "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). 
IDQ_Uops_Not_Delivered.core >= 3.", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE", + "CounterMask": "2", + "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Cycles with less than 2 uops delivered by the front end.", + "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE", - "CounterMask": "2", - "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE", + "CounterMask": "3", + "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Cycles with less than 3 uops delivered by the front end.", + "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE", - "CounterMask": "1", - "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE", + "CounterMask": "4", + "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "Invert": "1", "EventCode": "0x9C", "UMask": "0x1", - "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.", + "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled", "Counter": "0,1,2,3", - "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK", - "CounterMask": "1", + "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE", + "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -247,20 +247,19 @@ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.", "Counter": "0,1,2,3", "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES", - "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. 
SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.", + "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x11", + "MSRValue": "0x400406", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.DSB_MISS", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_4", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -268,11 +267,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x12", + "MSRValue": "0x200206", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.L1I_MISS", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -281,11 +280,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. 
Precise Event.", "PEBS": "1", - "MSRValue": "0x13", + "MSRValue": "0x400206", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.L2_MISS", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -294,13 +293,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.", + "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.", "PEBS": "1", - "MSRValue": "0x14", + "MSRValue": "0x15", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.ITLB_MISS", + "EventName": "FRONTEND_RETIRED.STLB_MISS", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.", + "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -308,13 +307,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.", + "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.", "PEBS": "1", - "MSRValue": "0x15", + "MSRValue": "0x14", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.STLB_MISS", + "EventName": "FRONTEND_RETIRED.ITLB_MISS", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.", + "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -322,11 +321,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.", "PEBS": "1", - "MSRValue": "0x400206", + "MSRValue": "0x13", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_2", + "EventName": "FRONTEND_RETIRED.L2_MISS", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -335,11 +334,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.", "PEBS": "1", - "MSRValue": "0x200206", + "MSRValue": "0x12", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2", + "EventName": "FRONTEND_RETIRED.L1I_MISS", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -348,12 +347,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. 
Precise Event.", "PEBS": "1", - "MSRValue": "0x400406", + "MSRValue": "0x11", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_4", + "EventName": "FRONTEND_RETIRED.DSB_MISS", "MSRIndex": "0x3F7", + "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -361,13 +361,12 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x400806", + "MSRValue": "0x300206", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_8", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -375,13 +374,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x401006", + "MSRValue": "0x100206", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_16", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -389,13 +388,12 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x402006", + "MSRValue": "0x420006", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_32", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_512", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. 
During this period the front-end delivered no uops.\r\n", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -403,11 +401,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x404006", + "MSRValue": "0x410006", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_64", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_256", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -429,11 +427,11 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x410006", + "MSRValue": "0x404006", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_256", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_64", "MSRIndex": "0x3F7", "TakenAlone": "1", "SampleAfterValue": "100007", @@ -442,12 +440,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x420006", + "MSRValue": "0x402006", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_512", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_32", "MSRIndex": "0x3F7", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -455,13 +454,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.", "PEBS": "1", - "MSRValue": "0x100206", + "MSRValue": "0x401006", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_16", "MSRIndex": "0x3F7", - "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. 
A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" @@ -469,12 +468,13 @@ { "EventCode": "0xC6", "UMask": "0x1", - "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.", + "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.", "PEBS": "1", - "MSRValue": "0x300206", + "MSRValue": "0x400806", "Counter": "0,1,2,3", - "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3", + "EventName": "FRONTEND_RETIRED.LATENCY_GE_8", "MSRIndex": "0x3F7", + "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.", "TakenAlone": "1", "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json index e7f1aa31226d..48a9cdf81307 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json @@ -129,20 +129,20 @@ { "EventCode": "0x60", "UMask": "0x10", - "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.", + "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD", - "CounterMask": "1", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6", + "CounterMask": "6", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x60", "UMask": "0x10", - "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.", + "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.", "Counter": "0,1,2,3", - "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6", - "CounterMask": "6", + "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD", + "CounterMask": "1", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -210,7 +210,7 @@ { "EventCode": "0xC8", "UMask": "0x4", - "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). 
", + "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "HLE_RETIRED.ABORTED", @@ -242,6 +242,7 @@ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).", "Counter": "0,1,2,3", "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY", + "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -287,7 +288,7 @@ { "EventCode": "0xC9", "UMask": "0x4", - "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ", + "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "RTM_RETIRED.ABORTED", @@ -347,125 +348,125 @@ { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.", "PEBS": "2", - "MSRValue": "0x4", + "MSRValue": "0x200", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "100003", + "SampleAfterValue": "101", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.", "PEBS": "2", - "MSRValue": "0x8", + "MSRValue": "0x100", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. 
Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "50021", + "SampleAfterValue": "503", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.", "PEBS": "2", - "MSRValue": "0x10", + "MSRValue": "0x80", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "20011", + "SampleAfterValue": "1009", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.", "PEBS": "2", - "MSRValue": "0x20", + "MSRValue": "0x40", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "100007", + "SampleAfterValue": "2003", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.", "PEBS": "2", - "MSRValue": "0x40", + "MSRValue": "0x20", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. 
Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "2003", + "SampleAfterValue": "100007", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.", "PEBS": "2", - "MSRValue": "0x80", + "MSRValue": "0x10", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "1009", + "SampleAfterValue": "20011", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.", "PEBS": "2", - "MSRValue": "0x100", + "MSRValue": "0x8", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "503", + "SampleAfterValue": "50021", "CounterHTOff": "0,1,2,3" }, { "EventCode": "0xCD", "UMask": "0x1", - "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.", + "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.", "PEBS": "2", - "MSRValue": "0x200", + "MSRValue": "0x4", "Counter": "0,1,2,3", - "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512", + "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4", "MSRIndex": "0x3F6", - "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.", + "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. 
Reported latency may be longer than just the memory latency.", "TakenAlone": "1", - "SampleAfterValue": "101", + "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, { "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss in the L3.", - "MSRValue": "0x3fbc000001 ", + "BriefDescription": "Counts demand data reads TBD TBD", + "MSRValue": "0x3FBC000001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -473,12 +474,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00001 ", + "BriefDescription": "Counts demand data reads TBD", + "MSRValue": "0x083FC00001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -486,12 +487,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00001 ", + "BriefDescription": "Counts demand data reads TBD", + "MSRValue": "0x103FC00001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -499,12 +500,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00001 ", + "BriefDescription": "Counts demand data reads TBD", + "MSRValue": "0x063FC00001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -512,12 +513,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800001 ", + "BriefDescription": "Counts demand data reads TBD", + "MSRValue": "0x063B800001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -525,12 +526,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000001 ", + "BriefDescription": "Counts demand data reads TBD", + "MSRValue": "0x0604000001", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -538,12 +539,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.", - "MSRValue": "0x3fbc000002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD", + "MSRValue": "0x3FBC000002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -551,12 +552,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD", + "MSRValue": "0x083FC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -564,12 +565,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD", + "MSRValue": "0x103FC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -577,12 +578,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD", + "MSRValue": "0x063FC00002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -590,12 +591,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD", + "MSRValue": "0x063B800002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -603,12 +604,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000002 ", + "BriefDescription": "Counts all demand data writes (RFOs) TBD", + "MSRValue": "0x0604000002", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all demand data writes (RFOs) TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -616,12 +617,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss in the L3.", - "MSRValue": "0x3fbc000004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", + "MSRValue": "0x3FBC000004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -629,12 +630,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", + "MSRValue": "0x083FC00004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -642,12 +643,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", + "MSRValue": "0x103FC00004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -655,12 +656,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", + "MSRValue": "0x063FC00004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -668,12 +669,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", + "MSRValue": "0x063B800004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -681,12 +682,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000004 ", + "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", + "MSRValue": "0x0604000004", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -694,12 +695,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.", - "MSRValue": "0x3fbc000010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", + "MSRValue": "0x3FBC000010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -707,12 +708,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD", + "MSRValue": "0x083FC00010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -720,12 +721,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD", + "MSRValue": "0x103FC00010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -733,12 +734,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD", + "MSRValue": "0x063FC00010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -746,12 +747,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD", + "MSRValue": "0x063B800010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -759,12 +760,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000010 ", + "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD", + "MSRValue": "0x0604000010", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -772,12 +773,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.", - "MSRValue": "0x3fbc000020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", + "MSRValue": "0x3FBC000020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -785,12 +786,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", + "MSRValue": "0x083FC00020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -798,12 +799,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", + "MSRValue": "0x103FC00020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -811,12 +812,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", + "MSRValue": "0x063FC00020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -824,12 +825,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", + "MSRValue": "0x063B800020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -837,12 +838,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000020 ", + "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", + "MSRValue": "0x0604000020", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -850,12 +851,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.", - "MSRValue": "0x3fbc000080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", + "MSRValue": "0x3FBC000080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -863,12 +864,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", + "MSRValue": "0x083FC00080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -876,12 +877,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", + "MSRValue": "0x103FC00080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -889,12 +890,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", + "MSRValue": "0x063FC00080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -902,12 +903,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", + "MSRValue": "0x063B800080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -915,12 +916,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000080 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", + "MSRValue": "0x0604000080", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -928,12 +929,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.", - "MSRValue": "0x3fbc000100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", + "MSRValue": "0x3FBC000100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -941,12 +942,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", + "MSRValue": "0x083FC00100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -954,12 +955,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", + "MSRValue": "0x103FC00100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -967,12 +968,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", + "MSRValue": "0x063FC00100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -980,12 +981,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", + "MSRValue": "0x063B800100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -993,12 +994,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000100 ", + "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", + "MSRValue": "0x0604000100", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1006,12 +1007,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.", - "MSRValue": "0x3fbc000400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", + "MSRValue": "0x3FBC000400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1019,12 +1020,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", + "MSRValue": "0x083FC00400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1032,12 +1033,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", + "MSRValue": "0x103FC00400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1045,12 +1046,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", + "MSRValue": "0x063FC00400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1058,12 +1059,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", + "MSRValue": "0x063B800400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1071,90 +1072,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000400 ", + "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", + "MSRValue": "0x0604000400", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss in the L3.", - "MSRValue": "0x3fbc008000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc08000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc08000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc08000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b808000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", - "SampleAfterValue": "100003", - "CounterHTOff": "0,1,2,3" - }, - { - "Offcore": "1", - "EventCode": "0xB7, 0xBB", - "UMask": "0x1", - "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604008000 ", - "Counter": "0,1,2,3", - "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1162,12 +1085,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss in the L3.", - "MSRValue": "0x3fbc000490 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3FBC000490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1175,12 +1098,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00490 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x083FC00490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1188,12 +1111,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00490 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x103FC00490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1201,12 +1124,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00490 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063FC00490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1214,12 +1137,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800490 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063B800490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1227,12 +1150,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000490 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x0604000490", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1240,12 +1163,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss in the L3.", - "MSRValue": "0x3fbc000120 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3FBC000120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1253,12 +1176,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00120 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x083FC00120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1266,12 +1189,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00120 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x103FC00120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1279,12 +1202,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00120 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063FC00120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1292,12 +1215,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800120 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063B800120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1305,12 +1228,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000120 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x0604000120", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1318,12 +1241,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.", - "MSRValue": "0x3fbc000491 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3FBC000491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1331,12 +1254,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00491 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x083FC00491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1344,12 +1267,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00491 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x103FC00491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1357,12 +1280,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00491 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063FC00491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1370,12 +1293,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800491 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063B800491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1383,12 +1306,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000491 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x0604000491", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1396,12 +1319,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.", - "MSRValue": "0x3fbc000122 ", + "BriefDescription": "TBD TBD TBD", + "MSRValue": "0x3FBC000122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1409,12 +1332,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.", - "MSRValue": "0x083fc00122 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x083FC00122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1422,12 +1345,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.", - "MSRValue": "0x103fc00122 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x103FC00122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1435,12 +1358,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.", - "MSRValue": "0x063fc00122 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063FC00122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1448,12 +1371,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.", - "MSRValue": "0x063b800122 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x063B800122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" }, @@ -1461,12 +1384,12 @@ "Offcore": "1", "EventCode": "0xB7, 0xBB", "UMask": "0x1", - "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.", - "MSRValue": "0x0604000122 ", + "BriefDescription": "TBD TBD", + "MSRValue": "0x0604000122", "Counter": "0,1,2,3", "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD", - "MSRIndex": "0x1a6,0x1a7", - "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. 
Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.", + "MSRIndex": "0x1a6, 0x1a7", + "PublicDescription": "TBD TBD", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3" } diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json index f99f7ae27820..369f56c1d1b5 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json @@ -1,6 +1,5 @@ [ { - "EventCode": "0x00", "UMask": "0x1", "BriefDescription": "Instructions retired from execution.", "Counter": "Fixed counter 0", @@ -10,7 +9,6 @@ "CounterHTOff": "Fixed counter 0" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when the thread is not in halt state", "Counter": "Fixed counter 1", @@ -20,7 +18,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x2", "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.", "Counter": "Fixed counter 1", @@ -30,7 +27,6 @@ "CounterHTOff": "Fixed counter 1" }, { - "EventCode": "0x00", "UMask": "0x3", "BriefDescription": "Reference cycles when the core is not in halt state.", "Counter": "Fixed counter 2", @@ -99,24 +95,24 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "Invert": "1", "EventCode": "0x0E", "UMask": "0x1", - "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)", + "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread", "Counter": "0,1,2,3", - "EventName": "UOPS_ISSUED.ANY", - "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).", + "EventName": "UOPS_ISSUED.STALL_CYCLES", + "CounterMask": "1", + "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "Invert": "1", "EventCode": "0x0E", "UMask": "0x1", - "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread", + "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)", "Counter": "0,1,2,3", - "EventName": "UOPS_ISSUED.STALL_CYCLES", - "CounterMask": "1", - "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.", + "EventName": "UOPS_ISSUED.ANY", + "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -126,7 +122,7 @@ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.", "Counter": "0,1,2,3", "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH", - "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. 
Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.", + "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -203,19 +199,19 @@ { "EventCode": "0x3C", "UMask": "0x1", - "BriefDescription": "Core crystal clock cycles when the thread is unhalted.", + "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.", "Counter": "0,1,2,3", - "EventName": "CPU_CLK_UNHALTED.REF_XCLK", + "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY", + "AnyThread": "1", "SampleAfterValue": "2503", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0x3C", "UMask": "0x1", - "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.", + "BriefDescription": "Core crystal clock cycles when the thread is unhalted.", "Counter": "0,1,2,3", - "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY", - "AnyThread": "1", + "EventName": "CPU_CLK_UNHALTED.REF_XCLK", "SampleAfterValue": "2503", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -248,12 +244,12 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0x5E", + "EventCode": "0x59", "UMask": "0x1", - "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", + "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.", "Counter": "0,1,2,3", - "EventName": "RS_EVENTS.EMPTY_CYCLES", - "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.", + "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD", + "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -271,6 +267,16 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0x5E", + "UMask": "0x1", + "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", + "Counter": "0,1,2,3", + "EventName": "RS_EVENTS.EMPTY_CYCLES", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. 
This is usually caused by severely costly branch mispredictions, or allocator/FE issues.", + "SampleAfterValue": "2000003", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "EventCode": "0x87", "UMask": "0x1", "BriefDescription": "Stalls caused by changing prefix length of the instruction.", @@ -361,12 +367,12 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA2", + "EventCode": "0xa2", "UMask": "0x1", "BriefDescription": "Resource-related stall cycles", "Counter": "0,1,2,3", "EventName": "RESOURCE_STALLS.ANY", - "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.", + "PublicDescription": "Counts resource-related stall cycles.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -522,6 +528,17 @@ { "EventCode": "0xA8", "UMask": "0x1", + "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.", + "Counter": "0,1,2,3", + "EventName": "LSD.CYCLES_4_UOPS", + "CounterMask": "4", + "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).", + "SampleAfterValue": "2000003", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { + "EventCode": "0xA8", + "UMask": "0x1", "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.", "Counter": "0,1,2,3", "EventName": "LSD.CYCLES_ACTIVE", @@ -531,35 +548,35 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "EventCode": "0xA8", + "EventCode": "0xB1", "UMask": "0x1", - "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.", + "BriefDescription": "Cycles where at least 4 uops were executed per-thread", "Counter": "0,1,2,3", - "EventName": "LSD.CYCLES_4_UOPS", + "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC", "CounterMask": "4", - "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).", + "PublicDescription": "Cycles where at least 4 uops were executed per-thread.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xB1", "UMask": "0x1", - "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.", + "BriefDescription": "Cycles where at least 3 uops were executed per-thread", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.THREAD", - "PublicDescription": "Number of uops to be executed per-thread each cycle.", + "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", + "CounterMask": "3", + "PublicDescription": "Cycles where at least 3 uops were executed per-thread.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "Invert": "1", "EventCode": "0xB1", "UMask": "0x1", - "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.", + "BriefDescription": "Cycles where at least 2 uops were executed per-thread", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.STALL_CYCLES", - "CounterMask": "1", - "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.", + "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", + "CounterMask": "2", + "PublicDescription": "Cycles where at least 2 uops were executed 
per-thread.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -575,35 +592,24 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "Invert": "1", "EventCode": "0xB1", "UMask": "0x1", - "BriefDescription": "Cycles where at least 2 uops were executed per-thread", - "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC", - "CounterMask": "2", - "PublicDescription": "Cycles where at least 2 uops were executed per-thread.", - "SampleAfterValue": "2000003", - "CounterHTOff": "0,1,2,3,4,5,6,7" - }, - { - "EventCode": "0xB1", - "UMask": "0x1", - "BriefDescription": "Cycles where at least 3 uops were executed per-thread", + "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC", - "CounterMask": "3", - "PublicDescription": "Cycles where at least 3 uops were executed per-thread.", + "EventName": "UOPS_EXECUTED.STALL_CYCLES", + "CounterMask": "1", + "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "EventCode": "0xB1", "UMask": "0x1", - "BriefDescription": "Cycles where at least 4 uops were executed per-thread", + "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC", - "CounterMask": "4", - "PublicDescription": "Cycles where at least 4 uops were executed per-thread.", + "EventName": "UOPS_EXECUTED.THREAD", + "PublicDescription": "Number of uops to be executed per-thread each cycle.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -618,11 +624,12 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "Invert": "1", "EventCode": "0xB1", "UMask": "0x2", - "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", + "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE", "CounterMask": "1", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" @@ -630,10 +637,10 @@ { "EventCode": "0xB1", "UMask": "0x2", - "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", + "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", - "CounterMask": "2", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", + "CounterMask": "4", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -650,20 +657,19 @@ { "EventCode": "0xB1", "UMask": "0x2", - "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.", + "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.", "Counter": "0,1,2,3", - "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4", - "CounterMask": "4", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2", + "CounterMask": "2", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "Invert": "1", "EventCode": "0xB1", "UMask": "0x2", - "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.", + "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.", "Counter": "0,1,2,3", - "EventName": 
"UOPS_EXECUTED.CORE_CYCLES_NONE", + "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1", "CounterMask": "1", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" @@ -725,12 +731,14 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "Invert": "1", "EventCode": "0xC2", "UMask": "0x2", - "BriefDescription": "Retirement slots used.", + "BriefDescription": "Cycles with less than 10 actually retired uops.", "Counter": "0,1,2,3", - "EventName": "UOPS_RETIRED.RETIRE_SLOTS", - "PublicDescription": "Counts the retirement slots used.", + "EventName": "UOPS_RETIRED.TOTAL_CYCLES", + "CounterMask": "10", + "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -742,19 +750,17 @@ "Counter": "0,1,2,3", "EventName": "UOPS_RETIRED.STALL_CYCLES", "CounterMask": "1", - "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.", + "PublicDescription": "This event counts cycles without actually retired uops.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { - "Invert": "1", "EventCode": "0xC2", "UMask": "0x2", - "BriefDescription": "Cycles with less than 10 actually retired uops.", + "BriefDescription": "Retirement slots used.", "Counter": "0,1,2,3", - "EventName": "UOPS_RETIRED.TOTAL_CYCLES", - "CounterMask": "10", - "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.", + "EventName": "UOPS_RETIRED.RETIRE_SLOTS", + "PublicDescription": "Counts the retirement slots used.", "SampleAfterValue": "2000003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -766,6 +772,7 @@ "Counter": "0,1,2,3", "EventName": "MACHINE_CLEARS.COUNT", "CounterMask": "1", + "PublicDescription": "Number of machine clears (nukes) of any type.", "SampleAfterValue": "100003", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -841,11 +848,12 @@ { "EventCode": "0xC4", "UMask": "0x10", - "BriefDescription": "Not taken branch instructions retired.", + "BriefDescription": "Counts all not taken macro branch instructions retired.", + "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_INST_RETIRED.NOT_TAKEN", "Errata": "SKL091", - "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.", + "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.", "SampleAfterValue": "400009", "CounterHTOff": "0,1,2,3,4,5,6,7" }, @@ -919,7 +927,7 @@ { "EventCode": "0xC5", "UMask": "0x20", - "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ", + "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.", "PEBS": "1", "Counter": "0,1,2,3", "EventName": "BR_MISP_RETIRED.NEAR_TAKEN", @@ -938,6 +946,15 @@ "CounterHTOff": "0,1,2,3,4,5,6,7" }, { + "EventCode": "0xCC", + "UMask": "0x40", + "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). 
This event is not supported on first SKL and KBL products.", + "Counter": "0,1,2,3", + "EventName": "ROB_MISC_EVENTS.PAUSE_INST", + "SampleAfterValue": "2000003", + "CounterHTOff": "0,1,2,3,4,5,6,7" + }, + { "EventCode": "0xE6", "UMask": "0x1", "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.", diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index 71e9737f4614..56e03ba771f4 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -1,164 +1,394 @@ [ { - "BriefDescription": "Instructions Per Cycle (per logical thread)", + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Frontend_Bound" + }, + { + "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Frontend_Bound_SMT" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. 
This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations", + "MetricGroup": "TopdownL1", + "MetricName": "Bad_Speculation" + }, + { + "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Bad_Speculation_SMT" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend", + "MetricGroup": "TopdownL1", + "MetricName": "Backend_Bound" + }, + { + "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )", + "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. 
Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Backend_Bound_SMT" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. ", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired", + "MetricGroup": "TopdownL1", + "MetricName": "Retiring" + }, + { + "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))", + "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU.", + "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. 
SMT version; use when SMT is enabled and measuring per logical CPU.", + "MetricGroup": "TopdownL1_SMT", + "MetricName": "Retiring_SMT" + }, + { "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Instructions Per Cycle (per logical thread)", "MetricGroup": "TopDownL1", "MetricName": "IPC" }, { - "BriefDescription": "Uops Per Instruction", "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY", - "MetricGroup": "Pipeline", + "BriefDescription": "Uops Per Instruction", + "MetricGroup": "Pipeline;Retiring", "MetricName": "UPI" }, { - "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions", - "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )", - "MetricGroup": "Frontend", + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Instruction per taken branch", + "MetricGroup": "Branches;PGO", + "MetricName": "IpTB" + }, + { + "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN", + "BriefDescription": "Branch instructions per taken branch. ", + "MetricGroup": "Branches;PGO", + "MetricName": "BpTB" + }, + { + "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )", + "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions", + "MetricGroup": "PGO", "MetricName": "IFetch_Line_Utilization" }, { - "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)", - "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )", - "MetricGroup": "DSB; Frontend_Bandwidth", + "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ))", + "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)", + "MetricGroup": "DSB;Frontend_Bandwidth", "MetricName": "DSB_Coverage" }, { - "BriefDescription": "Cycles Per Instruction (threaded)", "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)", + "BriefDescription": "Cycles Per Instruction (threaded)", "MetricGroup": "Pipeline;Summary", "MetricName": "CPI" }, { - "BriefDescription": "Per-thread actual clocks when the logical processor is active. 
This is called 'Clockticks' in VTune.", "MetricExpr": "CPU_CLK_UNHALTED.THREAD", + "BriefDescription": "Per-thread actual clocks when the logical processor is active.", "MetricGroup": "Summary", "MetricName": "CLKS" }, { - "BriefDescription": "Total issue-pipeline slots", - "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", + "MetricExpr": "4 * cycles", + "BriefDescription": "Total issue-pipeline slots (per core)", "MetricGroup": "TopDownL1", "MetricName": "SLOTS" }, { - "BriefDescription": "Total number of retired Instructions", + "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Total issue-pipeline slots (per core)", + "MetricGroup": "TopDownL1_SMT", + "MetricName": "SLOTS_SMT" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS", + "BriefDescription": "Instructions per Load (lower number means loads are more frequent)", + "MetricGroup": "Instruction_Type;L1_Bound", + "MetricName": "IpL" + }, + { + "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES", + "BriefDescription": "Instructions per Store", + "MetricGroup": "Instruction_Type;Store_Bound", + "MetricName": "IpS" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES", + "BriefDescription": "Instructions per Branch", + "MetricGroup": "Branches;Instruction_Type;Port_5;Port_6", + "MetricName": "IpB" + }, + { + "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL", + "BriefDescription": "Instruction per (near) call", + "MetricGroup": "Branches", + "MetricName": "IpCall" + }, + { "MetricExpr": "INST_RETIRED.ANY", + "BriefDescription": "Total number of retired Instructions", "MetricGroup": "Summary", "MetricName": "Instructions" }, { + "MetricExpr": "INST_RETIRED.ANY / cycles", "BriefDescription": "Instructions Per Cycle (per physical core)", - "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)", "MetricGroup": "SMT", "MetricName": "CoreIPC" }, { + "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Instructions Per Cycle (per physical core)", + "MetricGroup": "SMT", + "MetricName": "CoreIPC_SMT" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS", + "MetricName": "FLOPc" + }, + { + "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))", + "BriefDescription": "Floating Point Operations Per Cycle", + "MetricGroup": "FLOPS_SMT", + "MetricName": "FLOPc_SMT" + }, + { + "MetricExpr": "UOPS_EXECUTED.THREAD / (( 
UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)", - "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)", "MetricGroup": "Pipeline;Ports_Utilization", "MetricName": "ILP" }, { - "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)", - "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)", - "MetricGroup": "Unknown_Branches", - "MetricName": "BAClear_Cost" + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "Branch_Misprediction_Cost" + }, + { + "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per branch misprediction (jeclear and baclear)", + "MetricGroup": "Branch_Mispredicts_SMT", + "MetricName": "Branch_Misprediction_Cost_SMT" }, { + "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES", + "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)", + "MetricGroup": "Branch_Mispredicts", + "MetricName": "IpMispredict" + }, + { + "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )", "BriefDescription": "Core actual clocks when any thread is active on the physical core", - "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD", "MetricGroup": "SMT", "MetricName": "CORE_CLKS" }, { - "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads", "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )", + "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)", "MetricGroup": "Memory_Bound;Memory_Lat", "MetricName": "Load_Miss_Real_Latency" }, { - "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss 
demand load when there is at least 1 such miss)", - "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)", + "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES", + "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-thread)", "MetricGroup": "Memory_Bound;Memory_BW", "MetricName": "MLP" }, { + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )", "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", - "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )", "MetricGroup": "TLB", "MetricName": "Page_Walks_Utilization" }, { - "BriefDescription": "Average CPU Utilization", + "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )", + "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses", + "MetricGroup": "TLB_SMT", + "MetricName": "Page_Walks_Utilization_SMT" + }, + { + "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L1D_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time", + "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L2_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Fill_BW" + }, + { + "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time", + "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "L3_Cache_Access_BW" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY", + "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L1MPKI" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI" + }, + { + "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY", + "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2MPKI_All" + }, + { + "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY", + "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)", + "MetricGroup": "Cache_Misses;", + "MetricName": "L2HPKI_All" + }, + { + "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY", + "BriefDescription": "L3 cache true misses per kilo instruction 
for retired demand loads", + "MetricGroup": "Cache_Misses;", + "MetricName": "L3MPKI" + }, + { "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@", + "BriefDescription": "Average CPU Utilization", "MetricGroup": "Summary", "MetricName": "CPU_Utilization" }, { + "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time", "BriefDescription": "Giga Floating Point Operations Per Second", - "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time", "MetricGroup": "FLOPS;Summary", "MetricName": "GFLOPs" }, { - "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Average Frequency Utilization relative nominal frequency", "MetricGroup": "Power", "MetricName": "Turbo_Utilization" }, { - "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0", + "BriefDescription": "Fraction of cycles where both hardware threads were active", "MetricGroup": "SMT;Summary", "MetricName": "SMT_2T_Utilization" }, { - "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC", + "BriefDescription": "Fraction of cycles spent in Kernel mode", "MetricGroup": "Summary", "MetricName": "Kernel_Utilization" }, { - "BriefDescription": "C3 residency percent per core", + "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_BW_Use" + }, + { + "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )", + "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "DRAM_Read_Latency" + }, + { + "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@", + "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches", + "MetricGroup": "Memory_BW", + "MetricName": "DRAM_Parallel_Reads" + }, + { + "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0", + "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. 
Accounts for demand loads and L1/L2 data-read prefetches", + "MetricGroup": "Memory_Lat", + "MetricName": "MEM_PMM_Read_Latency" + }, + { + "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0", + "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "PMM_Read_BW" + }, + { + "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0", + "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]", + "MetricGroup": "Memory_BW", + "MetricName": "PMM_Write_BW" + }, + { + "MetricExpr": "cha_0@event\\=0x0@", + "BriefDescription": "Socket actual clocks when any core is active on that socket", + "MetricGroup": "", + "MetricName": "Socket_CLKS" + }, + { "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per core", "MetricName": "C3_Core_Residency" }, { - "BriefDescription": "C6 residency percent per core", "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per core", "MetricName": "C6_Core_Residency" }, { - "BriefDescription": "C7 residency percent per core", "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per core", "MetricName": "C7_Core_Residency" }, { - "BriefDescription": "C2 residency percent per package", "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C2 residency percent per package", "MetricName": "C2_Pkg_Residency" }, { - "BriefDescription": "C3 residency percent per package", "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C3 residency percent per package", "MetricName": "C3_Pkg_Residency" }, { - "BriefDescription": "C6 residency percent per package", "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C6 residency percent per package", "MetricName": "C6_Pkg_Residency" }, { - "BriefDescription": "C7 residency percent per package", "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100", "MetricGroup": "Power", + "BriefDescription": "C7 residency percent per package", "MetricName": "C7_Pkg_Residency" } ] diff --git a/tools/perf/trace/beauty/renameat.c b/tools/perf/trace/beauty/renameat.c index 6dab340cc506..852d2e271833 100644 --- a/tools/perf/trace/beauty/renameat.c +++ b/tools/perf/trace/beauty/renameat.c @@ -2,7 +2,6 @@ // Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> #include "trace/beauty/beauty.h" -#include <uapi/linux/fs.h> static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix) { diff --git a/tools/perf/trace/strace/groups/string b/tools/perf/trace/strace/groups/string new file mode 100644 index 000000000000..c87129a3e3c4 --- /dev/null +++ b/tools/perf/trace/strace/groups/string @@ -0,0 +1,65 @@ +access +acct +add_key +chdir +chmod +chown +chroot +creat +delete_module +execve +execveat +faccessat +fchmodat +fchownat +fgetxattr +finit_module +fremovexattr +fsetxattr +futimesat +getxattr +inotify_add_watch +lchown +lgetxattr +link +linkat +listxattr +llistxattr +lremovexattr +lsetxattr +lstat +memfd_create +mkdir +mkdirat +mknod +mknodat +mq_open +mq_timedsend +mq_unlink +name_to_handle_at 
+newfstatat +open +openat +pivot_root +pwrite64 +quotactl +readlink +readlinkat +removexattr +rename +renameat +renameat2 +request_key +rmdir +setxattr +stat +statfs +statx +swapoff +swapon +symlink +symlinkat +truncate +unlink +unlinkat +utimensat diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index c8b01176c9e1..09762985c713 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1714,8 +1714,8 @@ static int symbol__disassemble_bpf(struct symbol *sym, if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) return -1; - pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__, - sym->name, sym->start, sym->end - sym->start); + pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, + sym->name, sym->start, sym->end - sym->start); memset(tpath, 0, sizeof(tpath)); perf_exe(tpath, sizeof(tpath)); @@ -1740,7 +1740,7 @@ static int symbol__disassemble_bpf(struct symbol *sym, info_linear = info_node->info_linear; sub_id = dso->bpf_prog.sub_id; - info.buffer = (void *)(info_linear->info.jited_prog_insns); + info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); info.buffer_length = info_linear->info.jited_prog_len; if (info_linear->info.nr_line_info) @@ -1776,7 +1776,7 @@ static int symbol__disassemble_bpf(struct symbol *sym, const char *srcline; u64 addr; - addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id]; + addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; count = disassemble(pc, &info); if (prog_linfo) diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index ca0fff6272be..06f48312c5ed 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c @@ -7,7 +7,6 @@ #include "asm/bug.h" #include "debug.h" #include <unistd.h> -#include <asm/unistd.h> #include <sys/syscall.h> static unsigned long flag = PERF_FLAG_FD_CLOEXEC; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 110804936fc3..de488b43f440 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -422,11 +422,9 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm) if (!etmq->packet) goto out_free; - if (etm->synth_opts.last_branch || etm->sample_branches) { - etmq->prev_packet = zalloc(szp); - if (!etmq->prev_packet) - goto out_free; - } + etmq->prev_packet = zalloc(szp); + if (!etmq->prev_packet) + goto out_free; if (etm->synth_opts.last_branch) { size_t sz = sizeof(struct branch_stack); @@ -981,7 +979,6 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) * PREV_PACKET is a branch. 
*/ if (etm->synth_opts.last_branch && - etmq->prev_packet && etmq->prev_packet->sample_type == CS_ETM_RANGE && etmq->prev_packet->last_instr_taken_branch) cs_etm__update_last_branch_rb(etmq); @@ -1014,7 +1011,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) etmq->period_instructions = instrs_over; } - if (etm->sample_branches && etmq->prev_packet) { + if (etm->sample_branches) { bool generate_sample = false; /* Generate sample for tracing on packet */ @@ -1071,9 +1068,6 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) struct cs_etm_auxtrace *etm = etmq->etm; struct cs_etm_packet *tmp; - if (!etmq->prev_packet) - return 0; - /* Handle start tracing packet */ if (etmq->prev_packet->sample_type == CS_ETM_EMPTY) goto swap_packet; diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c index 26af43ad9ddd..e0311c9750ad 100644 --- a/tools/perf/util/data-convert-bt.c +++ b/tools/perf/util/data-convert-bt.c @@ -310,7 +310,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, if (flags & TEP_FIELD_IS_DYNAMIC) { unsigned long long tmp_val; - tmp_val = tep_read_number(fmtf->event->pevent, + tmp_val = tep_read_number(fmtf->event->tep, data + offset, len); offset = tmp_val; len = offset >> 16; @@ -354,7 +354,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, unsigned long long value_int; value_int = tep_read_number( - fmtf->event->pevent, + fmtf->event->tep, data + offset + i * len, len); if (!(flags & TEP_FIELD_IS_SIGNED)) diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 9494f9dc61ec..6a3eaf7d9353 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -115,8 +115,8 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) } node = NULL; - up_read(&env->bpf_progs.lock); out: + up_read(&env->bpf_progs.lock); return node; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 36ae7e92dab1..4e908ec1ef64 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -6,6 +6,7 @@ #include <stdio.h> #include <linux/kernel.h> #include <linux/bpf.h> +#include <linux/perf_event.h> #include "../perf.h" #include "build-id.h" diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 51ead577533f..4b6783ff5813 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1009,7 +1009,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, */ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, unsigned int auxtrace_pages, - bool auxtrace_overwrite, int nr_cblocks, int affinity) + bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush) { struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; @@ -1019,7 +1019,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, * Its value is decided by evsel's write_backward. * So &mp should not be passed through const pointer. 
*/ - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity }; + struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush }; if (!evlist->mmap) evlist->mmap = perf_evlist__alloc_mmap(evlist, false); @@ -1051,7 +1051,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) { - return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS); + return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1); } int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 6a94785b9100..c9a0f72677fd 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -177,7 +177,8 @@ unsigned long perf_event_mlock_kb_in_pages(void); int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, unsigned int auxtrace_pages, - bool auxtrace_overwrite, int nr_cblocks, int affinity); + bool auxtrace_overwrite, int nr_cblocks, + int affinity, int flush); int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages); void perf_evlist__munmap(struct perf_evlist *evlist); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 966360844fff..a10cf4cde920 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -580,6 +580,12 @@ static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size) return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret); } +static int perf_evsel__tool_name(char *bf, size_t size) +{ + int ret = scnprintf(bf, size, "duration_time"); + return ret; +} + const char *perf_evsel__name(struct perf_evsel *evsel) { char bf[128]; @@ -601,7 +607,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel) break; case PERF_TYPE_SOFTWARE: - perf_evsel__sw_name(evsel, bf, sizeof(bf)); + if (evsel->tool_event) + perf_evsel__tool_name(bf, sizeof(bf)); + else + perf_evsel__sw_name(evsel, bf, sizeof(bf)); break; case PERF_TYPE_TRACEPOINT: diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 0f2c6c93d721..6d190cbf1070 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -75,6 +75,11 @@ struct perf_stat_evsel; typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data); +enum perf_tool_event { + PERF_TOOL_NONE = 0, + PERF_TOOL_DURATION_TIME = 1, +}; + /** struct perf_evsel - event selector * * @evlist - evlist this evsel is in, if it is in one. @@ -121,6 +126,7 @@ struct perf_evsel { unsigned int sample_size; int id_pos; int is_pos; + enum perf_tool_event tool_event; bool uniquified_name; bool snapshot; bool supported; diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index cdc7740fc181..ef3d79b2c90b 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c @@ -440,6 +440,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c perf_mmap__setup_affinity_mask(map, mp); + map->flush = mp->flush; + if (auxtrace_mmap__mmap(&map->auxtrace_mmap, &mp->auxtrace_mp, map->base, fd)) return -1; @@ -492,7 +494,7 @@ static int __perf_mmap__read_init(struct perf_mmap *md) md->start = md->overwrite ? head : old; md->end = md->overwrite ? 
old : head; - if (md->start == md->end) + if ((md->end - md->start) < md->flush) return -EAGAIN; size = md->end - md->start; diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index e566c19b242b..b82f8c2d55c4 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -39,6 +39,7 @@ struct perf_mmap { } aio; #endif cpu_set_t affinity_mask; + u64 flush; }; /* @@ -70,7 +71,7 @@ enum bkw_mmap_state { }; struct mmap_params { - int prot, mask, nr_cblocks, affinity; + int prot, mask, nr_cblocks, affinity, flush; struct auxtrace_mmap_params auxtrace_mp; }; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 5ef4939408f2..4432bfe039fd 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -317,10 +317,12 @@ static struct perf_evsel * __add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, char *name, struct perf_pmu *pmu, - struct list_head *config_terms, bool auto_merge_stats) + struct list_head *config_terms, bool auto_merge_stats, + const char *cpu_list) { struct perf_evsel *evsel; - struct cpu_map *cpus = pmu ? pmu->cpus : NULL; + struct cpu_map *cpus = pmu ? pmu->cpus : + cpu_list ? cpu_map__new(cpu_list) : NULL; event_attr_init(attr); @@ -348,7 +350,25 @@ static int add_event(struct list_head *list, int *idx, struct perf_event_attr *attr, char *name, struct list_head *config_terms) { - return __add_event(list, idx, attr, name, NULL, config_terms, false) ? 0 : -ENOMEM; + return __add_event(list, idx, attr, name, NULL, config_terms, false, NULL) ? 0 : -ENOMEM; +} + +static int add_event_tool(struct list_head *list, int *idx, + enum perf_tool_event tool_event) +{ + struct perf_evsel *evsel; + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_DUMMY, + }; + + evsel = __add_event(list, idx, &attr, NULL, NULL, NULL, false, "0"); + if (!evsel) + return -ENOMEM; + evsel->tool_event = tool_event; + if (tool_event == PERF_TOOL_DURATION_TIME) + evsel->unit = strdup("ns"); + return 0; } static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) @@ -1233,6 +1253,13 @@ int parse_events_add_numeric(struct parse_events_state *parse_state, get_config_name(head_config), &config_terms); } +int parse_events_add_tool(struct parse_events_state *parse_state, + struct list_head *list, + enum perf_tool_event tool_event) +{ + return add_event_tool(list, &parse_state->idx, tool_event); +} + int parse_events_add_pmu(struct parse_events_state *parse_state, struct list_head *list, char *name, struct list_head *head_config, @@ -1267,7 +1294,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, if (!head_config) { attr.type = pmu->type; - evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); + evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, + auto_merge_stats, NULL); if (evsel) { evsel->pmu_name = name; evsel->use_uncore_alias = use_uncore_alias; @@ -1295,7 +1323,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel = __add_event(list, &parse_state->idx, &attr, get_config_name(head_config), pmu, - &config_terms, auto_merge_stats); + &config_terms, auto_merge_stats, NULL); if (evsel) { evsel->unit = info.unit; evsel->scale = info.scale; @@ -2429,6 +2457,25 @@ out_enomem: return evt_num; } +static void print_tool_event(const char *name, const char *event_glob, + bool name_only) +{ + if (event_glob && !strglobmatch(name, event_glob)) + return; + if (name_only) + 
printf("%s ", name); + else + printf(" %-50s [%s]\n", name, "Tool event"); + +} + +void print_tool_events(const char *event_glob, bool name_only) +{ + print_tool_event("duration_time", event_glob, name_only); + if (pager_in_use()) + printf("\n"); +} + void print_symbol_events(const char *event_glob, unsigned type, struct event_symbol *syms, unsigned max, bool name_only) @@ -2512,6 +2559,7 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag, print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, event_symbols_sw, PERF_COUNT_SW_MAX, name_only); + print_tool_events(event_glob, name_only); print_hwcache_events(event_glob, name_only); diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 5ed035cbcbb7..a052cd6ac63e 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -160,6 +160,10 @@ int parse_events_add_numeric(struct parse_events_state *parse_state, struct list_head *list, u32 type, u64 config, struct list_head *head_config); +enum perf_tool_event; +int parse_events_add_tool(struct parse_events_state *parse_state, + struct list_head *list, + enum perf_tool_event tool_event); int parse_events_add_cache(struct list_head *list, int *idx, char *type, char *op_result1, char *op_result2, struct parse_events_error *error, @@ -200,6 +204,7 @@ extern struct event_symbol event_symbols_sw[]; void print_symbol_events(const char *event_glob, unsigned type, struct event_symbol *syms, unsigned max, bool name_only); +void print_tool_events(const char *event_glob, bool name_only); void print_tracepoint_events(const char *subsys_glob, const char *event_glob, bool name_only); int print_hwcache_events(const char *event_glob, bool name_only); diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 7805c71aaae2..c54bfe88626c 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -15,6 +15,7 @@ #include "../perf.h" #include "parse-events.h" #include "parse-events-bison.h" +#include "evsel.h" char *parse_events_get_text(yyscan_t yyscanner); YYSTYPE *parse_events_get_lval(yyscan_t yyscanner); @@ -154,6 +155,14 @@ static int sym(yyscan_t scanner, int type, int config) return type == PERF_TYPE_HARDWARE ? 
PE_VALUE_SYM_HW : PE_VALUE_SYM_SW; } +static int tool(yyscan_t scanner, enum perf_tool_event event) +{ + YYSTYPE *yylval = parse_events_get_lval(scanner); + + yylval->num = event; + return PE_VALUE_SYM_TOOL; +} + static int term(yyscan_t scanner, int type) { YYSTYPE *yylval = parse_events_get_lval(scanner); @@ -322,7 +331,7 @@ cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COU alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); } emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); } dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); } -duration_time { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); } +duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); } bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); } /* diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 44819bdb037d..6ad8d4914969 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -14,6 +14,7 @@ #include <linux/types.h> #include "util.h" #include "pmu.h" +#include "evsel.h" #include "debug.h" #include "parse-events.h" #include "parse-events-bison.h" @@ -45,6 +46,7 @@ static void inc_group_count(struct list_head *list, %token PE_START_EVENTS PE_START_TERMS %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM +%token PE_VALUE_SYM_TOOL %token PE_EVENT_NAME %token PE_NAME %token PE_BPF_OBJECT PE_BPF_SOURCE @@ -58,6 +60,7 @@ static void inc_group_count(struct list_head *list, %type <num> PE_VALUE %type <num> PE_VALUE_SYM_HW %type <num> PE_VALUE_SYM_SW +%type <num> PE_VALUE_SYM_TOOL %type <num> PE_RAW %type <num> PE_TERM %type <str> PE_NAME @@ -321,6 +324,15 @@ value_sym sep_slash_slash_dc ABORT_ON(parse_events_add_numeric(_parse_state, list, type, config, NULL)); $$ = list; } +| +PE_VALUE_SYM_TOOL sep_slash_slash_dc +{ + struct list_head *list; + + ALLOC_LIST(list); + ABORT_ON(parse_events_add_tool(_parse_state, list, $1)); + $$ = list; +} event_legacy_cache: PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_event_config diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index dda0ac978b1e..6aa7e2352e16 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -342,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent) static PyObject* tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field) { - struct tep_handle *pevent = field->event->pevent; + struct tep_handle *pevent = field->event->tep; void *data = pe->sample.raw_data; PyObject *ret = NULL; unsigned long long val; diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 5f06378a482b..61aa7f3df915 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -372,7 +372,7 @@ static void perl_process_tracepoint(struct perf_sample *sample, ns = nsecs - s * NSEC_PER_SEC; scripting_context->event_data = data; - scripting_context->pevent = evsel->tp_format->pevent; + scripting_context->pevent = evsel->tp_format->tep; ENTER; SAVETMPS; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 09604c6508f0..22f52b669871 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ 
b/tools/perf/util/scripting-engines/trace-event-python.c @@ -837,7 +837,7 @@ static void python_process_tracepoint(struct perf_sample *sample, ns = nsecs - s * NSEC_PER_SEC; scripting_context->event_data = data; - scripting_context->pevent = evsel->tp_format->pevent; + scripting_context->pevent = evsel->tp_format->tep; context = _PyCapsule_New(scripting_context, NULL, NULL); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index b17f1c9bc965..bad5f87ae001 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1928,12 +1928,14 @@ more: size = event->header.size; + skip = -EINVAL; + if (size < sizeof(struct perf_event_header) || (skip = rd->process(session, event, file_pos)) < 0) { - pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", + pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", file_offset + head, event->header.size, - event->header.type); - err = -EINVAL; + event->header.type, strerror(-skip)); + err = skip; goto out; } diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index 6d043c78f3c2..3324f23c7efc 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -18,11 +18,6 @@ #define CNTR_NOT_SUPPORTED "<not supported>" #define CNTR_NOT_COUNTED "<not counted>" -static bool is_duration_time(struct perf_evsel *evsel) -{ - return !strcmp(evsel->name, "duration_time"); -} - static void print_running(struct perf_stat_config *config, u64 run, u64 ena) { @@ -628,9 +623,6 @@ static void print_aggr(struct perf_stat_config *config, ad.id = id = config->aggr_map->map[s]; first = true; evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; - ad.val = ad.ena = ad.run = 0; ad.nr = 0; if (!collect_data(config, counter, aggr_cb, &ad)) @@ -848,8 +840,6 @@ static void print_no_aggr_metric(struct perf_stat_config *config, if (prefix) fputs(prefix, config->output); evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; if (first) { aggr_printout(config, counter, cpu, 0); first = false; @@ -906,8 +896,6 @@ static void print_metric_headers(struct perf_stat_config *config, /* Print metrics headers only */ evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; os.evsel = counter; out.ctx = &os; out.print_metric = print_metric_header; @@ -1136,15 +1124,11 @@ perf_evlist__print_counters(struct perf_evlist *evlist, break; case AGGR_THREAD: evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; print_aggr_thread(config, _target, counter, prefix); } break; case AGGR_GLOBAL: evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; print_counter_aggr(config, counter, prefix); } if (metric_only) @@ -1155,8 +1139,6 @@ perf_evlist__print_counters(struct perf_evlist *evlist, print_no_aggr_metric(config, evlist, prefix); else { evlist__for_each_entry(evlist, counter) { - if (is_duration_time(counter)) - continue; print_counter(config, counter, prefix); } } diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index ad74be1f0e42..863955e4094e 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -111,7 +111,7 @@ raw_field_value(struct tep_event *event, const char *name, void *data) unsigned long long read_size(struct tep_event *event, void *ptr, int size) { - return tep_read_number(event->pevent, ptr, size); + return tep_read_number(event->tep, ptr, size); } void 
event_format__fprintf(struct tep_event *event, diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index efe2f58cff4e..48d53d8e3e16 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -442,7 +442,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) tep_set_flag(pevent, TEP_NSEC_OUTPUT); tep_set_file_bigendian(pevent, file_bigendian); - tep_set_host_bigendian(pevent, host_bigendian); + tep_set_local_bigendian(pevent, host_bigendian); if (do_read(buf, 1) < 0) goto out; diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c index cbe0dd758e3a..01b9d89bf5bf 100644 --- a/tools/perf/util/trace-event.c +++ b/tools/perf/util/trace-event.c @@ -40,7 +40,7 @@ int trace_event__init(struct trace_event *t) static int trace_event__init2(void) { - int be = tep_host_bigendian(); + int be = tep_is_bigendian(); struct tep_handle *pevent; if (trace_event__init(&tevent)) @@ -49,7 +49,7 @@ static int trace_event__init2(void) pevent = tevent.pevent; tep_set_flag(pevent, TEP_NSEC_OUTPUT); tep_set_file_bigendian(pevent, be); - tep_set_host_bigendian(pevent, be); + tep_set_local_bigendian(pevent, be); tevent_initialized = true; return 0; } diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index 4715cfba20dc..93f99c6b7d79 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, #endif max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1; guest_page_size = (1ul << guest_page_shift); - /* 1G of guest page sized pages */ - guest_num_pages = (1ul << (30 - guest_page_shift)); + /* + * A little more than 1G of guest page sized pages. Cover the + * case where the size is not aligned to 64 pages. 
+ */ + guest_num_pages = (1ul << (30 - guest_page_shift)) + 3; host_page_size = getpagesize(); host_num_pages = (guest_num_pages * guest_page_size) / host_page_size + !!((guest_num_pages * guest_page_size) % host_page_size); @@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations, kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap); #ifdef USE_CLEAR_DIRTY_LOG kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0, - DIV_ROUND_UP(host_num_pages, 64) * 64); + host_num_pages); #endif vm_dirty_log_verify(bmap); iteration++; diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c index 264425f75806..9a21e912097c 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c @@ -141,7 +141,13 @@ int main(int argc, char *argv[]) free(hv_cpuid_entries); - vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); + + if (rv) { + fprintf(stderr, + "Enlightened VMCS is unsupported, skip related test\n"); + goto vm_free; + } hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); if (!hv_cpuid_entries) @@ -151,6 +157,7 @@ int main(int argc, char *argv[]) free(hv_cpuid_entries); +vm_free: kvm_vm_free(vm); return 0; diff --git a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh index 43540f1828cc..2deea2169fc2 100755 --- a/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh +++ b/tools/testing/selftests/rcutorture/bin/configNR_CPUS.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Extract the number of CPUs expected from the specified Kconfig-file # fragment by checking CONFIG_SMP and CONFIG_NR_CPUS. If the specified @@ -7,23 +8,9 @@ # # Usage: configNR_CPUS.sh config-frag # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2013 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> cf=$1 if test ! -r $cf diff --git a/tools/testing/selftests/rcutorture/bin/config_override.sh b/tools/testing/selftests/rcutorture/bin/config_override.sh index ef7fcbac3d42..90016c359e83 100755 --- a/tools/testing/selftests/rcutorture/bin/config_override.sh +++ b/tools/testing/selftests/rcutorture/bin/config_override.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # config_override.sh base override # @@ -6,23 +7,9 @@ # that conflict with any in override, concatenating what remains and # sending the result to standard output. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2017 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> base=$1 if test -r $base diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh index 197deece7c7c..31584cee84d7 100755 --- a/tools/testing/selftests/rcutorture/bin/configcheck.sh +++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh @@ -1,23 +1,11 @@ #!/bin/bash -# Usage: configcheck.sh .config .config-template -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. +# SPDX-License-Identifier: GPL-2.0+ # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. +# Usage: configcheck.sh .config .config-template # # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> T=${TMPDIR-/tmp}/abat-chk-config.sh.$$ trap 'rm -rf $T' 0 @@ -26,6 +14,7 @@ mkdir $T cat $1 > $T/.config cat $2 | sed -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' | +grep -v '^CONFIG_INITRAMFS_SOURCE' | awk ' { print "if grep -q \"" $0 "\" < '"$T/.config"'"; diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh index 65541c21a544..40359486b3a8 100755 --- a/tools/testing/selftests/rcutorture/bin/configinit.sh +++ b/tools/testing/selftests/rcutorture/bin/configinit.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Usage: configinit.sh config-spec-file build-output-dir results-dir # @@ -14,23 +15,9 @@ # for example, "O=/tmp/foo". If this argument is omitted, the .config # file will be generated directly in the current directory. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2013 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. 
McKenney <paulmck@linux.ibm.com> T=${TMPDIR-/tmp}/configinit.sh.$$ trap 'rm -rf $T' 0 diff --git a/tools/testing/selftests/rcutorture/bin/cpus2use.sh b/tools/testing/selftests/rcutorture/bin/cpus2use.sh index bb99cde3f5f9..ff7102212703 100755 --- a/tools/testing/selftests/rcutorture/bin/cpus2use.sh +++ b/tools/testing/selftests/rcutorture/bin/cpus2use.sh @@ -1,26 +1,13 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Get an estimate of how CPU-hoggy to be. # # Usage: cpus2use.sh # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2013 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> ncpus=`grep '^processor' /proc/cpuinfo | wc -l` idlecpus=`mpstat | tail -1 | \ diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh index 65f6655026f0..6bcb8b5b2ff2 100644 --- a/tools/testing/selftests/rcutorture/bin/functions.sh +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -1,24 +1,11 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Shell functions for the rest of the scripts. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2013 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> # bootparam_hotplug_cpu bootparam-string # diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh index 3633828375e3..435b60933985 100755 --- a/tools/testing/selftests/rcutorture/bin/jitter.sh +++ b/tools/testing/selftests/rcutorture/bin/jitter.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Alternate sleeping and spinning on randomly selected CPUs. The purpose # of this script is to inflict random OS jitter on a concurrently running @@ -11,23 +12,9 @@ # sleepmax: Maximum microseconds to sleep, defaults to one second. # spinmax: Maximum microseconds to spin, defaults to one millisecond. 
# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2016 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> me=$(($1 * 1000)) duration=$2 diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh index 9115fcdb5617..c27a0bbb9c02 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh @@ -1,26 +1,13 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Build a kvm-ready Linux kernel from the tree in the current directory. # # Usage: kvm-build.sh config-template build-dir resdir # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> config_template=${1} if test -z "$config_template" -o ! -f "$config_template" -o ! -r "$config_template" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh index 98f650c9bf54..8426fe1f15ee 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh @@ -1,4 +1,5 @@ #!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ # # Invoke a text editor on all console.log files for all runs with diagnostics, # that is, on all such files having a console.log.diags counterpart. @@ -10,6 +11,10 @@ # # The "directory" above should end with the date/time directory, for example, # "tools/testing/selftests/rcutorture/res/2018.02.25-14:27:27". +# +# Copyright (C) IBM Corporation, 2018 +# +# Author: Paul E. McKenney <paulmck@linux.ibm.com> rundir="${1}" if test -z "$rundir" -o ! -d "$rundir" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh index 2de92f43ee8c..f3a7a5e2b89d 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh @@ -1,26 +1,13 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Analyze a given results directory for locktorture progress. 
# # Usage: kvm-recheck-lock.sh resdir # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2014 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> i="$1" if test -d "$i" -a -r "$i" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh index 0fa8a61ccb7b..2a7f3f4756a7 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh @@ -1,26 +1,13 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Analyze a given results directory for rcutorture progress. # # Usage: kvm-recheck-rcu.sh resdir # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2014 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> i="$1" if test -d "$i" -a -r "$i" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh index 8948f7926b21..7d3c2be66c64 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Analyze a given results directory for rcuperf performance measurements, # looking for ftrace data. Exits with 0 if data was found, analyzed, and @@ -7,23 +8,9 @@ # # Usage: kvm-recheck-rcuperf-ftrace.sh resdir # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. 
-# # Copyright (C) IBM Corporation, 2016 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> i="$1" . functions.sh diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh index ccebf772fa1e..db0375a57f28 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh @@ -1,26 +1,13 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Analyze a given results directory for rcuperf performance measurements. # # Usage: kvm-recheck-rcuperf.sh resdir # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2016 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> i="$1" if test -d "$i" -a -r "$i" diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh index c9bab57a77eb..2adde6aaafdb 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Given the results directories for previous KVM-based torture runs, # check the build and console output for errors. Given a directory @@ -6,23 +7,9 @@ # # Usage: kvm-recheck.sh resdir ... # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH . functions.sh diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh index 58ca758a5786..0eb1ec16d78a 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Run a kvm-based test of the specified tree on the specified configs. # Fully automated run and error checking, no graphics console. 
@@ -20,23 +21,9 @@ # # More sophisticated argument parsing is clearly needed. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$ trap 'rm -rf $T' 0 diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 19864f1cb27a..8f1e337b9b54 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Run a series of tests under KVM. By default, this series is specified # by the relevant CFLIST file, but can be overridden by the --configs @@ -6,23 +7,9 @@ # # Usage: kvm.sh [ options ] # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> scriptname=$0 args="$*" diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh index 83552bb007b4..6fa9bd1ddc09 100755 --- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh +++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh @@ -1,21 +1,8 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Create an initrd directory if one does not already exist. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. 
-# # Copyright (C) IBM Corporation, 2013 # # Author: Connor Shu <Connor.Shu@ibm.com> diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh index 24fe5f822b28..0701b3bf6ade 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-build.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Check the build output from an rcutorture run for goodness. # The "file" is a pathname on the local system, and "title" is @@ -8,23 +9,9 @@ # # Usage: parse-build.sh file title # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> F=$1 title=$2 diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh index 84933f6aed77..4508373a922f 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-console.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh @@ -1,4 +1,5 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Check the console output from an rcutorture run for oopses. # The "file" is a pathname on the local system, and "title" is @@ -6,23 +7,9 @@ # # Usage: parse-console.sh file title # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2011 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> T=${TMPDIR-/tmp}/parse-console.sh.$$ file="$1" diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh index 80eb646e1319..d3e4b2971f92 100644 --- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh @@ -1,24 +1,11 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Kernel-version-dependent shell functions for the rest of the scripts. 
# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2014 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> # locktorture_param_onoff bootparam-string config-file # diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh index 7bab8246392b..effa415f9b92 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh @@ -1,24 +1,11 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Kernel-version-dependent shell functions for the rest of the scripts. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2013 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. McKenney <paulmck@linux.ibm.com> # rcutorture_param_n_barrier_cbs bootparam-string # diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh index d36b8fd6f0fc..777d5b0c190f 100644 --- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh +++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh @@ -1,24 +1,11 @@ #!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ # # Torture-suite-dependent shell functions for the rest of the scripts. # -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, you can access it online at -# http://www.gnu.org/licenses/gpl-2.0.html. -# # Copyright (C) IBM Corporation, 2015 # -# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> +# Authors: Paul E. 
McKenney <paulmck@linux.ibm.com> # per_version_boot_params bootparam-string config-file seconds # diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 3417f2dbc366..7fc272ecae16 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -508,6 +508,14 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); /* + * Update the timer output so that it is likely to match the + * state we're about to restore. If the timer expires between + * this point and the register restoration, we'll take the + * interrupt anyway. + */ + kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer); + + /* * When using a userspace irqchip with the architected timers and a * host interrupt controller that doesn't support an active state, we * must still prevent continuously exiting from the guest, and @@ -730,7 +738,6 @@ static void kvm_timer_init_interrupt(void *info) int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) { struct arch_timer_context *timer; - bool level; switch (regid) { case KVM_REG_ARM_TIMER_CTL: @@ -758,10 +765,6 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) return -1; } - level = kvm_timer_should_fire(timer); - kvm_timer_update_irq(vcpu, level, timer); - timer_emulate(timer); - return 0; } @@ -812,7 +815,7 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, switch (treg) { case TIMER_REG_TVAL: - val = kvm_phys_timer_read() - timer->cntvoff - timer->cnt_cval; + val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff; break; case TIMER_REG_CTL: @@ -858,7 +861,7 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, { switch (treg) { case TIMER_REG_TVAL: - timer->cnt_cval = val - kvm_phys_timer_read() - timer->cntvoff; + timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + val; break; case TIMER_REG_CTL: diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 99c37384ba7b..f412ebc90610 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -934,7 +934,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { - unsigned int i; + unsigned int i, ret; int phys_target = kvm_target_cpu(); if (init->target != phys_target) @@ -969,9 +969,14 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, vcpu->arch.target = phys_target; /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} + ret = kvm_reset_vcpu(vcpu); + if (ret) { + vcpu->arch.target = -1; + bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); + } + return ret; +} static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 27c958306449..a39dcfdbcc65 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1781,8 +1781,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * Only PMD_SIZE transparent hugepages(THP) are * currently supported. This code will need to be * updated to support other THP sizes. + * + * Make sure the host VA and the guest IPA are sufficiently + * aligned and that the block is contained within the memslot. 
*/ - if (transparent_hugepage_adjust(&pfn, &fault_ipa)) + if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) && + transparent_hugepage_adjust(&pfn, &fault_ipa)) vma_pagesize = PMD_SIZE; } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 4a12322bf7df..9f4843fe9cda 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -200,6 +200,9 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu, vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS; + if (was_enabled && !vgic_cpu->lpis_enabled) + vgic_flush_pending_lpis(vcpu); + if (!was_enabled && vgic_cpu->lpis_enabled) vgic_enable_lpis(vcpu); } diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 3af69f2a3866..191deccf60bf 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -151,6 +151,27 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) kfree(irq); } +void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct vgic_irq *irq, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); + + list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { + if (irq->intid >= VGIC_MIN_LPI) { + raw_spin_lock(&irq->irq_lock); + list_del(&irq->ap_list); + irq->vcpu = NULL; + raw_spin_unlock(&irq->irq_lock); + vgic_put_irq(vcpu->kvm, irq); + } + } + + raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); +} + void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) { WARN_ON(irq_set_irqchip_state(irq->host_irq, diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index a90024718ca4..abeeffabc456 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -238,6 +238,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu); bool vgic_has_its(struct kvm *kvm); int kvm_vgic_register_its_device(void); void vgic_enable_lpis(struct kvm_vcpu *vcpu); +void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu); int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi); int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dc8edc97ba85..a704d1f9bd96 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1240,7 +1240,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) return -EINVAL; - if ((log->first_page & 63) || (log->num_pages & 63)) + if (log->first_page & 63) return -EINVAL; slots = __kvm_memslots(kvm, as_id); @@ -1253,8 +1253,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, n = kvm_dirty_bitmap_bytes(memslot); if (log->first_page > memslot->npages || - log->num_pages > memslot->npages - log->first_page) - return -EINVAL; + log->num_pages > memslot->npages - log->first_page || + (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) + return -EINVAL; *flush = false; dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); |