author		Stephane Eranian <eranian@google.com>	2022-03-22 15:15:13 -0700
committer	Peter Zijlstra <peterz@infradead.org>	2022-04-05 10:24:38 +0200
commit		d5616bac7adadbf42a3b63b8717e75eb82a2cc2c (patch)
tree		de075a68f6b4b1265a8ecd0f0cf1bf65e08d084e /arch/x86/events/amd
parent		2a606a18cd672a16343d146a126721b34cc6adbd (diff)
perf/x86/amd: Add idle hooks for branch sampling
On AMD Fam19h Zen3, the branch sampling (BRS) feature must be disabled
before entering low power and re-enabled (if it was active) when
returning from low power. Otherwise, the NMI interrupt may be held up
for too long and cause problems. Stopping BRS will cause the NMI to be
delivered if it was held up.

Define a perf_amd_brs_lopwr_cb() callback to stop/restart BRS. The
callback is protected by a jump label which is enabled only when AMD
BRS is detected. In all other cases, the callback is never called.

Signed-off-by: Stephane Eranian <eranian@google.com>
[peterz: static_call() and build fixes]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220322221517.2510440-10-eranian@google.com
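As the [peterz] note indicates, the protection ends up being implemented with a
static call (defined NULL by default in the brs.c hunk below and patched by
amd_brs_lopwr_init()) rather than a bare jump label. For context, here is a
hedged sketch of the consumer-side wiring such a callback typically needs; the
inline wrapper and its placement are illustrative assumptions, not part of this
diff:

/*
 * Illustrative sketch (not part of this patch): declare the static
 * call published by brs.c and give callers an always-inline wrapper.
 * static_call_cond() is a no-op while the call target is still NULL,
 * i.e. on CPUs without AMD BRS.
 */
#include <linux/static_call.h>

extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_cond(perf_lopwr_cb)(lopwr_in);
}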
Diffstat (limited to 'arch/x86/events/amd')
-rw-r--r--	arch/x86/events/amd/brs.c	33
-rw-r--r--	arch/x86/events/amd/core.c	4
2 files changed, 37 insertions, 0 deletions
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 40461c3ce714..895c82165d85 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -7,6 +7,7 @@
* Contributed by Stephane Eranian <eranian@google.com>
*/
#include <linux/kernel.h>
+#include <linux/jump_label.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
@@ -329,3 +330,35 @@ void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in)
	if (sched_in)
		amd_brs_poison_buffer();
}
+
+/*
+ * called from ACPI processor_idle.c or acpi_pad.c
+ * with interrupts disabled
+ */
+void perf_amd_brs_lopwr_cb(bool lopwr_in)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	union amd_debug_extn_cfg cfg;
+
+	/*
+	 * On mwait in, we may end up in a non-C0 state.
+	 * We must disable branch sampling to avoid holding the NMI
+	 * for too long. We disable it in hardware, but we
+	 * keep the state in cpuc so we can re-enable it later.
+	 *
+	 * The hardware will deliver the NMI if needed when brsmen is cleared.
+	 */
+	if (cpuc->brs_active) {
+		cfg.val = get_debug_extn_cfg();
+		cfg.brsmen = !lopwr_in;
+		set_debug_extn_cfg(cfg.val);
+	}
+}
+
+DEFINE_STATIC_CALL_NULL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
+EXPORT_STATIC_CALL_TRAMP_GPL(perf_lopwr_cb);
+
+void __init amd_brs_lopwr_init(void)
+{
+ static_call_update(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
+}
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f7bce8364fe4..8e1e818f8195 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
+#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -1225,6 +1226,9 @@ static int __init amd_core_pmu_init(void)
		/*
		 * put_event_constraints callback same as Fam17h, set above
		 */
+
+		/* branch sampling must be stopped when entering low power */
+		amd_brs_lopwr_init();
	}

	x86_pmu.attr_update = amd_attr_update;
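The brs.c comment above notes that the callback is invoked from ACPI
processor_idle.c or acpi_pad.c with interrupts disabled. A hedged usage sketch
of that idle-side bracketing follows; the function name is hypothetical, and
only the lopwr_in true/false convention comes from this patch:

/* Hypothetical idle-path caller (name is illustrative only). */
static void example_enter_low_power(void)
{
	perf_lopwr_cb(true);	/* clear brsmen so a held-up NMI can fire */

	/* ... mwait / C-state entry, interrupts disabled ... */

	perf_lopwr_cb(false);	/* set brsmen again if BRS was active */
}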