path: root/arch/x86/kernel/cpu
author		Thomas Gleixner <tglx@linutronix.de>	2022-04-15 21:19:59 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2022-04-27 20:22:19 +0200
commit		bb6e89df9028b2fab0ce6ac71cd9ef25b6ada32d (patch)
tree		5440e10c14c8d2f32939a982f348ae83564b7b19 /arch/x86/kernel/cpu
parent		73a5fa7d51366a549a9f2e3ee875ae51aa0b5580 (diff)
download	linux-bb6e89df9028b2fab0ce6ac71cd9ef25b6ada32d.tar.bz2
x86/aperfmperf: Make parts of the frequency invariance code unconditional
The frequency invariance support is currently limited to x86/64 and SMP, which is the vast majority of machines.

arch_scale_freq_tick() is called every tick on all CPUs and reads the APERF and MPERF MSRs. The CPU frequency getter functions do the same via dedicated IPIs.

While it could be argued that on systems where frequency invariance support is disabled (32bit, !SMP) the per-tick read of the APERF and MPERF MSRs can be avoided, it does not make sense to keep the extra code and the resulting runtime issues of mass IPIs around.

As a first step, split out the non-frequency-invariance-specific initialization code and the MSR read portion of arch_scale_freq_tick(). The rest of the code is still conditional and guarded with a static key.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20220415161206.761988704@linutronix.de
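
For readers skimming the patch, the following minimal userspace C sketch illustrates the structure the change moves toward: the per-tick counter snapshot is taken unconditionally, and only the frequency-scale computation sits behind a cheap enable check (a plain boolean standing in for the kernel's static key). All names here (sample_tick(), read_counters(), freq_scale_enabled) and the simulated counter values are hypothetical illustrations, not code from the kernel tree.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-CPU APERF/MPERF snapshot (struct aperfmperf). */
struct sample {
        uint64_t aperf;
        uint64_t mperf;
};

static struct sample last;              /* previous snapshot */
static bool freq_scale_enabled = true;  /* stands in for the static key */

/* Simulated counter reads; in the kernel these are rdmsrl(MSR_IA32_APERF/MPERF). */
static void read_counters(uint64_t *aperf, uint64_t *mperf)
{
        static uint64_t a, m;

        a += 180;       /* pretend the core ran at ~1.8x the reference clock */
        m += 100;
        *aperf = a;
        *mperf = m;
}

/* Conditional part: compute the frequency scale, guarded by the enable flag. */
static void scale_freq_tick(uint64_t acnt, uint64_t mcnt)
{
        if (!freq_scale_enabled || !mcnt)
                return;

        /* Ratio scaled by 1024, loosely mirroring SCHED_CAPACITY_SCALE. */
        printf("freq_scale = %llu\n",
               (unsigned long long)((acnt << 10) / mcnt));
}

/* Unconditional part: always snapshot the counters, then hand the deltas
 * to the guarded computation (mirrors arch_scale_freq_tick() after the patch). */
static void sample_tick(void)
{
        uint64_t aperf, mperf;

        read_counters(&aperf, &mperf);
        scale_freq_tick(aperf - last.aperf, mperf - last.mperf);
        last.aperf = aperf;
        last.mperf = mperf;
}

int main(void)
{
        read_counters(&last.aperf, &last.mperf);        /* init_counter_refs() analogue */
        for (int i = 0; i < 3; i++)
                sample_tick();
        return 0;
}

This mirrors how the patch moves the !arch_scale_freq_invariant() check out of arch_scale_freq_tick() and into scale_freq_tick(), leaving only the X86_FEATURE_APERFMPERF check on the unconditional sampling path.
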
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/aperfmperf.c | 63
1 file changed, 38 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 6220503af26a..df528a4f6de3 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -17,6 +17,7 @@
#include <linux/smp.h>
#include <linux/syscore_ops.h>
+#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
@@ -164,6 +165,17 @@ unsigned int arch_freq_get_on_cpu(int cpu)
return per_cpu(samples.khz, cpu);
}
+static void init_counter_refs(void)
+{
+ u64 aperf, mperf;
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+
+ this_cpu_write(cpu_samples.aperf, aperf);
+ this_cpu_write(cpu_samples.mperf, mperf);
+}
+
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
/*
* APERF/MPERF frequency ratio computation.
@@ -405,17 +417,6 @@ out:
return true;
}
-static void init_counter_refs(void)
-{
- u64 aperf, mperf;
-
- rdmsrl(MSR_IA32_APERF, aperf);
- rdmsrl(MSR_IA32_MPERF, mperf);
-
- this_cpu_write(cpu_samples.aperf, aperf);
- this_cpu_write(cpu_samples.mperf, mperf);
-}
-
#ifdef CONFIG_PM_SLEEP
static struct syscore_ops freq_invariance_syscore_ops = {
.resume = init_counter_refs,
@@ -447,13 +448,8 @@ void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled)
freq_invariance_enable();
}
-void __init bp_init_freq_invariance(void)
+static void __init bp_init_freq_invariance(void)
{
- if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
- return;
-
- init_counter_refs();
-
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
@@ -461,12 +457,6 @@ void __init bp_init_freq_invariance(void)
freq_invariance_enable();
}
-void ap_init_freq_invariance(void)
-{
- if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
- init_counter_refs();
-}
-
static void disable_freq_invariance_workfn(struct work_struct *work)
{
static_branch_disable(&arch_scale_freq_key);
@@ -481,6 +471,9 @@ static void scale_freq_tick(u64 acnt, u64 mcnt)
{
u64 freq_scale;
+ if (!arch_scale_freq_invariant())
+ return;
+
if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
goto error;
@@ -501,13 +494,17 @@ error:
pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
schedule_work(&disable_freq_invariance_work);
}
+#else
+static inline void bp_init_freq_invariance(void) { }
+static inline void scale_freq_tick(u64 acnt, u64 mcnt) { }
+#endif /* CONFIG_X86_64 && CONFIG_SMP */
void arch_scale_freq_tick(void)
{
struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
u64 acnt, mcnt, aperf, mperf;
- if (!arch_scale_freq_invariant())
+ if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
return;
rdmsrl(MSR_IA32_APERF, aperf);
@@ -520,4 +517,20 @@ void arch_scale_freq_tick(void)
scale_freq_tick(acnt, mcnt);
}
-#endif /* CONFIG_X86_64 && CONFIG_SMP */
+
+static int __init bp_init_aperfmperf(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+ return 0;
+
+ init_counter_refs();
+ bp_init_freq_invariance();
+ return 0;
+}
+early_initcall(bp_init_aperfmperf);
+
+void ap_init_aperfmperf(void)
+{
+ if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
+ init_counter_refs();
+}