Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 23
-rw-r--r--  arch/arm64/kernel/perf_event.c | 23
2 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc3310..817ce3365e20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-	for (; caps->matches; caps++)
-		if (caps->enable && cpus_have_cap(caps->capability))
+	for (; caps->matches; caps++) {
+		unsigned int num = caps->capability;
+
+		if (!cpus_have_cap(num))
+			continue;
+
+		/* Ensure cpus_have_const_cap(num) works */
+		static_branch_enable(&cpu_hwcap_keys[num]);
+
+		if (caps->enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * we return.
 			 */
 			stop_machine(caps->enable, NULL, cpu_online_mask);
+		}
+	}
 }
 
 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
 	enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+	static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
 	enable_errata_workarounds();
+	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0())
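
The ordering in setup_cpu_features() is the point of this patch: every cpu_hwcap_keys[] entry is enabled before arm64_const_caps_ready is, so any reader that observes the readiness key set can also trust the per-capability keys. Below is a minimal, self-contained userspace sketch of that two-phase publish pattern; plain booleans stand in for the kernel's static keys, and every name here is illustrative rather than the kernel's API:

/*
 * Userspace model of the two-phase enable above: per-capability
 * flags are published first, then a single readiness flag, so a
 * reader that sees "ready" also sees every capability flag.
 * All names are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CAPS 4

static bool cpu_hwcap_keys[NR_CAPS];	/* models cpu_hwcap_keys[] */
static bool const_caps_ready;		/* models arm64_const_caps_ready */
static unsigned long cpu_hwcaps;	/* models the detected-caps bitmap */

static bool cpus_have_cap(unsigned int num)
{
	return cpu_hwcaps & (1UL << num);
}

/* Fast path is only trusted once boot has flipped the readiness flag. */
static bool cpus_have_const_cap(unsigned int num)
{
	if (const_caps_ready)
		return cpu_hwcap_keys[num];
	return cpus_have_cap(num);
}

int main(void)
{
	unsigned int num;

	cpu_hwcaps = 0x5;			/* "detect" caps 0 and 2 */

	/* enable_cpu_capabilities(): enable one key per detected cap */
	for (num = 0; num < NR_CAPS; num++)
		if (cpus_have_cap(num))
			cpu_hwcap_keys[num] = true;

	const_caps_ready = true;		/* mark_const_caps_ready() */

	for (num = 0; num < NR_CAPS; num++)
		printf("cap %u: %d\n", num, cpus_have_const_cap(num));
	return 0;
}

The same argument gives the kernel its guarantee: a consumer can branch on the readiness key alone, because boot enables it strictly after all the per-capability keys.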
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e..83a1b1ad189f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 	if (attr->exclude_idle)
 		return -EPERM;
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
-	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_PMU_EXCLUDE_EL1;
-	if (!attr->exclude_hv)
-		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
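
The rewritten filter logic reduces to a pure function of the perf exclude_* attributes. Below is a self-contained userspace sketch of that decision table; the ARMV8_PMU_* bit positions mirror the PMEVTYPERn_EL0 P/U/NSH bits as I understand them from the kernel's ARMv8 PMU code, so treat the exact values as assumptions:

/*
 * Sketch of the filter selection above as a pure function of the
 * exclude_* attributes. Bit positions are assumed to match the
 * PMEVTYPERn_EL0 P/U/NSH bits; treat them as illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define ARMV8_PMU_EXCLUDE_EL1	(1U << 31)	/* P: don't count EL1 */
#define ARMV8_PMU_EXCLUDE_EL0	(1U << 30)	/* U: don't count EL0 */
#define ARMV8_PMU_INCLUDE_EL2	(1U << 27)	/* NSH: also count EL2 */

static unsigned int filter_bits(bool in_hyp, bool excl_kernel,
				bool excl_user, bool excl_hv)
{
	unsigned int config_base = 0;

	if (in_hyp) {
		/*
		 * VHE: the kernel runs at EL2, so "kernel" means EL2
		 * and exclude_hv is ignored (we are the hypervisor).
		 */
		if (!excl_kernel)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	} else {
		if (excl_kernel)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (!excl_hv)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}
	if (excl_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	return config_base;
}

int main(void)
{
	/* VHE host with exclude_kernel set: EL2 is not counted either. */
	printf("vhe, exclude_kernel: %#x\n",
	       filter_bits(true, true, false, false));
	/* non-VHE with defaults: EL2 included, EL0/EL1 counted. */
	printf("non-vhe, defaults:   %#x\n",
	       filter_bits(false, false, false, false));
	return 0;
}

Running the sketch prints 0 for the VHE exclude_kernel case, matching the intent of the patch: on a VHE kernel, excluding the kernel must also exclude EL2, since they are the same thing.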