author:    Peter Zijlstra <peterz@infradead.org>  2015-08-06 17:26:58 +0200
committer: Ingo Molnar <mingo@kernel.org>         2015-08-12 11:43:20 +0200
commit:    19b3340cf58d14decf2898fc795cc2b1fa49e79e (patch)
tree:      527fff56ba3f0c79f29ed0787bc5e61b3d5251d9 /arch
parent:    3d325bf0da91ca5d22f2525a72308dafd4fc0977 (diff)
perf/x86: Fix MSR PMU driver
Currently we only update the sysfs event files per available MSR; we don't actually disallow creating unlisted events.

Rework things such that the detection, sysfs listing and event creation are better coordinated.

Sadly it appears it's impossible to probe R/O MSRs under virt. This means we have to do the full model table to avoid listing all MSRs all the time.

Tested-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
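In code terms, the coordination condenses to the probe loop the patch adds to msr_init() below; the snippet here is just that loop pulled out for clarity (msr_probe_all is a hypothetical name, not in the patch): each msr[] entry carries its sysfs attribute and a model-test hook, and an event survives only if the test passes and a probing read of the MSR does not fault.

	/* Paraphrase of the probe loop added to msr_init() below. */
	static void __init msr_probe_all(void)	/* hypothetical name */
	{
		int i;
		u64 val;

		for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
			/* rdmsrl_safe() returns non-zero if the read faults. */
			if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
				msr[i].attr = NULL;
		}
	}

msr_event_init() then rejects any event whose ->attr was cleared, so unlisted events can no longer be created. The catch is the virt case noted above: there, a read of a missing R/O MSR may succeed instead of faulting, so test_intel() still needs the full model table.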
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_msr.c | 168
1 file changed, 84 insertions(+), 84 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index af216e9223e8..b0dd2e8a6d12 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -10,17 +10,63 @@ enum perf_msr_id {
 	PERF_MSR_EVENT_MAX,
 };
 
+bool test_aperfmperf(int idx)
+{
+	return boot_cpu_has(X86_FEATURE_APERFMPERF);
+}
+
+bool test_intel(int idx)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86 != 6)
+		return false;
+
+	switch (boot_cpu_data.x86_model) {
+	case 30: /* 45nm Nehalem */
+	case 26: /* 45nm Nehalem-EP */
+	case 46: /* 45nm Nehalem-EX */
+
+	case 37: /* 32nm Westmere */
+	case 44: /* 32nm Westmere-EP */
+	case 47: /* 32nm Westmere-EX */
+
+	case 42: /* 32nm SandyBridge */
+	case 45: /* 32nm SandyBridge-E/EN/EP */
+
+	case 58: /* 22nm IvyBridge */
+	case 62: /* 22nm IvyBridge-EP/EX */
+
+	case 60: /* 22nm Haswell Core */
+	case 63: /* 22nm Haswell Server */
+	case 69: /* 22nm Haswell ULT */
+	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+
+	case 61: /* 14nm Broadwell Core-M */
+	case 86: /* 14nm Broadwell Xeon D */
+	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+	case 79: /* 14nm Broadwell Server */
+
+	case 55: /* 22nm Atom "Silvermont" */
+	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+	case 76: /* 14nm Atom "Airmont" */
+		if (idx == PERF_MSR_SMI)
+			return true;
+		break;
+
+	case 78: /* 14nm Skylake Mobile */
+	case 94: /* 14nm Skylake Desktop */
+		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+			return true;
+		break;
+	}
+
+	return false;
+}
+
 struct perf_msr {
-	int	id;
 	u64	msr;
-};
-
-static struct perf_msr msr[] = {
-	{ PERF_MSR_TSC, 0 },
-	{ PERF_MSR_APERF, MSR_IA32_APERF },
-	{ PERF_MSR_MPERF, MSR_IA32_MPERF },
-	{ PERF_MSR_PPERF, MSR_PPERF },
-	{ PERF_MSR_SMI, MSR_SMI_COUNT },
+	struct perf_pmu_events_attr *attr;
+	bool (*test)(int idx);
 };
 
 PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
@@ -29,8 +75,16 @@ PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
 PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
 PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
 
+static struct perf_msr msr[] = {
+	[PERF_MSR_TSC]   = { 0,              &evattr_tsc,   NULL,            },
+	[PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
+	[PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
+	[PERF_MSR_PPERF] = { MSR_PPERF,      &evattr_pperf, test_intel,      },
+	[PERF_MSR_SMI]   = { MSR_SMI_COUNT,  &evattr_smi,   test_intel,      },
+};
+
 static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
-	&evattr_tsc.attr.attr,
+	NULL,
 };
 
 static struct attribute_group events_attr_group = {
@@ -74,6 +128,9 @@ static int msr_event_init(struct perf_event *event)
 	    event->attr.sample_period) /* no sampling */
 		return -EINVAL;
 
+	if (!msr[cfg].attr)
+		return -EINVAL;
+
 	event->hw.idx = -1;
 	event->hw.event_base = msr[cfg].msr;
 	event->hw.config = cfg;
@@ -151,89 +208,32 @@ static struct pmu pmu_msr = {
 	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
 };
 
-static int __init intel_msr_init(int idx)
-{
-	if (boot_cpu_data.x86 != 6)
-		return 0;
-
-	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
-
-	case 37: /* 32nm Westmere */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
-
-	case 42: /* 32nm SandyBridge */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
-
-	case 58: /* 22nm IvyBridge */
-	case 62: /* 22nm IvyBridge-EP/EX */
-
-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-		events_attrs[idx++] = &evattr_pperf.attr.attr;
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-
-	case 55: /* 22nm Atom "Silvermont" */
-	case 76: /* 14nm Atom "Airmont" */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-		events_attrs[idx++] = &evattr_smi.attr.attr;
-		break;
-	}
-
-	events_attrs[idx] = NULL;
-
-	return 0;
-}
-
-static int __init amd_msr_init(int idx)
-{
-	return 0;
-}
-
 static int __init msr_init(void)
 {
-	int err;
-	int idx = 1;
+	int i, j = 0;
 
-	if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
-		events_attrs[idx++] = &evattr_aperf.attr.attr;
-		events_attrs[idx++] = &evattr_mperf.attr.attr;
-		events_attrs[idx] = NULL;
+	if (!boot_cpu_has(X86_FEATURE_TSC)) {
+		pr_cont("no MSR PMU driver.\n");
+		return 0;
 	}
 
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		err = intel_msr_init(idx);
-		break;
-
-	case X86_VENDOR_AMD:
-		err = amd_msr_init(idx);
-		break;
+	/* Probe the MSRs. */
+	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
+		u64 val;
 
-	default:
-		err = -ENOTSUPP;
+		/*
+		 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
+		 */
+		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
+			msr[i].attr = NULL;
 	}
 
-	if (err != 0) {
-		pr_cont("no msr PMU driver.\n");
-		return 0;
+	/* List remaining MSRs in the sysfs attrs. */
+	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
+		if (msr[i].attr)
+			events_attrs[j++] = &msr[i].attr->attr.attr;
 	}
+	events_attrs[j] = NULL;
 
 	perf_pmu_register(&pmu_msr, "msr", -1);
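For reference, a minimal userspace sketch (not part of the commit): the sysfs path is the standard location for dynamic PMUs, the 0x04 config value comes from the evattr_smi "event=0x04" string above, and the rest is ordinary perf_event_open() boilerplate. It shows both the intended use of a surviving event and the new behaviour for unlisted events, which now fail at creation with -EINVAL via the msr_event_init() check above.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int type, fd;

		/* The msr PMU gets a dynamic type id, exported via sysfs. */
		FILE *f = fopen("/sys/bus/event_source/devices/msr/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = 0x04;	/* smi, per the "event=0x04" attr */

		/*
		 * Count system-wide on CPU 0; needs sufficient privilege.
		 * On a model where SMI is unlisted this now fails (EINVAL).
		 */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("smi count: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}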