author     Linus Torvalds <torvalds@linux-foundation.org>   2016-07-08 09:02:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-07-08 09:02:16 -0700
commit     612807fe28abb0a04a627684fb5d4d23108edb1b (patch)
tree       cbf283d9b8329ea23f009900b2b5a16ead3fa43a /kernel
parent     977dcf0c475eaefa334f6bf6b63d27742d0eade5 (diff)
parent     2c81a6477081966fe80b8c6daa68459bca896774 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Various fixes:

   - 32-bit callgraph bug fix

   - suboptimal event group scheduling bug fix

   - event constraint fixes for Broadwell/Skylake

   - RAPL module name collision fix"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix pmu::filter_match for SW-led groups
  x86/perf/intel/rapl: Fix module name collision with powercap intel-rapl
  perf/x86: Fix 32-bit perf user callgraph collection
  perf/x86/intel: Update event constraints when HT is off
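For context on the first fix in the list ("SW-led groups"): an event group may be created with a software event as its leader and hardware events as siblings, and such groups were being rejected by pmu::filter_match before this change. Below is a minimal user-space sketch of setting up such a group with perf_event_open(); the wrapper function, the choice of a task-clock leader with a cpu-cycles sibling, and the pid/cpu arguments are illustrative assumptions, not taken from this commit.

/* Illustrative only: a software-led group containing a hardware event. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr sw, hw;
	int leader, sibling;

	/* Software event as the group leader */
	memset(&sw, 0, sizeof(sw));
	sw.type = PERF_TYPE_SOFTWARE;
	sw.size = sizeof(sw);
	sw.config = PERF_COUNT_SW_TASK_CLOCK;
	sw.disabled = 1;

	/* Hardware event as a sibling in the same group */
	memset(&hw, 0, sizeof(hw));
	hw.type = PERF_TYPE_HARDWARE;
	hw.size = sizeof(hw);
	hw.config = PERF_COUNT_HW_CPU_CYCLES;

	leader = perf_event_open(&sw, 0, -1, -1, 0);
	sibling = perf_event_open(&hw, 0, -1, leader, 0);

	/* ... PERF_EVENT_IOC_ENABLE on the leader, run workload, read() counts ... */
	(void)leader;
	(void)sibling;
	return 0;
}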
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 85cd41878a74..43d43a2d5811 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
 	return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 	return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+	struct perf_event *child;
+
+	if (!__pmu_filter_match(event))
+		return 0;
+
+	list_for_each_entry(child, &event->sibling_list, group_entry) {
+		if (!__pmu_filter_match(child))
+			return 0;
+	}
+
+	return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
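The hunk ends at the top of event_filter_match(), whose body is not shown on this page. For orientation only, a sketch of that caller as it looked around this release (reconstructed from memory, not taken from this page; treat the cgroup helper as an assumption): pmu_filter_match() is the last of three checks gating whether an event is considered schedulable on the current CPU.

/* Approximate v4.7-era caller, shown for context only: */
static inline int
event_filter_match(struct perf_event *event)
{
	/* CPU affinity, cgroup membership, then the PMU filter above */
	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
	       perf_cgroup_match(event) && pmu_filter_match(event);
}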