author     Robert Richter <robert.richter@amd.com>  2010-10-11 19:26:50 +0200
committer  Robert Richter <robert.richter@amd.com>  2010-10-11 19:26:50 +0200
commit     ad0f7cfaa85fc033523a09ab1f3dd6b8ded3dff5 (patch)
tree       2565121e4b9945d953e02c77a2e53065b3789aa4 /kernel/perf_event.c
parent     86c8c04792f152c5469023885510140dd34817bc (diff)
parent     c7a27aa4652c63172489a73f3961455650a79a7f (diff)
Merge branch 'oprofile/urgent' (early part) into oprofile/perf
Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e2534691db0d..fc512684423f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -407,11 +407,31 @@ static void perf_group_detach(struct perf_event *event)
 	}
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+	return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 delta;
+	/*
+	 * An event which could not be activated because of
+	 * filter mismatch still needs to have its timings
+	 * maintained, otherwise bogus information is returned
+	 * via read() for time_enabled, time_running:
+	 */
+	if (event->state == PERF_EVENT_STATE_INACTIVE
+	    && !event_filter_match(event)) {
+		delta = ctx->time - event->tstamp_stopped;
+		event->tstamp_running += delta;
+		event->tstamp_stopped = ctx->time;
+	}
+
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
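
The bookkeeping added above can be read in isolation: when an event stays INACTIVE only because it is bound to a different CPU, both timestamps are pushed forward across that interval, so a later read() deriving time_enabled/time_running does not count time the event was never eligible to run. A minimal user-space sketch of that arithmetic follows; the struct and function names are hypothetical stand-ins, not the kernel's.

#include <stdio.h>

typedef unsigned long long u64;

/* hypothetical stand-in for the two perf_event timestamps touched above */
struct sample_event {
	u64 tstamp_running;	/* base used to derive the running time */
	u64 tstamp_stopped;	/* context time when the event last stopped */
};

/*
 * Mirror of the new code path: skip both timestamps across the interval
 * during which the event was inactive due to a CPU-filter mismatch, so
 * that interval is excluded from the derived running time.
 */
static void account_filtered(struct sample_event *e, u64 ctx_time)
{
	u64 delta = ctx_time - e->tstamp_stopped;

	e->tstamp_running += delta;
	e->tstamp_stopped = ctx_time;
}

int main(void)
{
	struct sample_event e = { .tstamp_running = 0, .tstamp_stopped = 100 };

	account_filtered(&e, 250);	/* context clock advanced 100 -> 250 */
	printf("tstamp_running=%llu tstamp_stopped=%llu\n",
	       e.tstamp_running, e.tstamp_stopped);	/* 150 and 250 */
	return 0;
}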
@@ -437,9 +457,7 @@ group_sched_out(struct perf_event *group_event,
 		struct perf_event_context *ctx)
 {
 	struct perf_event *event;
-
-	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+	int state = group_event->state;
 
 	event_sched_out(group_event, cpuctx, ctx);
@@ -449,7 +467,7 @@ group_sched_out(struct perf_event *group_event,
 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
 		event_sched_out(event, cpuctx, ctx);
 
-	if (group_event->attr.exclusive)
+	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
 		cpuctx->exclusive = 0;
 }
@@ -5748,15 +5766,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
+	case CPU_DOWN_FAILED:
 		perf_event_init_cpu(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 		perf_event_exit_cpu(cpu);
 		break;
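
The notifier hunk relies on every CPU_*_FROZEN action being its plain counterpart with the CPU_TASKS_FROZEN bit set, so masking that bit off lets one case label cover both variants; the hunk also wires the CPU_DOWN_FAILED and CPU_UP_CANCELED rollback actions to the matching init/exit paths. A self-contained sketch of the masking idiom follows; the constant values are illustrative, not copied from <linux/cpu.h>.

#include <stdio.h>

/* illustrative values; the kernel defines these in <linux/cpu.h> */
#define CPU_UP_PREPARE		0x0003
#define CPU_TASKS_FROZEN	0x0010
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)

static const char *dispatch(unsigned long action)
{
	/* strip the frozen bit: one label handles both variants */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return "init cpu";
	default:
		return "ignored";
	}
}

int main(void)
{
	printf("%s\n", dispatch(CPU_UP_PREPARE));		/* init cpu */
	printf("%s\n", dispatch(CPU_UP_PREPARE_FROZEN));	/* init cpu */
	return 0;
}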