author		Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>	2015-09-03 20:07:46 -0700
committer	Ingo Molnar <mingo@kernel.org>	2015-09-13 11:27:25 +0200
commit		01add3eaf1b25e497b14ca210f3bfe5f5dd2b112 (patch)
tree		50f2e21629f3168525f82402e5477fea4ece6d8b /kernel/events
parent		fbbe07011581990ef74dfac06dc8511b1a14badb (diff)
download	linux-01add3eaf1b25e497b14ca210f3bfe5f5dd2b112.tar.bz2
perf/core: Split perf_event_read() and perf_event_count()
perf_event_read() does two things:

 - call the PMU to read/update the counter value, and
 - compute the total count of the event and its children

Not all callers need both. perf_event_reset() for instance needs the first
piece but doesn't need the second. Similarly, when we implement the ability
to read a group of events using the transaction interface, we would need the
two pieces done independently.

Break up perf_event_read() and have it just read/update the counter and have
the callers compute the total count if necessary.

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1441336073-22750-4-git-send-email-sukadev@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
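To make the new calling convention concrete, here is a small illustrative
sketch (not part of the patch; the helper name read_total_count() is
hypothetical): a caller that wants the aggregate value now performs the PMU
read and the summation as two explicit steps, while a caller such as
_perf_event_reset() can stop after the read.

	/*
	 * Illustrative sketch only -- read_total_count() is a hypothetical
	 * helper, not something this patch adds.
	 */
	static u64 read_total_count(struct perf_event *event)
	{
		perf_event_read(event);		/* have the PMU update event->count */
		return perf_event_count(event);	/* event's count plus its children's */
	}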
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	14
1 file changed, 8 insertions, 6 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c80cee82959f..260bf8cfed51 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3275,7 +3275,7 @@ u64 perf_event_read_local(struct perf_event *event)
return val;
}
-static u64 perf_event_read(struct perf_event *event)
+static void perf_event_read(struct perf_event *event)
{
/*
* If event is enabled and currently active on a CPU, update the
@@ -3301,8 +3301,6 @@ static u64 perf_event_read(struct perf_event *event)
update_event_times(event);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
-
- return perf_event_count(event);
}
/*
@@ -3818,14 +3816,18 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
*running = 0;
mutex_lock(&event->child_mutex);
- total += perf_event_read(event);
+
+ perf_event_read(event);
+ total += perf_event_count(event);
+
*enabled += event->total_time_enabled +
atomic64_read(&event->child_total_time_enabled);
*running += event->total_time_running +
atomic64_read(&event->child_total_time_running);
list_for_each_entry(child, &event->child_list, child_list) {
- total += perf_event_read(child);
+ perf_event_read(child);
+ total += perf_event_count(child);
*enabled += child->total_time_enabled;
*running += child->total_time_running;
}
@@ -3985,7 +3987,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
static void _perf_event_reset(struct perf_event *event)
{
- (void)perf_event_read(event);
+ perf_event_read(event);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}