author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-09 21:17:45 +0200
committer	Ingo Molnar <mingo@elte.hu>			2011-05-28 18:01:17 +0200
commit		db24d33e08b88e990991760a44d72006a5dc6102
tree		a36c4aba0e221e5833b15432971e526959a3aff1 /kernel/events
parent		2c29ef0fef8aaff1f91263fc75c749d659da6972
perf: Change and simplify ctx::is_active semantics
Instead of tracking whether a context is active or not, track which event
types of the context are active. By making it a bitmask of
EVENT_PINNED|EVENT_FLEXIBLE we can simplify some of the scheduling
routines, since they can avoid adding events that are already active.
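
To see what the bitmask buys, here is a minimal user-space sketch of the
new semantics; toy_ctx, toy_sched_in() and toy_sched_out() are illustrative
stand-ins for the real context and scheduling routines, and only the
EVENT_* values mirror the kernel's enum event_type_t:

#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

struct toy_ctx {
	int is_active;	/* bitmask of EVENT_PINNED | EVENT_FLEXIBLE, 0 when idle */
};

/* Same shape as ctx_sched_in(): snapshot the old mask, set the new bits,
 * and only touch the classes that were not already active. */
static void toy_sched_in(struct toy_ctx *ctx, int event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;

	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		printf("sched in: pinned groups\n");
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		printf("sched in: flexible groups\n");
}

/* Same shape as ctx_sched_out(): clear the requested bits and only walk
 * the classes that were actually active. */
static void toy_sched_out(struct toy_ctx *ctx, int event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;

	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		printf("sched out: pinned groups\n");
	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		printf("sched out: flexible groups\n");
}

int main(void)
{
	struct toy_ctx ctx = { 0 };

	toy_sched_in(&ctx, EVENT_PINNED);	/* pinned go in */
	toy_sched_in(&ctx, EVENT_ALL);		/* pinned already set: only flexible added */
	toy_sched_out(&ctx, EVENT_FLEXIBLE);	/* flexible out, pinned stays */
	toy_sched_out(&ctx, EVENT_ALL);		/* only pinned is left to take out */
	return 0;
}

The second sched-in with EVENT_ALL only adds the flexible groups, because
the pinned bit is already set in the is_active snapshot; that is exactly
the redundant work the checks in the patch below skip.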
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.930282378@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
 kernel/events/core.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 60b333ae0bcf..71c2d44ff95d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1763,8 +1763,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			  enum event_type_t event_type)
 {
 	struct perf_event *event;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 0;
+	ctx->is_active &= ~event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -1774,12 +1775,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		return;
 
 	perf_pmu_disable(ctx->pmu);
-	if (event_type & EVENT_PINNED) {
+	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
-	if (event_type & EVENT_FLEXIBLE) {
+	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
@@ -2058,8 +2059,9 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct task_struct *task)
 {
 	u64 now;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 1;
+	ctx->is_active |= event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2070,11 +2072,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	if (event_type & EVENT_PINNED)
+	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
-	if (event_type & EVENT_FLEXIBLE)
+	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
 		ctx_flexible_sched_in(ctx, cpuctx);
 }
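
Note the asymmetry the snapshot enables: ctx_sched_out() walks a class only
when it is both currently active (is_active) and requested (event_type),
while ctx_sched_in() negates the test, !(is_active & ...), so a class that
is already on the PMU is skipped even when the caller asks for EVENT_ALL.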