author     Cyrill Gorcunov <gorcunov@openvz.org>    2010-08-05 19:09:17 +0400
committer  Ingo Molnar <mingo@elte.hu>              2010-08-08 22:53:50 +0200
commit     1c250d709fdc8aa5bf42d90be99428a01a256a55 (patch)
tree       e71c6d304b12017a034a6ad26468abe296ea5a6c /arch/x86
parent     ef8f34aabf2450a9fb36b2c87fe0ea0b86a38195 (diff)
perf, x86: P4 PMU -- update nmi irq statistics and unmask lvt entry properly
If the last active performance counter has not overflowed at the
moment an NMI is triggered by another counter, the irq statistics
may miss an update. As a more serious consequence, the APIC quirk
may not be triggered, so the APIC LVT entry stays masked.
Tested-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20100805150917.GA6311@lenovo>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 107711bf0ee8..febb12cea795 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -656,6 +656,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int overflow;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
@@ -666,12 +667,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		WARN_ON_ONCE(hwc->idx != idx);
 
 		/* it might be unflagged overflow */
-		handled = p4_pmu_clear_cccr_ovf(hwc);
+		overflow = p4_pmu_clear_cccr_ovf(hwc);
 
 		val = x86_perf_event_update(event);
-		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
+		if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
 			continue;
 
+		handled += overflow;
+
 		/* event overflow for sure */
 		data.period = event->hw.last_period;
 
@@ -687,7 +690,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 		inc_irq_stat(apic_perf_irqs);
 	}
 
-	return handled;
+	return handled > 0;
 }
 
 /*
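
The essence of the fix is the accounting inside the counter loop: before the
patch, handled was overwritten on every pass, so if the last active counter
happened not to have overflowed, the whole NMI ended up reported as unhandled
and the post-loop LVT unmask and apic_perf_irqs update were skipped. The
standalone C sketch below reproduces that pattern outside the kernel; the
counter_overflowed array and both handle_irq_* helpers are made up for
illustration and only stand in for the real per-counter work done by
p4_pmu_handle_irq().

#include <stdio.h>

/*
 * Illustrative sketch only, not kernel code: the array and both helpers
 * below are hypothetical.  Three "counters" are active and only the first
 * one has overflowed when the NMI arrives, which is exactly the situation
 * the patch describes.
 */
static const int counter_overflowed[3] = { 1, 0, 0 };

/* Pre-patch pattern: 'handled' is overwritten on every iteration. */
static int handle_irq_buggy(void)
{
	int idx, handled = 0;

	for (idx = 0; idx < 3; idx++) {
		handled = counter_overflowed[idx];	/* last counter wins */
		if (!handled)
			continue;
		/* ... process the overflowed event ... */
	}

	/*
	 * The last active counter did not overflow, so handled ends up 0:
	 * the LVT unmask and the apic_perf_irqs update that follow the loop
	 * are skipped and the NMI is reported as unhandled.
	 */
	return handled;
}

/* Post-patch pattern: per-counter result accumulated into 'handled'. */
static int handle_irq_fixed(void)
{
	int idx, handled = 0;

	for (idx = 0; idx < 3; idx++) {
		int overflow = counter_overflowed[idx];

		if (!overflow)
			continue;

		handled += overflow;	/* accumulate instead of overwrite */
		/* ... process the overflowed event ... */
	}

	if (handled) {
		/* the real handler unmasks the LVT entry and bumps the
		   apic_perf_irqs statistic exactly once, here */
	}

	return handled > 0;
}

int main(void)
{
	printf("buggy handler returns: %d\n", handle_irq_buggy());	/* 0 */
	printf("fixed handler returns: %d\n", handle_irq_fixed());	/* 1 */
	return 0;
}

Returning handled > 0 also keeps the handler's result a plain
handled/not-handled flag for the NMI path, even though handled now counts
every counter that overflowed.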