From e2da97d328d4951d25f6634eda7213f7257417b6 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose <suzuki.poulose@arm.com>
Date: Tue, 10 Jul 2018 09:58:00 +0100
Subject: arm_pmu: Add support for 64bit event counters

Each PMU has a set of 32bit event counters. In some special
cases, however, events can be counted using counters which are
effectively 64bit wide.

e.g., Arm v8 PMUv3 has a 64bit cycle counter which can count
only CPU cycles. Also, the PMU can chain pairs of event counters
to effectively count as a 64bit counter.

Add support for tracking events that use 64bit counters. This
only affects the periods set for each counter in the core driver.

Cc: Will Deacon <will.deacon@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 drivers/perf/arm_pmu.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ddc00da5373..8cad6b535a2c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,9 +28,12 @@
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 
-static inline u64 arm_pmu_max_period(void)
+static inline u64 arm_pmu_event_max_period(struct perf_event *event)
 {
-	return (1ULL << 32) - 1;
+	if (event->hw.flags & ARMPMU_EVT_64BIT)
+		return GENMASK_ULL(63, 0);
+	else
+		return GENMASK_ULL(31, 0);
 }
 
 static int
@@ -122,7 +125,7 @@ int armpmu_event_set_period(struct perf_event *event)
 	u64 max_period;
 	int ret = 0;
 
-	max_period = arm_pmu_max_period();
+	max_period = arm_pmu_event_max_period(event);
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);
@@ -148,7 +151,7 @@ int armpmu_event_set_period(struct perf_event *event)
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+	armpmu->write_counter(event, (u64)(-left) & max_period);
 
 	perf_event_update_userpage(event);
 
@@ -160,7 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
-	u64 max_period = arm_pmu_max_period();
+	u64 max_period = arm_pmu_event_max_period(event);
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -368,6 +371,7 @@ __hw_perf_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping;
 
+	hwc->flags = 0;
 	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
@@ -410,7 +414,7 @@ __hw_perf_event_init(struct perf_event *event)
 	 * is far less likely to overtake the previous one unless
 	 * you have some serious IRQ latency issues.
 	 */
-	hwc->sample_period  = arm_pmu_max_period() >> 1;
+	hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
 	hwc->last_period    = hwc->sample_period;
 	local64_set(&hwc->period_left, hwc->sample_period);
 }
--
cgit v1.2.3
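
Note for back-end authors: ARMPMU_EVT_64BIT is intended to be set from the
driver's map_event() callback; the hwc->flags = 0 added above guarantees the
flag starts clear on every event init, so whatever map_event() sets is
authoritative. Below is a minimal, hypothetical sketch of a driver opting its
cycle counter into 64bit arithmetic. The example_* and EXAMPLE_* names are
invented for illustration and do not exist in the tree:

/* Hypothetical back-end sketch; example_* and EXAMPLE_* are invented. */
static int example_pmu_map_event(struct perf_event *event)
{
	/* Decode the perf event into a hardware event number (invented helper). */
	int hw_event = example_pmu_decode(&event->attr);

	if (hw_event < 0)
		return hw_event;

	/*
	 * Mark the event as backed by a 64bit counter so that the core's
	 * arm_pmu_event_max_period() stops clamping periods and counter
	 * writes to 32 bits. The core zeroes hwc->flags before calling
	 * map_event(), so stale flags cannot leak between inits.
	 */
	if (hw_event == EXAMPLE_PMU_EVT_CPU_CYCLES)
		event->hw.flags |= ARMPMU_EVT_64BIT;

	return hw_event;
}

With the flag set, arm_pmu_event_max_period() returns GENMASK_ULL(63, 0), so
the default sample period becomes half the 64bit range and write_counter() is
no longer masked down to 32 bits; this is also why the hard-coded 0xffffffff
mask in armpmu_event_set_period() had to become a mask on max_period.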