author		Will Deacon <will.deacon@arm.com>	2018-07-27 14:39:04 +0100
committer	Will Deacon <will.deacon@arm.com>	2018-07-27 14:39:04 +0100
commit		ba70ffa7d20d771ae47a1597799da84980aafe15 (patch)
tree		a25f393d7c97683673f12396369a8a6f95e136dc /drivers/perf
parent		c5157101e7793b42a56e07368c7f4cb73fb58008 (diff)
parent		809092dc3e606f3508b53baa624b27bfff8f0e7f (diff)
Merge branch 'for-next/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into aarch64/for-next/core
Pull in arm perf updates, including support for 64-bit (chained) event
counters and some non-critical fixes for some of the system PMU drivers.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/perf')
-rw-r--r--	drivers/perf/arm-cci.c				38
-rw-r--r--	drivers/perf/arm-ccn.c				14
-rw-r--r--	drivers/perf/arm_pmu.c				38
-rw-r--r--	drivers/perf/arm_pmu_platform.c			 2
-rw-r--r--	drivers/perf/hisilicon/hisi_uncore_pmu.c	12
5 files changed, 59 insertions(+), 45 deletions(-)
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 0d09d8e669cd..1bfeb160c5b1 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -53,6 +53,16 @@ enum {
 	CCI_IF_MAX,
 };
 
+#define NUM_HW_CNTRS_CII_4XX	4
+#define NUM_HW_CNTRS_CII_5XX	8
+#define NUM_HW_CNTRS_MAX	NUM_HW_CNTRS_CII_5XX
+
+#define FIXED_HW_CNTRS_CII_4XX	1
+#define FIXED_HW_CNTRS_CII_5XX	0
+#define FIXED_HW_CNTRS_MAX	FIXED_HW_CNTRS_CII_4XX
+
+#define HW_CNTRS_MAX		(NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
+
 struct event_range {
 	u32 min;
 	u32 max;
@@ -633,8 +643,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
 {
 	int i;
 	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
-
-	DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+	DECLARE_BITMAP(mask, HW_CNTRS_MAX);
 
 	bitmap_zero(mask, cci_pmu->num_cntrs);
 	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
@@ -940,7 +949,7 @@ static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
 {
 	int i;
-	DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
 
 	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
 	pmu_save_counters(cci_pmu, saved_mask);
@@ -1245,7 +1254,7 @@ static int validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
-	unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
+	unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
 	struct cci_pmu_hw_events fake_pmu = {
 		/*
 		 * Initialise the fake PMU. We only need to populate the
@@ -1403,6 +1412,11 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
 	char *name = model->name;
 	u32 num_cntrs;
 
+	if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
+		return -EINVAL;
+	if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
+		return -EINVAL;
+
 	pmu_event_attr_group.attrs = model->event_attrs;
 	pmu_format_attr_group.attrs = model->format_attrs;
 
@@ -1455,8 +1469,8 @@ static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
 #ifdef CONFIG_ARM_CCI400_PMU
 	[CCI400_R0] = {
 		.name = "CCI_400",
-		.fixed_hw_cntrs = 1,	/* Cycle counter */
-		.num_hw_cntrs = 4,
+		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
 		.cntr_size = SZ_4K,
 		.format_attrs = cci400_pmu_format_attrs,
 		.event_attrs = cci400_r0_pmu_event_attrs,
@@ -1475,8 +1489,8 @@
 	},
 	[CCI400_R1] = {
 		.name = "CCI_400_r1",
-		.fixed_hw_cntrs = 1,	/* Cycle counter */
-		.num_hw_cntrs = 4,
+		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */
+		.num_hw_cntrs = NUM_HW_CNTRS_CII_4XX,
 		.cntr_size = SZ_4K,
 		.format_attrs = cci400_pmu_format_attrs,
 		.event_attrs = cci400_r1_pmu_event_attrs,
@@ -1497,8 +1511,8 @@
 #ifdef CONFIG_ARM_CCI5xx_PMU
 	[CCI500_R0] = {
 		.name = "CCI_500",
-		.fixed_hw_cntrs = 0,
-		.num_hw_cntrs = 8,
+		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
 		.cntr_size = SZ_64K,
 		.format_attrs = cci5xx_pmu_format_attrs,
 		.event_attrs = cci5xx_pmu_event_attrs,
@@ -1521,8 +1535,8 @@
 	},
 	[CCI550_R0] = {
 		.name = "CCI_550",
-		.fixed_hw_cntrs = 0,
-		.num_hw_cntrs = 8,
+		.fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX,
+		.num_hw_cntrs = NUM_HW_CNTRS_CII_5XX,
 		.cntr_size = SZ_64K,
 		.format_attrs = cci5xx_pmu_format_attrs,
 		.event_attrs = cci5xx_pmu_event_attrs,
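The arm-cci.c change above replaces on-stack bitmaps sized at runtime (variable-length arrays) with bitmaps sized by the compile-time maximum HW_CNTRS_MAX, while bitmap_zero() and the iteration still operate on the runtime cci_pmu->num_cntrs bits. A minimal sketch of the same pattern, with hypothetical names (DEMO_CNTRS_MAX, demo_count_active()):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define DEMO_CNTRS_MAX	9	/* compile-time upper bound, like HW_CNTRS_MAX */

/* hypothetical helper: copy the active counters and count them */
static int demo_count_active(const unsigned long *used_mask, int num_cntrs)
{
	int i;
	DECLARE_BITMAP(mask, DEMO_CNTRS_MAX);	/* fixed size, no VLA on the stack */

	bitmap_zero(mask, num_cntrs);		/* only num_cntrs bits are live */
	for_each_set_bit(i, used_mask, num_cntrs)
		set_bit(i, mask);

	return bitmap_weight(mask, num_cntrs);
}

The trade is stack safety for a small over-allocation: the array is always HW_CNTRS_MAX long, and correctness only requires num_cntrs <= HW_CNTRS_MAX, which the new WARN_ON() checks in cci_pmu_init() enforce.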
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index b416ee18e6bb..4b15c36f4631 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1485,17 +1485,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, ccn);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-
-	if (!devm_request_mem_region(ccn->dev, res->start,
-			resource_size(res), pdev->name))
-		return -EBUSY;
-
-	ccn->base = devm_ioremap(ccn->dev, res->start,
-				resource_size(res));
-	if (!ccn->base)
-		return -EFAULT;
+	ccn->base = devm_ioremap_resource(ccn->dev, res);
+	if (IS_ERR(ccn->base))
+		return PTR_ERR(ccn->base);
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res)
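The arm-ccn.c hunk is a straight conversion to devm_ioremap_resource(), which folds the NULL-resource check, devm_request_mem_region() and devm_ioremap() into one call and returns an ERR_PTR-encoded error instead of the driver inventing its own (-EINVAL/-EBUSY/-EFAULT). A sketch of the idiom in a hypothetical probe function (demo_probe()):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* hypothetical probe showing the devm_ioremap_resource() idiom */
static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* checks res for NULL, requests the region, then maps it */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* e.g. -EINVAL or -EBUSY from the helper */

	return 0;
}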
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index a6347d487635..7f01f6f60b87 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,6 +28,14 @@
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 
+static inline u64 arm_pmu_event_max_period(struct perf_event *event)
+{
+	if (event->hw.flags & ARMPMU_EVT_64BIT)
+		return GENMASK_ULL(63, 0);
+	else
+		return GENMASK_ULL(31, 0);
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +122,10 @@ int armpmu_event_set_period(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
+	u64 max_period;
 	int ret = 0;
 
+	max_period = arm_pmu_event_max_period(event);
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);
@@ -136,12 +146,12 @@ int armpmu_event_set_period(struct perf_event *event)
 	 * effect we are reducing max_period to account for
 	 * interrupt latency (and we are being very conservative).
 	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
+	if (left > (max_period >> 1))
+		left = (max_period >> 1);
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
-	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+	armpmu->write_counter(event, (u64)(-left) & max_period);
 
 	perf_event_update_userpage(event);
 
@@ -153,6 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 delta, prev_raw_count, new_raw_count;
+	u64 max_period = arm_pmu_event_max_period(event);
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +173,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+	delta = (new_raw_count - prev_raw_count) & max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -227,11 +238,10 @@ armpmu_del(struct perf_event *event, int flags)
 
 	armpmu_stop(event, PERF_EF_UPDATE);
 	hw_events->events[idx] = NULL;
-	clear_bit(idx, hw_events->used_mask);
-	if (armpmu->clear_event_idx)
-		armpmu->clear_event_idx(hw_events, event);
-
+	armpmu->clear_event_idx(hw_events, event);
 	perf_event_update_userpage(event);
+	/* Clear the allocated counter */
+	hwc->idx = -1;
 }
 
 static int
@@ -360,6 +370,7 @@ __hw_perf_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping;
 
+	hwc->flags = 0;
 	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
@@ -402,7 +413,7 @@ __hw_perf_event_init(struct perf_event *event)
 		 * is far less likely to overtake the previous one unless
 		 * you have some serious IRQ latency issues.
 		 */
-		hwc->sample_period = armpmu->max_period >> 1;
+		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -654,14 +665,9 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 	int idx;
 
 	for (idx = 0; idx < armpmu->num_events; idx++) {
-		/*
-		 * If the counter is not used skip it, there is no
-		 * need of stopping/restarting it.
-		 */
-		if (!test_bit(idx, hw_events->used_mask))
-			continue;
-
 		event = hw_events->events[idx];
+		if (!event)
+			continue;
 
 		switch (cmd) {
 		case CPU_PM_ENTER:
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 971ff336494a..96075cecb0ae 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -160,7 +160,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
 static int armpmu_request_irqs(struct arm_pmu *armpmu)
 {
 	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-	int cpu, err;
+	int cpu, err = 0;
 
 	for_each_cpu(cpu, &armpmu->supported_cpus) {
 		int irq = per_cpu(hw_events->irq, cpu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 44df61397a38..9efd2413240c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -350,19 +350,21 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
 
 /*
  * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
- * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
- * in MPIDR[aff1]. If this changes in future, this shall be updated.
+ * If multi-threading is supported, CCL_ID is the low 3-bits in MPIDR[Aff2]
+ * and SCCL_ID is the upper 5-bits of Aff2 field; if not, SCCL_ID
+ * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
  */
 static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
 {
 	u64 mpidr = read_cpuid_mpidr();
 
 	if (mpidr & MPIDR_MT_BITMASK) {
+		int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+
 		if (sccl_id)
-			*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+			*sccl_id = aff2 >> 3;
 		if (ccl_id)
-			*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			*ccl_id = aff2 & 0x7;
 	} else {
 		if (sccl_id)
 			*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
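In the arm_pmu.c hunks above, the single armpmu->max_period is replaced by a per-event maximum, so events flagged ARMPMU_EVT_64BIT (the chained counters from the merge message) wrap at 64 bits while ordinary events keep wrapping at 32. The masking in armpmu_event_update() is what makes wrap-around come out right; a small self-contained sketch of the arithmetic (demo_max_period() and demo_delta() are hypothetical stand-ins):

#include <linux/bitops.h>
#include <linux/types.h>

/* per-event maximum, mirroring arm_pmu_event_max_period() above */
static inline u64 demo_max_period(bool chained_64bit)
{
	return chained_64bit ? GENMASK_ULL(63, 0) : GENMASK_ULL(31, 0);
}

/*
 * A 32-bit counter wrapping from 0xfffffff0 to 0x00000010 must yield
 * a delta of 0x20; the u64 subtraction gives 0xffffffff00000020, and
 * masking with the event's maximum recovers the true delta.
 */
static u64 demo_delta(u64 prev, u64 cur, bool chained_64bit)
{
	return (cur - prev) & demo_max_period(chained_64bit);
}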
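The hisi_uncore_pmu.c fix in the hunk above reflects that on multi-threaded parts both IDs live in MPIDR_EL1.Aff2: the SCCL ID in the upper five bits and the CCL ID in the low three. A worked decode under an assumed MPIDR value (demo_decode_aff2() is illustrative only; Aff2 is MPIDR_EL1 bits [23:16]):

#include <linux/types.h>

/* hypothetical decode of the Aff2 split used by hisi_read_sccl_and_ccl_id() */
static void demo_decode_aff2(int *sccl_id, int *ccl_id)
{
	u64 mpidr = 0x81230100ULL;		/* assumed value: MT bit (24) set, Aff2 = 0x23 */
	int aff2 = (mpidr >> 16) & 0xff;	/* what MPIDR_AFFINITY_LEVEL(mpidr, 2) returns */

	*sccl_id = aff2 >> 3;	/* upper 5 bits: 0x23 >> 3  == 4 */
	*ccl_id  = aff2 & 0x7;	/* low 3 bits:   0x23 & 0x7 == 3 */
}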