author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-05 12:11:37 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-05 12:11:37 -0700
commit		ab182e67ec99ea0c8d7435a32a4a1ed9bb02559a (patch)
tree		fa71bef0067a61952561552c6652d922060f5530 /drivers
parent		7246f60068840847bdcf595be5f0b5ca632736e0 (diff)
parent		92f66f84d9695d07adf9bc987bbcce4bf9b8e87c (diff)
download	linux-ab182e67ec99ea0c8d7435a32a4a1ed9bb02559a.tar.bz2
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - kdump support, including two necessary memblock additions:
   memblock_clear_nomap() and memblock_cap_memory_range()

 - ARMv8.3 HWCAP bits for JavaScript conversion instructions, complex
   numbers and weaker release consistency

 - arm64 ACPI platform MSI support

 - arm perf updates: ACPI PMU support, L3 cache PMU in some Qualcomm
   SoCs, Cortex-A53 L2 cache events and DTLB refills, MAINTAINERS update
   for DT perf bindings

 - architected timer errata framework (the arch/arm64 changes only)

 - support for DMA_ATTR_FORCE_CONTIGUOUS in the arm64 iommu DMA API

 - arm64 KVM refactoring to use common system register definitions

 - remove support for ASID-tagged VIVT I-cache (no ARMv8 implementation
   using it and deprecated in the architecture) together with some
   I-cache handling clean-up

 - PE/COFF EFI header clean-up/hardening

 - define BUG() instruction without CONFIG_BUG

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: Fix the DMA mmap and get_sgtable API with DMA_ATTR_FORCE_CONTIGUOUS
  arm64: Print DT machine model in setup_machine_fdt()
  arm64: pmu: Wire-up Cortex A53 L2 cache events and DTLB refills
  arm64: module: split core and init PLT sections
  arm64: pmuv3: handle pmuv3+
  arm64: Add CNTFRQ_EL0 trap handler
  arm64: Silence spurious kbuild warning on menuconfig
  arm64: pmuv3: use arm_pmu ACPI framework
  arm64: pmuv3: handle !PMUv3 when probing
  drivers/perf: arm_pmu: add ACPI framework
  arm64: add function to get a cpu's MADT GICC table
  drivers/perf: arm_pmu: split out platform device probe logic
  drivers/perf: arm_pmu: move irq request/free into probe
  drivers/perf: arm_pmu: split cpu-local irq request/free
  drivers/perf: arm_pmu: rename irq request/free functions
  drivers/perf: arm_pmu: handle no platform_device
  drivers/perf: arm_pmu: simplify cpu_pmu_request_irqs()
  drivers/perf: arm_pmu: factor out pmu registration
  drivers/perf: arm_pmu: fold init into alloc
  drivers/perf: arm_pmu: define armpmu_init_fn
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c           | 158
-rw-r--r--  drivers/acpi/glue.c                 |   6
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c  |  28
-rw-r--r--  drivers/perf/Kconfig                |  14
-rw-r--r--  drivers/perf/Makefile               |   4
-rw-r--r--  drivers/perf/arm_pmu.c              | 530
-rw-r--r--  drivers/perf/arm_pmu_acpi.c         | 256
-rw-r--r--  drivers/perf/arm_pmu_platform.c     | 235
-rw-r--r--  drivers/perf/qcom_l3_pmu.c          | 849
9 files changed, 1676 insertions, 404 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 4a5bb967250b..22e08d272db7 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -225,7 +225,7 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
if (iort_node->type == type &&
ACPI_SUCCESS(callback(iort_node, context)))
- return iort_node;
+ return iort_node;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
iort_node->length);
@@ -253,17 +253,15 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
void *context)
{
struct device *dev = context;
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
struct acpi_iort_named_component *ncomp;
- if (!adev) {
- status = AE_NOT_FOUND;
+ if (!adev)
goto out;
- }
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
if (ACPI_FAILURE(status)) {
@@ -289,8 +287,6 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
*/
status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
AE_OK : AE_NOT_FOUND;
- } else {
- status = AE_NOT_FOUND;
}
out:
return status;
@@ -322,8 +318,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
- u32 *id_out, u8 type_mask,
- int index)
+ u32 *id_out, int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
@@ -345,9 +340,6 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
map->output_reference);
- if (!(IORT_TYPE_MASK(parent->type) & type_mask))
- return NULL;
-
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
@@ -359,11 +351,11 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
return NULL;
}
-static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
- u32 rid_in, u32 *rid_out,
- u8 type_mask)
+static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
{
- u32 rid = rid_in;
+ u32 id = id_in;
/* Parse the ID mapping tree to find specified node type */
while (node) {
@@ -371,8 +363,8 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
int i;
if (IORT_TYPE_MASK(node->type) & type_mask) {
- if (rid_out)
- *rid_out = rid;
+ if (id_out)
+ *id_out = id;
return node;
}
@@ -389,9 +381,9 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
goto fail_map;
}
- /* Do the RID translation */
+ /* Do the ID translation */
for (i = 0; i < node->mapping_count; i++, map++) {
- if (!iort_id_map(map, node->type, rid, &rid))
+ if (!iort_id_map(map, node->type, id, &id))
break;
}
@@ -403,13 +395,41 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
}
fail_map:
- /* Map input RID to output RID unchanged on mapping failure*/
- if (rid_out)
- *rid_out = rid_in;
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
return NULL;
}
+static
+struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
+ u32 *id_out, u8 type_mask,
+ int index)
+{
+ struct acpi_iort_node *parent;
+ u32 id;
+
+ /* step 1: retrieve the initial dev id */
+ parent = iort_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ /*
+ * optional step 2: if the parent is not the target type we want,
+ * map the initial dev id again, to cover cases such as
+ * NC (named component) -> SMMU -> ITS. If the type matches,
+ * return the initial dev id and its parent pointer directly.
+ */
+ if (!(IORT_TYPE_MASK(parent->type) & type_mask))
+ parent = iort_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
struct pci_bus *pbus;
@@ -443,13 +463,38 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
if (!node)
return req_id;
- iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
+ iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
return dev_id;
}
/**
+ * iort_pmsi_get_dev_id() - Get the device id for a device
+ * @dev: The device for which the mapping is to be done.
+ * @dev_id: The device ID found.
+ *
+ * Returns: 0 if a dev id was found, -ENODEV on error
+ */
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
+{
+ int i;
+ struct acpi_iort_node *node;
+
+ node = iort_find_dev_node(dev);
+ if (!node)
+ return -ENODEV;
+
+ for (i = 0; i < node->mapping_count; i++) {
+ if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+/**
* iort_dev_find_its_id() - Find the ITS identifier for a device
* @dev: The device.
+ * @req_id: Device's requester ID
* @idx: Index of the ITS identifier list.
* @its_id: ITS identifier.
*
@@ -465,7 +510,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
if (!node)
return -ENXIO;
- node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
+ node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
if (!node)
return -ENXIO;
@@ -503,6 +548,56 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+/**
+ * iort_get_platform_device_domain() - Find MSI domain related to a
+ * platform device
+ * @dev: the dev pointer associated with the platform device
+ *
+ * Returns: the MSI domain for this device, NULL otherwise
+ */
+static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
+{
+ struct acpi_iort_node *node, *msi_parent;
+ struct fwnode_handle *iort_fwnode;
+ struct acpi_iort_its_group *its;
+ int i;
+
+ /* find its associated iort node */
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return NULL;
+
+ /* then find its msi parent node */
+ for (i = 0; i < node->mapping_count; i++) {
+ msi_parent = iort_node_map_platform_id(node, NULL,
+ IORT_MSI_TYPE, i);
+ if (msi_parent)
+ break;
+ }
+
+ if (!msi_parent)
+ return NULL;
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)msi_parent->node_data;
+
+ iort_fwnode = iort_find_domain_token(its->identifiers[0]);
+ if (!iort_fwnode)
+ return NULL;
+
+ return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
+}
+
+void acpi_configure_pmsi_domain(struct device *dev)
+{
+ struct irq_domain *msi_domain;
+
+ msi_domain = iort_get_platform_device_domain(dev);
+ if (msi_domain)
+ dev_set_msi_domain(dev, msi_domain);
+}
+
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
u32 *rid = data;
@@ -594,8 +689,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_map_rid(node, rid, &streamid,
- IORT_IOMMU_TYPE);
+ parent = iort_node_map_id(node, rid, &streamid,
+ IORT_IOMMU_TYPE);
ops = iort_iommu_xlate(dev, parent, streamid);
@@ -607,14 +702,15 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_get_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
+ parent = iort_node_map_platform_id(node, &streamid,
+ IORT_IOMMU_TYPE, i++);
while (parent) {
ops = iort_iommu_xlate(dev, parent, streamid);
- parent = iort_node_get_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
+ parent = iort_node_map_platform_id(node, &streamid,
+ IORT_IOMMU_TYPE,
+ i++);
}
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index edc8663b5db3..3e7020751d34 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -6,6 +6,8 @@
*
* This file is released under the GPLv2.
*/
+
+#include <linux/acpi_iort.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -14,6 +16,7 @@
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include "internal.h"
@@ -322,6 +325,9 @@ static int acpi_platform_notify(struct device *dev)
if (!adev)
goto out;
+ if (dev->bus == &platform_bus_type)
+ acpi_configure_pmsi_domain(dev);
+
if (type && type->setup)
type->setup(dev);
else if (adev->handler && adev->handler->bind)
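As a rough illustration (not part of this patch), a platform device driver
can consume the MSI domain that acpi_configure_pmsi_domain() attaches above
via the generic platform-MSI API. The driver and callback names below are
hypothetical; only platform_msi_domain_alloc_irqs() and its callback type
are real interfaces:

	#include <linux/msi.h>
	#include <linux/platform_device.h>

	/* hypothetical callback: program the device's doorbell from msg */
	static void example_write_msi_msg(struct msi_desc *desc,
					  struct msi_msg *msg)
	{
		/* write msg->address_lo/hi and msg->data to device registers */
	}

	static int example_probe(struct platform_device *pdev)
	{
		/* the MSI domain was attached at device-add time (hunk above) */
		return platform_msi_domain_alloc_irqs(&pdev->dev, 4,
						      example_write_msi_msg);
	}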
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 41f457be64e8..8830fa601e45 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,6 +16,22 @@
#include "efistub.h"
+#define EFI_DT_ADDR_CELLS_DEFAULT 2
+#define EFI_DT_SIZE_CELLS_DEFAULT 2
+
+static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
+{
+ int offset;
+
+ offset = fdt_path_offset(fdt, "/");
+ /* Set the #address-cells and #size-cells values for an empty tree */
+
+ fdt_setprop_u32(fdt, offset, "#address-cells",
+ EFI_DT_ADDR_CELLS_DEFAULT);
+
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+}
+
static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long orig_fdt_size,
void *fdt, int new_fdt_size, char *cmdline_ptr,
@@ -42,10 +58,18 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
}
- if (orig_fdt)
+ if (orig_fdt) {
status = fdt_open_into(orig_fdt, fdt, new_fdt_size);
- else
+ } else {
status = fdt_create_empty_tree(fdt, new_fdt_size);
+ if (status == 0) {
+ /*
+ * Any failure from the following function is
+ * non-critical
+ */
+ fdt_update_cell_size(sys_table, fdt);
+ }
+ }
if (status != 0)
goto fdt_set_fail;
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 93651907874f..aa587edaf9ea 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,6 +12,10 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config ARM_PMU_ACPI
+ depends on ARM_PMU && ACPI
+ def_bool y
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
@@ -21,6 +25,16 @@ config QCOM_L2_PMU
Adds the L2 cache PMU into the perf events subsystem for
monitoring L2 cache events.
+config QCOM_L3_PMU
+ bool "Qualcomm Technologies L3-cache PMU"
+ depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
+ select QCOM_IRQ_COMBINER
+ help
+ Provides support for the L3 cache performance monitor unit (PMU)
+ in Qualcomm Technologies processors.
+ Adds the L3 cache PMU into the perf events subsystem for
+ monitoring L3 cache events.
+
config XGENE_PMU
depends on PERF_EVENTS && ARCH_XGENE
bool "APM X-Gene SoC PMU"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index ef24833c94a8..6420bd4394d5 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
+obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
+obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9612b84bc3e0..dc459eb1246b 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -16,7 +16,6 @@
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -25,7 +24,6 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
-#include <asm/cputype.h>
#include <asm/irq_regs.h>
static int
@@ -235,20 +233,15 @@ armpmu_add(struct perf_event *event, int flags)
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
- int err = 0;
/* An event following a process won't be stopped earlier */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return -ENOENT;
- perf_pmu_disable(event->pmu);
-
/* If we don't have a space for the counter then finish early. */
idx = armpmu->get_event_idx(hw_events, event);
- if (idx < 0) {
- err = idx;
- goto out;
- }
+ if (idx < 0)
+ return idx;
/*
* If there is an event in the counter we are going to use then make
@@ -265,9 +258,7 @@ armpmu_add(struct perf_event *event, int flags)
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
-out:
- perf_pmu_enable(event->pmu);
- return err;
+ return 0;
}
static int
@@ -323,10 +314,16 @@ validate_group(struct perf_event *event)
return 0;
}
+static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
+{
+ struct platform_device *pdev = armpmu->plat_device;
+
+ return pdev ? dev_get_platdata(&pdev->dev) : NULL;
+}
+
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct platform_device *plat_device;
struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -338,8 +335,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
- plat_device = armpmu->plat_device;
- plat = dev_get_platdata(&plat_device->dev);
+
+ plat = armpmu_get_platdata(armpmu);
start_clock = sched_clock();
if (plat && plat->handle_irq)
@@ -352,37 +349,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
return ret;
}
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
- armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
- int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
- if (err) {
- armpmu_release_hardware(armpmu);
- return err;
- }
-
- return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
- struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- atomic_t *active_events = &armpmu->active_events;
- struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
- if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
- armpmu_release_hardware(armpmu);
- mutex_unlock(pmu_reserve_mutex);
- }
-}
-
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
@@ -455,8 +421,6 @@ __hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- int err = 0;
- atomic_t *active_events = &armpmu->active_events;
/*
* Reject CPU-affine events for CPUs that are of a different class to
@@ -476,26 +440,7 @@ static int armpmu_event_init(struct perf_event *event)
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
- event->destroy = hw_perf_event_destroy;
-
- if (!atomic_inc_not_zero(active_events)) {
- mutex_lock(&armpmu->reserve_mutex);
- if (atomic_read(active_events) == 0)
- err = armpmu_reserve_hardware(armpmu);
-
- if (!err)
- atomic_inc(active_events);
- mutex_unlock(&armpmu->reserve_mutex);
- }
-
- if (err)
- return err;
-
- err = __hw_perf_event_init(event);
- if (err)
- hw_perf_event_destroy(event);
-
- return err;
+ return __hw_perf_event_init(event);
}
static void armpmu_enable(struct pmu *pmu)
@@ -553,27 +498,6 @@ static struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-static void armpmu_init(struct arm_pmu *armpmu)
-{
- atomic_set(&armpmu->active_events, 0);
- mutex_init(&armpmu->reserve_mutex);
-
- armpmu->pmu = (struct pmu) {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
- .filter_match = armpmu_filter_match,
- .attr_groups = armpmu->attr_groups,
- };
- armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
- &armpmu_common_attr_group;
-}
-
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
@@ -601,113 +525,85 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
-static void cpu_pmu_enable_percpu_irq(void *data)
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
{
- int irq = *(int *)data;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ int irq = per_cpu(hw_events->irq, cpu);
- enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
+ if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
+ return;
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
- int irq = *(int *)data;
+ if (irq_is_percpu(irq)) {
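+ /*
+ * A per-CPU (PPI) interrupt is requested only once for all CPUs,
+ * so free it once and drop every CPU from active_irqs.
+ */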
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
+ cpumask_clear(&armpmu->active_irqs);
+ return;
+ }
- disable_percpu_irq(irq);
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+void armpmu_free_irqs(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
-
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_disable_percpu_irq, &irq, 1);
- free_percpu_irq(irq, &hw_events->percpu_pmu);
- } else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
-
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ int cpu;
- if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq > 0)
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- }
- }
+ for_each_cpu(cpu, &armpmu->supported_cpus)
+ armpmu_free_irq(armpmu, cpu);
}
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
{
- int i, err, irq, irqs;
- struct platform_device *pmu_device = cpu_pmu->plat_device;
- struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
- if (!pmu_device)
- return -ENODEV;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
- pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+ int err = 0;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ const irq_handler_t handler = armpmu_dispatch_irq;
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
return 0;
- }
- irq = platform_get_irq(pmu_device, 0);
- if (irq > 0 && irq_is_percpu(irq)) {
+ if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
+ } else if (irq_is_percpu(irq)) {
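+ /*
+ * The PPI was already requested when the first CPU in the mask
+ * came through here; all CPUs must share the same PPI, so only
+ * check the number against the one already on record.
+ */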
+ int other_cpu = cpumask_first(&armpmu->active_irqs);
+ int other_irq = per_cpu(hw_events->irq, other_cpu);
- on_each_cpu_mask(&cpu_pmu->supported_cpus,
- cpu_pmu_enable_percpu_irq, &irq, 1);
+ if (irq != other_irq) {
+ pr_warn("mismatched PPIs detected.\n");
+ err = -EINVAL;
+ }
} else {
- for (i = 0; i < irqs; ++i) {
- int cpu = i;
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+ per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ }
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
- if (cpu_pmu->irq_affinity)
- cpu = cpu_pmu->irq_affinity[i];
+ cpumask_set_cpu(cpu, &armpmu->active_irqs);
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
- pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- continue;
- }
-
- err = request_irq(irq, handler,
- IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, cpu));
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- return err;
- }
-
- cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
- }
+ return 0;
+}
+
+int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+ int cpu, err;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ err = armpmu_request_irq(armpmu, cpu);
+ if (err)
+ break;
}
- return 0;
+ return err;
+}
+
+static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
+{
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ return per_cpu(hw_events->irq, cpu);
}
/*
@@ -719,11 +615,42 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
if (pmu->reset)
pmu->reset(pmu);
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq) {
+ if (irq_is_percpu(irq)) {
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ if (irq_force_affinity(irq, cpumask_of(cpu)) &&
+ num_possible_cpus() > 1) {
+ pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
+ int irq;
+
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return 0;
+
+ irq = armpmu_get_cpu_irq(pmu, cpu);
+ if (irq && irq_is_percpu(irq))
+ disable_percpu_irq(irq);
+
return 0;
}
@@ -828,56 +755,22 @@ static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
- int cpu;
- struct pmu_hw_events __percpu *cpu_hw_events;
-
- cpu_hw_events = alloc_percpu(struct pmu_hw_events);
- if (!cpu_hw_events)
- return -ENOMEM;
- err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
- &cpu_pmu->node);
+ err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
+ &cpu_pmu->node);
if (err)
- goto out_free;
+ goto out;
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
goto out_unregister;
- for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
- events->percpu_pmu = cpu_pmu;
- }
-
- cpu_pmu->hw_events = cpu_hw_events;
- cpu_pmu->request_irq = cpu_pmu_request_irq;
- cpu_pmu->free_irq = cpu_pmu_free_irq;
-
- /* Ensure the PMU has sane values out of reset. */
- if (cpu_pmu->reset)
- on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
- cpu_pmu, 1);
-
- /* If no interrupts available, set the corresponding capability flag */
- if (!platform_get_irq(cpu_pmu->plat_device, 0))
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
- /*
- * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
- * big.LITTLE). This is not an uncore PMU, and we have taken ctx
- * sharing into account (e.g. with our pmu::filter_match callback and
- * pmu::event_init group validation).
- */
- cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
-
return 0;
out_unregister:
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
-out_free:
- free_percpu(cpu_hw_events);
+out:
return err;
}
@@ -886,177 +779,78 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
cpu_pm_pmu_unregister(cpu_pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
&cpu_pmu->node);
- free_percpu(cpu_pmu->hw_events);
}
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
- const struct pmu_probe_info *info)
+struct arm_pmu *armpmu_alloc(void)
{
- int cpu = get_cpu();
- unsigned int cpuid = read_cpuid_id();
- int ret = -ENODEV;
-
- pr_info("probing PMU on CPU %d\n", cpu);
+ struct arm_pmu *pmu;
+ int cpu;
- for (; info->init != NULL; info++) {
- if ((cpuid & info->mask) != info->cpuid)
- continue;
- ret = info->init(pmu);
- break;
+ pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ if (!pmu) {
+ pr_info("failed to allocate PMU device!\n");
+ goto out;
}
- put_cpu();
- return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
- int *irqs, i = 0;
- bool using_spi = false;
- struct platform_device *pdev = pmu->plat_device;
-
- irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
- if (!irqs)
- return -ENOMEM;
-
- do {
- struct device_node *dn;
- int cpu, irq;
-
- /* See if we have an affinity entry */
- dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
- if (!dn)
- break;
-
- /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
- irq = platform_get_irq(pdev, i);
- if (irq > 0) {
- bool spi = !irq_is_percpu(irq);
-
- if (i > 0 && spi != using_spi) {
- pr_err("PPI/SPI IRQ type mismatch for %s!\n",
- dn->name);
- of_node_put(dn);
- kfree(irqs);
- return -EINVAL;
- }
-
- using_spi = spi;
- }
-
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
+ pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!pmu->hw_events) {
+ pr_info("failed to allocate per-cpu PMU data.\n");
+ goto out_free_pmu;
+ }
- if (cpu >= nr_cpu_ids) {
- pr_warn("Failed to find logical CPU for %s\n",
- dn->name);
- of_node_put(dn);
- cpumask_setall(&pmu->supported_cpus);
- break;
- }
- of_node_put(dn);
+ pmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ .filter_match = armpmu_filter_match,
+ .attr_groups = pmu->attr_groups,
+ /*
+ * This is a CPU PMU potentially in a heterogeneous
+ * configuration (e.g. big.LITTLE). This is not an uncore PMU,
+ * and we have taken ctx sharing into account (e.g. with our
+ * pmu::filter_match callback and pmu::event_init group
+ * validation).
+ */
+ .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+ };
- /* For SPIs, we need to track the affinity per IRQ */
- if (using_spi) {
- if (i >= pdev->num_resources)
- break;
+ pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
+ &armpmu_common_attr_group;
- irqs[i] = cpu;
- }
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events;
- /* Keep track of the CPUs containing this PMU type */
- cpumask_set_cpu(cpu, &pmu->supported_cpus);
- i++;
- } while (1);
-
- /* If we didn't manage to parse anything, try the interrupt affinity */
- if (cpumask_weight(&pmu->supported_cpus) == 0) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq > 0 && irq_is_percpu(irq)) {
- /* If using PPIs, check the affinity of the partition */
- int ret;
-
- ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
- if (ret) {
- kfree(irqs);
- return ret;
- }
- } else {
- /* Otherwise default to all CPUs */
- cpumask_setall(&pmu->supported_cpus);
- }
+ events = per_cpu_ptr(pmu->hw_events, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ events->percpu_pmu = pmu;
}
- /* If we matched up the IRQ affinities, use them to route the SPIs */
- if (using_spi && i == pdev->num_resources)
- pmu->irq_affinity = irqs;
- else
- kfree(irqs);
+ return pmu;
- return 0;
+out_free_pmu:
+ kfree(pmu);
+out:
+ return NULL;
}
-int arm_pmu_device_probe(struct platform_device *pdev,
- const struct of_device_id *of_table,
- const struct pmu_probe_info *probe_table)
+void armpmu_free(struct arm_pmu *pmu)
{
- const struct of_device_id *of_id;
- const int (*init_fn)(struct arm_pmu *);
- struct device_node *node = pdev->dev.of_node;
- struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
- if (!pmu) {
- pr_info("failed to allocate PMU device!\n");
- return -ENOMEM;
- }
-
- armpmu_init(pmu);
-
- pmu->plat_device = pdev;
-
- if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
- init_fn = of_id->data;
-
- pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
- "secure-reg-access");
-
- /* arm64 systems boot only as non-secure */
- if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
- pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
- pmu->secure_access = false;
- }
-
- ret = of_pmu_irq_cfg(pmu);
- if (!ret)
- ret = init_fn(pmu);
- } else if (probe_table) {
- cpumask_setall(&pmu->supported_cpus);
- ret = probe_current_pmu(pmu, probe_table);
- }
-
- if (ret) {
- pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
- goto out_free;
- }
+ free_percpu(pmu->hw_events);
+ kfree(pmu);
+}
+int armpmu_register(struct arm_pmu *pmu)
+{
+ int ret;
ret = cpu_pmu_init(pmu);
if (ret)
- goto out_free;
+ return ret;
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
@@ -1066,17 +860,12 @@ int arm_pmu_device_probe(struct platform_device *pdev,
__oprofile_cpu_pmu = pmu;
pr_info("enabled with %s PMU driver, %d counters available\n",
- pmu->name, pmu->num_events);
+ pmu->name, pmu->num_events);
return 0;
out_destroy:
cpu_pmu_destroy(pmu);
-out_free:
- pr_info("%s: failed to register PMU devices!\n",
- of_node_full_name(node));
- kfree(pmu->irq_affinity);
- kfree(pmu);
return ret;
}
@@ -1086,7 +875,8 @@ static int arm_pmu_hp_init(void)
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
"perf/arm/pmu:starting",
- arm_perf_starting_cpu, NULL);
+ arm_perf_starting_cpu,
+ arm_perf_teardown_cpu);
if (ret)
pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
ret);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
new file mode 100644
index 000000000000..34c862f213c7
--- /dev/null
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -0,0 +1,256 @@
+/*
+ * ACPI probing code for ARM performance counters.
+ *
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/perf/arm_pmu.h>
+
+#include <asm/cputype.h>
+
+static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
+static DEFINE_PER_CPU(int, pmu_irqs);
+
+static int arm_pmu_acpi_register_irq(int cpu)
+{
+ struct acpi_madt_generic_interrupt *gicc;
+ int gsi, trigger;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (WARN_ON(!gicc))
+ return -EINVAL;
+
+ gsi = gicc->performance_interrupt;
+ if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
+ trigger = ACPI_EDGE_SENSITIVE;
+ else
+ trigger = ACPI_LEVEL_SENSITIVE;
+
+ /*
+ * Helpfully, the MADT GICC doesn't have a polarity flag for the
+ * "performance interrupt". Luckily, on compliant GICs the polarity is
+ * a fixed value in HW (for both SPIs and PPIs) that we cannot change
+ * from SW.
+ *
+ * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
+ * may not match the real polarity, but that should not matter.
+ *
+ * Other interrupt controllers are not supported with ACPI.
+ */
+ return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
+}
+
+static void arm_pmu_acpi_unregister_irq(int cpu)
+{
+ struct acpi_madt_generic_interrupt *gicc;
+ int gsi;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (!gicc)
+ return;
+
+ gsi = gicc->performance_interrupt;
+ acpi_unregister_gsi(gsi);
+}
+
+static int arm_pmu_acpi_parse_irqs(void)
+{
+ int irq, cpu, irq_cpu, err;
+
+ for_each_possible_cpu(cpu) {
+ irq = arm_pmu_acpi_register_irq(cpu);
+ if (irq < 0) {
+ err = irq;
+ pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
+ cpu, err);
+ goto out_err;
+ } else if (irq == 0) {
+ pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
+ }
+
+ per_cpu(pmu_irqs, cpu) = irq;
+ }
+
+ return 0;
+
+out_err:
+ for_each_possible_cpu(cpu) {
+ irq = per_cpu(pmu_irqs, cpu);
+ if (!irq)
+ continue;
+
+ arm_pmu_acpi_unregister_irq(cpu);
+
+ /*
+ * Blat all copies of the IRQ so that we only unregister the
+ * corresponding GSI once (e.g. when we have PPIs).
+ */
+ for_each_possible_cpu(irq_cpu) {
+ if (per_cpu(pmu_irqs, irq_cpu) == irq)
+ per_cpu(pmu_irqs, irq_cpu) = 0;
+ }
+ }
+
+ return err;
+}
+
+static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
+{
+ unsigned long cpuid = read_cpuid_id();
+ struct arm_pmu *pmu;
+ int cpu;
+
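+ /*
+ * CPUs with the same MIDR value are assumed to share a logical PMU,
+ * so reuse an arm_pmu already probed for another CPU of this type.
+ */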
+ for_each_possible_cpu(cpu) {
+ pmu = per_cpu(probed_pmus, cpu);
+ if (!pmu || pmu->acpi_cpuid != cpuid)
+ continue;
+
+ return pmu;
+ }
+
+ pmu = armpmu_alloc();
+ if (!pmu) {
+ pr_warn("Unable to allocate PMU for CPU%d\n",
+ smp_processor_id());
+ return NULL;
+ }
+
+ pmu->acpi_cpuid = cpuid;
+
+ return pmu;
+}
+
+/*
+ * This must run before the common arm_pmu hotplug logic, so that we can
+ * associate a CPU and its interrupt before the common code tries to manage the
+ * affinity and so on.
+ *
+ * Note that hotplug events are serialized, so we cannot race with another CPU
+ * coming up. The perf core won't open events while a hotplug event is in
+ * progress.
+ */
+static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
+{
+ struct arm_pmu *pmu;
+ struct pmu_hw_events __percpu *hw_events;
+ int irq;
+
+ /* If we've already probed this CPU, we have nothing to do */
+ if (per_cpu(probed_pmus, cpu))
+ return 0;
+
+ irq = per_cpu(pmu_irqs, cpu);
+
+ pmu = arm_pmu_acpi_find_alloc_pmu();
+ if (!pmu)
+ return -ENOMEM;
+
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+
+ per_cpu(probed_pmus, cpu) = pmu;
+
+ /*
+ * Log and request the IRQ so the core arm_pmu code can manage it. In
+ * some situations (e.g. mismatched PPIs), we may fail to request the
+ * IRQ. However, it may be too late for us to do anything about it.
+ * The common ARM PMU code will log a warning in this case.
+ */
+ hw_events = pmu->hw_events;
+ per_cpu(hw_events->irq, cpu) = irq;
+ armpmu_request_irq(pmu, cpu);
+
+ /*
+ * Ideally, we'd probe the PMU here when we find the first matching
+ * CPU. We can't do that for several reasons; see the comment in
+ * arm_pmu_acpi_init().
+ *
+ * So for the time being, we're done.
+ */
+ return 0;
+}
+
+int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
+{
+ int pmu_idx = 0;
+ int cpu, ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ /*
+ * Initialise and register the set of PMUs which we know about right
+ * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
+ * could handle late hotplug, but this may lead to deadlock since we
+ * might try to register a hotplug notifier instance from within a
+ * hotplug notifier.
+ *
+ * There's also the problem of having access to the right init_fn,
+ * without tying this too deeply into the "real" PMU driver.
+ *
+ * For the moment, as with the platform/DT case, we need at least one
+ * of a PMU's CPUs to be online at probe time.
+ */
+ for_each_possible_cpu(cpu) {
+ struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
+ char *base_name;
+
+ if (!pmu || pmu->name)
+ continue;
+
+ ret = init_fn(pmu);
+ if (ret == -ENODEV) {
+ /* PMU not handled by this driver, or not present */
+ continue;
+ } else if (ret) {
+ pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
+ return ret;
+ }
+
+ base_name = pmu->name;
+ pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
+ if (!pmu->name) {
+ pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ ret = armpmu_register(pmu);
+ if (ret) {
+ pr_warn("Failed to register PMU for CPU%d\n", cpu);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int arm_pmu_acpi_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ /*
+ * We can't request IRQs yet, since we don't know the cookie value
+ * until we know which CPUs share the same logical PMU. We'll handle
+ * that in arm_pmu_acpi_cpu_starting().
+ */
+ ret = arm_pmu_acpi_parse_irqs();
+ if (ret)
+ return ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
+ "perf/arm/pmu_acpi:starting",
+ arm_pmu_acpi_cpu_starting, NULL);
+
+ return ret;
+}
+subsys_initcall(arm_pmu_acpi_init)
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
new file mode 100644
index 000000000000..69255f53057a
--- /dev/null
+++ b/drivers/perf/arm_pmu_platform.c
@@ -0,0 +1,235 @@
+/*
+ * platform_device probing code for ARM performance counters.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/kconfig.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/percpu.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+static int probe_current_pmu(struct arm_pmu *pmu,
+ const struct pmu_probe_info *info)
+{
+ int cpu = get_cpu();
+ unsigned int cpuid = read_cpuid_id();
+ int ret = -ENODEV;
+
+ pr_info("probing PMU on CPU %d\n", cpu);
+
+ for (; info->init != NULL; info++) {
+ if ((cpuid & info->mask) != info->cpuid)
+ continue;
+ ret = info->init(pmu);
+ break;
+ }
+
+ put_cpu();
+ return ret;
+}
+
+static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
+{
+ int cpu, ret;
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+ ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+ if (ret)
+ return ret;
+
+ for_each_cpu(cpu, &pmu->supported_cpus)
+ per_cpu(hw_events->irq, cpu) = irq;
+
+ return 0;
+}
+
+static bool pmu_has_irq_affinity(struct device_node *node)
+{
+ return !!of_find_property(node, "interrupt-affinity", NULL);
+}
+
+static int pmu_parse_irq_affinity(struct device_node *node, int i)
+{
+ struct device_node *dn;
+ int cpu;
+
+ /*
+ * If we don't have an interrupt-affinity property, we guess irq
+ * affinity matches our logical CPU order, as we used to assume.
+ * This is fragile, so we'll warn in pmu_parse_irqs().
+ */
+ if (!pmu_has_irq_affinity(node))
+ return i;
+
+ dn = of_parse_phandle(node, "interrupt-affinity", i);
+ if (!dn) {
+ pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
+ i, node->name);
+ return -EINVAL;
+ }
+
+ /* Now look up the logical CPU number */
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_dn;
+
+ cpu_dn = of_cpu_device_node_get(cpu);
+ of_node_put(cpu_dn);
+
+ if (dn == cpu_dn)
+ break;
+ }
+
+ if (cpu >= nr_cpu_ids) {
+ pr_warn("failed to find logical CPU for %s\n", dn->name);
+ }
+
+ of_node_put(dn);
+
+ return cpu;
+}
+
+static int pmu_parse_irqs(struct arm_pmu *pmu)
+{
+ int i = 0, num_irqs;
+ struct platform_device *pdev = pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+
+ num_irqs = platform_irq_count(pdev);
+ if (num_irqs < 0) {
+ pr_err("unable to count PMU IRQs\n");
+ return num_irqs;
+ }
+
+ /*
+ * In this case we have no idea which CPUs are covered by the PMU.
+ * To match our prior behaviour, we assume all CPUs.
+ */
+ if (num_irqs == 0) {
+ pr_warn("no irqs for PMU, sampling events not supported\n");
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ cpumask_setall(&pmu->supported_cpus);
+ return 0;
+ }
+
+ if (num_irqs == 1) {
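+ /*
+ * A single per-CPU (PPI) interrupt covers every CPU in the PPI's
+ * partition; derive the supported CPU mask from it.
+ */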
+ int irq = platform_get_irq(pdev, 0);
+ if (irq && irq_is_percpu(irq))
+ return pmu_parse_percpu_irq(pmu, irq);
+ }
+
+ if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
+ pr_warn("no interrupt-affinity property for %s, guessing.\n",
+ of_node_full_name(pdev->dev.of_node));
+ }
+
+ /*
+ * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
+ * special platdata function that attempts to demux them.
+ */
+ if (dev_get_platdata(&pdev->dev))
+ cpumask_setall(&pmu->supported_cpus);
+
+ for (i = 0; i < num_irqs; i++) {
+ int cpu, irq;
+
+ irq = platform_get_irq(pdev, i);
+ if (WARN_ON(irq <= 0))
+ continue;
+
+ if (irq_is_percpu(irq)) {
+ pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
+ return -EINVAL;
+ }
+
+ cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
+ if (cpu < 0)
+ return cpu;
+ if (cpu >= nr_cpu_ids)
+ continue;
+
+ if (per_cpu(hw_events->irq, cpu)) {
+ pr_warn("multiple PMU IRQs for the same CPU detected\n");
+ return -EINVAL;
+ }
+
+ per_cpu(hw_events->irq, cpu) = irq;
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ }
+
+ return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+ const struct of_device_id *of_table,
+ const struct pmu_probe_info *probe_table)
+{
+ const struct of_device_id *of_id;
+ armpmu_init_fn init_fn;
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+ int ret = -ENODEV;
+
+ pmu = armpmu_alloc();
+ if (!pmu)
+ return -ENOMEM;
+
+ pmu->plat_device = pdev;
+
+ ret = pmu_parse_irqs(pmu);
+ if (ret)
+ goto out_free;
+
+ if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+ init_fn = of_id->data;
+
+ pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ "secure-reg-access");
+
+ /* arm64 systems boot only as non-secure */
+ if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+ pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ pmu->secure_access = false;
+ }
+
+ ret = init_fn(pmu);
+ } else if (probe_table) {
+ cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
+ }
+
+ if (ret) {
+ pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ goto out_free;
+ }
+
+ ret = armpmu_request_irqs(pmu);
+ if (ret)
+ goto out_free_irqs;
+
+ ret = armpmu_register(pmu);
+ if (ret)
+ goto out_free;
+
+ return 0;
+
+out_free_irqs:
+ armpmu_free_irqs(pmu);
+out_free:
+ pr_info("%s: failed to register PMU devices!\n",
+ of_node_full_name(node));
+ armpmu_free(pmu);
+ return ret;
+}
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
new file mode 100644
index 000000000000..7f6b62b29e9d
--- /dev/null
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -0,0 +1,849 @@
+/*
+ * Driver for the L3 cache PMUs in Qualcomm Technologies chips.
+ *
+ * The driver supports a distributed cache architecture where the overall
+ * cache for a socket is comprised of multiple slices each with its own PMU.
+ * Access to each individual PMU is provided even though all CPUs share all
+ * the slices. User space needs to aggregate the individual counts to
+ * provide a global picture.
+ *
+ * See Documentation/perf/qcom_l3_pmu.txt for more details.
+ *
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/*
+ * General constants
+ */
+
+/* Number of counters on each PMU */
+#define L3_NUM_COUNTERS 8
+/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
+#define L3_EVTYPE_MASK 0xFF
+/*
+ * Bit position of the 'long counter' flag within perf_event_attr.config.
+ * Reserve some space between the event type and this flag to allow expansion
+ * in the event type field.
+ */
+#define L3_EVENT_LC_BIT 32
+
+/*
+ * Register offsets
+ */
+
+/* Perfmon registers */
+#define L3_HML3_PM_CR 0x000
+#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8)
+#define L3_HML3_PM_FILTRA 0x300
+#define L3_HML3_PM_FILTRB 0x308
+#define L3_HML3_PM_FILTRC 0x310
+#define L3_HML3_PM_FILTRAM 0x304
+#define L3_HML3_PM_FILTRBM 0x30C
+#define L3_HML3_PM_FILTRCM 0x314
+
+/* Basic counter registers */
+#define L3_M_BC_CR 0x500
+#define L3_M_BC_SATROLL_CR 0x504
+#define L3_M_BC_CNTENSET 0x508
+#define L3_M_BC_CNTENCLR 0x50C
+#define L3_M_BC_INTENSET 0x510
+#define L3_M_BC_INTENCLR 0x514
+#define L3_M_BC_GANG 0x718
+#define L3_M_BC_OVSR 0x740
+#define L3_M_BC_IRQCTL 0x96C
+
+/*
+ * Bit field definitions
+ */
+
+/* L3_HML3_PM_CR */
+#define PM_CR_RESET (0)
+
+/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */
+#define PMCNT_RESET (0)
+
+/* L3_HML3_PM_EVTYPEx */
+#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK)
+
+/* Reset value for all the filter registers */
+#define PM_FLTR_RESET (0)
+
+/* L3_M_BC_CR */
+#define BC_RESET (1UL << 1)
+#define BC_ENABLE (1UL << 0)
+
+/* L3_M_BC_SATROLL_CR */
+#define BC_SATROLL_CR_RESET (0)
+
+/* L3_M_BC_CNTENSET */
+#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7))
+
+/* L3_M_BC_CNTENCLR */
+#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_CNTENCLR_RESET (0xFF)
+
+/* L3_M_BC_INTENSET */
+#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7))
+
+/* L3_M_BC_INTENCLR */
+#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_INTENCLR_RESET (0xFF)
+
+/* L3_M_BC_GANG */
+#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_GANG_RESET (0)
+
+/* L3_M_BC_OVSR */
+#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7))
+#define PMOVSRCLR_RESET (0xFF)
+
+/* L3_M_BC_IRQCTL */
+#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7))
+#define BC_IRQCTL_RESET (0x0)
+
+/*
+ * Events
+ */
+
+#define L3_EVENT_CYCLES 0x01
+#define L3_EVENT_READ_HIT 0x20
+#define L3_EVENT_READ_MISS 0x21
+#define L3_EVENT_READ_HIT_D 0x22
+#define L3_EVENT_READ_MISS_D 0x23
+#define L3_EVENT_WRITE_HIT 0x24
+#define L3_EVENT_WRITE_MISS 0x25
+
+/*
+ * Decoding of settings from perf_event_attr
+ *
+ * The config format for perf events is:
+ * - config: bits 0-7: event type
+ * bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits
+ */
+
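As a rough user-space illustration of the layout above (not part of this
patch), the sketch below opens a 64-bit read-miss event via
perf_event_open(2). The PMU type value is a placeholder; a real consumer
reads it from /sys/bus/event_source/devices/<pmu>/type, and because the
counters are socket-wide the event must be opened per-CPU (pid == -1),
not per-task:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_l3_read_miss_64bit(int cpu)
	{
		struct perf_event_attr attr = {
			.size = sizeof(attr),
			.type = 24,		/* placeholder; read from sysfs */
			.config = 0x21		/* L3_EVENT_READ_MISS */
				| (1ULL << 32),	/* L3_EVENT_LC_BIT: long counter */
		};

		return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	}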
+static inline u32 get_event_type(struct perf_event *event)
+{
+ return (event->attr.config) & L3_EVTYPE_MASK;
+}
+
+static inline bool event_uses_long_counter(struct perf_event *event)
+{
+ return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT));
+}
+
+static inline int event_num_counters(struct perf_event *event)
+{
+ return event_uses_long_counter(event) ? 2 : 1;
+}
+
+/*
+ * Main PMU, inherits from the core perf PMU type
+ */
+struct l3cache_pmu {
+ struct pmu pmu;
+ struct hlist_node node;
+ void __iomem *regs;
+ struct perf_event *events[L3_NUM_COUNTERS];
+ unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)];
+ cpumask_t cpumask;
+};
+
+#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
+
+/*
+ * Type used to group hardware counter operations
+ *
+ * Used to implement two types of hardware counters, standard (32bits) and
+ * long (64bits). The hardware supports counter chaining which we use to
+ * implement long counters. This support is exposed via the 'lc' flag field
+ * in perf_event_attr.config.
+ */
+struct l3cache_event_ops {
+ /* Called to start event monitoring */
+ void (*start)(struct perf_event *event);
+ /* Called to stop event monitoring */
+ void (*stop)(struct perf_event *event, int flags);
+ /* Called to update the perf_event */
+ void (*update)(struct perf_event *event);
+};
+
+/*
+ * Implementation of long counter operations
+ *
+ * 64bit counters are implemented by chaining two of the 32bit physical
+ * counters. The PMU only supports chaining of adjacent even/odd pairs
+ * and for simplicity the driver always configures the odd counter to
+ * count the overflows of the lower-numbered even counter. Note that since
+ * the resulting hardware counter is 64bits no IRQs are required to maintain
+ * the software counter which is also 64bits.
+ */
+
+static void qcom_l3_cache__64bit_counter_start(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 evsel = get_event_type(event);
+ u32 gang;
+
+ /* Set the odd counter to count the overflows of the even counter */
+ gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
+ gang |= GANG_EN(idx + 1);
+ writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG);
+
+ /* Initialize the hardware counters and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+
+ /*
+ * Set the event types, the upper half must use zero and the lower
+ * half the actual event type
+ */
+ writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1));
+ writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
+
+ /* Finally, enable the counters */
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1));
+ writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET);
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
+ writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
+}
+
+static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event,
+ int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG);
+
+ /* Disable the counters */
+ writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
+ writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR);
+
+ /* Disable chaining */
+ writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG);
+}
+
+static void qcom_l3_cache__64bit_counter_update(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 hi, lo;
+ u64 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
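+ /*
+ * Read hi, then lo, then hi again: if the high half changed in
+ * between, the low half wrapped during the read, so retry until
+ * a consistent 64-bit snapshot is obtained.
+ */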
+ do {
+ hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1));
+ lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+ } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)));
+ new = ((u64)hi << 32) | lo;
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static const struct l3cache_event_ops event_ops_long = {
+ .start = qcom_l3_cache__64bit_counter_start,
+ .stop = qcom_l3_cache__64bit_counter_stop,
+ .update = qcom_l3_cache__64bit_counter_update,
+};
+
+/*
+ * Implementation of standard counter operations
+ *
+ * 32bit counters use a single physical counter and a hardware feature that
+ * asserts the overflow IRQ on the toggling of the most significant bit in
+ * the counter. This feature allows the counters to be left free-running
+ * without needing the usual reprogramming required to properly handle races
+ * during concurrent calls to update.
+ */
+
+static void qcom_l3_cache__32bit_counter_start(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 evsel = get_event_type(event);
+ u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Set the counter to assert the overflow IRQ on MSB toggling */
+ writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Initialize the hardware counter and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+
+ /* Set the event type */
+ writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx));
+
+ /* Enable interrupt generation by this counter */
+ writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET);
+
+ /* Finally, enable the counter */
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx));
+ writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET);
+}
+
+static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event,
+ int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL);
+
+ /* Disable the counter */
+ writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR);
+
+ /* Disable interrupt generation by this counter */
+ writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR);
+
+ /* Set the counter to not assert the overflow IRQ on MSB toggling */
+ writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL);
+}
+
+static void qcom_l3_cache__32bit_counter_update(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u32 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx));
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static const struct l3cache_event_ops event_ops_std = {
+ .start = qcom_l3_cache__32bit_counter_start,
+ .stop = qcom_l3_cache__32bit_counter_stop,
+ .update = qcom_l3_cache__32bit_counter_update,
+};
+
+/* Retrieve the appropriate operations for the given event */
+static
+const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event)
+{
+ if (event_uses_long_counter(event))
+ return &event_ops_long;
+ else
+ return &event_ops_std;
+}
+
+/*
+ * Top level PMU functions.
+ */
+
+static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu)
+{
+ int i;
+
+ writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR);
+
+ /*
+ * Use writel for the first programming command to ensure the basic
+ * counter unit is stopped before proceeding
+ */
+ writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR);
+
+ writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR);
+ writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR);
+ writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR);
+ writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG);
+ writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL);
+ writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR);
+
+ for (i = 0; i < L3_NUM_COUNTERS; ++i) {
+ writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i));
+ writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i));
+ }
+
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC);
+ writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM);
+
+ /*
+ * Use writel here to ensure all programming commands are done
+ * before proceeding
+ */
+ writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
+}
+
+static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data)
+{
+ struct l3cache_pmu *l3pmu = data;
+ /* Read the overflow status register */
+ long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR);
+ int idx;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ /* Clear the bits we read in the overflow status register */
+ writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR);
+
+ for_each_set_bit(idx, &status, L3_NUM_COUNTERS) {
+ struct perf_event *event;
+ const struct l3cache_event_ops *ops;
+
+ event = l3pmu->events[idx];
+ if (!event)
+ continue;
+
+ /*
+ * Since the IRQ is not enabled for events using long counters,
+ * we should never see one of those here. Still, for consistency,
+ * use the ops indirection as in the other operations.
+ */
+
+ ops = l3cache_event_get_ops(event);
+ ops->update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Implementation of abstract pmu functionality required by
+ * the core perf events code.
+ */
+
+static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
+
+ /* Ensure the other programming commands are observed before enabling */
+ wmb();
+
+ writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR);
+}
+
+static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
+
+ writel_relaxed(0, l3pmu->regs + L3_M_BC_CR);
+
+ /* Ensure the basic counter unit is stopped before proceeding */
+ wmb();
+}
+
+/*
+ * We must NOT create groups containing events from multiple hardware PMUs,
+ * although mixing software events with events from this PMU is allowed.
+ */
+static bool qcom_l3_cache__validate_event_group(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counters = 0;
+
+ if (leader->pmu != event->pmu && !is_software_event(leader))
+ return false;
+
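+ /* Long-counter events occupy two hardware counters, standard events one */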
+ counters = event_num_counters(event);
+ counters += event_num_counters(leader);
+
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (is_software_event(sibling))
+ continue;
+ if (sibling->pmu != event->pmu)
+ return false;
+ counters += event_num_counters(sibling);
+ }
+
+ /*
+ * If the group requires more counters than the HW has, it
+ * cannot ever be scheduled.
+ */
+ return counters <= L3_NUM_COUNTERS;
+}
+
+static int qcom_l3_cache__event_init(struct perf_event *event)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Is the event for this PMU?
+ */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * There are no per-counter mode filters in the PMU.
+ */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_hv || event->attr.exclude_idle)
+ return -EINVAL;
+
+ /*
+ * Sampling not supported since these events are not core-attributable.
+ */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * Task mode is not available: these are socket-wide counters,
+ * not attributable to any CPU, so per-task counting is impossible.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Validate the group */
+ if (!qcom_l3_cache__validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+
+ /*
+ * Many perf core operations (e.g. event rotation) operate on a
+ * single CPU context. This is obvious for CPU PMUs, where one
+ * expects the same sets of events being observed on all CPUs,
+ * but can lead to issues for off-core PMUs, like this one, where
+ * each event could be theoretically assigned to a different CPU.
+ * To mitigate this, we enforce CPU assignment to one designated
+ * processor (the one described in the "cpumask" attribute exported
+ * by the PMU device). perf user space tools honor this and avoid
+ * opening more than one copy of the events.
+ */
+ event->cpu = cpumask_first(&l3pmu->cpumask);
+
+ return 0;
+}
+
+static void qcom_l3_cache__event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ hwc->state = 0;
+ ops->start(event);
+}
+
+static void qcom_l3_cache__event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ ops->stop(event, flags);
+ if (flags & PERF_EF_UPDATE)
+ ops->update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int qcom_l3_cache__event_add(struct perf_event *event, int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int order = event_uses_long_counter(event) ? 1 : 0;
+ int idx;
+
+ /*
+ * Try to allocate a counter: long-counter events need an even-aligned
+ * pair of adjacent counters (an order-1 bitmap region), standard
+ * events a single counter.
+ */
+ idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
+ if (idx < 0)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ l3pmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ qcom_l3_cache__event_start(event, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void qcom_l3_cache__event_del(struct perf_event *event, int flags)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int order = event_uses_long_counter(event) ? 1 : 0;
+
+ /* Stop and clean up */
+ qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE);
+ l3pmu->events[hwc->idx] = NULL;
+ bitmap_release_region(l3pmu->used_mask, hwc->idx, order);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+}
+
+static void qcom_l3_cache__event_read(struct perf_event *event)
+{
+ const struct l3cache_event_ops *ops = l3cache_event_get_ops(event);
+
+ ops->update(event);
+}
+
+/*
+ * Add sysfs attributes
+ *
+ * We export:
+ * - formats, used by perf user space and other tools to configure events
+ * - events, used by perf user space and other tools to create events
+ * symbolically, e.g.:
+ * perf stat -a -e l3cache_0_0/event=read-miss/ ls
+ * perf stat -a -e l3cache_0_0/event=0x21/ ls
+ * - cpumask, used by perf user space and other tools to know on which CPUs
+ * to open the events
+ */
+
+/* formats */
+
+static ssize_t l3cache_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *) eattr->var);
+}
+
+#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
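+/*
+ * Reading one of these attributes from sysfs returns its field spec;
+ * e.g. /sys/bus/event_source/devices/<pmu>/format/event contains
+ * "config:0-7", which perf uses to encode event=N into attr.config.
+ */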
+static struct attribute *qcom_l3_cache_pmu_formats[] = {
+ L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"),
+ L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)),
+ NULL,
+};
+
+static struct attribute_group qcom_l3_cache_pmu_format_group = {
+ .name = "format",
+ .attrs = qcom_l3_cache_pmu_formats,
+};
+
+/* events */
+
+static ssize_t l3cache_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define L3CACHE_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
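+/*
+ * Each of these reads back "event=0x<id>", which perf resolves through
+ * the "event" format above, making the symbolic and numeric spellings
+ * in the example at the top of this section equivalent.
+ */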
+static struct attribute *qcom_l3_cache_pmu_events[] = {
+ L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES),
+ L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT),
+ L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS),
+ L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D),
+ L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D),
+ L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT),
+ L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS),
+ NULL
+};
+
+static struct attribute_group qcom_l3_cache_pmu_events_group = {
+ .name = "events",
+ .attrs = qcom_l3_cache_pmu_events,
+};
+
+/* cpumask */
+
+static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL);
+
+static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = {
+ .attrs = qcom_l3_cache_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = {
+ &qcom_l3_cache_pmu_format_group,
+ &qcom_l3_cache_pmu_events_group,
+ &qcom_l3_cache_pmu_cpumask_attr_group,
+ NULL,
+};
+
+/*
+ * Probing functions and data.
+ */
+
+static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
+
+ /* If there is no CPU/PMU association yet, pick this CPU */
+ if (cpumask_empty(&l3pmu->cpumask))
+ cpumask_set_cpu(cpu, &l3pmu->cpumask);
+
+ return 0;
+}
+
+static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node);
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask))
+ return 0;
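+ /* We were the designated reader: move the events to another online CPU */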
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+ perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
+ cpumask_set_cpu(target, &l3pmu->cpumask);
+ return 0;
+}
+
+static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
+{
+ struct l3cache_pmu *l3pmu;
+ struct acpi_device *acpi_dev;
+ struct resource *memrc;
+ int ret;
+ char *name;
+
+ /* Initialize the PMU data structures */
+
+ acpi_dev = ACPI_COMPANION(&pdev->dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
+ acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id);
+ if (!l3pmu || !name)
+ return -ENOMEM;
+
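+ /*
+ * This is a system-wide PMU: perf_invalid_context means no per-task
+ * contexts are created, matching the event->cpu < 0 check in
+ * qcom_l3_cache__event_init().
+ */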
+ l3pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+
+ .pmu_enable = qcom_l3_cache__pmu_enable,
+ .pmu_disable = qcom_l3_cache__pmu_disable,
+ .event_init = qcom_l3_cache__event_init,
+ .add = qcom_l3_cache__event_add,
+ .del = qcom_l3_cache__event_del,
+ .start = qcom_l3_cache__event_start,
+ .stop = qcom_l3_cache__event_stop,
+ .read = qcom_l3_cache__event_read,
+
+ .attr_groups = qcom_l3_cache_pmu_attr_grps,
+ };
+
+ memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc);
+ if (IS_ERR(l3pmu->regs)) {
+ dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start);
+ return PTR_ERR(l3pmu->regs);
+ }
+
+ qcom_l3_cache__init(l3pmu);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0)
+ return ret ? ret : -ENODEV;
+
+ ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0,
+ name, l3pmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n",
+ &memrc->start);
+ return ret;
+ }
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug", ret);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&l3pmu->pmu, name, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);
+
+ return 0;
+}
+
+static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = {
+ { "QCOM8081", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match);
+
+static struct platform_driver qcom_l3_cache_pmu_driver = {
+ .driver = {
+ .name = "qcom-l3cache-pmu",
+ .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
+ },
+ .probe = qcom_l3_cache_pmu_probe,
+};
+
+static int __init register_qcom_l3_cache_pmu_driver(void)
+{
+ int ret;
+
+ /* Install a hook to update the reader CPU in case it goes offline */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ "perf/qcom/l3cache:online",
+ qcom_l3_cache_pmu_online_cpu,
+ qcom_l3_cache_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&qcom_l3_cache_pmu_driver);
+}
+device_initcall(register_qcom_l3_cache_pmu_driver);