author    Carl E. Love <cel@linux.vnet.ibm.com>  2012-08-03 03:02:17 +0000
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-09-07 11:10:54 +1000
commit    adbf115d66649b3a52e6875f93c9f131ba690cce (patch)
tree      710646c039a4449858f8c3e853bac22b04390647 /arch/powerpc/oprofile
parent    22d8ce887991f8f70a68fb68972f187ec79d44b7 (diff)
powerpc/oprofile: Fix marked events support on Power 7+
Starting with Power 7+, for marked events we need to check whether the SIAR register is valid, i.e. whether it contains the correct address of the instruction at the time the performance counter overflowed. The MMCRA register on Power 7+ contains a new bit that indicates the contents of the SIAR are valid. If the event is not marked, the sample is recorded regardless of the SIAR valid bit setting. For older processors there is no SIAR valid bit to check, so samples are always recorded. This is done by forcing the cntr_marked_events bit mask to zero; the code then always records the sample, since the bit mask says the event is not a marked event even if it really is.

Signed-off-by: Carl Love <cel@us.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
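As a standalone sketch of the sampling decision this patch implements (illustrative C, not part of the patch; keep_sample() and its parameters are hypothetical stand-ins for the state the kernel code maintains):

#include <stdbool.h>
#include <stdint.h>

/* A sample is kept unless the counter's event is marked AND the
 * SIAR contents are stale (SIAR valid bit clear in MMCRA). */
static bool keep_sample(uint32_t cntr_marked_events, int ctr, bool siar_valid)
{
	bool marked = cntr_marked_events & (1u << ctr);

	return !marked || siar_valid;
}

On pre-Power 7+ processors cntr_marked_events is forced to zero, so keep_sample() always returns true and behaviour is unchanged.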
Diffstat (limited to 'arch/powerpc/oprofile')
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 100
1 file changed, 99 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index caffffa5b8f2..315f9495e9b2 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -21,6 +21,13 @@
#include <asm/reg.h>
#define dbg(args...)
+#define OPROFILE_PM_PMCSEL_MSK 0xffULL
+#define OPROFILE_PM_UNIT_SHIFT 60
+#define OPROFILE_PM_UNIT_MSK 0xfULL
+#define OPROFILE_MAX_PMC_NUM 3
+#define OPROFILE_PMSEL_FIELD_WIDTH 8
+#define OPROFILE_UNIT_FIELD_WIDTH 4
+#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
static unsigned long reset_value[OP_MAX_COUNTER];
@@ -31,6 +38,61 @@ static int use_slot_nums;
static u32 mmcr0_val;
static u64 mmcr1_val;
static u64 mmcra_val;
+static u32 cntr_marked_events;
+
+static int power7_marked_instr_event(u64 mmcr1)
+{
+ u64 psel, unit;
+ int pmc, cntr_marked_events = 0;
+
+ /* Given the MMCR1 value, look at the field for each counter to
+ * determine if it is a marked event. Code based on the function
+ * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
+ */
+ for (pmc = 0; pmc < 4; pmc++) {
+ psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
+ << (OPROFILE_MAX_PMC_NUM - pmc)
+ * OPROFILE_PMSEL_FIELD_WIDTH);
+ psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
+ * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
+ unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
+ << (OPROFILE_PM_UNIT_SHIFT
+ - (pmc * OPROFILE_PMSEL_FIELD_WIDTH)));
+ unit = unit >> (OPROFILE_PM_UNIT_SHIFT
+ - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
+
+ switch (psel >> 4) {
+ case 2:
+ cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
+ break;
+ case 3:
+ if (psel == 0x3c) {
+ cntr_marked_events |= (pmc == 0) << pmc;
+ break;
+ }
+
+ if (psel == 0x3e) {
+ cntr_marked_events |= (pmc != 1) << pmc;
+ break;
+ }
+
+ cntr_marked_events |= 1 << pmc;
+ break;
+ case 4:
+ case 5:
+ cntr_marked_events |= (unit == 0xd) << pmc;
+ break;
+ case 6:
+ if (psel == 0x64)
+ cntr_marked_events |= (pmc >= 2) << pmc;
+ break;
+ case 8:
+ cntr_marked_events |= (unit == 0xd) << pmc;
+ break;
+ }
+ }
+ return cntr_marked_events;
+}
static int power4_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
@@ -47,6 +109,23 @@ static int power4_reg_setup(struct op_counter_config *ctr,
mmcr1_val = sys->mmcr1;
mmcra_val = sys->mmcra;
+ /* Power 7+ and newer architectures:
+ * Determine which counter events in the group (the group of events is
+ * specified by the bit settings in the MMCR1 register) are marked
+ * events for use in the interrupt handler. Do the calculation once,
+ * before OProfile starts. Starting with Power 7+ we only record the
+ * sample for marked events if the SIAR valid bit is set. For
+ * non-marked events the sample is always recorded.
+ */
+ if (pvr_version_is(PVR_POWER7p))
+ cntr_marked_events = power7_marked_instr_event(mmcr1_val);
+ else
+ cntr_marked_events = 0; /* For older processors, set the bit map
+ * to zero so the sample will always
+ * be recorded.
+ */
+
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
@@ -291,6 +370,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
int i;
unsigned int mmcr0;
unsigned long mmcra;
+ bool siar_valid = false;
mmcra = mfspr(SPRN_MMCRA);
@@ -300,11 +380,29 @@ static void power4_handle_interrupt(struct pt_regs *regs,
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
+ /* Check that the SIAR valid bit in MMCRA is set to 1. */
+ if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
+ siar_valid = true;
+
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
val = classic_ctr_read(i);
if (pmc_overflow(val)) {
if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
+ /* Power 7+ and newer architectures:
+ * If the event is a marked event, then only
+ * save the sample if the SIAR valid bit is
+ * set. If the event is not marked, then
+ * always save the sample.
+ * Note, the Sample enable bit in the MMCRA
+ * register must be set to 1 if the group
+ * contains a marked event.
+ */
+ if ((siar_valid &&
+ (cntr_marked_events & (1 << i)))
+ || !(cntr_marked_events & (1 << i)))
+ oprofile_add_ext_sample(pc, regs, i,
+ is_kernel);
+
classic_ctr_write(i, reset_value[i]);
} else {
classic_ctr_write(i, 0);
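
For readers tracing the bit arithmetic in power7_marked_instr_event(), a standalone sketch of the field extraction (illustrative only; it assumes the MMCR1 layout implied by the constants the patch defines, and the sample MMCR1 value is made up):

#include <stdint.h>
#include <stdio.h>

#define PMCSEL_MSK  0xffULL
#define UNIT_SHIFT  60
#define UNIT_MSK    0xfULL
#define MAX_PMC     3
#define PMSEL_WIDTH 8

int main(void)
{
	/* Made-up MMCR1 value: PMC1's selector byte set to 0x2e. */
	uint64_t mmcr1 = 0x2eULL << (MAX_PMC * PMSEL_WIDTH);
	int pmc;

	for (pmc = 0; pmc < 4; pmc++) {
		/* Selector bytes sit at shifts 24, 16, 8, 0 for PMC1..PMC4;
		 * bit 0 of each field (edge/level) is trimmed with ~1. */
		uint64_t psel = (mmcr1 >> ((MAX_PMC - pmc) * PMSEL_WIDTH))
				& PMCSEL_MSK & ~1ULL;
		uint64_t unit = (mmcr1 >> (UNIT_SHIFT - pmc * PMSEL_WIDTH))
				& UNIT_MSK;

		printf("pmc %d: psel=0x%02llx unit=0x%llx\n", pmc,
		       (unsigned long long)psel, (unsigned long long)unit);
	}
	return 0;
}

psel >> 4 then selects the switch case in the patch (0x2e >> 4 == 2), which decides whether that counter's event counts as marked.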