author    Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-16 21:45:16 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-16 21:45:16 +0100
commit    4722cd7741c6404f967f7a7b8b666540b6c1663e (patch)
tree      877b7d8efe1e4e4ce48416186b4f45da3a5fccac /arch
parent    1db3706b05b11abcf2673ffbed5ad43b4c90ed11 (diff)
parent    4fb0d2ea397ab207fdecbd88ad0e37b36ce68a62 (diff)
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6-wd into devel-stable
Conflicts:
	arch/arm/mach-imx/mach-cpuimx27.c
Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/Kconfig | 12
-rw-r--r-- arch/arm/boot/compressed/mmcif-sh7372.c | 2
-rw-r--r-- arch/arm/boot/compressed/sdhi-sh7372.c | 2
-rw-r--r-- arch/arm/include/asm/hardware/cache-l2x0.h | 2
-rw-r--r-- arch/arm/include/asm/hw_breakpoint.h | 2
-rw-r--r-- arch/arm/include/asm/pmu.h | 97
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 270
-rw-r--r-- arch/arm/kernel/perf_event.c | 475
-rw-r--r-- arch/arm/kernel/perf_event_v6.c | 87
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 395
-rw-r--r-- arch/arm/kernel/perf_event_xscale.c | 90
-rw-r--r-- arch/arm/kernel/pmu.c | 186
-rw-r--r-- arch/arm/kernel/relocate_kernel.S | 3
-rw-r--r-- arch/arm/kernel/setup.c | 15
-rw-r--r-- arch/arm/kernel/smp_twd.c | 4
-rw-r--r-- arch/arm/mach-at91/at91sam9261.c | 2
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/ts72xx.h | 26
-rw-r--r-- arch/arm/mach-exynos4/clock.c | 2
-rw-r--r-- arch/arm/mach-exynos4/cpu.c | 11
-rw-r--r-- arch/arm/mach-exynos4/include/mach/irqs.h | 5
-rw-r--r-- arch/arm/mach-exynos4/include/mach/regs-pmu.h | 2
-rw-r--r-- arch/arm/mach-exynos4/irq-eint.c | 7
-rw-r--r-- arch/arm/mach-exynos4/mach-universal_c210.c | 4
-rw-r--r-- arch/arm/mach-exynos4/setup-usb-phy.c | 2
-rw-r--r-- arch/arm/mach-footbridge/Kconfig | 1
-rw-r--r-- arch/arm/mach-footbridge/dc21285.c | 1
-rw-r--r-- arch/arm/mach-imx/mach-cpuimx27.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-cpuimx35.c | 2
-rw-r--r-- arch/arm/mach-imx/mach-eukrea_cpuimx25.c | 2
-rw-r--r-- arch/arm/mach-orion5x/dns323-setup.c | 2
-rw-r--r-- arch/arm/mach-orion5x/pci.c | 1
-rw-r--r-- arch/arm/mach-realview/include/mach/system.h | 1
-rw-r--r-- arch/arm/mach-s3c64xx/pm.c | 1
-rw-r--r-- arch/arm/mach-s5p64x0/irq-eint.c | 2
-rw-r--r-- arch/arm/mach-s5pv210/pm.c | 2
-rw-r--r-- arch/arm/mach-shmobile/board-ag5evm.c | 3
-rw-r--r-- arch/arm/mach-shmobile/board-mackerel.c | 4
-rw-r--r-- arch/arm/mach-shmobile/clock-sh7372.c | 29
-rw-r--r-- arch/arm/mach-shmobile/clock-sh73a0.c | 2
-rw-r--r-- arch/arm/mach-shmobile/include/mach/sh7372.h | 4
-rw-r--r-- arch/arm/mach-shmobile/intc-sh7372.c | 7
-rw-r--r-- arch/arm/mach-shmobile/setup-sh7372.c | 176
-rw-r--r-- arch/arm/mach-vexpress/v2m.c | 7
-rw-r--r-- arch/arm/mm/proc-arm920.S | 2
-rw-r--r-- arch/arm/mm/proc-arm926.S | 2
-rw-r--r-- arch/arm/mm/proc-sa1100.S | 10
-rw-r--r-- arch/arm/mm/proc-v6.S | 16
-rw-r--r-- arch/arm/mm/proc-v7.S | 6
-rw-r--r-- arch/arm/mm/proc-xsc3.S | 6
-rw-r--r-- arch/arm/plat-s5p/clock.c | 2
-rw-r--r-- arch/arm/plat-s5p/irq-gpioint.c | 6
-rw-r--r-- arch/arm/plat-samsung/include/plat/backlight.h | 2
-rw-r--r-- arch/arm/plat-samsung/irq-vic-timer.c | 5
-rw-r--r-- arch/arm/tools/mach-types | 6
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 2
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 2
-rw-r--r-- arch/sh/include/asm/ptrace.h | 2
-rw-r--r-- arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 1
-rw-r--r-- arch/sh/kernel/idle.c | 2
-rw-r--r-- arch/sh/kernel/traps_32.c | 37
-rw-r--r-- arch/sparc/kernel/irq.h | 2
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 2
62 files changed, 1190 insertions(+), 875 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 73c320ea172c..5a3a78633177 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1271,6 +1271,18 @@ config ARM_ERRATA_754327
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
+config ARM_ERRATA_364296
+ bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
+ depends on CPU_V6 && !SMP
+ help
+ This option enables the workaround for the 364296 ARM1136
+ r0p2 erratum (possible cache data corruption with
+ hit-under-miss enabled). It sets the undocumented bit 31 in
+ the auxiliary control register and the FI bit in the control
+ register, thus disabling hit-under-miss without putting the
+ processor into full low interrupt latency mode. ARM11MPCore
+ is not affected.
+
endmenu
source "arch/arm/common/Kconfig"
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index b6f61d9a5a1b..672ae95db5c3 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -82,7 +82,7 @@ asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
/* Disable clock to MMC hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3);
+ __raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
mmc_update_progress(MMC_PROGRESS_DONE);
}
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
index d403a8b24d7f..d279294f2381 100644
--- a/arch/arm/boot/compressed/sdhi-sh7372.c
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -85,7 +85,7 @@ asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
goto err;
/* Disable clock to SDHI1 hardware block */
- __raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+ __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3);
mmc_update_progress(MMC_PROGRESS_DONE);
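[Both loader fixes above correct the same bug: SMSTPCR3 is a module-stop
register in which a *set* bit gates the clock, so stopping the block
needs the bit OR'd in. The old AND kept only the target bit and zeroed
every other module-stop bit, so the clock was never reliably disabled
and unrelated modules were spuriously ungated. Minimal sketch, assuming
SMSTPCR3 is mapped as in these loaders:]

	/* Set only the target module-stop bit, preserving the rest. */
	unsigned long v = __raw_readl(SMSTPCR3);
	__raw_writel(v | (1 << 12), SMSTPCR3);	/* gate the MMCIF clock */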
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 16bd48031583..bfa706ffd968 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -64,7 +64,7 @@
#define L2X0_AUX_CTRL_MASK 0xc0000fff
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
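[The way-size field of the L2x0 auxiliary control register occupies bits
[19:17], i.e. three bits; the old two-bit mask dropped the top bit and
misreported the larger way sizes. A hypothetical helper, not part of
this patch, showing the extraction with the corrected mask:]

	static inline unsigned int l2x0_way_size_field(u32 aux)
	{
		/* 3-bit field at [19:17]; the old 0x3 << 17 lost bit 19 */
		return (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >>
			L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	}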
diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h
index f389b2704d82..c190bc992f0e 100644
--- a/arch/arm/include/asm/hw_breakpoint.h
+++ b/arch/arm/include/asm/hw_breakpoint.h
@@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg,
#define ARM_DEBUG_ARCH_V6_1 2
#define ARM_DEBUG_ARCH_V7_ECP14 3
#define ARM_DEBUG_ARCH_V7_MM 4
+#define ARM_DEBUG_ARCH_V7_1 5
/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE 0
@@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg,
/* Watchpoints */
#define ARM_BREAKPOINT_LOAD 1
#define ARM_BREAKPOINT_STORE 2
+#define ARM_FSR_ACCESS_MASK (1 << 11)
/* Privilege Levels */
#define ARM_BREAKPOINT_PRIV 1
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 67c70a31a1be..71d99b83cdb9 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,12 @@
#define __ARM_PMU_H__
#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
enum arm_pmu_type {
ARM_PMU_DEVICE_CPU = 0,
ARM_NUM_PMU_DEVICES,
@@ -37,21 +42,17 @@ struct arm_pmu_platdata {
* reserve_pmu() - reserve the hardware performance counters
*
* Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
*/
-extern struct platform_device *
-reserve_pmu(enum arm_pmu_type device);
+extern int
+reserve_pmu(enum arm_pmu_type type);
/**
* release_pmu() - Relinquish control of the performance counters
*
* Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
*/
-extern int
+extern void
release_pmu(enum arm_pmu_type type);
/**
@@ -62,30 +63,84 @@ release_pmu(enum arm_pmu_type type);
* the actual hardware initialisation.
*/
extern int
-init_pmu(enum arm_pmu_type device);
+init_pmu(enum arm_pmu_type type);
#else /* CONFIG_CPU_HAS_PMU */
#include <linux/err.h>
-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline int
-release_pmu(struct platform_device *pdev)
+reserve_pmu(enum arm_pmu_type type)
{
return -ENODEV;
}
-static inline int
-init_pmu(enum arm_pmu_type device)
-{
- return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type) { }
#endif /* CONFIG_CPU_HAS_PMU */
+#ifdef CONFIG_HW_PERF_EVENTS
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+ /*
+ * The events that are active on the PMU for the given index.
+ */
+ struct perf_event **events;
+
+ /*
+ * A 1 bit for an index indicates that the counter is being used for
+ * an event. A 0 means that the counter can be used.
+ */
+ unsigned long *used_mask;
+
+ /*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+ raw_spinlock_t pmu_lock;
+};
+
+struct arm_pmu {
+ struct pmu pmu;
+ enum arm_perf_pmu_ids id;
+ enum arm_pmu_type type;
+ cpumask_t active_irqs;
+ const char *name;
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ void (*enable)(struct hw_perf_event *evt, int idx);
+ void (*disable)(struct hw_perf_event *evt, int idx);
+ int (*get_event_idx)(struct pmu_hw_events *hw_events,
+ struct hw_perf_event *hwc);
+ int (*set_event_filter)(struct hw_perf_event *evt,
+ struct perf_event_attr *attr);
+ u32 (*read_counter)(int idx);
+ void (*write_counter)(int idx, u32 val);
+ void (*start)(void);
+ void (*stop)(void);
+ void (*reset)(void *);
+ int (*map_event)(struct perf_event *event);
+ int num_events;
+ atomic_t active_events;
+ struct mutex reserve_mutex;
+ u64 max_period;
+ struct platform_device *plat_device;
+ struct pmu_hw_events *(*get_hw_events)(void);
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+u64 armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx, int overflow);
+
+int armpmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx);
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
#endif /* __ARM_PMU_H__ */
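[With this rework each backend owns a struct arm_pmu instance (per-PMU
state replaces the old file-scope globals in perf_event.c) and hands it
to armpmu_register(). A hedged sketch of the wiring; every my_* op below
is a hypothetical placeholder, the real backends appear later in this
diff:]

	static struct arm_pmu my_pmu = {
		.name		= "mypmu",
		.handle_irq	= my_handle_irq,
		.enable		= my_enable_event,
		.disable	= my_disable_event,
		.get_event_idx	= my_get_event_idx,
		.read_counter	= my_read_counter,
		.write_counter	= my_write_counter,
		.start		= my_start,
		.stop		= my_stop,
		.map_event	= my_map_event,
		.num_events	= 3,
		.max_period	= (1LLU << 32) - 1,
	};

	/* armpmu_register() initialises active_events and reserve_mutex,
	 * fills in the struct pmu callbacks, then calls
	 * perf_pmu_register(). */
	armpmu_register(&my_pmu, "mypmu", PERF_TYPE_RAW);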
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index a927ca1f5566..5a46225f007e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
-static int core_num_reserved_brps;
static int core_num_wrps;
/* Debug architecture version. */
@@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
u32 didr;
/* Do we implement the extended CPUID interface? */
- if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
- "CPUID feature registers not supported. "
- "Assuming v6 debug is present.\n"))
+ if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+ pr_warning("CPUID feature registers not supported. "
+ "Assuming v6 debug is present.\n");
return ARM_DEBUG_ARCH_V6;
+ }
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
@@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
static int debug_arch_supported(void)
{
u8 arch = get_debug_arch();
- return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+
+ /* We don't support the memory-mapped interface. */
+ return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+ arch >= ARM_DEBUG_ARCH_V7_1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+ u32 didr;
+ ARM_DBG_READ(c0, 0, didr);
+ return ((didr >> 28) & 0xf) + 1;
}
-/* Determine number of BRP register available. */
+/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
u32 didr;
@@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
static int get_num_wrps(void)
{
/*
- * FIXME: When a watchpoint fires, the only way to work out which
- * watchpoint it was is by disassembling the faulting instruction
- * and working out the address of the memory access.
+ * On debug architectures prior to 7.1, when a watchpoint fires, the
+ * only way to work out which watchpoint it was is by disassembling
+ * the faulting instruction and working out the address of the memory
+ * access.
*
* Furthermore, we can only do this if the watchpoint was precise
* since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
* [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
* that it is set on some implementations].
*/
+ if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+ return 1;
-#if 0
- int wrps;
- u32 didr;
- ARM_DBG_READ(c0, 0, didr);
- wrps = ((didr >> 28) & 0xf) + 1;
-#endif
- int wrps = 1;
-
- if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
- wrps = get_num_brp_resources() - 1;
-
- return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
- if (core_has_mismatch_brps())
- return get_num_wrps();
- return 0;
+ return get_num_wrp_resources();
}
/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
int brps = get_num_brp_resources();
- if (core_has_mismatch_brps())
- brps -= get_num_reserved_brps();
- return brps;
+ return core_has_mismatch_brps() ? brps - 1 : brps;
}
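[Worked example of the BRP/WRP accounting, as standalone C with a
hypothetical DIDR value: BRPs are DIDR[27:24] + 1, WRPs are
DIDR[31:28] + 1, and a mismatch-capable core keeps one BRP back for the
single-step trick.]

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t didr = 0x35141003;		/* hypothetical */
		int raw_brps = ((didr >> 24) & 0xf) + 1;	/* 6 */
		int raw_wrps = ((didr >> 28) & 0xf) + 1;	/* 4 */

		assert(raw_brps - 1 == 5);	/* usable BRPs with mismatch */
		assert(raw_wrps == 4);
		return 0;
	}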
/*
@@ -239,7 +232,7 @@ static int enable_monitor_mode(void)
/* Ensure that halting mode is disabled. */
if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
- "halting debug mode enabled. Unable to access hardware resources.\n")) {
+ "halting debug mode enabled. Unable to access hardware resources.\n")) {
ret = -EPERM;
goto out;
}
@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
break;
case ARM_DEBUG_ARCH_V7_ECP14:
+ case ARM_DEBUG_ARCH_V7_1:
ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
break;
default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
val_base = ARM_BASE_BVR;
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
max_slots = core_num_brps;
- if (info->step_ctrl.enabled) {
- /* Override the breakpoint data with the step data. */
- addr = info->trigger & ~0x3;
- ctrl = encode_ctrl_reg(info->step_ctrl);
- }
} else {
/* Watchpoint */
- if (info->step_ctrl.enabled) {
- /* Install into the reserved breakpoint region. */
- ctrl_base = ARM_BASE_BCR + core_num_brps;
- val_base = ARM_BASE_BVR + core_num_brps;
- /* Override the watchpoint data with the step data. */
- addr = info->trigger & ~0x3;
- ctrl = encode_ctrl_reg(info->step_ctrl);
- } else {
- ctrl_base = ARM_BASE_WCR;
- val_base = ARM_BASE_WVR;
- }
+ ctrl_base = ARM_BASE_WCR;
+ val_base = ARM_BASE_WVR;
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
goto out;
}
+ /* Override the breakpoint data with the step data. */
+ if (info->step_ctrl.enabled) {
+ addr = info->trigger & ~0x3;
+ ctrl = encode_ctrl_reg(info->step_ctrl);
+ if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+ i = 0;
+ ctrl_base = ARM_BASE_BCR + core_num_brps;
+ val_base = ARM_BASE_BVR + core_num_brps;
+ }
+ }
+
/* Setup the address register. */
write_wb_reg(val_base + i, addr);
@@ -405,10 +396,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
max_slots = core_num_brps;
} else {
/* Watchpoint */
- if (info->step_ctrl.enabled)
- base = ARM_BASE_BCR + core_num_brps;
- else
- base = ARM_BASE_WCR;
+ base = ARM_BASE_WCR;
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
return;
+ /* Ensure that we disable the mismatch breakpoint. */
+ if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+ info->step_ctrl.enabled) {
+ i = 0;
+ base = ARM_BASE_BCR + core_num_brps;
+ }
+
/* Reset the control register. */
write_wb_reg(base + i, 0);
}
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* we can use the mismatch feature as a poor-man's hardware
* single-step, but this only works for per-task breakpoints.
*/
- if (WARN_ONCE(!bp->overflow_handler &&
- (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
- || !bp->hw.bp_target),
- "overflow handler required but none found\n")) {
+ if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+ !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+ pr_warning("overflow handler required but none found\n");
ret = -EINVAL;
}
out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
{
- int i;
+ int i, access;
+ u32 val, ctrl_reg, alignment_mask;
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
+ struct arch_hw_breakpoint_ctrl ctrl;
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
- /* Without a disassembler, we can only handle 1 watchpoint. */
- BUG_ON(core_num_wrps > 1);
-
for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
wp = slots[i];
- if (wp == NULL) {
- rcu_read_unlock();
- continue;
- }
+ if (wp == NULL)
+ goto unlock;
+ info = counter_arch_bp(wp);
/*
- * The DFAR is an unknown value. Since we only allow a
- * single watchpoint, we can set the trigger to the lowest
- * possible faulting address.
+ * The DFAR is an unknown value on debug architectures prior
+ * to 7.1. Since we only allow a single watchpoint on these
+ * older CPUs, we can set the trigger to the lowest possible
+ * faulting address.
*/
- info = counter_arch_bp(wp);
- info->trigger = wp->attr.bp_addr;
+ if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+ BUG_ON(i > 0);
+ info->trigger = wp->attr.bp_addr;
+ } else {
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ alignment_mask = 0x7;
+ else
+ alignment_mask = 0x3;
+
+ /* Check if the watchpoint value matches. */
+ val = read_wb_reg(ARM_BASE_WVR + i);
+ if (val != (addr & ~alignment_mask))
+ goto unlock;
+
+ /* Possible match, check the byte address select. */
+ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ if (!((1 << (addr & alignment_mask)) & ctrl.len))
+ goto unlock;
+
+ /* Check that the access type matches. */
+ access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+ HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ goto unlock;
+
+ /* We have a winner. */
+ info->trigger = addr;
+ }
+
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs);
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
if (!wp->overflow_handler)
enable_single_step(wp, instruction_pointer(regs));
+unlock:
rcu_read_unlock();
}
}
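[On a v7.1 debug implementation the DFAR is architecturally valid for
watchpoint exceptions and FSR bit 11 (ARM_FSR_ACCESS_MASK) reports
whether the faulting access was a store, so the handler above can
attribute the hit to one specific watchpoint by matching the address,
the byte-address-select bits and the access type. A standalone check of
that match arithmetic, plain userspace C with hypothetical values:]

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t wvr  = 0x1000;	/* watchpoint value register */
		uint32_t bas  = 0xf;	/* byte address select: 4 bytes */
		uint32_t addr = 0x1002;	/* faulting address from DFAR */
		uint32_t mask = 0x3;	/* alignment, 4-byte watchpoint */

		assert(wvr == (addr & ~mask));		/* word matches */
		assert((1u << (addr & mask)) & bas);	/* byte selected */
		return 0;
	}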
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
- for (i = 0; i < core_num_reserved_brps; ++i) {
+ for (i = 0; i < core_num_wrps; ++i) {
rcu_read_lock();
wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
case ARM_ENTRY_SYNC_WATCHPOINT:
- watchpoint_handler(addr, regs);
+ watchpoint_handler(addr, fsr, regs);
break;
default:
ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
/*
* One-time initialisation.
*/
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
{
- int i, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
+
+ pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+ instr, cpu);
+
+ /* Set the error flag for this CPU and skip the faulting instruction. */
+ cpumask_set_cpu(cpu, &debug_err_mask);
+ instruction_pointer(regs) += 4;
+ return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+ .instr_mask = 0x0fe80f10,
+ .instr_val = 0x0e000e10,
+ .fn = debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+ int i, raw_num_brps, err = 0, cpu = smp_processor_id();
u32 dbg_power;
- cpumask_t *cpumask = info;
/*
* v7 debug contains save and restore registers so that debug state
@@ -848,38 +891,52 @@ static void reset_ctrl_regs(void *info)
* Access Register to avoid taking undefined instruction exceptions
* later on.
*/
- if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+ switch (debug_arch) {
+ case ARM_DEBUG_ARCH_V7_ECP14:
/*
* Ensure sticky power-down is clear (i.e. debug logic is
* powered up).
*/
asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
- if ((dbg_power & 0x1) == 0) {
- pr_warning("CPU %d debug is powered down!\n", cpu);
- cpumask_or(cpumask, cpumask, cpumask_of(cpu));
- return;
- }
-
+ if ((dbg_power & 0x1) == 0)
+ err = -EPERM;
+ break;
+ case ARM_DEBUG_ARCH_V7_1:
/*
- * Unconditionally clear the lock by writing a value
- * other than 0xC5ACCE55 to the access register.
+ * Ensure the OS double lock is clear.
*/
- asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
- isb();
+ asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+ if ((dbg_power & 0x1) == 1)
+ err = -EPERM;
+ break;
+ }
- /*
- * Clear any configured vector-catch events before
- * enabling monitor mode.
- */
- asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
- isb();
+ if (err) {
+ pr_warning("CPU %d debug is powered down!\n", cpu);
+ cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
+ return;
}
+ /*
+ * Unconditionally clear the lock by writing a value
+ * other than 0xC5ACCE55 to the access register.
+ */
+ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+ isb();
+
+ /*
+ * Clear any configured vector-catch events before
+ * enabling monitor mode.
+ */
+ asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+ isb();
+
if (enable_monitor_mode())
return;
/* We must also reset any reserved registers. */
- for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+ raw_num_brps = get_num_brp_resources();
+ for (i = 0; i < raw_num_brps; ++i) {
write_wb_reg(ARM_BASE_BCR + i, 0UL);
write_wb_reg(ARM_BASE_BVR + i, 0UL);
}
@@ -895,6 +952,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
{
if (action == CPU_ONLINE)
smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
return NOTIFY_OK;
}
@@ -905,7 +963,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
- cpumask_t cpumask = { CPU_BITS_NONE };
debug_arch = get_debug_arch();
@@ -916,28 +973,31 @@ static int __init arch_hw_breakpoint_init(void)
/* Determine how many BRPs/WRPs are available. */
core_num_brps = get_num_brps();
- core_num_reserved_brps = get_num_reserved_brps();
core_num_wrps = get_num_wrps();
- pr_info("found %d breakpoint and %d watchpoint registers.\n",
- core_num_brps + core_num_reserved_brps, core_num_wrps);
-
- if (core_num_reserved_brps)
- pr_info("%d breakpoint(s) reserved for watchpoint "
- "single-step.\n", core_num_reserved_brps);
+ /*
+ * We need to tread carefully here because DBGSWENABLE may be
+ * driven low on this core and there isn't an architected way to
+ * determine that.
+ */
+ register_undef_hook(&debug_reg_hook);
/*
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- on_each_cpu(reset_ctrl_regs, &cpumask, 1);
- if (!cpumask_empty(&cpumask)) {
+ on_each_cpu(reset_ctrl_regs, NULL, 1);
+ unregister_undef_hook(&debug_reg_hook);
+ if (!cpumask_empty(&debug_err_mask)) {
core_num_brps = 0;
- core_num_reserved_brps = 0;
core_num_wrps = 0;
return 0;
}
+ pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+ core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+ "", core_num_wrps);
+
ARM_DBG_READ(c1, 0, dscr);
if (dscr & ARM_DSCR_HDBGEN) {
max_watchpoint_len = 4;
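[The undef-hook bracketing above exists because DBGSWENABLE may be tied
low and there is no architected way to query that: the first CP14 access
would then trap as an undefined instruction. The hook records the
failing CPU in debug_err_mask and steps past the instruction, so the
kernel degrades to zero breakpoint slots instead of dying at boot. The
mask/value pair matches CP14 register transfers; a standalone check with
a hypothetical instruction word (mcr p14, 0, r3, c0, c5, 0):]

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t instr = 0xee003e15;	/* hypothetical CP14 mcr */
		assert((instr & 0x0fe80f10) == 0x0e000e10);
		return 0;
	}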
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c9c2610cbc..e6e5d7c84f1a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -26,16 +27,8 @@
#include <asm/pmu.h>
#include <asm/stacktrace.h>
-static struct platform_device *pmu_device;
-
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
/*
- * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
* another platform that supports more, we need to increase this to be the
* largest of all platforms.
*
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
* cycle counter CCNT + 31 events counters CNT0..30.
* Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
*/
-#define ARMPMU_MAX_HWEVENTS 33
+#define ARMPMU_MAX_HWEVENTS 32
-/* The events for a given CPU. */
-struct cpu_hw_events {
- /*
- * The events that are active on the CPU for the given index. Index 0
- * is reserved.
- */
- struct perf_event *events[ARMPMU_MAX_HWEVENTS];
-
- /*
- * A 1 bit for an index indicates that the counter is being used for
- * an event. A 0 means that the counter can be used.
- */
- unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
- /*
- * A 1 bit for an index indicates that the counter is actively being
- * used.
- */
- unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
-
-struct arm_pmu {
- enum arm_perf_pmu_ids id;
- const char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct hw_perf_event *evt, int idx);
- void (*disable)(struct hw_perf_event *evt, int idx);
- int (*get_event_idx)(struct cpu_hw_events *cpuc,
- struct hw_perf_event *hwc);
- u32 (*read_counter)(int idx);
- void (*write_counter)(int idx, u32 val);
- void (*start)(void);
- void (*stop)(void);
- void (*reset)(void *);
- const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX];
- const unsigned (*event_map)[PERF_COUNT_HW_MAX];
- u32 raw_event_mask;
- int num_events;
- u64 max_period;
-};
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
/* Set at runtime when we know what CPU type we are. */
-static const struct arm_pmu *armpmu;
+static struct arm_pmu *cpu_pmu;
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
int id = -ENODEV;
- if (armpmu != NULL)
- id = armpmu->id;
+ if (cpu_pmu != NULL)
+ id = cpu_pmu->id;
return id;
}
@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
{
int max_events = 0;
- if (armpmu != NULL)
- max_events = armpmu->num_events;
+ if (cpu_pmu != NULL)
+ max_events = cpu_pmu->num_events;
return max_events;
}
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#define CACHE_OP_UNSUPPORTED 0xFFFF
static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u64 config)
{
unsigned int cache_type, cache_op, cache_result, ret;
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
- ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+ ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
}
static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*armpmu->event_map)[config];
- return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+ int mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
- return (int)(config & armpmu->raw_event_mask);
+ return (int)(config & raw_event_mask);
}
-static int
+static int map_cpu_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask)
+{
+ u64 config = event->attr.config;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ return armpmu_map_event(event_map, config);
+ case PERF_TYPE_HW_CACHE:
+ return armpmu_map_cache_event(cache_map, config);
+ case PERF_TYPE_RAW:
+ return armpmu_map_raw_event(raw_event_mask, config);
+ }
+
+ return -ENOENT;
+}
+
+int
armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
return ret;
}
-static u64
+u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx, int overflow)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
again:
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
static void
armpmu_stop(struct perf_event *event, int flags)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!armpmu)
- return;
-
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
static void
armpmu_start(struct perf_event *event, int flags)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!armpmu)
- return;
-
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
static void
armpmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0);
- clear_bit(idx, cpuc->active_mask);
armpmu_stop(event, PERF_EF_UPDATE);
- cpuc->events[idx] = NULL;
- clear_bit(idx, cpuc->used_mask);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
perf_event_update_userpage(event);
}
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
static int
armpmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
- idx = armpmu->get_event_idx(cpuc, hwc);
+ idx = armpmu->get_event_idx(hw_events, hwc);
if (idx < 0) {
err = idx;
goto out;
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
*/
event->hw.idx = idx;
armpmu->disable(hwc, idx);
- cpuc->events[idx] = event;
- set_bit(idx, cpuc->active_mask);
+ hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
@@ -345,25 +324,25 @@ out:
return err;
}
-static struct pmu pmu;
-
static int
-validate_event(struct cpu_hw_events *cpuc,
+validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
- return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}
static int
validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
- struct cpu_hw_events fake_pmu;
+ struct pmu_hw_events fake_pmu;
memset(&fake_pmu, 0, sizeof(fake_pmu));
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
- struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);
+ struct arm_pmu *armpmu = (struct arm_pmu *) dev;
+ struct platform_device *plat_device = armpmu->plat_device;
+ struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
return plat->handle_irq(irq, dev, armpmu->handle_irq);
}
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+ int i, irq, irqs;
+ struct platform_device *pmu_device = armpmu->plat_device;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, armpmu);
+ }
+
+ release_pmu(armpmu->type);
+}
+
static int
-armpmu_reserve_hardware(void)
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
struct arm_pmu_platdata *plat;
irq_handler_t handle_irq;
- int i, err = -ENODEV, irq;
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = armpmu->plat_device;
- pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
- if (IS_ERR(pmu_device)) {
+ err = reserve_pmu(armpmu->type);
+ if (err) {
pr_warning("unable to reserve pmu\n");
- return PTR_ERR(pmu_device);
+ return err;
}
- init_pmu(ARM_PMU_DEVICE_CPU);
-
plat = dev_get_platdata(&pmu_device->dev);
if (plat && plat->handle_irq)
handle_irq = armpmu_platform_irq;
else
handle_irq = armpmu->handle_irq;
- if (pmu_device->num_resources < 1) {
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < pmu_device->num_resources; ++i) {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
err = request_irq(irq, handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
- "armpmu", NULL);
+ "arm-pmu", armpmu);
if (err) {
- pr_warning("unable to request IRQ%d for ARM perf "
- "counters\n", irq);
- break;
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
}
- }
- if (err) {
- for (i = i - 1; i >= 0; --i) {
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, NULL);
- }
- release_pmu(ARM_PMU_DEVICE_CPU);
- pmu_device = NULL;
+ cpumask_set_cpu(i, &armpmu->active_irqs);
}
- return err;
+ return 0;
}
static void
-armpmu_release_hardware(void)
+hw_perf_event_destroy(struct perf_event *event)
{
- int i, irq;
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ atomic_t *active_events = &armpmu->active_events;
+ struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
- for (i = pmu_device->num_resources - 1; i >= 0; --i) {
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, NULL);
+ if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+ armpmu_release_hardware(armpmu);
+ mutex_unlock(pmu_reserve_mutex);
}
- armpmu->stop();
-
- release_pmu(ARM_PMU_DEVICE_CPU);
- pmu_device = NULL;
}
-static atomic_t active_events = ATOMIC_INIT(0);
-static DEFINE_MUTEX(pmu_reserve_mutex);
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
{
- if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
- armpmu_release_hardware();
- mutex_unlock(&pmu_reserve_mutex);
- }
+ return attr->exclude_idle || attr->exclude_user ||
+ attr->exclude_kernel || attr->exclude_hv;
}
static int
__hw_perf_event_init(struct perf_event *event)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int mapping, err;
- /* Decode the generic type into an ARM event identifier. */
- if (PERF_TYPE_HARDWARE == event->attr.type) {
- mapping = armpmu_map_event(event->attr.config);
- } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
- mapping = armpmu_map_cache_event(event->attr.config);
- } else if (PERF_TYPE_RAW == event->attr.type) {
- mapping = armpmu_map_raw_event(event->attr.config);
- } else {
- pr_debug("event type %x not supported\n", event->attr.type);
- return -EOPNOTSUPP;
- }
+ mapping = armpmu->map_event(event);
if (mapping < 0) {
pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -495,34 +483,31 @@ __hw_perf_event_init(struct perf_event *event)
}
/*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet. For SMP systems, each core has its own PMU so we can't do any
+ * clever allocation or constraints checking at this point.
+ */
+ hwc->idx = -1;
+ hwc->config_base = 0;
+ hwc->config = 0;
+ hwc->event_base = 0;
+
+ /*
* Check whether we need to exclude the counter from certain modes.
- * The ARM performance counters are on all of the time so if someone
- * has asked us for some excludes then we have to fail.
*/
- if (event->attr.exclude_kernel || event->attr.exclude_user ||
- event->attr.exclude_hv || event->attr.exclude_idle) {
+ if ((!armpmu->set_event_filter ||
+ armpmu->set_event_filter(hwc, &event->attr)) &&
+ event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EPERM;
}
/*
- * We don't assign an index until we actually place the event onto
- * hardware. Use -1 to signify that we haven't decided where to put it
- * yet. For SMP systems, each core has it's own PMU so we can't do any
- * clever allocation or constraints checking at this point.
+ * Store the event encoding into the config_base field.
*/
- hwc->idx = -1;
-
- /*
- * Store the event encoding into the config_base field. config and
- * event_base are unused as the only 2 things we need to know are
- * the event mapping and the counter to use. The counter to use is
- * also the indx and the config_base is the event type.
- */
- hwc->config_base = (unsigned long)mapping;
- hwc->config = 0;
- hwc->event_base = 0;
+ hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
hwc->sample_period = armpmu->max_period;
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
static int armpmu_event_init(struct perf_event *event)
{
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
int err = 0;
+ atomic_t *active_events = &armpmu->active_events;
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
+ if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
- }
-
- if (!armpmu)
- return -ENODEV;
event->destroy = hw_perf_event_destroy;
- if (!atomic_inc_not_zero(&active_events)) {
- mutex_lock(&pmu_reserve_mutex);
- if (atomic_read(&active_events) == 0) {
- err = armpmu_reserve_hardware();
- }
+ if (!atomic_inc_not_zero(active_events)) {
+ mutex_lock(&armpmu->reserve_mutex);
+ if (atomic_read(active_events) == 0)
+ err = armpmu_reserve_hardware(armpmu);
if (!err)
- atomic_inc(&active_events);
- mutex_unlock(&pmu_reserve_mutex);
+ atomic_inc(active_events);
+ mutex_unlock(&armpmu->reserve_mutex);
}
if (err)
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
static void armpmu_enable(struct pmu *pmu)
{
- /* Enable all of the perf events on hardware. */
- int idx, enabled = 0;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
- if (!armpmu)
- return;
-
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
-
- if (!event)
- continue;
-
- armpmu->enable(&event->hw, idx);
- enabled = 1;
- }
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+ struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
armpmu->start();
@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu)
static void armpmu_disable(struct pmu *pmu)
{
- if (armpmu)
- armpmu->stop();
+ struct arm_pmu *armpmu = to_arm_pmu(pmu);
+ armpmu->stop();
}
-static struct pmu pmu = {
- .pmu_enable = armpmu_enable,
- .pmu_disable = armpmu_disable,
- .event_init = armpmu_event_init,
- .add = armpmu_add,
- .del = armpmu_del,
- .start = armpmu_start,
- .stop = armpmu_stop,
- .read = armpmu_read,
-};
+static void __init armpmu_init(struct arm_pmu *armpmu)
+{
+ atomic_set(&armpmu->active_events, 0);
+ mutex_init(&armpmu->reserve_mutex);
+
+ armpmu->pmu = (struct pmu) {
+ .pmu_enable = armpmu_enable,
+ .pmu_disable = armpmu_disable,
+ .event_init = armpmu_event_init,
+ .add = armpmu_add,
+ .del = armpmu_del,
+ .start = armpmu_start,
+ .stop = armpmu_stop,
+ .read = armpmu_read,
+ };
+}
+
+int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+{
+ armpmu_init(armpmu);
+ return perf_pmu_register(&armpmu->pmu, name, type);
+}
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
@@ -630,14 +605,72 @@ static struct pmu pmu = {
* This requires SMP to be available, so exists as a separate initcall.
*/
static int __init
-armpmu_reset(void)
+cpu_pmu_reset(void)
+{
+ if (cpu_pmu && cpu_pmu->reset)
+ return on_each_cpu(cpu_pmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(cpu_pmu_reset);
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+ {.compatible = "arm,cortex-a9-pmu"},
+ {.compatible = "arm,cortex-a8-pmu"},
+ {.compatible = "arm,arm1136-pmu"},
+ {.compatible = "arm,arm1176-pmu"},
+ {},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+ {.name = "arm-pmu"},
+ {},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
- if (armpmu && armpmu->reset)
- return on_each_cpu(armpmu->reset, NULL, 1);
+ cpu_pmu->plat_device = pdev;
return 0;
}
-arch_initcall(armpmu_reset);
+static struct platform_driver armpmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ .of_match_table = armpmu_of_device_ids,
+ },
+ .probe = armpmu_device_probe,
+ .id_table = armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
+{
+ return &__get_cpu_var(cpu_hw_events);
+}
+
+static void __init cpu_pmu_init(struct arm_pmu *armpmu)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ events->events = per_cpu(hw_events, cpu);
+ events->used_mask = per_cpu(used_mask, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ }
+ armpmu->get_hw_events = armpmu_get_cpu_events;
+ armpmu->type = ARM_PMU_DEVICE_CPU;
+}
+
+/*
+ * CPU PMU identification and registration.
+ */
static int __init
init_hw_perf_events(void)
{
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
case 0xB360: /* ARM1136 */
case 0xB560: /* ARM1156 */
case 0xB760: /* ARM1176 */
- armpmu = armv6pmu_init();
+ cpu_pmu = armv6pmu_init();
break;
case 0xB020: /* ARM11mpcore */
- armpmu = armv6mpcore_pmu_init();
+ cpu_pmu = armv6mpcore_pmu_init();
break;
case 0xC080: /* Cortex-A8 */
- armpmu = armv7_a8_pmu_init();
+ cpu_pmu = armv7_a8_pmu_init();
break;
case 0xC090: /* Cortex-A9 */
- armpmu = armv7_a9_pmu_init();
+ cpu_pmu = armv7_a9_pmu_init();
break;
case 0xC050: /* Cortex-A5 */
- armpmu = armv7_a5_pmu_init();
+ cpu_pmu = armv7_a5_pmu_init();
break;
case 0xC0F0: /* Cortex-A15 */
- armpmu = armv7_a15_pmu_init();
+ cpu_pmu = armv7_a15_pmu_init();
break;
}
/* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
part_number = (cpuid >> 13) & 0x7;
switch (part_number) {
case 1:
- armpmu = xscale1pmu_init();
+ cpu_pmu = xscale1pmu_init();
break;
case 2:
- armpmu = xscale2pmu_init();
+ cpu_pmu = xscale2pmu_init();
break;
}
}
- if (armpmu) {
+ if (cpu_pmu) {
pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
+ cpu_pmu->name, cpu_pmu->num_events);
+ cpu_pmu_init(cpu_pmu);
+ armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
}
- perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-
return 0;
}
early_initcall(init_hw_perf_events);
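[Note the initcall ordering this rework relies on: early_initcall()
probes the CPUID and registers the CPU PMU, arch_initcall() resets the
counters on every core, and device_initcall() registers the platform
driver that later binds the "arm-pmu" platform device carrying the IRQ
resources. A standalone illustration of the CPUID decode used above,
with a hypothetical Cortex-A9 MIDR value standing in for
read_cpuid_id():]

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cpuid = 0x412fc090;		/* Cortex-A9 r2p0 */
		uint32_t implementor = cpuid >> 24;	/* 0x41: ARM */
		uint32_t part = cpuid & 0xfff0;		/* 0xc090: A9 */

		printf("implementor %#x, part %#x\n", implementor, part);
		return 0;
	}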
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index dd7f3b9f4cb3..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types {
};
enum armv6_counters {
- ARMV6_CYCLE_COUNTER = 1,
+ ARMV6_CYCLE_COUNTER = 0,
ARMV6_COUNTER0,
ARMV6_COUNTER1,
};
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+ unsigned long mask = 0;
+ if (idx == ARMV6_CYCLE_COUNTER)
+ mask = ARMV6_PMCR_CCOUNT_IEN;
+ else if (idx == ARMV6_COUNTER0)
+ mask = ARMV6_PMCR_COUNT0_IEN;
+ else if (idx == ARMV6_COUNTER1)
+ mask = ARMV6_PMCR_COUNT1_IEN;
+
+ if (mask)
+ return pmcr & mask;
+
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return 0;
}
static irqreturn_t
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
+ if (!counter_is_active(pmcr, idx))
continue;
/*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
/*
@@ -527,28 +545,30 @@ static void
armv6pmu_start(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
armv6pmu_stop(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
/* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, flags, evt = 0;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv6_perf_map,
+ &armv6_perf_cache_map, 0xFF);
}
-static const struct arm_pmu armv6pmu = {
+static struct arm_pmu armv6pmu = {
.id = ARM_PERF_PMU_ID_V6,
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
- .cache_map = &armv6_perf_cache_map,
- .event_map = &armv6_perf_map,
- .raw_event_mask = 0xFF,
+ .map_event = armv6_map_event,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
* disable the interrupt reporting and update the event. When unthrottling we
* reset the period and enable the interrupt reporting.
*/
-static const struct arm_pmu armv6mpcore_pmu = {
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv6mpcore_perf_map,
+ &armv6mpcore_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu armv6mpcore_pmu = {
.id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
- .cache_map = &armv6mpcore_perf_cache_map,
- .event_map = &armv6mpcore_perf_map,
- .raw_event_mask = 0xFF,
+ .map_event = armv6mpcore_map_event,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
-static const struct arm_pmu *__init armv6pmu_init(void)
+static struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
-static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..98b75738345e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
*/
#ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
/*
* Common ARMv7 event types
*
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
};
/*
- * Perf Events counters
+ * Perf Events' indices
*/
-enum armv7_counters {
- ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
- ARMV7_COUNTER0 = 2, /* First event counter */
-};
+#define ARMV7_IDX_CYCLE_COUNTER 0
+#define ARMV7_IDX_COUNTER0 1
+#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define ARMV7_MAX_COUNTERS 32
+#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
/*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
*/
-#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
/*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
*/
+#define ARMV7_IDX_TO_COUNTER(x) \
+ (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
/*
* Per-CPU PMNC: config reg
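[The remap deliberately wraps: software index 0 (the cycle counter) maps
to hardware bit 31, which is exactly the C bit of the CNTENSET/CNTENCLR,
INTENSET/INTENCLR and FLAG registers, so BIT(counter) works uniformly in
the enable/disable/overflow paths below. A quick standalone check:]

	#include <assert.h>

	#define ARMV7_IDX_CYCLE_COUNTER	0
	#define ARMV7_IDX_COUNTER0	1
	#define ARMV7_COUNTER_MASK	(32 - 1)
	#define ARMV7_IDX_TO_COUNTER(x) \
		(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

	int main(void)
	{
		/* first event counter -> hardware counter 0 */
		assert(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0) == 0);
		/* cycle counter wraps to bit 31, the C bit */
		assert(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER) == 31);
		return 0;
	}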
@@ -708,103 +712,76 @@ enum armv7_counters {
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
/*
- * Available counters
- */
-#define ARMV7_CNT0 0 /* First event counter */
-#define ARMV7_CCNT 31 /* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
+ * FLAG: counters overflow flag status reg
*/
-#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
+#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
/*
- * SELECT: Counter selection reg
+ * PMXEVTYPER: Event selection reg
*/
-#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
+#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
- * FLAG: counters overflow flag status reg
+ * Event filters for PMUv2
*/
-#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
-#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
+#define ARMV7_EXCLUDE_PL1 (1 << 31)
+#define ARMV7_EXCLUDE_USER (1 << 30)
+#define ARMV7_INCLUDE_HYP (1 << 27)
-static inline unsigned long armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
{
val &= ARMV7_PMNC_MASK;
isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
- enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+ return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
int ret = 0;
+ u32 counter;
- if (counter == ARMV7_CYCLE_COUNTER)
- ret = pmnc & ARMV7_FLAG_C;
- else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
- ret = pmnc & ARMV7_FLAG_P(counter);
- else
+ if (!armv7_pmnc_counter_valid(idx)) {
pr_err("CPU%u checking wrong counter %d overflow status\n",
- smp_processor_id(), counter);
+ smp_processor_id(), idx);
+ } else {
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ ret = pmnc & BIT(counter);
+ }
return ret;
}
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
{
- u32 val;
+ u32 counter;
- if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
- pr_err("CPU%u selecting wrong PMNC counter"
- " %d\n", smp_processor_id(), idx);
- return -1;
+ if (!armv7_pmnc_counter_valid(idx)) {
+ pr_err("CPU%u selecting wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
}
- val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
static inline u32 armv7pmu_read_counter(int idx)
{
- unsigned long value = 0;
+ u32 value = 0;
- if (idx == ARMV7_CYCLE_COUNTER)
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
- else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
- if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mrc p15, 0, %0, c9, c13, 2"
- : "=r" (value));
- } else
+ if (!armv7_pmnc_counter_valid(idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
+ else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+ else if (armv7_pmnc_select_counter(idx) == idx)
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
- if (idx == ARMV7_CYCLE_COUNTER)
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
- else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
- if (armv7_pmnc_select_counter(idx) == idx)
- asm volatile("mcr p15, 0, %0, c9, c13, 2"
- : : "r" (value));
- } else
+ if (!armv7_pmnc_counter_valid(idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
+ else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+ else if (armv7_pmnc_select_counter(idx) == idx)
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
- val &= ARMV7_EVTSEL_MASK;
+ val &= ARMV7_EVTYPE_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
{
- u32 val;
+ u32 counter;
- if ((idx != ARMV7_CYCLE_COUNTER) &&
- ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
- pr_err("CPU%u enabling wrong PMNC counter"
- " %d\n", smp_processor_id(), idx);
- return -1;
+ if (!armv7_pmnc_counter_valid(idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
}
- if (idx == ARMV7_CYCLE_COUNTER)
- val = ARMV7_CNTENS_C;
- else
- val = ARMV7_CNTENS_P(idx);
-
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
return idx;
}
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
{
- u32 val;
-
+ u32 counter;
- if ((idx != ARMV7_CYCLE_COUNTER) &&
- ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
- pr_err("CPU%u disabling wrong PMNC counter"
- " %d\n", smp_processor_id(), idx);
- return -1;
+ if (!armv7_pmnc_counter_valid(idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
}
- if (idx == ARMV7_CYCLE_COUNTER)
- val = ARMV7_CNTENC_C;
- else
- val = ARMV7_CNTENC_P(idx);
-
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
return idx;
}
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
{
- u32 val;
+ u32 counter;
- if ((idx != ARMV7_CYCLE_COUNTER) &&
- ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
- pr_err("CPU%u enabling wrong PMNC counter"
- " interrupt enable %d\n", smp_processor_id(), idx);
- return -1;
+ if (!armv7_pmnc_counter_valid(idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
}
- if (idx == ARMV7_CYCLE_COUNTER)
- val = ARMV7_INTENS_C;
- else
- val = ARMV7_INTENS_P(idx);
-
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
return idx;
}
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
{
- u32 val;
+ u32 counter;
- if ((idx != ARMV7_CYCLE_COUNTER) &&
- ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
- pr_err("CPU%u disabling wrong PMNC counter"
- " interrupt enable %d\n", smp_processor_id(), idx);
- return -1;
+ if (!armv7_pmnc_counter_valid(idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return -EINVAL;
}
- if (idx == ARMV7_CYCLE_COUNTER)
- val = ARMV7_INTENC_C;
- else
- val = ARMV7_INTENC_P(idx);
-
- asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
return idx;
}
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
- for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+ for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
- cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+ ARMV7_IDX_TO_COUNTER(cnt), val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
- cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+ ARMV7_IDX_TO_COUNTER(cnt), val);
}
}
#endif
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
/*
* Set event (if destined for PMNx counters)
- * We don't need to set the event if it's a cycle count
+ * We only need to set the event for the cycle counter if we
+ * have the ability to perform event filtering.
*/
- if (idx != ARMV7_CYCLE_COUNTER)
+ if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_enable_counter(idx);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
/*
* Disable counter and interrupt
*/
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
/*
* Disable counter
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_disable_intens(idx);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
- unsigned long pmnc;
+ u32 pmnc;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
/*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
static void armv7pmu_start(void)
{
unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
+ unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
- if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
- if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+ if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+ if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
- return ARMV7_CYCLE_COUNTER;
- } else {
- /*
- * For anything other than a cycle counter, try and use
- * the events counters
- */
- for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
+ return ARMV7_IDX_CYCLE_COUNTER;
+ }
- /* The counters are all in use. */
- return -EAGAIN;
+ /*
+ * For anything other than a cycle counter, try to use
+ * the event counters
+ */
+ for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
}
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+}
+
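
armv7pmu_get_event_idx() above pins cycle events to the dedicated index and hands every other event the first free slot in used_mask. A self-contained sketch of the same first-fit policy (index values and the counter total are assumptions; the kernel does this with test_and_set_bit() on a real bitmap):

    #include <stdio.h>

    #define IDX_CYCLE    0
    #define IDX_COUNTER0 1
    #define NUM_EVENTS   5  /* hypothetical: CCNT plus four event counters */

    static int get_idx(unsigned long *used, int is_cycles)
    {
            int idx;

            if (is_cycles) {
                    if (*used & (1UL << IDX_CYCLE))
                            return -1;  /* -EAGAIN in the kernel */
                    *used |= 1UL << IDX_CYCLE;
                    return IDX_CYCLE;
            }
            for (idx = IDX_COUNTER0; idx < NUM_EVENTS; ++idx) {
                    if (!(*used & (1UL << idx))) {
                            *used |= 1UL << idx;
                            return idx;
                    }
            }
            return -1;  /* all counters in use */
    }

    int main(void)
    {
            unsigned long used = 0;
            int a = get_idx(&used, 1);
            int b = get_idx(&used, 0);
            int c = get_idx(&used, 0);

            printf("%d %d %d\n", a, b, c);  /* prints: 0 1 2 */
            return 0;
    }
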
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (attr->exclude_idle)
+ return -EPERM;
+ if (attr->exclude_user)
+ config_base |= ARMV7_EXCLUDE_USER;
+ if (attr->exclude_kernel)
+ config_base |= ARMV7_EXCLUDE_PL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV7_INCLUDE_HYP;
+
+ /*
+ * Install the filter into config_base as this is used to
+ * construct the event type.
+ */
+ event->config_base = config_base;
+
+ return 0;
}
static void armv7pmu_reset(void *info)
{
- u32 idx, nb_cnt = armpmu->num_events;
+ u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
- for (idx = 1; idx < nb_cnt; ++idx)
+ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
armv7pmu_disable_event(NULL, idx);
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
+static int armv7_a8_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a8_perf_map,
+ &armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a9_perf_map,
+ &armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a5_perf_map,
+ &armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a15_perf_map,
+ &armv7_a15_perf_cache_map, 0xFF);
+}
+
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = armv7pmu_reset,
- .raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
return nb_cnt + 1;
}
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
- armv7pmu.cache_map = &armv7_a8_perf_cache_map;
- armv7pmu.event_map = &armv7_a8_perf_map;
+ armv7pmu.map_event = armv7_a8_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
- armv7pmu.cache_map = &armv7_a9_perf_cache_map;
- armv7pmu.event_map = &armv7_a9_perf_map;
+ armv7pmu.map_event = armv7_a9_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA5;
armv7pmu.name = "ARMv7 Cortex-A5";
- armv7pmu.cache_map = &armv7_a5_perf_cache_map;
- armv7pmu.event_map = &armv7_a5_perf_map;
+ armv7pmu.map_event = armv7_a5_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA15;
armv7pmu.name = "ARMv7 Cortex-A15";
- armv7pmu.cache_map = &armv7_a15_perf_cache_map;
- armv7pmu.event_map = &armv7_a15_perf_map;
+ armv7pmu.map_event = armv7_a15_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
+ armv7pmu.set_event_filter = armv7pmu_set_event_filter;
return &armv7pmu;
}
#else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
return NULL;
}
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
return NULL;
}
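
Throughout this file the counter indices become zero-based: the cycle counter moves to index 0, the event counters start at index 1, and the loops run over 0 <= idx < num_events. A hedged reconstruction of the new layout (the ARMV7_IDX_* and mask defines land earlier in this patch, outside the hunks shown here):

    #include <stdio.h>

    #define ARMV7_IDX_CYCLE_COUNTER 0
    #define ARMV7_IDX_COUNTER0      1
    #define ARMV7_COUNTER_MASK      0x1f    /* assumed: at most 32 counters */
    #define ARMV7_IDX_TO_COUNTER(x) \
            (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

    int main(void)
    {
            int idx;

            /* software index 1 drives hardware event counter 0, and so on;
             * the cycle counter keeps its dedicated bit (bit 31) instead. */
            for (idx = ARMV7_IDX_COUNTER0; idx <= 4; ++idx)
                    printf("idx %d -> counter %d\n",
                           idx, ARMV7_IDX_TO_COUNTER(idx));
            return 0;
    }
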
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3c4397491d08..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -40,7 +40,7 @@ enum xscale_perf_types {
};
enum xscale_counters {
- XSCALE_CYCLE_COUNTER = 1,
+ XSCALE_CYCLE_COUNTER = 0,
XSCALE_COUNTER0,
XSCALE_COUNTER1,
XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
irq_work_run();
@@ -284,6 +281,7 @@ static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
xscale1pmu_start(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
}
}
-static const struct arm_pmu xscale1pmu = {
+static int xscale_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &xscale_perf_map,
+ &xscale_perf_cache_map, 0xFF);
+}
+
+static struct arm_pmu xscale1pmu = {
.id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1",
.handle_irq = xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
.get_event_idx = xscale1pmu_get_event_idx,
.start = xscale1pmu_start,
.stop = xscale1pmu_stop,
- .cache_map = &xscale_perf_cache_map,
- .event_map = &xscale_perf_map,
- .raw_event_mask = 0xFF,
+ .map_event = xscale_map_event,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
- struct cpu_hw_events *cpuc;
+ struct pmu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!test_bit(idx, cpuc->active_mask))
- continue;
-
if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
continue;
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
continue;
if (perf_event_overflow(event, &data, regs))
- armpmu->disable(hwc, idx);
+ cpu_pmu->disable(hwc, idx);
}
irq_work_run();
@@ -616,6 +618,7 @@ static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
xscale2pmu_start(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
unsigned long flags, val;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
- raw_spin_lock_irqsave(&pmu_lock, flags);
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&pmu_lock, flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
}
}
-static const struct arm_pmu xscale2pmu = {
+static struct arm_pmu xscale2pmu = {
.id = ARM_PERF_PMU_ID_XSCALE2,
.name = "xscale2",
.handle_irq = xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
.get_event_idx = xscale2pmu_get_event_idx,
.start = xscale2pmu_start,
.stop = xscale2pmu_stop,
- .cache_map = &xscale_perf_cache_map,
- .event_map = &xscale_perf_map,
- .raw_event_mask = 0xFF,
+ .map_event = xscale_map_event,
.num_events = 5,
.max_period = (1LLU << 32) - 1,
};
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
{
return &xscale2pmu;
}
#else
-static const struct arm_pmu *__init xscale1pmu_init(void)
+static struct arm_pmu *__init xscale1pmu_init(void)
{
return NULL;
}
-static const struct arm_pmu *__init xscale2pmu_init(void)
+static struct arm_pmu *__init xscale2pmu_init(void)
{
return NULL;
}
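
Both the ARMv7 and XScale paths now take their spinlock from cpu_pmu->get_hw_events() instead of a file-global pmu_lock, and the overflow loops walk 0 <= idx < num_events with no separate active_mask test. A hedged sketch of the per-CPU state those calls imply; the field layout is assumed from the call sites above, with the real definition reworked in asm/pmu.h by this series:

    struct pmu_hw_events {
            struct perf_event       **events;    /* idx -> scheduled event */
            unsigned long           *used_mask;  /* counter allocation bitmap */
            raw_spinlock_t          pmu_lock;    /* serialises PMU register access */
    };
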
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2b70709376c3..2c3407ee8576 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -10,192 +10,26 @@
*
*/
-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
#include <linux/err.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <asm/pmu.h>
-static volatile long pmu_lock;
-
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
- enum arm_pmu_type type)
-{
- if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
- pr_warning("received registration request for unknown "
- "device %d\n", type);
- return -EINVAL;
- }
-
- if (pmu_devices[type]) {
- pr_warning("rejecting duplicate registration of PMU device "
- "type %d.", type);
- return -ENOSPC;
- }
-
- pr_info("registered new PMU device of type %d\n", type);
- pmu_devices[type] = pdev;
- return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) { \
- .compatible = _name, \
- .data = (void *)_type, \
-}
-
-#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
- OF_MATCH_CPU("arm,cortex-a9-pmu"),
- OF_MATCH_CPU("arm,cortex-a8-pmu"),
- OF_MATCH_CPU("arm,arm1136-pmu"),
- OF_MATCH_CPU("arm,arm1176-pmu"),
- {},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) { \
- .name = _name, \
- .driver_data = _type, \
-}
-
-#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
- PLAT_MATCH_CPU("arm-pmu"),
- {},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
- const struct of_device_id *of_id;
- const struct platform_device_id *pdev_id;
-
- /* provided by of_device_id table */
- if (pdev->dev.of_node) {
- of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
- BUG_ON(!of_id);
- return (enum arm_pmu_type)of_id->data;
- }
-
- /* Provided by platform_device_id table */
- pdev_id = platform_get_device_id(pdev);
- BUG_ON(!pdev_id);
- return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
- return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
- .driver = {
- .name = "arm-pmu",
- .of_match_table = armpmu_of_device_ids,
- },
- .probe = armpmu_device_probe,
- .id_table = armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
- return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
-
-struct platform_device *
-reserve_pmu(enum arm_pmu_type device)
-{
- struct platform_device *pdev;
-
- if (test_and_set_bit_lock(device, &pmu_lock)) {
- pdev = ERR_PTR(-EBUSY);
- } else if (pmu_devices[device] == NULL) {
- clear_bit_unlock(device, &pmu_lock);
- pdev = ERR_PTR(-ENODEV);
- } else {
- pdev = pmu_devices[device];
- }
-
- return pdev;
-}
-EXPORT_SYMBOL_GPL(reserve_pmu);
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
int
-release_pmu(enum arm_pmu_type device)
-{
- if (WARN_ON(!pmu_devices[device]))
- return -EINVAL;
- clear_bit_unlock(device, &pmu_lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(release_pmu);
-
-static int
-set_irq_affinity(int irq,
- unsigned int cpu)
+reserve_pmu(enum arm_pmu_type type)
{
-#ifdef CONFIG_SMP
- int err = irq_set_affinity(irq, cpumask_of(cpu));
- if (err)
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, cpu);
- return err;
-#else
- return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
- int i, irqs, err = 0;
- struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
- if (!pdev)
- return -ENODEV;
-
- irqs = pdev->num_resources;
-
- /*
- * If we have a single PMU interrupt that we can't shift, assume that
- * we're running on a uniprocessor machine and continue.
- */
- if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
- return 0;
-
- for (i = 0; i < irqs; ++i) {
- err = set_irq_affinity(platform_get_irq(pdev, i), i);
- if (err)
- break;
- }
-
- return err;
+ return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
}
+EXPORT_SYMBOL_GPL(reserve_pmu);
-int
-init_pmu(enum arm_pmu_type device)
+void
+release_pmu(enum arm_pmu_type type)
{
- int err = 0;
-
- switch (device) {
- case ARM_PMU_DEVICE_CPU:
- err = init_cpu_pmu();
- break;
- default:
- pr_warning("attempt to initialise unknown device %d\n",
- device);
- err = -EINVAL;
- }
-
- return err;
+ clear_bit_unlock(type, pmu_lock);
}
-EXPORT_SYMBOL_GPL(init_pmu);
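
With the platform-device bookkeeping gone, reserve_pmu()/release_pmu() shrink to a bit lock: test_and_set_bit_lock() either claims the PMU type atomically or reports -EBUSY, and clear_bit_unlock() releases it. A hedged usage sketch for a client subsystem, matching the signatures added above:

    #include <asm/pmu.h>

    static int example_pmu_session(void)
    {
            int err = reserve_pmu(ARM_PMU_DEVICE_CPU);

            if (err)
                    return err;     /* -EBUSY: another subsystem holds the PMU */

            /* ... program and read the hardware counters ... */

            release_pmu(ARM_PMU_DEVICE_CPU);
            return 0;
    }
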
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 9cf4cbf8f95b..d0cdedf4864d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -57,7 +57,8 @@ relocate_new_kernel:
mov r0,#0
ldr r1,kexec_mach_type
ldr r2,kexec_boot_atags
- mov pc,lr
+ ARM( mov pc, lr )
+ THUMB( bx lr )
.align
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 78d197d6ec34..6136144f8f8d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
if (arch >= CPU_ARCH_ARMv6) {
if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
+ arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
if ((cachetype & (3 << 14)) == 1 << 14)
cacheid |= CACHEID_ASID_TAGGED;
- else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
- cacheid |= CACHEID_VIPT_I_ALIASING;
- } else if (cachetype & (1 << 23)) {
- cacheid = CACHEID_VIPT_ALIASING;
} else {
- cacheid = CACHEID_VIPT_NONALIASING;
- if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
- cacheid |= CACHEID_VIPT_I_ALIASING;
+ arch = CPU_ARCH_ARMv6;
+ if (cachetype & (1 << 23))
+ cacheid = CACHEID_VIPT_ALIASING;
+ else
+ cacheid = CACHEID_VIPT_NONALIASING;
}
+ if (cpu_has_aliasing_icache(arch))
+ cacheid |= CACHEID_VIPT_I_ALIASING;
} else {
cacheid = CACHEID_VIVT;
}
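
The restructured cacheid_init() picks the register format first (ARMv7 vs ARMv6), derives the data-cache identity, and only then applies the I-cache aliasing check once for either architecture, instead of duplicating it in each branch. A runnable distillation of the new flow; the CACHEID_* values here are assumptions for illustration, the kernel's live in asm/cachetype.h:

    #include <stdio.h>

    #define CACHEID_VIPT_NONALIASING (1 << 1)
    #define CACHEID_VIPT_ALIASING    (1 << 2)
    #define CACHEID_ASID_TAGGED      (1 << 3)
    #define CACHEID_VIPT_I_ALIASING  (1 << 4)

    static unsigned int cacheid_for(unsigned int cachetype, int is_v7,
                                    int aliasing_icache)
    {
            unsigned int cacheid;

            if (is_v7) {
                    cacheid = CACHEID_VIPT_NONALIASING;
                    if ((cachetype & (3 << 14)) == 1 << 14)
                            cacheid |= CACHEID_ASID_TAGGED;
            } else {
                    cacheid = (cachetype & (1 << 23)) ? CACHEID_VIPT_ALIASING
                                                      : CACHEID_VIPT_NONALIASING;
            }
            if (aliasing_icache)    /* applied once, for either architecture */
                    cacheid |= CACHEID_VIPT_I_ALIASING;
            return cacheid;
    }

    int main(void)
    {
            printf("v6, aliasing D, aliasing I -> 0x%x\n",
                   cacheid_for(1 << 23, 0, 1));     /* 0x14 */
            return 0;
    }
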
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d40cee6..01c186222f3b 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -137,8 +137,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
+ clockevents_register_device(clk);
+
/* Make sure our local interrupt controller has this enabled */
gic_enable_ppi(clk->irq);
-
- clockevents_register_device(clk);
}
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index d522b47e30b5..6c8e3b5f669f 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -157,7 +157,7 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
- CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc1_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 0eabec62cd9d..f1397a13e76b 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -6,7 +6,7 @@
* TS72xx memory map:
*
* virt phys size
- * febff000 22000000 4K model number register
+ * febff000 22000000 4K model number register (bits 0-2)
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
* febf9000 10800000 4K TS-5620 RTC index register
@@ -20,6 +20,9 @@
#define TS72XX_MODEL_TS7200 0x00
#define TS72XX_MODEL_TS7250 0x01
#define TS72XX_MODEL_TS7260 0x02
+#define TS72XX_MODEL_TS7300 0x03
+#define TS72XX_MODEL_TS7400 0x04
+#define TS72XX_MODEL_MASK 0x07
#define TS72XX_OPTIONS_PHYS_BASE 0x22400000
@@ -51,19 +54,34 @@
#ifndef __ASSEMBLY__
+static inline int ts72xx_model(void)
+{
+ return __raw_readb(TS72XX_MODEL_VIRT_BASE) & TS72XX_MODEL_MASK;
+}
+
static inline int board_is_ts7200(void)
{
- return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7200;
+ return ts72xx_model() == TS72XX_MODEL_TS7200;
}
static inline int board_is_ts7250(void)
{
- return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7250;
+ return ts72xx_model() == TS72XX_MODEL_TS7250;
}
static inline int board_is_ts7260(void)
{
- return __raw_readb(TS72XX_MODEL_VIRT_BASE) == TS72XX_MODEL_TS7260;
+ return ts72xx_model() == TS72XX_MODEL_TS7260;
+}
+
+static inline int board_is_ts7300(void)
+{
+ return ts72xx_model() == TS72XX_MODEL_TS7300;
+}
+
+static inline int board_is_ts7400(void)
+{
+ return ts72xx_model() == TS72XX_MODEL_TS7400;
}
static inline int is_max197_installed(void)
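
Only bits 0-2 of the TS72xx model register are decoded, so comparing the raw byte can misidentify boards whose undecoded high bits float; ts72xx_model() masks before comparing. A tiny demonstration (the 0xab raw value is hypothetical):

    #include <stdio.h>

    #define TS72XX_MODEL_TS7300 0x03
    #define TS72XX_MODEL_MASK   0x07

    int main(void)
    {
            unsigned char reg = 0xab;   /* hypothetical read: bits 3-7 undecoded */

            printf("unmasked: %d\n", reg == TS72XX_MODEL_TS7300);   /* 0 */
            printf("masked:   %d\n",
                   (reg & TS72XX_MODEL_MASK) == TS72XX_MODEL_TS7300); /* 1 */
            return 0;
    }
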
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c
index 851dea018578..1561b036a9bf 100644
--- a/arch/arm/mach-exynos4/clock.c
+++ b/arch/arm/mach-exynos4/clock.c
@@ -520,7 +520,7 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 21),
}, {
.name = "ac97",
- .id = -1,
+ .devname = "samsung-ac97",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 27),
}, {
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 2d8a40c9e6e5..746d6fc6d397 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -24,12 +24,13 @@
#include <plat/exynos4.h>
#include <plat/adc-core.h>
#include <plat/sdhci.h>
-#include <plat/devs.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
+#include <plat/reset.h>
#include <mach/regs-irq.h>
+#include <mach/regs-pmu.h>
extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
unsigned int irq_start);
@@ -128,6 +129,11 @@ static void exynos4_idle(void)
local_irq_enable();
}
+static void exynos4_sw_reset(void)
+{
+ __raw_writel(0x1, S5P_SWRESET);
+}
+
/*
* exynos4_map_io
*
@@ -241,5 +247,8 @@ int __init exynos4_init(void)
/* set idle function */
pm_idle = exynos4_idle;
+ /* set sw_reset function */
+ s5p_reset_hook = exynos4_sw_reset;
+
return sysdev_register(&exynos4_sysdev);
}
diff --git a/arch/arm/mach-exynos4/include/mach/irqs.h b/arch/arm/mach-exynos4/include/mach/irqs.h
index 934d2a493982..f8952f8f3757 100644
--- a/arch/arm/mach-exynos4/include/mach/irqs.h
+++ b/arch/arm/mach-exynos4/include/mach/irqs.h
@@ -80,9 +80,8 @@
#define IRQ_HSMMC3 IRQ_SPI(76)
#define IRQ_DWMCI IRQ_SPI(77)
-#define IRQ_MIPICSI0 IRQ_SPI(78)
-
-#define IRQ_MIPICSI1 IRQ_SPI(80)
+#define IRQ_MIPI_CSIS0 IRQ_SPI(78)
+#define IRQ_MIPI_CSIS1 IRQ_SPI(80)
#define IRQ_ONENAND_AUDI IRQ_SPI(82)
#define IRQ_ROTATOR IRQ_SPI(83)
diff --git a/arch/arm/mach-exynos4/include/mach/regs-pmu.h b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
index fa49bbb8e7b0..cdf9b47c303c 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-pmu.h
@@ -29,6 +29,8 @@
#define S5P_USE_STANDBY_WFE1 (1 << 25)
#define S5P_USE_MASK ((0x3 << 16) | (0x3 << 24))
+#define S5P_SWRESET S5P_PMUREG(0x0400)
+
#define S5P_WAKEUP_STAT S5P_PMUREG(0x0600)
#define S5P_EINT_WAKEUP_MASK S5P_PMUREG(0x0604)
#define S5P_WAKEUP_MASK S5P_PMUREG(0x0608)
diff --git a/arch/arm/mach-exynos4/irq-eint.c b/arch/arm/mach-exynos4/irq-eint.c
index 9d87d2ac7f68..badb8c66fc9b 100644
--- a/arch/arm/mach-exynos4/irq-eint.c
+++ b/arch/arm/mach-exynos4/irq-eint.c
@@ -23,6 +23,8 @@
#include <mach/regs-gpio.h>
+#include <asm/mach/irq.h>
+
static DEFINE_SPINLOCK(eint_lock);
static unsigned int eint0_15_data[16];
@@ -184,8 +186,11 @@ static inline void exynos4_irq_demux_eint(unsigned int start)
static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
exynos4_irq_demux_eint(IRQ_EINT(16));
exynos4_irq_demux_eint(IRQ_EINT(24));
+ chained_irq_exit(chip, desc);
}
static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
@@ -193,6 +198,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
u32 *irq_data = irq_get_handler_data(irq);
struct irq_chip *chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
chip->irq_mask(&desc->irq_data);
if (chip->irq_ack)
@@ -201,6 +207,7 @@ static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(*irq_data);
chip->irq_unmask(&desc->irq_data);
+ chained_irq_exit(chip, desc);
}
int __init exynos4_init_irq_eint(void)
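
The fix above brackets each demux with chained_irq_enter()/chained_irq_exit() so the parent controller (here the GIC) sees the ack/EOI sequence it expects around the nested handling. The general template for a chained handler of this era, with the child-IRQ lookup elided as a sketch:

    #include <linux/irq.h>
    #include <asm/mach/irq.h>

    static void some_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_get_chip(irq);

            chained_irq_enter(chip, desc);
            /* ... generic_handle_irq() for each pending child interrupt ... */
            chained_irq_exit(chip, desc);
    }
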
diff --git a/arch/arm/mach-exynos4/mach-universal_c210.c b/arch/arm/mach-exynos4/mach-universal_c210.c
index d7ec84d586f2..2aac6f755c8e 100644
--- a/arch/arm/mach-exynos4/mach-universal_c210.c
+++ b/arch/arm/mach-exynos4/mach-universal_c210.c
@@ -79,7 +79,7 @@ static struct s3c2410_uartcfg universal_uartcfgs[] __initdata = {
};
static struct regulator_consumer_supply max8952_consumer =
- REGULATOR_SUPPLY("vddarm", NULL);
+ REGULATOR_SUPPLY("vdd_arm", NULL);
static struct max8952_platform_data universal_max8952_pdata __initdata = {
.gpio_vid0 = EXYNOS4_GPX0(3),
@@ -105,7 +105,7 @@ static struct max8952_platform_data universal_max8952_pdata __initdata = {
};
static struct regulator_consumer_supply lp3974_buck1_consumer =
- REGULATOR_SUPPLY("vddint", NULL);
+ REGULATOR_SUPPLY("vdd_int", NULL);
static struct regulator_consumer_supply lp3974_buck2_consumer =
REGULATOR_SUPPLY("vddg3d", NULL);
diff --git a/arch/arm/mach-exynos4/setup-usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b824b9..39aca045f660 100644
--- a/arch/arm/mach-exynos4/setup-usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
@@ -82,7 +82,7 @@ static int exynos4_usb_phy1_init(struct platform_device *pdev)
rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK);
writel(rstcon, EXYNOS4_RSTCON);
- udelay(50);
+ udelay(80);
clk_disable(otg_clk);
clk_put(otg_clk);
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index dc26fff22cf0..c8e7afcf14ec 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -62,6 +62,7 @@ config ARCH_EBSA285_HOST
config ARCH_NETWINDER
bool "NetWinder"
select CLKSRC_I8253
+ select CLKEVT_I8253
select FOOTBRIDGE_HOST
select ISA
select ISA_DMA
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index 1331fff51ae2..18c32a5541d9 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <video/vga.h>
#include <asm/irq.h>
#include <asm/system.h>
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 881add0fbe5b..b1ec2cf53bb0 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -310,7 +310,7 @@ static struct sys_timer eukrea_cpuimx27_timer = {
.init = eukrea_cpuimx27_timer_init,
};
-MACHINE_START(CPUIMX27, "EUKREA CPUIMX27")
+MACHINE_START(EUKREA_CPUIMX27, "EUKREA CPUIMX27")
.atag_offset = 0x100,
.map_io = mx27_map_io,
.init_early = imx27_init_early,
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 10b89139da48..470b654b0e6e 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -192,7 +192,7 @@ struct sys_timer eukrea_cpuimx35_timer = {
.init = eukrea_cpuimx35_timer_init,
};
-MACHINE_START(EUKREA_CPUIMX35, "Eukrea CPUIMX35")
+MACHINE_START(EUKREA_CPUIMX35SD, "Eukrea CPUIMX35")
/* Maintainer: Eukrea Electromatique */
.atag_offset = 0x100,
.map_io = mx35_map_io,
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index d8699b54268d..9163318e95a2 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -161,7 +161,7 @@ static struct sys_timer eukrea_cpuimx25_timer = {
.init = eukrea_cpuimx25_timer_init,
};
-MACHINE_START(EUKREA_CPUIMX25, "Eukrea CPUIMX25")
+MACHINE_START(EUKREA_CPUIMX25SD, "Eukrea CPUIMX25")
/* Maintainer: Eukrea Electromatique */
.atag_offset = 0x100,
.map_io = mx25_map_io,
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index e25f60f5db8f..9e5c1663fc4f 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -77,7 +77,7 @@ static int __init dns323_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
/*
* Check for devices with hard-wired IRQs.
*/
- irq = orion5x_pci_map_irq(const dev, slot, pin);
+ irq = orion5x_pci_map_irq(dev, slot, pin);
if (irq != -1)
return irq;
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 28b8760ab9fa..bc4a920e26ee 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mbus.h>
+#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
diff --git a/arch/arm/mach-realview/include/mach/system.h b/arch/arm/mach-realview/include/mach/system.h
index a30f2e3ec178..6657ff231161 100644
--- a/arch/arm/mach-realview/include/mach/system.h
+++ b/arch/arm/mach-realview/include/mach/system.h
@@ -44,6 +44,7 @@ static inline void arch_reset(char mode, const char *cmd)
*/
if (realview_reset)
realview_reset(mode);
+ dsb();
}
#endif
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 8bad64370689..055e2858b0dd 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/serial_core.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <mach/map.h>
#include <mach/irqs.h>
diff --git a/arch/arm/mach-s5p64x0/irq-eint.c b/arch/arm/mach-s5p64x0/irq-eint.c
index 69ed4545112b..fe7380f5c3cd 100644
--- a/arch/arm/mach-s5p64x0/irq-eint.c
+++ b/arch/arm/mach-s5p64x0/irq-eint.c
@@ -129,7 +129,7 @@ static int s5p64x0_alloc_gc(void)
}
ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
ct->chip.irq_set_type = s5p64x0_irq_eint_set_type;
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 309e388a8a83..f149d278377b 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
SAVE_ITEM(S3C2410_TCNTO(0)),
};
-void s5pv210_cpu_suspend(unsigned long arg)
+static int s5pv210_cpu_suspend(unsigned long arg)
{
unsigned long tmp;
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 167a67c5ca54..5fde49da399a 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -342,6 +342,7 @@ static struct platform_device mipidsi0_device = {
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
.tmio_caps = MMC_CAP_SD_HIGHSPEED,
.tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
};
@@ -383,7 +384,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
}
static struct sh_mobile_sdhi_info sh_sdhi1_info = {
- .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
.tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
.tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.set_pwr = ag5evm_sdhi1_set_pwr,
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index e7c5379069ef..de2253d7f157 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -642,6 +642,8 @@ static struct usbhs_private usbhs0_private = {
},
.driver_param = {
.buswait_bwait = 4,
+ .d0_tx_id = SHDMA_SLAVE_USB0_TX,
+ .d1_rx_id = SHDMA_SLAVE_USB0_RX,
},
},
};
@@ -811,6 +813,8 @@ static struct usbhs_private usbhs1_private = {
.buswait_bwait = 4,
.pipe_type = usbhs1_pipe_cfg,
.pipe_size = ARRAY_SIZE(usbhs1_pipe_cfg),
+ .d0_tx_id = SHDMA_SLAVE_USB1_TX,
+ .d1_rx_id = SHDMA_SLAVE_USB1_RX,
},
},
};
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index e6e11e4e2d43..66975921e646 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -503,16 +503,17 @@ static struct clk *late_main_clks[] = {
&sh7372_fsidivb_clk,
};
-enum { MSTP001,
+enum { MSTP001, MSTP000,
MSTP131, MSTP130,
MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
MSTP118, MSTP117, MSTP116, MSTP113,
MSTP106, MSTP101, MSTP100,
MSTP223,
- MSTP218, MSTP217, MSTP216,
- MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
- MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
+ MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
+ MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
+ MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
+ MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
+ MSTP405, MSTP404, MSTP403, MSTP400,
MSTP_NR };
#define MSTP(_parent, _reg, _bit, _flags) \
@@ -520,6 +521,7 @@ enum { MSTP001,
static struct clk mstp_clks[MSTP_NR] = {
[MSTP001] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 1, 0), /* IIC2 */
+ [MSTP000] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR0, 0, 0), /* MSIOF0 */
[MSTP131] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 31, 0), /* VEU3 */
[MSTP130] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 30, 0), /* VEU2 */
[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* VEU1 */
@@ -538,14 +540,16 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
[MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
[MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
+ [MSTP214] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
+ [MSTP208] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 8, 0), /* MSIOF1 */
[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
+ [MSTP205] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 5, 0), /* MSIOF2 */
[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
[MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
[MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
- [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
[MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
[MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
[MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
@@ -557,8 +561,12 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP413] = MSTP(&pllc1_div2_clk, SMSTPCR4, 13, 0), /* HDMI */
[MSTP411] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 11, 0), /* IIC3 */
[MSTP410] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 10, 0), /* IIC4 */
+ [MSTP407] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 7, 0), /* USB-DMAC1 */
[MSTP406] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR4, 6, 0), /* USB1 */
+ [MSTP405] = MSTP(&r_clk, SMSTPCR4, 5, 0), /* CMT4 */
+ [MSTP404] = MSTP(&r_clk, SMSTPCR4, 4, 0), /* CMT3 */
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
+ [MSTP400] = MSTP(&r_clk, SMSTPCR4, 0, 0), /* CMT2 */
};
static struct clk_lookup lookups[] = {
@@ -609,6 +617,7 @@ static struct clk_lookup lookups[] = {
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
+ CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[MSTP000]), /* MSIOF0 */
CLKDEV_DEV_ID("uio_pdrv_genirq.4", &mstp_clks[MSTP131]), /* VEU3 */
CLKDEV_DEV_ID("uio_pdrv_genirq.3", &mstp_clks[MSTP130]), /* VEU2 */
CLKDEV_DEV_ID("uio_pdrv_genirq.2", &mstp_clks[MSTP129]), /* VEU1 */
@@ -629,14 +638,16 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
+ CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), /* USB-DMAC0 */
+ CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[MSTP208]), /* MSIOF1 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
+ CLKDEV_DEV_ID("spi_sh_msiof.2", &mstp_clks[MSTP205]), /* MSIOF2 */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
- CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
@@ -650,10 +661,14 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-mobile-hdmi", &mstp_clks[MSTP413]), /* HDMI */
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* IIC3 */
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* IIC4 */
+ CLKDEV_DEV_ID("sh-dma-engine.4", &mstp_clks[MSTP407]), /* USB-DMAC1 */
CLKDEV_DEV_ID("r8a66597_hcd.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("r8a66597_udc.1", &mstp_clks[MSTP406]), /* USB1 */
CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[MSTP406]), /* USB1 */
+ CLKDEV_DEV_ID("sh_cmt.4", &mstp_clks[MSTP405]), /* CMT4 */
+ CLKDEV_DEV_ID("sh_cmt.3", &mstp_clks[MSTP404]), /* CMT3 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
+ CLKDEV_DEV_ID("sh_cmt.2", &mstp_clks[MSTP400]), /* CMT2 */
CLKDEV_ICK_ID("hdmi", "sh_mobile_lcdc_fb.1",
&div6_reparent_clks[DIV6_HDMI]),
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 6db2ccabc2bf..61a846bb30f2 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -365,7 +365,7 @@ void __init sh73a0_clock_init(void)
__raw_writel(0x108, SD2CKCR);
/* detect main clock parent */
- switch ((__raw_readl(CKSCR) >> 24) & 0x03) {
+ switch ((__raw_readl(CKSCR) >> 28) & 0x03) {
case 0:
main_clk.parent = &sh73a0_extal1_clk;
break;
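
The parent-select field the switch decodes sits at bits 29:28 of CKSCR, not 25:24, so the old shift always read the wrong bits. The arithmetic, checked in miniature with a hypothetical register value:

    #include <stdio.h>

    int main(void)
    {
            unsigned long ckscr = 0x10000000;   /* hypothetical register value */

            printf("bits 25:24 -> %lu\n", (ckscr >> 24) & 0x03); /* 0: old, wrong */
            printf("bits 29:28 -> %lu\n", (ckscr >> 28) & 0x03); /* 1: fixed */
            return 0;
    }
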
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index ce595cee86cd..24e63a85e669 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -459,6 +459,10 @@ enum {
SHDMA_SLAVE_SDHI2_TX,
SHDMA_SLAVE_MMCIF_RX,
SHDMA_SLAVE_MMCIF_TX,
+ SHDMA_SLAVE_USB0_TX,
+ SHDMA_SLAVE_USB0_RX,
+ SHDMA_SLAVE_USB1_TX,
+ SHDMA_SLAVE_USB1_RX,
};
extern struct clk sh7372_extal1_clk;
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 3b28743c77eb..739315e30eb9 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -379,7 +379,7 @@ enum {
/* BBIF2 */
VPU,
TSIF1,
- _3DG_SGX530,
+ /* 3DG */
_2DDMAC,
IIC2_ALI2, IIC2_TACKI2, IIC2_WAITI2, IIC2_DTEI2,
IPMMU_IPMMUR, IPMMU_IPMMUR2,
@@ -436,7 +436,7 @@ static struct intc_vect intcs_vectors[] = {
/* BBIF2 */
INTCS_VECT(VPU, 0x980),
INTCS_VECT(TSIF1, 0x9a0),
- INTCS_VECT(_3DG_SGX530, 0x9e0),
+ /* 3DG */
INTCS_VECT(_2DDMAC, 0xa00),
INTCS_VECT(IIC2_ALI2, 0xa80), INTCS_VECT(IIC2_TACKI2, 0xaa0),
INTCS_VECT(IIC2_WAITI2, 0xac0), INTCS_VECT(IIC2_DTEI2, 0xae0),
@@ -521,7 +521,7 @@ static struct intc_mask_reg intcs_mask_registers[] = {
RTDMAC_1_DEI3, RTDMAC_1_DEI2, RTDMAC_1_DEI1, RTDMAC_1_DEI0 } },
{ 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
{ 0, 0, MSIOF, 0,
- _3DG_SGX530, 0, 0, 0 } },
+ 0, 0, 0, 0 } },
{ 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
{ 0, TMU_TUNI2, TMU_TUNI1, TMU_TUNI0,
0, 0, 0, 0 } },
@@ -561,7 +561,6 @@ static struct intc_prio_reg intcs_prio_registers[] = {
TMU_TUNI2, TSIF1 } },
{ 0xffd2001c, 0, 16, 4, /* IPRHS */ { 0, 0, VEU, BEU } },
{ 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, IIC0 } },
- { 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX530, 0, 0 } },
{ 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, 0, LMB, 0 } },
{ 0xffd2002c, 0, 16, 4, /* IPRLS */ { IPMMU, 0, 0, 0 } },
{ 0xffd20030, 0, 16, 4, /* IPRMS */ { IIC2, 0, 0, 0 } },
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 79f0413d8725..2d9b1b1a2538 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -169,35 +169,35 @@ static struct platform_device scif6_device = {
};
/* CMT */
-static struct sh_timer_config cmt10_platform_data = {
- .name = "CMT10",
- .channel_offset = 0x10,
- .timer_bit = 0,
+static struct sh_timer_config cmt2_platform_data = {
+ .name = "CMT2",
+ .channel_offset = 0x40,
+ .timer_bit = 5,
.clockevent_rating = 125,
.clocksource_rating = 125,
};
-static struct resource cmt10_resources[] = {
+static struct resource cmt2_resources[] = {
[0] = {
- .name = "CMT10",
- .start = 0xe6138010,
- .end = 0xe613801b,
+ .name = "CMT2",
+ .start = 0xe6130040,
+ .end = 0xe613004b,
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = evt2irq(0x0b00), /* CMT1_CMT10 */
+ .start = evt2irq(0x0b80), /* CMT2 */
.flags = IORESOURCE_IRQ,
},
};
-static struct platform_device cmt10_device = {
+static struct platform_device cmt2_device = {
.name = "sh_cmt",
- .id = 10,
+ .id = 2,
.dev = {
- .platform_data = &cmt10_platform_data,
+ .platform_data = &cmt2_platform_data,
},
- .resource = cmt10_resources,
- .num_resources = ARRAY_SIZE(cmt10_resources),
+ .resource = cmt2_resources,
+ .num_resources = ARRAY_SIZE(cmt2_resources),
};
/* TMU */
@@ -602,6 +602,150 @@ static struct platform_device dma2_device = {
},
};
+/*
+ * USB-DMAC
+ */
+
+unsigned int usbts_shift[] = {3, 4, 5};
+
+enum {
+ XMIT_SZ_8BYTE = 0,
+ XMIT_SZ_16BYTE = 1,
+ XMIT_SZ_32BYTE = 2,
+};
+
+#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
+
+static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
+ {
+ .offset = 0,
+ }, {
+ .offset = 0x20,
+ },
+};
+
+/* USB DMAC0 */
+static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_USB0_TX,
+ .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ }, {
+ .slave_id = SHDMA_SLAVE_USB0_RX,
+ .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ },
+};
+
+static struct sh_dmae_pdata usb_dma0_platform_data = {
+ .slave = sh7372_usb_dmae0_slaves,
+ .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves),
+ .channel = sh7372_usb_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
+ .ts_low_shift = 6,
+ .ts_low_mask = 0xc0,
+ .ts_high_shift = 0,
+ .ts_high_mask = 0,
+ .ts_shift = usbts_shift,
+ .ts_shift_num = ARRAY_SIZE(usbts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chcr_offset = 0x14,
+ .chcr_ie_bit = 1 << 5,
+ .dmaor_is_32bit = 1,
+ .needs_tend_set = 1,
+ .no_dmars = 1,
+};
+
+static struct resource sh7372_usb_dmae0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xe68a0020,
+ .end = 0xe68a0064 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* VCR/SWR/DMICR */
+ .start = 0xe68a0000,
+ .end = 0xe68a0014 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* IRQ for channels */
+ .start = evt2irq(0x0a00),
+ .end = evt2irq(0x0a00),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 3,
+ .resource = sh7372_usb_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7372_usb_dmae0_resources),
+ .dev = {
+ .platform_data = &usb_dma0_platform_data,
+ },
+};
+
+/* USB DMAC1 */
+static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_USB1_TX,
+ .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ }, {
+ .slave_id = SHDMA_SLAVE_USB1_RX,
+ .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ },
+};
+
+static struct sh_dmae_pdata usb_dma1_platform_data = {
+ .slave = sh7372_usb_dmae1_slaves,
+ .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves),
+ .channel = sh7372_usb_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
+ .ts_low_shift = 6,
+ .ts_low_mask = 0xc0,
+ .ts_high_shift = 0,
+ .ts_high_mask = 0,
+ .ts_shift = usbts_shift,
+ .ts_shift_num = ARRAY_SIZE(usbts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chcr_offset = 0x14,
+ .chcr_ie_bit = 1 << 5,
+ .dmaor_is_32bit = 1,
+ .needs_tend_set = 1,
+ .no_dmars = 1,
+};
+
+static struct resource sh7372_usb_dmae1_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xe68c0020,
+ .end = 0xe68c0064 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* VCR/SWR/DMICR */
+ .start = 0xe68c0000,
+ .end = 0xe68c0014 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* IRQ for channels */
+ .start = evt2irq(0x1d00),
+ .end = evt2irq(0x1d00),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 4,
+ .resource = sh7372_usb_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7372_usb_dmae1_resources),
+ .dev = {
+ .platform_data = &usb_dma1_platform_data,
+ },
+};
+
/* VPU */
static struct uio_info vpu_platform_data = {
.name = "VPU5HG",
@@ -818,7 +962,7 @@ static struct platform_device *sh7372_early_devices[] __initdata = {
&scif4_device,
&scif5_device,
&scif6_device,
- &cmt10_device,
+ &cmt2_device,
&tmu00_device,
&tmu01_device,
};
@@ -829,6 +973,8 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
&dma0_device,
&dma1_device,
&dma2_device,
+ &usb_dma0_device,
+ &usb_dma1_device,
&vpu_device,
&veu0_device,
&veu1_device,
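A sketch, not part of the patch: the ts_* fields above describe how this DMAC encodes its transfer size in CHCR. usbts_shift holds log2 of the burst size in bytes (3, 4, 5 for 8, 16, 32 bytes), and USBTS_INDEX2VAL places the 2-bit index at bits 7:6, which is why ts_low_shift is 6 and ts_low_mask is 0xc0. A minimal stand-alone C round trip of that encoding:

#include <stdio.h>

static const unsigned int usbts_shift[] = {3, 4, 5};    /* log2(bytes) */
#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)

/* Decode the transfer size, in bytes, from a CHCR value. */
static unsigned int chcr_xfer_bytes(unsigned int chcr)
{
        unsigned int idx = (chcr & 0xc0) >> 6;  /* ts_low_mask >> ts_low_shift */
        return 1u << usbts_shift[idx];
}

int main(void)
{
        /* XMIT_SZ_8BYTE (index 0), as both slave configs above use */
        printf("%u bytes\n", chcr_xfer_bytes(USBTS_INDEX2VAL(0)));  /* 8 bytes */
        return 0;
}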
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index de72058180a6..1fafc3244607 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -318,6 +318,10 @@ static struct clk v2m_sp804_clk = {
.rate = 1000000,
};
+static struct clk v2m_ref_clk = {
+ .rate = 32768,
+};
+
static struct clk dummy_apb_pclk;
static struct clk_lookup v2m_lookups[] = {
@@ -348,6 +352,9 @@ static struct clk_lookup v2m_lookups[] = {
}, { /* CLCD */
.dev_id = "mb:clcd",
.clk = &osc1_clk,
+ }, { /* SP805 WDT */
+ .dev_id = "mb:wdt",
+ .clk = &v2m_ref_clk,
}, { /* SP804 timers */
.dev_id = "sp804",
.con_id = "v2m-timer0",
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 92bd102e3982..2e6849b41f66 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
-.equ cpu_arm920_suspend_size, 4 * 3
+.equ cpu_arm920_suspend_size, 4 * 4
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r7, lr}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 2bbcf053dffd..cd8f79c3a282 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
-.equ cpu_arm926_suspend_size, 4 * 3
+.equ cpu_arm926_suspend_size, 4 * 4
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r7, lr}
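A note, not part of the patch: cpu_*_suspend_size tells the generic PM code how many bytes to reserve for the words that cpu_*_do_suspend stores, so it must track the save sequence exactly. The arm920 and arm926 paths now each save four words, so the area is 4 words * 4 bytes = 16 bytes, i.e. 4 * 4 rather than 4 * 3; an undersized buffer would let the suspend store overrun it.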
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 07219c2ae114..69e7f2ef7384 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -182,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend)
ENTRY(cpu_sa1100_do_resume)
ldmia r0, {r4 - r7} @ load cp regs
- mov r1, #0
- mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs
- mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache
- mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
- mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs
+ mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache
+ mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
+ mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB
mcr p15, 0, r4, c3, c0, 0 @ domain ID
mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 219138d2f158..a923aa0fd00d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -223,6 +223,22 @@ __v6_setup:
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear the bits
orr r0, r0, r6 @ set them
+#ifdef CONFIG_ARM_ERRATA_364296
+ /*
+ * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data
+ * corruption with hit-under-miss enabled). The conditional code below
+ * (setting the undocumented bit 31 in the auxiliary control register
+ * and the FI bit in the control register) disables hit-under-miss
+ * without putting the processor into full low interrupt latency mode.
+ */
+ ldr r6, =0x4107b362 @ id for ARM1136 r0p2
+ mrc p15, 0, r5, c0, c0, 0 @ get processor id
+ teq r5, r6 @ check for the faulty core
+ mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg
+ orreq r5, r5, #(1 << 31) @ set the undocumented bit 31
+ mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
+ orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
+#endif
mov pc, lr @ return to head.S:__ret
/*
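A sketch, not part of the patch: the workaround above is keyed to one exact part, and only that part pays the cost of disabling hit-under-miss. In C terms the guard reads roughly as below (is_arm1136_r0p2 is a hypothetical helper; 0x4107b362 is the MIDR value the assembly compares against):

/* Only ARM1136 r0p2 (MIDR 0x4107b362) is affected by erratum 364296;
 * every other core skips the conditional orr/mcr sequence entirely.
 */
static inline int is_arm1136_r0p2(unsigned int midr)
{
        return midr == 0x4107b362;
}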
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a30e78542ccf..9049c0764db2 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -66,6 +66,7 @@ ENDPROC(cpu_v7_proc_fin)
ENTRY(cpu_v7_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
+ THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions)
mcr p15, 0, r1, c1, c0, 0 @ disable MMU
isb
mov pc, r0
@@ -247,13 +248,16 @@ ENTRY(cpu_v7_do_resume)
mcr p15, 0, r7, c2, c0, 0 @ TTB 0
mcr p15, 0, r8, c2, c0, 1 @ TTB 1
mcr p15, 0, ip, c2, c0, 2 @ TTB control register
- mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
+ mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
+ teq r4, r10 @ Is it already set?
+ mcrne p15, 0, r10, c1, c0, 1 @ No, so write it
mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
ldr r4, =PRRR @ PRRR
ldr r5, =NMRR @ NMRR
mcr p15, 0, r4, c10, c2, 0 @ write PRRR
mcr p15, 0, r5, c10, c2, 1 @ write NMRR
isb
+ dsb
mov r0, r9 @ control register
mov r2, r7, lsr #14 @ get TTB0 base
mov r2, r2, lsl #14
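A sketch, not part of the patch: the resume path now reads the Auxiliary Control Register back and only writes it when the saved value differs, a read-compare-write pattern that avoids a redundant (and on some platforms privileged) register write. In C terms (read_actlr/write_actlr are hypothetical accessors):

unsigned int cur = read_actlr();        /* mrc p15, 0, ..., c1, c0, 1 */
if (cur != saved_actlr)                 /* teq r4, r10                */
        write_actlr(saved_actlr);       /* mcrne p15, 0, r10, c1, c0, 1 */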
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 28c72a2006a1..755e1bf22681 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.align
.globl cpu_xsc3_suspend_size
-.equ cpu_xsc3_suspend_size, 4 * 8
+.equ cpu_xsc3_suspend_size, 4 * 7
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r10, lr}
@@ -418,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend)
mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg
mrc p15, 0, r10, c1, c0, 0 @ control reg
bic r4, r4, #2 @ clear frequency change bit
- stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs
+ stmia r0, {r4 - r10} @ store cp regs
ldmia sp!, {r4 - r10, pc}
ENDPROC(cpu_xsc3_do_suspend)
ENTRY(cpu_xsc3_do_resume)
- ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs
+ ldmia r0, {r4 - r10} @ load cp regs
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
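A note, not part of the patch: {r4 - r10} is an inclusive register list of 7 registers (10 - 4 + 1), so with the v:p offset no longer stored the save area is exactly 7 * 4 bytes, matching the new 4 * 7 suspend size above; an off-by-one here would under-allocate the stmia/ldmia area.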
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 02af235298e2..5f84a3f13ef9 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -192,7 +192,7 @@ unsigned long s5p_spdif_get_rate(struct clk *clk)
if (IS_ERR(pclk))
return -EINVAL;
- rate = pclk->ops->get_rate(clk);
+ rate = pclk->ops->get_rate(pclk);
clk_put(pclk);
return rate;
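A sketch, not part of the patch: clock ops compute the rate of the clock they are handed, so delegating to the parent's get_rate while still passing the child returned the wrong clock's rate; the fix passes the parent itself. The corrected delegation shape (pclk obtained as above; error handling elided):

struct clk *pclk = clk_get_parent(clk);         /* parent lookup */
unsigned long rate = pclk->ops->get_rate(pclk); /* pass the parent, not clk */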
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 327ab9f662e8..f71078ef6bb5 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -23,6 +23,8 @@
#include <plat/gpio-core.h>
#include <plat/gpio-cfg.h>
+#include <asm/mach/irq.h>
+
#define GPIO_BASE(chip) (((unsigned long)(chip)->base) & 0xFFFFF000u)
#define CON_OFFSET 0x700
@@ -81,6 +83,9 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
int group, pend_offset, mask_offset;
unsigned int pend, mask;
+ struct irq_chip *chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
+
for (group = 0; group < bank->nr_groups; group++) {
struct s3c_gpio_chip *chip = bank->chips[group];
if (!chip)
@@ -102,6 +107,7 @@ static void s5p_gpioint_handler(unsigned int irq, struct irq_desc *desc)
pend &= ~BIT(offset);
}
}
+ chained_irq_exit(chip, desc);
}
static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
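A sketch, not part of the patch: this handler and the irq-vic-timer demux below gain the same chained-IRQ bracketing; without chained_irq_enter/exit the parent controller's ack/eoi is never driven around the demux. The minimal shape of the pattern (find_pending_child is hypothetical):

#include <asm/mach/irq.h>       /* chained_irq_enter/exit */

static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_get_chip(irq);

        chained_irq_enter(chip, desc);                  /* mask/ack parent */
        generic_handle_irq(find_pending_child(irq));    /* hypothetical */
        chained_irq_exit(chip, desc);                   /* unmask/eoi parent */
}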
diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/plat-samsung/include/plat/backlight.h
index 51d8da846a62..ad530c78fe8c 100644
--- a/arch/arm/plat-samsung/include/plat/backlight.h
+++ b/arch/arm/plat-samsung/include/plat/backlight.h
@@ -20,7 +20,7 @@ struct samsung_bl_gpio_info {
int func;
};
-extern void samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
struct platform_pwm_backlight_data *bl_data);
#endif /* __ASM_PLAT_BACKLIGHT_H */
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index f714d060370d..51583cd30164 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -22,9 +22,14 @@
#include <plat/irq-vic-timer.h>
#include <plat/regs-timer.h>
+#include <asm/mach/irq.h>
+
static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
+ chained_irq_enter(chip, desc);
generic_handle_irq((int)desc->irq_data.handler_data);
+ chained_irq_exit(chip, desc);
}
/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is contiguous. */
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fff68d0d521b..62cc8f981171 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -351,7 +351,7 @@ centro MACH_CENTRO CENTRO 1944
nokia_rx51 MACH_NOKIA_RX51 NOKIA_RX51 1955
omap_zoom2 MACH_OMAP_ZOOM2 OMAP_ZOOM2 1967
cpuat9260 MACH_CPUAT9260 CPUAT9260 1973
-eukrea_cpuimx27 MACH_CPUIMX27 CPUIMX27 1975
+eukrea_cpuimx27 MACH_EUKREA_CPUIMX27 EUKREA_CPUIMX27 1975
acs5k MACH_ACS5K ACS5K 1982
snapper_9260 MACH_SNAPPER_9260 SNAPPER_9260 1987
dsm320 MACH_DSM320 DSM320 1988
@@ -476,8 +476,8 @@ cns3420vb MACH_CNS3420VB CNS3420VB 2776
omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
ti8168evm MACH_TI8168EVM TI8168EVM 2800
teton_bga MACH_TETON_BGA TETON_BGA 2816
-eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25 EUKREA_CPUIMX25 2820
-eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35 EUKREA_CPUIMX35 2821
+eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25SD EUKREA_CPUIMX25SD 2820
+eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35SD EUKREA_CPUIMX35SD 2821
eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822
eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823
smdkc210 MACH_SMDKC210 SMDKC210 2838
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e66366fd2abc..3735abd7f8f6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -259,7 +259,7 @@
ENTRY_SAME(ni_syscall) /* query_module */
ENTRY_SAME(poll)
/* structs contain pointers and an in_addr... */
- ENTRY_COMP(nfsservctl)
+ ENTRY_SAME(ni_syscall) /* was nfsservctl */
ENTRY_SAME(setresgid) /* 170 */
ENTRY_SAME(getresgid)
ENTRY_SAME(prctl)
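A note, not part of the patch: pointing a slot at ni_syscall (done here and again in the powerpc and sparc tables below) retires a syscall without renumbering the table; callers simply get -ENOSYS. The stub in kernel/sys_ni.c is essentially:

asmlinkage long sys_ni_syscall(void)
{
        return -ENOSYS;
}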
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f6736b7da463..fa0d27a400de 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -171,7 +171,7 @@ SYSCALL_SPU(setresuid)
SYSCALL_SPU(getresuid)
SYSCALL(ni_syscall)
SYSCALL_SPU(poll)
-COMPAT_SYS(nfsservctl)
+SYSCALL(ni_syscall)
SYSCALL_SPU(setresgid)
SYSCALL_SPU(getresgid)
COMPAT_SYS_SPU(prctl)
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h
index b97baf81a87b..2d3679b2447f 100644
--- a/arch/sh/include/asm/ptrace.h
+++ b/arch/sh/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
struct perf_event;
struct perf_sample_data;
-extern void ptrace_triggered(struct perf_event *bp, int nmi,
+extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
#define task_pt_regs(task) \
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index e915deafac89..05559295d2ca 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -15,6 +15,7 @@
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include <linux/sh_timer.h>
#include <linux/sh_dma.h>
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 32114e0941ae..db4ecd731a00 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -22,7 +22,7 @@
#include <linux/atomic.h>
#include <asm/smp.h>
-static void (*pm_idle)(void);
+void (*pm_idle)(void);
static int hlt_counter;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index d9006f8ffc14..7bbef95c9d1b 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -316,6 +316,35 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
break;
}
break;
+
+ case 9: /* mov.w @(disp,PC),Rn */
+ srcu = (unsigned char __user *)regs->pc;
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 1;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+ dst += 2;
+#endif
+
+ if (ma->from(dst, srcu, 2))
+ goto fetch_fault;
+ sign_extend(2, dst);
+ ret = 0;
+ break;
+
+ case 0xd: /* mov.l @(disp,PC),Rn */
+ srcu = (unsigned char __user *)(regs->pc & ~0x3);
+ srcu += 4;
+ srcu += (instruction & 0x00FF) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
+
+ if (ma->from(dst, srcu, 4))
+ goto fetch_fault;
+ ret = 0;
+ break;
}
return ret;
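A sketch, not part of the patch: the two new cases compute the PC-relative source address by hand, and for mov.l the base PC is longword-aligned before the displacement is applied. A worked example with hypothetical values:

/* mov.l @(disp,PC),Rn: src = (pc & ~3) + 4 + (disp << 2).
 * E.g. insn 0xd102 (disp = 0x02) fetched at pc = 0x8c001002:
 * (0x8c001002 & ~3) + 4 + 8 = 0x8c00100c.
 */
static unsigned long movl_pc_src(unsigned long pc, unsigned short insn)
{
        return (pc & ~0x3UL) + 4 + ((insn & 0xffUL) << 2);
}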
@@ -466,6 +495,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot */
+ ret = 0;
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
@@ -479,6 +509,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
}
break;
case 0x0900: /* bt lab - no delayslot */
+ ret = 0;
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
@@ -494,6 +525,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
}
break;
+ case 0x9000: /* mov.w @(disp,Rm),Rn */
+ goto simple;
+
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
@@ -507,6 +541,9 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
+
+ case 0xD000: /* mov.l @(disp,Rm),Rn */
+ goto simple;
}
return ret;
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index 100b9c204e78..42851122bbd9 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -88,7 +88,7 @@ BTFIXUPDEF_CALL(void, set_irq_udt, int)
#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu)
/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
-#define SUN4D_IPI_IRQ 14
+#define SUN4D_IPI_IRQ 13
extern void sun4d_ipi_interrupt(void);
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index c9296ab0b1f4..edbec45d4688 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -68,7 +68,7 @@ sys_call_table32:
.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys_nis_syscall
.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy