author    Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 09:56:51 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>   2013-11-22 09:56:51 -0800
commit    7fa850ab4fc992717b3cc6284d3445c88978ca7e (patch)
tree      3d5bef4812e3881d602a437d9a266b2a79214bb5 /arch
parent    c874e6fc3596322b0248df3158dd9d5f43a958d0 (diff)
parent    0c403462d6822227ea37fb0293a3e9f511e6929f (diff)
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
 "Some small fixes for this merge window, most of them quite self
  explanatory - the biggest thing here is a fix for the ARMv7 LPAE
  suspend/resume support"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7894/1: kconfig: select GENERIC_CLOCKEVENTS if HAVE_ARM_ARCH_TIMER
  ARM: 7893/1: bitops: only emit .arch_extension mp if CONFIG_SMP
  ARM: 7892/1: Fix warning for V7M builds
  ARM: 7888/1: seccomp: not compatible with ARM OABI
  ARM: 7886/1: make OABI default to off
  ARM: 7885/1: Save/Restore 64-bit TTBR registers on LPAE suspend/resume
  ARM: 7884/1: mm: Fix ECC mem policy printk
  ARM: 7883/1: fix mov to mvn conversion in case of 64 bit phys_addr_t and BE
  ARM: 7882/1: mm: fix __phys_to_virt to work with 64 bit phys_addr_t in BE case
  ARM: 7881/1: __fixup_smp read of SCU config should do byteswap in BE case
  ARM: Fix nommu.c build warning
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig               11
-rw-r--r--  arch/arm/include/asm/memory.h   9
-rw-r--r--  arch/arm/kernel/head.S          7
-rw-r--r--  arch/arm/kernel/traps.c         2
-rw-r--r--  arch/arm/lib/bitops.h           2
-rw-r--r--  arch/arm/mm/mmu.c               4
-rw-r--r--  arch/arm/mm/nommu.c             1
-rw-r--r--  arch/arm/mm/proc-v7.S          17
8 files changed, 39 insertions, 14 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 214b698cefea..c1f1a7eee953 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -25,7 +25,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
- select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
select HAVE_CONTEXT_TRACKING
@@ -1496,6 +1496,7 @@ config HAVE_ARM_ARCH_TIMER
bool "Architected timer support"
depends on CPU_V7
select ARM_ARCH_TIMER
+ select GENERIC_CLOCKEVENTS
help
This option enables support for the ARM architected timer
@@ -1719,7 +1720,6 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
depends on AEABI && !THUMB2_KERNEL
- default y
help
This option preserves the old syscall interface along with the
new (ARM EABI) one. It also provides a compatibility layer to
@@ -1727,11 +1727,16 @@ config OABI_COMPAT
in memory differs between the legacy ABI and the new ARM EABI
(only for non "thumb" binaries). This option adds a tiny
overhead to all syscalls and produces a slightly larger kernel.
+
+ The seccomp filter system will not be available when this is
+ selected, since there is no way yet to sensibly distinguish
+ between calling conventions during filtering.
+
If you know you'll be using only pure EABI user space then you
can say N here. If this option is not selected and you attempt
to execute a legacy ABI binary then the result will be
UNPREDICTABLE (in fact it can be predicted that it won't work
- at all). If in doubt say Y.
+ at all). If in doubt say N.
config ARCH_HAS_HOLES_MEMORYMODEL
bool
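The help text added above has a practical consequence for user space: on a kernel built with OABI_COMPAT=y, mode-2 (filter) seccomp is not compiled in at all, so programs have to probe for it rather than assume it. Below is a minimal, hypothetical userspace sketch of one common probe (calling PR_SET_SECCOMP with a NULL filter and telling EFAULT apart from EINVAL); it is an illustration based on the documented prctl/seccomp behaviour, not part of this patch.

/* Hypothetical probe, not part of this patch: detect whether the running
 * kernel supports seccomp filters (CONFIG_SECCOMP_FILTER). With a NULL
 * filter pointer nothing is ever installed: a kernel with filter support
 * rejects the bad pointer with EFAULT, a kernel without it (for example
 * an ARM kernel built with OABI_COMPAT=y) rejects the mode with EINVAL.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP      22
#endif
#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, 0, 0) == 0)
		return 0;               /* cannot happen with a NULL filter */

	if (errno == EFAULT)
		printf("seccomp filter mode available\n");
	else
		printf("seccomp filter mode unavailable (errno=%d)\n", errno);
	return 0;
}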
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 4dd21457ef9d..9ecccc865046 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -226,7 +226,14 @@ static inline phys_addr_t __virt_to_phys(unsigned long x)
static inline unsigned long __phys_to_virt(phys_addr_t x)
{
unsigned long t;
- __pv_stub(x, t, "sub", __PV_BITS_31_24);
+
+ /*
+ * 'unsigned long' cast discard upper word when
+ * phys_addr_t is 64 bit, and makes sure that inline
+ * assembler expression receives 32 bit argument
+ * in place where 'r' 32 bit operand is expected.
+ */
+ __pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
return t;
}
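The comment in the hunk above is the crux of the __phys_to_virt() change: with LPAE enabled, phys_addr_t is 64 bits wide, but the __pv_stub asm operand is a 32-bit "r" register, so only the low word can be passed through, and the cast makes that narrowing explicit. A stand-alone sketch of the same narrowing, using uint32_t to stand in for the 32-bit unsigned long of an ARM kernel (an assumption purely for illustration):

/* Stand-alone illustration, not kernel code: uint32_t plays the role of
 * ARM's 32-bit 'unsigned long', and phys_addr_t is 64 bits as with
 * CONFIG_ARM_LPAE. The explicit narrowing cast keeps only bits 31..0,
 * which is all a 32-bit "r" register operand can carry.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;           /* 64-bit physical addresses (LPAE) */

int main(void)
{
	phys_addr_t phys = 0x880001234ULL;  /* a physical address above 4 GiB */
	uint32_t low = (uint32_t)phys;      /* what the asm operand would receive */

	printf("phys = %#llx, low 32 bits = %#x\n",
	       (unsigned long long)phys, (unsigned int)low);
	return 0;
}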
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7801866e626a..11d59b32fb8d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -508,6 +508,7 @@ __fixup_smp:
teq r0, #0x0 @ '0' on actual UP A9 hardware
beq __fixup_smp_on_up @ So its an A9 UP
ldr r0, [r0, #4] @ read SCU Config
+ARM_BE8(rev r0, r0) @ byteswap if big endian
and r0, r0, #0x3 @ number of CPUs
teq r0, #0x0 @ is 1?
movne pc, lr
@@ -644,7 +645,11 @@ ARM_BE8(rev16 ip, ip)
bcc 1b
bx lr
#else
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
+#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
+#endif
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -653,7 +658,7 @@ ARM_BE8(rev16 ip, ip)
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0
+ orreq ip, ip, r0 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6125f259b7b5..dbf0923e8d76 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -856,7 +856,7 @@ static void __init kuser_init(void *vectors)
memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
-static void __init kuser_init(void *vectors)
+static inline void __init kuser_init(void *vectors)
{
}
#endif
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index e0c68d5bb7dc..52886b89706c 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -10,7 +10,7 @@ UNWIND( .fnstart )
and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
.arch_extension mp
ALT_SMP(W(pldw) [r1])
ALT_UP(W(nop))
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 78eeeca78f5a..580ef2de82d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -558,8 +558,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
break;
}
- printk("Memory policy: ECC %sabled, Data cache %s\n",
- ecc_mask ? "en" : "dis", cp->policy);
+ pr_info("Memory policy: %sData cache %s\n",
+ ecc_mask ? "ECC enabled, " : "", cp->policy);
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
struct mem_type *t = &mem_types[i];
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 5c668b7a31f9..55764a7ef1f0 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -18,6 +18,7 @@
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
+#include <asm/procinfo.h>
#include "mm.h"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 60920f62fdf5..bd1781979a39 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -92,7 +92,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
-.equ cpu_v7_suspend_size, 4 * 8
+.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
@@ -101,13 +101,17 @@ ENTRY(cpu_v7_do_suspend)
stmia r0!, {r4 - r5}
#ifdef CONFIG_MMU
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
+#ifdef CONFIG_ARM_LPAE
+ mrrc p15, 1, r5, r7, c2 @ TTB 1
+#else
mrc p15, 0, r7, c2, c0, 1 @ TTB 1
+#endif
mrc p15, 0, r11, c2, c0, 2 @ TTB control register
#endif
mrc p15, 0, r8, c1, c0, 0 @ Control register
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
- stmia r0, {r6 - r11}
+ stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r10, pc}
ENDPROC(cpu_v7_do_suspend)
@@ -118,16 +122,19 @@ ENTRY(cpu_v7_do_resume)
ldmia r0!, {r4 - r5}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
- ldmia r0, {r6 - r11}
+ ldmia r0, {r5 - r11}
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
-#ifndef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_LPAE
+ mcrr p15, 0, r1, ip, c2 @ TTB 0
+ mcrr p15, 1, r5, r7, c2 @ TTB 1
+#else
ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
-#endif
mcr p15, 0, r1, c2, c0, 0 @ TTB 0
mcr p15, 0, r7, c2, c0, 1 @ TTB 1
+#endif
mcr p15, 0, r11, c2, c0, 2 @ TTB control register
ldr r4, =PRRR @ PRRR
ldr r5, =NMRR @ NMRR
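For the LPAE case added above, TTBR1 is a 64-bit register and mrrc/mcrr transfer it as two 32-bit words (low word in the first register, high word in the second), which is also why cpu_v7_suspend_size grows from 4 * 8 to 4 * 9: one extra 32-bit slot is needed in the save area. A toy sketch of that split and recombination (illustration only, with a made-up register value):

/* Toy illustration, not kernel code: a 64-bit TTBR1 as moved by mrrc/mcrr
 * is simply two 32-bit words, so saving it takes one more 32-bit slot in
 * the suspend buffer than the classic 32-bit TTBR1 did.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ttbr1 = 0x000000012345a000ULL;        /* made-up 64-bit TTBR1 */
	uint32_t lo = (uint32_t)ttbr1;                 /* first transfer register */
	uint32_t hi = (uint32_t)(ttbr1 >> 32);         /* second transfer register */
	uint64_t restored = ((uint64_t)hi << 32) | lo; /* value written back on resume */

	printf("lo=%#x hi=%#x restored=%#llx\n", (unsigned int)lo, (unsigned int)hi,
	       (unsigned long long)restored);
	return 0;
}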