author:    Russell King <rmk+kernel@armlinux.org.uk>  2019-10-22 17:37:29 +0100
committer: Russell King <rmk+kernel@armlinux.org.uk>  2019-10-22 17:37:29 +0100
commit:    39f4d441048444f38fcfcfb1aa3102c030544e86 (patch)
tree:      bee4fe1443f9a7f17eea21a1f2e8902176b13844 /arch/arm
parent:    2ecb287998a47cc0a766f6071f63bc185f338540 (diff)
parent:    e963408e8ff439e2b9da20e5399d7dca21462fcc (diff)
Merge branch 'misc' into fixes
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                                  |   7
-rw-r--r--  arch/arm/Kconfig.debug                            |   2
-rw-r--r--  arch/arm/Makefile                                 |  22
-rw-r--r--  arch/arm/boot/compressed/head.S                   |  14
-rw-r--r--  arch/arm/include/asm/hardware/cache-aurora-l2.h   |  50
            (renamed from arch/arm/mm/cache-aurora-l2.h)
-rw-r--r--  arch/arm/kernel/perf_event_v7.c                   |   6
-rw-r--r--  arch/arm/kernel/smp.c                             |   6
-rw-r--r--  arch/arm/kernel/vdso.c                            |   1
-rw-r--r--  arch/arm/lib/Makefile                             |   8
-rw-r--r--  arch/arm/lib/backtrace-clang.S                    | 217
-rw-r--r--  arch/arm/mach-iop13xx/pci.c                       |   4
-rw-r--r--  arch/arm/mm/cache-l2x0.c                          |  18
-rw-r--r--  arch/arm/mm/fault.c                               |   4
-rw-r--r--  arch/arm/mm/fault.h                               |   1
-rw-r--r--  arch/arm/mm/mmu.c                                 |  19
15 files changed, 349 insertions(+), 30 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 24360211534a..b587a3b3939a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -82,7 +82,7 @@ config ARM
select HAVE_FAST_GUP if ARM_LPAE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
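For reference, CLANG_VERSION encodes the compiler version as major * 10000 + minor * 100 + patchlevel (the scheme scripts/clang-version.sh used at the time), so the >= 100000 test above means "clang 10.0.0 or newer". A minimal standalone sketch of the encoding:

#include <stdio.h>

/* Same encoding scheme scripts/clang-version.sh produced. */
static int clang_version(int major, int minor, int patch)
{
	return major * 10000 + minor * 100 + patch;
}

int main(void)
{
	printf("%d\n", clang_version(10, 0, 0));	/* prints 100000 */
	return 0;
}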
@@ -1572,8 +1572,9 @@ config ARM_PATCH_IDIV
code to do integer division.
config AEABI
- bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
- default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
+ bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
+ !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
+ default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
help
This option allows for the kernel to be compiled using the latest
ARM ABI (aka EABI). This is only useful if you are using a user
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85710e078afb..b9c674ec19e0 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -56,7 +56,7 @@ choice
config UNWINDER_FRAME_POINTER
bool "Frame pointer unwinder"
- depends on !THUMB2_KERNEL && !CC_IS_CLANG
+ depends on !THUMB2_KERNEL
select ARCH_WANT_FRAME_POINTERS
select FRAME_POINTER
help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c3624ca6c0bc..d6273b660629 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -36,7 +36,10 @@ KBUILD_CFLAGS += $(call cc-option,-mno-unaligned-access)
endif
ifeq ($(CONFIG_FRAME_POINTER),y)
-KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
+KBUILD_CFLAGS +=-fno-omit-frame-pointer
+ifeq ($(CONFIG_CC_IS_GCC),y)
+KBUILD_CFLAGS += -mapcs -mno-sched-prolog
+endif
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
@@ -112,6 +115,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
+ifeq ($(CONFIG_CC_IS_CLANG),y)
+CFLAGS_ABI += -meabi gnu
+endif
+
# Accept old syntax despite ".syntax unified"
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
@@ -270,14 +277,9 @@ endif
export TEXT_OFFSET GZFLAGS MMUEXT
-# Do we have FASTFPE?
-FASTFPE :=arch/arm/fastfpe
-ifeq ($(FASTFPE),$(wildcard $(FASTFPE)))
-FASTFPE_OBJ :=$(FASTFPE)/
-endif
-
core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
-core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
+# Put arch/arm/fastfpe/ to use this.
+core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/))
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
@@ -290,6 +292,10 @@ core-y += arch/arm/net/
core-y += arch/arm/crypto/
core-y += $(machdirs) $(platdirs)
+# For cleaning
+core- += $(patsubst %,arch/arm/mach-%/, $(machine-))
+core- += $(patsubst %,arch/arm/plat-%/, $(plat-))
+
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
libs-y := arch/arm/lib/ $(libs-y)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e59d14679fb0..93dffed0ac6e 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -153,11 +153,23 @@
AR_CLASS( .arm )
start:
.type start,#function
+ /*
+ * These 7 nops, along with the 1 nop immediately below for
+ * !THUMB2, form 8 nops that make the compressed kernel bootable
+ * on legacy ARM systems that assumed the kernel to be in a.out
+ * binary format. The boot loaders on these systems would
+ * jump 32 bytes into the image to skip the a.out header.
+ * With these 8 nops filling exactly 32 bytes, things still
+ * work as expected on these legacy systems. Thumb2 mode keeps
+ * 7 of the nops because it turns out that some boot loaders
+ * were patching the initial instructions of the kernel, i.e.
+ * they had started to exploit this "patch area".
+ */
.rept 7
__nop
.endr
#ifndef CONFIG_THUMB2_KERNEL
- mov r0, r0
+ __nop
#else
AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
M_CLASS( nop.w ) @ M: already in Thumb2 mode
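The 32-byte figure is no accident: it matches the size of the traditional a.out exec header that those legacy boot loaders skipped over. A standalone sketch (struct layout from the classic a.out format, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* The traditional a.out exec header: eight 32-bit words. */
struct exec {
	uint32_t a_midmag;	/* magic number + machine id */
	uint32_t a_text;	/* text segment size */
	uint32_t a_data;	/* initialised data size */
	uint32_t a_bss;		/* bss size */
	uint32_t a_syms;	/* symbol table size */
	uint32_t a_entry;	/* entry point */
	uint32_t a_trsize;	/* text relocation size */
	uint32_t a_drsize;	/* data relocation size */
};

int main(void)
{
	/* 8 nops * 4 bytes each = 32 bytes = sizeof(struct exec) */
	printf("%zu\n", sizeof(struct exec));
	return 0;
}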
diff --git a/arch/arm/mm/cache-aurora-l2.h b/arch/arm/include/asm/hardware/cache-aurora-l2.h
index c86124769831..39769ffa0051 100644
--- a/arch/arm/mm/cache-aurora-l2.h
+++ b/arch/arm/include/asm/hardware/cache-aurora-l2.h
@@ -31,6 +31,9 @@
#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
(3 << AURORA_ACR_REPLACEMENT_OFFSET)
+#define AURORA_ACR_PARITY_EN (1 << 21)
+#define AURORA_ACR_ECC_EN (1 << 20)
+
#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
(0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
@@ -41,7 +44,52 @@
#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
(2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
-#define MAX_RANGE_SIZE 1024
+#define AURORA_ERR_CNT_REG 0x600
+#define AURORA_ERR_ATTR_CAP_REG 0x608
+#define AURORA_ERR_ADDR_CAP_REG 0x60c
+#define AURORA_ERR_WAY_CAP_REG 0x610
+#define AURORA_ERR_INJECT_CTL_REG 0x614
+#define AURORA_ERR_INJECT_MASK_REG 0x618
+
+#define AURORA_ERR_CNT_CLR_OFFSET 31
+#define AURORA_ERR_CNT_CLR \
+ (0x1 << AURORA_ERR_CNT_CLR_OFFSET)
+#define AURORA_ERR_CNT_UE_OFFSET 16
+#define AURORA_ERR_CNT_UE_MASK \
+ (0x7fff << AURORA_ERR_CNT_UE_OFFSET)
+#define AURORA_ERR_CNT_CE_OFFSET 0
+#define AURORA_ERR_CNT_CE_MASK \
+ (0xffff << AURORA_ERR_CNT_CE_OFFSET)
+
+#define AURORA_ERR_ATTR_SRC_OFF 16
+#define AURORA_ERR_ATTR_SRC_MSK \
+ (0x7 << AURORA_ERR_ATTR_SRC_OFF)
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_ATTR_TXN_MSK \
+ (0xf << AURORA_ERR_ATTR_TXN_OFF)
+#define AURORA_ERR_ATTR_ERR_OFF 8
+#define AURORA_ERR_ATTR_ERR_MSK \
+ (0x3 << AURORA_ERR_ATTR_ERR_OFF)
+#define AURORA_ERR_ATTR_CAP_VALID_OFF 0
+#define AURORA_ERR_ATTR_CAP_VALID \
+ (0x1 << AURORA_ERR_ATTR_CAP_VALID_OFF)
+
+#define AURORA_ERR_ADDR_CAP_ADDR_MASK 0xffffffe0
+
+#define AURORA_ERR_WAY_IDX_OFF 8
+#define AURORA_ERR_WAY_IDX_MSK \
+ (0xfff << AURORA_ERR_WAY_IDX_OFF)
+#define AURORA_ERR_WAY_CAP_WAY_OFFSET 1
+#define AURORA_ERR_WAY_CAP_WAY_MASK \
+ (0xf << AURORA_ERR_WAY_CAP_WAY_OFFSET)
+
+#define AURORA_ERR_INJECT_CTL_ADDR_MASK 0xfffffff0
+#define AURORA_ERR_ATTR_TXN_OFF 12
+#define AURORA_ERR_INJECT_CTL_EN_MASK 0x3
+#define AURORA_ERR_INJECT_CTL_EN_PARITY 0x2
+#define AURORA_ERR_INJECT_CTL_EN_ECC 0x1
+
+#define AURORA_MAX_RANGE_SIZE 1024
#define AURORA_WAY_SIZE_SHIFT 2
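The new error-capture masks are meant to be used together; a hypothetical decode helper built on them (the function name and message format are illustrative, not from this patch):

/* Decode a value read from AURORA_ERR_ATTR_CAP_REG. */
static void aurora_show_err_attr(u32 attr)
{
	if (!(attr & AURORA_ERR_ATTR_CAP_VALID))
		return;		/* nothing captured */
	pr_info("L2 error: src=%u txn=%u type=%u\n",
		(attr & AURORA_ERR_ATTR_SRC_MSK) >> AURORA_ERR_ATTR_SRC_OFF,
		(attr & AURORA_ERR_ATTR_TXN_MSK) >> AURORA_ERR_ATTR_TXN_OFF,
		(attr & AURORA_ERR_ATTR_ERR_MSK) >> AURORA_ERR_ATTR_ERR_OFF);
}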
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index a4fb0f8b8f84..2924d7910b10 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -697,9 +697,9 @@ static struct attribute_group armv7_pmuv2_events_attr_group = {
/*
* Event filters for PMUv2
*/
-#define ARMV7_EXCLUDE_PL1 (1 << 31)
-#define ARMV7_EXCLUDE_USER (1 << 30)
-#define ARMV7_INCLUDE_HYP (1 << 27)
+#define ARMV7_EXCLUDE_PL1 BIT(31)
+#define ARMV7_EXCLUDE_USER BIT(30)
+#define ARMV7_INCLUDE_HYP BIT(27)
/*
* Secure debug enable reg
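The BIT() conversion is more than style: (1 << 31) shifts into the sign bit of a signed int, which is undefined behaviour, while BIT(31) uses an unsigned long. A standalone demonstration:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))	/* as defined in include/linux/bits.h */

int main(void)
{
	/* 1 << 31 would overflow a signed int; BIT(31) is well defined */
	printf("%#lx\n", BIT(31));	/* prints 0x80000000 */
	return 0;
}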
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index aab8ba40ce38..4b0bab2607e4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -264,15 +264,13 @@ int __cpu_disable(void)
return 0;
}
-static DECLARE_COMPLETION(cpu_died);
-
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ if (!cpu_wait_death(cpu, 5)) {
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
@@ -319,7 +317,7 @@ void arch_cpu_idle_dead(void)
* this returns, power and/or clocks can be removed at any point
* from this CPU and its cache by platform_cpu_kill().
*/
- complete(&cpu_died);
+ (void)cpu_report_death();
/*
* Ensure that the cache lines associated with that completion are
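For context, cpu_wait_death() and cpu_report_death() are the generic CPU-hotplug helpers from kernel/smpboot.c; an annotated restatement of the two sides of the handshake as used in this hunk:

/* Waiter side: the timeout is given in seconds, not jiffies. */
if (!cpu_wait_death(cpu, 5))
	pr_err("CPU%u: cpu didn't die\n", cpu);

/* Dying CPU side: announce death; the return value is deliberately
 * ignored here, hence the (void) cast in the hunk above. */
(void)cpu_report_death();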
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 8872acf9ff99..9bf16c93ee6a 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -194,7 +194,6 @@ static int __init vdso_init(void)
}
text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
/* Allocate the VDSO text pagelist */
vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index b25c54585048..6d2ba454f25b 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -5,7 +5,7 @@
# Copyright (C) 1995-2000 Russell King
#
-lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
+lib-y := changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o setbit.o \
@@ -19,6 +19,12 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
mmu-y := clear_user.o copy_page.o getuser.o putuser.o \
copy_from_user.o copy_to_user.o
+ifdef CONFIG_CC_IS_CLANG
+ lib-y += backtrace-clang.o
+else
+ lib-y += backtrace.o
+endif
+
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
new file mode 100644
index 000000000000..2ff375144b55
--- /dev/null
+++ b/arch/arm/lib/backtrace-clang.S
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/lib/backtrace-clang.S
+ *
+ * Copyright (C) 2019 Nathan Huckleberry
+ *
+ */
+#include <linux/kern_levels.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+/* fp is 0 or stack frame */
+
+#define frame r4
+#define sv_fp r5
+#define sv_pc r6
+#define mask r7
+#define sv_lr r8
+
+ENTRY(c_backtrace)
+
+#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
+ ret lr
+ENDPROC(c_backtrace)
+#else
+
+
+/*
+ * Clang does not store pc or sp in function prologues so we don't know exactly
+ * where the function starts.
+ *
+ * We can treat the current frame's lr as the saved pc and the preceding
+ * frame's lr as the current frame's lr, but we can't trace the most recent
+ * call. Inserting a false stack frame allows us to reference the function
+ * called last in the stacktrace.
+ *
+ * If the call instruction was a bl we can look at the caller's branch
+ * instruction to calculate the saved pc. We can recover the pc in most cases,
+ * but when a function is called through a function pointer we cannot. In that
+ * case we default to using the lr. This will be some address in the function,
+ * but will not be the function start.
+ *
+ * Unfortunately due to the stack frame layout we can't dump r0 - r3, but these
+ * are less frequently saved.
+ *
+ * Stack frame layout:
+ * <larger addresses>
+ * saved lr
+ * frame=> saved fp
+ * optionally saved caller registers (r4 - r10)
+ * optionally saved arguments (r0 - r3)
+ * <top of stack frame>
+ * <smaller addresses>
+ *
+ * Functions start with the following code sequence:
+ * corrected pc => stmfd sp!, {..., fp, lr}
+ * add fp, sp, #x
+ * stmfd sp!, {r0 - r3} (optional)
+ *
+ * The diagram below shows an example stack setup for dump_stack.
+ *
+ * The frame for c_backtrace has pointers to the code of dump_stack. This is
+ * why the frame of c_backtrace is used for the pc calculation of dump_stack,
+ * and why we must move back a frame to print dump_stack itself.
+ *
+ * The stored locals for dump_stack are in dump_stack's frame. This means that
+ * to fully print dump_stack's frame we need both the frame for dump_stack (for
+ * locals) and the frame that was called by dump_stack (for pc).
+ *
+ * To print locals we must know where the function start is. If we read the
+ * function prologue opcodes we can determine which variables are stored in the
+ * stack frame.
+ *
+ * To find the function start of dump_stack we can look at the stored LR of
+ * show_stack. It points at the instruction directly after the bl dump_stack.
+ * We can then read the offset from the bl opcode to determine where the branch
+ * takes us. The address calculated must be the start of dump_stack.
+ *
+ * c_backtrace frame dump_stack:
+ * {[LR] } ============| ...
+ * {[FP] } =======| | bl c_backtrace
+ * | |=> ...
+ * {[R4-R10]} |
+ * {[R0-R3] } | show_stack:
+ * dump_stack frame | ...
+ * {[LR] } =============| bl dump_stack
+ * {[FP] } <=======| |=> ...
+ * {[R4-R10]}
+ * {[R0-R3] }
+ */
+
+ stmfd sp!, {r4 - r9, fp, lr} @ Save an extra register
+ @ to ensure 8 byte alignment
+ movs frame, r0 @ if frame pointer is zero
+ beq no_frame @ we have no stack frames
+ tst r1, #0x10 @ 26 or 32-bit mode?
+ moveq mask, #0xfc000003
+ movne mask, #0 @ mask for 32-bit
+
+/*
+ * Switches the current frame to be the frame for dump_stack.
+ */
+ add frame, sp, #24 @ switch to false frame
+for_each_frame: tst frame, mask @ Check for address exceptions
+ bne no_frame
+
+/*
+ * sv_fp is the stack frame holding the locals of the currently considered
+ * function.
+ *
+ * sv_pc is the saved lr from the frame above. This is a pointer to a code
+ * address within the currently considered function, but it is not the
+ * function start. This value is updated to the function start later, when
+ * that is possible.
+ */
+1001: ldr sv_pc, [frame, #4] @ get saved 'pc'
+1002: ldr sv_fp, [frame, #0] @ get saved fp
+
+ teq sv_fp, mask @ make sure next frame exists
+ beq no_frame
+
+/*
+ * sv_lr is the lr from the function that called the current function. This is
+ * a pointer to a code address in the current function's caller; sv_lr - 4 is
+ * the address of the instruction used to call the current function.
+ *
+ * This sv_lr can be used to calculate the function start if the function was
+ * called using a bl instruction. If the function start can be recovered sv_pc
+ * is overwritten with the function start.
+ *
+ * If the current function was called using a function pointer we cannot
+ * recover the function start and instead continue with sv_pc as an arbitrary
+ * value within the current function. If this is the case we cannot print
+ * registers for the current function, but the stacktrace is still printed
+ * properly.
+ */
+1003: ldr sv_lr, [sv_fp, #4] @ get saved lr from next frame
+
+ ldr r0, [sv_lr, #-4] @ get call instruction
+ ldr r3, .Lopcode+4
+ and r2, r3, r0 @ is this a bl call
+ teq r2, r3
+ bne finished_setup @ give up if it's not
+ and r0, #0xffffff @ get call offset 24-bit int
+ lsl r0, r0, #8 @ sign extend offset
+ asr r0, r0, #8
+ ldr sv_pc, [sv_fp, #4] @ get lr address
+ add sv_pc, sv_pc, #-4 @ get call instruction address
+ add sv_pc, sv_pc, #8 @ take care of prefetch
+ add sv_pc, sv_pc, r0, lsl #2 @ find function start
+
+finished_setup:
+
+ bic sv_pc, sv_pc, mask @ mask PC/LR for the mode
+
+/*
+ * Print the function (sv_pc) and where it was called from (sv_lr).
+ */
+1004: mov r0, sv_pc
+
+ mov r1, sv_lr
+ mov r2, frame
+ bic r1, r1, mask @ mask PC/LR for the mode
+ bl dump_backtrace_entry
+
+/*
+ * Test if the function start is a stmfd instruction to determine which
+ * registers were stored in the function prologue.
+ *
+ * If we could not recover the sv_pc because we were called through a function
+ * pointer the comparison will fail and no registers will print. Unwinding will
+ * continue as if there had been no registers stored in this frame.
+ */
+1005: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, lr}
+ ldr r3, .Lopcode @ instruction exists,
+ teq r3, r1, lsr #11
+ ldr r0, [frame] @ locals are stored in
+ @ the preceding frame
+ subeq r0, r0, #4
+ bleq dump_backtrace_stm @ dump saved registers
+
+/*
+ * Stop unwinding if we are out of frames or if the next frame is invalid.
+ */
+ teq sv_fp, #0 @ zero saved fp means
+ beq no_frame @ no further frames
+
+ cmp sv_fp, frame @ next frame must be
+ mov frame, sv_fp @ above the current frame
+ bhi for_each_frame
+
+1006: adr r0, .Lbad
+ mov r1, frame
+ bl printk
+no_frame: ldmfd sp!, {r4 - r9, fp, pc}
+ENDPROC(c_backtrace)
+ .pushsection __ex_table,"a"
+ .align 3
+ .long 1001b, 1006b
+ .long 1002b, 1006b
+ .long 1003b, 1006b
+ .long 1004b, 1006b
+ .long 1005b, 1006b
+ .popsection
+
+.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+ .align
+.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
+ .word 0x0b000000 @ bl if these bits are set
+
+#endif
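In C, the pc recovery performed around labels 1003 and 1004 looks roughly like this (a hypothetical helper mirroring the lsl/asr shifts in the assembly above; not part of the patch):

#include <stdint.h>

/* Recover the callee's entry point from the bl instruction found at
 * sv_lr - 4.  Only valid once the 0x0b000000 opcode check in the
 * assembly has confirmed the instruction really is a bl. */
static uint32_t bl_target(uint32_t bl_addr, uint32_t insn)
{
	/* the low 24 bits are a signed word offset; sign extend them */
	int32_t imm24 = (int32_t)(insn << 8) >> 8;

	/* +8 accounts for ARM pipeline prefetch; offset is in words */
	return bl_addr + 8 + (uint32_t)imm24 * 4;
}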
diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
index 46ea06e906cc..94d30bc7bba1 100644
--- a/arch/arm/mach-iop13xx/pci.c
+++ b/arch/arm/mach-iop13xx/pci.c
@@ -289,7 +289,7 @@ iop13xx_atux_write_config(struct pci_bus *bus, unsigned int devfn, int where,
if (size != 4) {
val = iop13xx_atux_read(addr);
- if (!iop13xx_atux_pci_status(1) == 0)
+ if (iop13xx_atux_pci_status(1))
return PCIBIOS_SUCCESSFUL;
where = (where & 3) * 8;
@@ -436,7 +436,7 @@ iop13xx_atue_write_config(struct pci_bus *bus, unsigned int devfn, int where,
if (size != 4) {
val = iop13xx_atue_read(addr);
- if (!iop13xx_atue_pci_status(1) == 0)
+ if (iop13xx_atue_pci_status(1))
return PCIBIOS_SUCCESSFUL;
where = (where & 3) * 8;
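The old test only worked by accident of precedence; a standalone demo of why the rewrite preserves behaviour (the status value is made up):

#include <stdio.h>

int main(void)
{
	int status = 5;	/* any nonzero return value */

	/* "!status == 0" parses as "(!status) == 0", i.e. status != 0,
	 * so "if (status)" is equivalent while avoiding clang's
	 * -Wlogical-not-parentheses warning. */
	printf("%d %d\n", !status == 0, status != 0);	/* prints: 1 1 */
	return 0;
}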
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 5b251c8ecd45..12c26eb88afb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -18,8 +18,8 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/cache-aurora-l2.h>
#include "cache-tauros3.h"
-#include "cache-aurora-l2.h"
struct l2c_init_data {
const char *type;
@@ -1352,8 +1352,8 @@ static unsigned long aurora_range_end(unsigned long start, unsigned long end)
* since cache range operations stall the CPU pipeline
* until completion.
*/
- if (end > start + MAX_RANGE_SIZE)
- end = start + MAX_RANGE_SIZE;
+ if (end > start + AURORA_MAX_RANGE_SIZE)
+ end = start + AURORA_MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
@@ -1493,6 +1493,18 @@ static void __init aurora_of_parse(const struct device_node *np,
mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
}
+ if (of_property_read_bool(np, "marvell,ecc-enable")) {
+ mask |= AURORA_ACR_ECC_EN;
+ val |= AURORA_ACR_ECC_EN;
+ }
+
+ if (of_property_read_bool(np, "arm,parity-enable")) {
+ mask |= AURORA_ACR_PARITY_EN;
+ val |= AURORA_ACR_PARITY_EN;
+ } else if (of_property_read_bool(np, "arm,parity-disable")) {
+ mask |= AURORA_ACR_PARITY_EN;
+ }
+
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
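The (val, mask) pair gives each ACR bit three states: forced on, forced off, or inherited from the bootloader. A standalone sketch of the fold, following the aux &= aux_mask; aux |= aux_val pattern cache-l2x0.c applies at init:

#include <stdint.h>
#include <stdio.h>

#define AURORA_ACR_PARITY_EN	(1 << 21)

static uint32_t fold_acr(uint32_t boot, uint32_t aux_val, uint32_t aux_mask)
{
	return (boot & aux_mask) | aux_val;	/* keep only masked-in bits */
}

int main(void)
{
	uint32_t boot = AURORA_ACR_PARITY_EN;	/* firmware enabled parity */

	/* "arm,parity-disable": bit claimed by mask, val leaves it 0 */
	printf("%#x\n", fold_acr(boot, 0, ~AURORA_ACR_PARITY_EN));	/* 0 */
	/* neither property present: the boot-time value survives */
	printf("%#x\n", fold_acr(boot, 0, ~0u));	/* 0x200000 */
	return 0;
}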
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 890eeaac3cbb..bd0f4821f7e1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -191,7 +191,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
@@ -262,7 +262,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
/*
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index c063708fa503..9ecc2097a87a 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -6,6 +6,7 @@
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
+#define FSR_CM (1 << 13)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
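Combined with the fault.c hunks above, the effect is that only genuine writes demand write permission; a hedged restatement as a standalone predicate (the helper name is illustrative; FSR_CM is the "cache maintenance" bit of the fault status register):

#define FSR_CM		(1 << 13)
#define FSR_WRITE	(1 << 11)

/* Cache maintenance operations report as writes in the FSR but only
 * need read access, so they must not set FAULT_FLAG_WRITE or require
 * VM_WRITE on the faulting vma. */
static inline int is_write_fault(unsigned int fsr)
{
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}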
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d9a0038774a6..25da9b2d9610 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1177,10 +1177,29 @@ void __init adjust_lowmem_bounds(void)
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ /*
+ * The first usable region must be PMD aligned. Mark its start
+ * as MEMBLOCK_NOMAP if it isn't.
+ */
+ for_each_memblock(memory, reg) {
+ if (!memblock_is_nomap(reg)) {
+ if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+ phys_addr_t len;
+
+ len = round_up(reg->base, PMD_SIZE) - reg->base;
+ memblock_mark_nomap(reg->base, len);
+ }
+ break;
+ }
+ }
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
+ if (memblock_is_nomap(reg))
+ continue;
+
if (reg->base < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
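A worked example of the NOMAP trimming added at the top of adjust_lowmem_bounds() (the bank address is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE	(1UL << 21)	/* 2 MiB sections on 32-bit ARM */

int main(void)
{
	uint64_t base = 0x80200400;	/* first bank, off a PMD boundary */
	uint64_t len  = ((base + PMD_SIZE - 1) & ~(uint64_t)(PMD_SIZE - 1)) - base;

	/* these 0x1ffc00 bytes are marked MEMBLOCK_NOMAP so the lowmem
	 * mapping starts at the next 2 MiB boundary (0x80400000 here) */
	printf("NOMAP %#llx + %#llx bytes\n",
	       (unsigned long long)base, (unsigned long long)len);
	return 0;
}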