Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                 |   8
-rw-r--r--  arch/arm/include/asm/cmpxchg.h   |  12
-rw-r--r--  arch/arm/include/asm/irqflags.h  |  10
-rw-r--r--  arch/arm/include/asm/mach/arch.h |   2
-rw-r--r--  arch/arm/include/asm/memory.h    |   2
-rw-r--r--  arch/arm/include/asm/pgtable.h   |   2
-rw-r--r--  arch/arm/include/asm/smp.h       |   4
-rw-r--r--  arch/arm/include/asm/unistd.h    |   7
-rw-r--r--  arch/arm/kernel/devtree.c        |  12
-rw-r--r--  arch/arm/kernel/entry-armv.S     |  33
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  |   1
-rw-r--r--  arch/arm/kernel/kgdb.c           |  31
-rw-r--r--  arch/arm/kernel/smp.c            |  12
-rw-r--r--  arch/arm/kernel/smp_twd.c        |  11
-rw-r--r--  arch/arm/kernel/traps.c          |  52
-rw-r--r--  arch/arm/lib/clear_user.S        |   4
-rw-r--r--  arch/arm/mm/Kconfig              |  12
-rw-r--r--  arch/arm/mm/dma-mapping.c        |   7
-rw-r--r--  arch/arm/mm/fault.c              |  22
-rw-r--r--  arch/arm/mm/fault.h              |   1
-rw-r--r--  arch/arm/mm/mmu.c                |   4
-rw-r--r--  arch/arm/vdso/vdsomunge.c        |  17
22 files changed, 125 insertions, 141 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 72ad724c67ae..823f90ea65c4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY
config ARCH_RPC
bool "RiscPC"
+ depends on MMU
select ARCH_ACORN
select ARCH_MAY_HAVE_PC_FDC
select ARCH_SPARSEMEM_ENABLE
@@ -1410,7 +1411,6 @@ config HAVE_ARM_ARCH_TIMER
config HAVE_ARM_TWD
bool
- depends on SMP
select CLKSRC_OF if OF
help
This options enables support for the ARM timer and watchdog unit
@@ -1470,6 +1470,8 @@ choice
config VMSPLIT_3G
bool "3G/1G user/kernel split"
+ config VMSPLIT_3G_OPT
+ bool "3G/1G user/kernel split (for full 1G low memory)"
config VMSPLIT_2G
bool "2G/2G user/kernel split"
config VMSPLIT_1G
@@ -1481,6 +1483,7 @@ config PAGE_OFFSET
default PHYS_OFFSET if !MMU
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
+ default 0xB0000000 if VMSPLIT_3G_OPT
default 0xC0000000
config NR_CPUS
@@ -1695,8 +1698,9 @@ config HIGHMEM
If unsure, say n.
config HIGHPTE
- bool "Allocate 2nd-level pagetables from highmem"
+ bool "Allocate 2nd-level pagetables from highmem" if EXPERT
depends on HIGHMEM
+ default y
help
The VM uses one page of physical memory for each page table.
For systems with a lot of processes, this can use a lot of
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 916a2744d5c6..97882f9bad12 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -39,6 +39,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
+#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
case 1:
asm volatile("@ __xchg1\n"
"1: ldrexb %0, [%3]\n"
@@ -49,6 +50,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
: "r" (x), "r" (ptr)
: "memory", "cc");
break;
+ case 2:
+ asm volatile("@ __xchg2\n"
+ "1: ldrexh %0, [%3]\n"
+ " strexh %1, %2, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b"
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+#endif
case 4:
asm volatile("@ __xchg4\n"
"1: ldrex %0, [%3]\n"
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 43908146a5cf..e6b70d9d084e 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -54,6 +54,14 @@ static inline void arch_local_irq_disable(void)
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")
+
+#ifndef CONFIG_CPU_V7M
+#define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc")
+#define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc")
+#else
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
+#endif
#else
/*
@@ -136,6 +144,8 @@ static inline void arch_local_irq_disable(void)
: "memory", "cc"); \
})
+#define local_abt_enable() do { } while (0)
+#define local_abt_disable() do { } while (0)
#endif
/*
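
The new local_abt_enable()/local_abt_disable() macros toggle the CPSR.A bit, which masks asynchronous (imprecise) external aborts; ARMv7-M has no such bit, hence the empty variants, and the pre-v6 fallbacks below are no-ops for the same reason. They are consumed by the smp.c and fault.c hunks later in this diff. A usage sketch, assuming kernel context where async aborts start out masked:

    #include <asm/irqflags.h>

    static void drain_pending_abort(void)
    {
            /* unmask async aborts so anything already latched fires here,
             * at a point where we are prepared to handle it */
            local_abt_enable();
            /* ... touch the suspect device or bus ... */
            local_abt_disable();
    }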
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index cb3a40717edd..5c1ad11aa392 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -47,7 +47,7 @@ struct machine_desc {
unsigned l2c_aux_val; /* L2 cache aux value */
unsigned l2c_aux_mask; /* L2 cache aux mask */
void (*l2c_write_sec)(unsigned long, unsigned);
- struct smp_operations *smp; /* SMP operations */
+ const struct smp_operations *smp; /* SMP operations */
bool (*smp_init)(void);
void (*fixup)(struct tag *, char **);
void (*dt_fixup)(void);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 98d58bb04ac5..c79b57bf71c4 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -76,10 +76,12 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
*/
#define IOREMAP_MAX_ORDER 24
+#endif
#else /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f40354198bad..348caabb7625 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -43,7 +43,7 @@
*/
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END 0xff000000UL
+#define VMALLOC_END 0xff800000UL
#define LIBRARY_TEXT_START 0x0c000000
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index ef356659b4f4..3d6dc8b460e4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -112,7 +112,7 @@ struct smp_operations {
struct of_cpu_method {
const char *method;
- struct smp_operations *ops;
+ const struct smp_operations *ops;
};
#define CPU_METHOD_OF_DECLARE(name, _method, _ops) \
@@ -122,6 +122,6 @@ struct of_cpu_method {
/*
* set platform specific SMP operations
*/
-extern void smp_set_ops(struct smp_operations *);
+extern void smp_set_ops(const struct smp_operations *);
#endif /* ifndef __ASM_ARM_SMP_H */
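
Together with the mach/arch.h and smp.c hunks, this lets platforms mark their SMP ops const and place them in rodata. Typical platform code would now look like this (the foo_* names are placeholders, not from this patch):

    #include <linux/init.h>
    #include <asm/smp.h>

    static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
    {
            /* platform-specific kick of the secondary core */
            return 0;
    }

    static const struct smp_operations foo_smp_ops __initconst = {
            .smp_boot_secondary = foo_boot_secondary,
    };

    /* registered either via smp_set_ops(&foo_smp_ops) or through the
     * machine_desc .smp field, both of which now take const pointers */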
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7cba573c2cc9..7b84657fba35 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -21,13 +21,6 @@
*/
#define __NR_syscalls (392)
-/*
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
-#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
-
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 11c54de9f8cf..65addcbf5b30 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -101,6 +101,7 @@ void __init arm_dt_init_cpu_maps(void)
if (of_property_read_u32(cpu, "reg", &hwid)) {
pr_debug(" * %s missing reg property\n",
cpu->full_name);
+ of_node_put(cpu);
return;
}
@@ -108,8 +109,10 @@ void __init arm_dt_init_cpu_maps(void)
* 8 MSBs must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
*/
- if (hwid & ~MPIDR_HWID_BITMASK)
+ if (hwid & ~MPIDR_HWID_BITMASK) {
+ of_node_put(cpu);
return;
+ }
/*
* Duplicate MPIDRs are a recipe for disaster.
@@ -119,9 +122,11 @@ void __init arm_dt_init_cpu_maps(void)
* to avoid matching valid MPIDR[23:0] values.
*/
for (j = 0; j < cpuidx; j++)
- if (WARN(tmp_map[j] == hwid, "Duplicate /cpu reg "
- "properties in the DT\n"))
+ if (WARN(tmp_map[j] == hwid,
+ "Duplicate /cpu reg properties in the DT\n")) {
+ of_node_put(cpu);
return;
+ }
/*
* Build a stashed array of MPIDR values. Numbering scheme
@@ -143,6 +148,7 @@ void __init arm_dt_init_cpu_maps(void)
"max cores %u, capping them\n",
cpuidx, nr_cpu_ids)) {
cpuidx = nr_cpu_ids;
+ of_node_put(cpu);
break;
}
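
All four devtree.c hunks fix the same leak: for_each_child_of_node() holds a reference on the node it passes to the loop body, so every early exit from the walk must drop it. The general pattern being enforced (generic sketch, not this file's exact code):

    #include <linux/of.h>

    static void walk_children(struct device_node *parent)
    {
            struct device_node *np;

            for_each_child_of_node(parent, np) {
                    if (!of_device_is_available(np)) {
                            of_node_put(np);  /* drop the iterator's reference */
                            return;           /* early exit needs an explicit put */
                    }
                    /* on normal iteration the loop macro puts 'np' itself */
            }
    }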
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3e1c26eb32b4..3ce377f7251f 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -427,8 +427,7 @@ ENDPROC(__fiq_abt)
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
- !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@@ -859,20 +858,7 @@ __kuser_helper_start:
__kuser_cmpxchg64: @ 0xffff0f60
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
- /*
- * Poor you. No fast solution possible...
- * The kernel itself must perform the operation.
- * A special ghost syscall is used for that (see traps.c).
- */
- stmfd sp!, {r7, lr}
- ldr r7, 1f @ it's 20 bits
- swi __ARM_NR_cmpxchg64
- ldmfd sp!, {r7, pc}
-1: .word __ARM_NR_cmpxchg64
-
-#elif defined(CONFIG_CPU_32v6K)
+#if defined(CONFIG_CPU_32v6K)
stmfd sp!, {r4, r5, r6, r7}
ldrd r4, r5, [r0] @ load old val
@@ -948,20 +934,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
__kuser_cmpxchg: @ 0xffff0fc0
-#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-
- /*
- * Poor you. No fast solution possible...
- * The kernel itself must perform the operation.
- * A special ghost syscall is used for that (see traps.c).
- */
- stmfd sp!, {r7, lr}
- ldr r7, 1f @ it's 20 bits
- swi __ARM_NR_cmpxchg
- ldmfd sp!, {r7, pc}
-1: .word __ARM_NR_cmpxchg
-
-#elif __LINUX_ARM_ARCH__ < 6
+#if __LINUX_ARM_ARCH__ < 6
#ifdef CONFIG_MMU
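
With SMP on pre-ARMv6 cores gone, the SWI-based fallback was unreachable, so both cmpxchg helpers lose their ghost-syscall branches while the helpers themselves survive. For reference, the user-side convention for the remaining helper at 0xffff0fc0, per Documentation/arm/kernel_user_helpers.txt (sketch of a typical caller; returns 0 when the exchange happened):

    typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)

    static int atomic_add_return_user(volatile int *v, int amount)
    {
            int old;
            do {
                    old = *v;
            } while (__kuser_cmpxchg(old, old + amount, v));  /* 0 == exchanged */
            return old + amount;
    }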
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index dc7d0a95bd36..6284779d64ee 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -35,7 +35,6 @@
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
-#include <asm/kdebug.h>
#include <asm/traps.h>
/* Breakpoint currently in use for each BRP. */
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index fd9eefce0a7b..9232caee7060 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -74,7 +74,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
- struct pt_regs *thread_regs;
+ struct thread_info *ti;
int regno;
/* Just making sure... */
@@ -86,24 +86,17 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[regno] = 0;
/* Otherwise, we have only some registers from switch_to() */
- thread_regs = task_pt_regs(task);
- gdb_regs[_R0] = thread_regs->ARM_r0;
- gdb_regs[_R1] = thread_regs->ARM_r1;
- gdb_regs[_R2] = thread_regs->ARM_r2;
- gdb_regs[_R3] = thread_regs->ARM_r3;
- gdb_regs[_R4] = thread_regs->ARM_r4;
- gdb_regs[_R5] = thread_regs->ARM_r5;
- gdb_regs[_R6] = thread_regs->ARM_r6;
- gdb_regs[_R7] = thread_regs->ARM_r7;
- gdb_regs[_R8] = thread_regs->ARM_r8;
- gdb_regs[_R9] = thread_regs->ARM_r9;
- gdb_regs[_R10] = thread_regs->ARM_r10;
- gdb_regs[_FP] = thread_regs->ARM_fp;
- gdb_regs[_IP] = thread_regs->ARM_ip;
- gdb_regs[_SPT] = thread_regs->ARM_sp;
- gdb_regs[_LR] = thread_regs->ARM_lr;
- gdb_regs[_PC] = thread_regs->ARM_pc;
- gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
+ ti = task_thread_info(task);
+ gdb_regs[_R4] = ti->cpu_context.r4;
+ gdb_regs[_R5] = ti->cpu_context.r5;
+ gdb_regs[_R6] = ti->cpu_context.r6;
+ gdb_regs[_R7] = ti->cpu_context.r7;
+ gdb_regs[_R8] = ti->cpu_context.r8;
+ gdb_regs[_R9] = ti->cpu_context.r9;
+ gdb_regs[_R10] = ti->cpu_context.sl;
+ gdb_regs[_FP] = ti->cpu_context.fp;
+ gdb_regs[_SPT] = ti->cpu_context.sp;
+ gdb_regs[_PC] = ti->cpu_context.pc;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
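
The kgdb.c rewrite fixes register reporting for sleeping tasks: task_pt_regs() points at the user-mode exception frame, so r0-r3, ip, lr, pc and cpsr read from it are stale for a task that slept in the kernel, while __switch_to() saves only the callee-saved state. That state lives in thread_info (abridged from arch/arm/include/asm/thread_info.h; trailing coprocessor words omitted):

    struct cpu_context_save {
            __u32   r4, r5, r6, r7, r8, r9;
            __u32   sl;     /* r10 */
            __u32   fp;     /* r11 */
            __u32   sp;
            __u32   pc;     /* where execution resumes after __switch_to() */
    };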
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 48185a773852..b26361355dae 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -80,7 +80,7 @@ static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops;
-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
{
if (ops)
smp_ops = *ops;
@@ -400,6 +400,7 @@ asmlinkage void secondary_start_kernel(void)
local_irq_enable();
local_fiq_enable();
+ local_abt_enable();
/*
* OK, it's off to the idle thread for us
@@ -748,6 +749,15 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
+ /*
+ * Generate the backtrace directly if we are running in a calling
+ * context that is not preemptible by the backtrace IPI. Note
+ * that nmi_cpu_backtrace() automatically removes the current cpu
+ * from mask.
+ */
+ if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+ nmi_cpu_backtrace(NULL);
+
smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index e9035cda1485..1bfa7a7f5533 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -23,7 +23,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <asm/smp_plat.h>
#include <asm/smp_twd.h>
/* set up by the platform code */
@@ -34,6 +33,8 @@ static unsigned long twd_timer_rate;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu *twd_evt;
+static unsigned int twd_features =
+ CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
static int twd_ppi;
static int twd_shutdown(struct clock_event_device *clk)
@@ -294,8 +295,7 @@ static void twd_timer_setup(void)
writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
- clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_C3STOP;
+ clk->features = twd_features;
clk->rating = 350;
clk->set_state_shutdown = twd_shutdown;
clk->set_state_periodic = twd_set_periodic;
@@ -350,6 +350,8 @@ static int __init twd_local_timer_common_register(struct device_node *np)
goto out_irq;
twd_get_clock(np);
+ if (!of_property_read_bool(np, "always-on"))
+ twd_features |= CLOCK_EVT_FEAT_C3STOP;
/*
* Immediately configure the timer on the boot CPU, unless we need
@@ -392,9 +394,6 @@ static void __init twd_local_timer_of_register(struct device_node *np)
{
int err;
- if (!is_smp() || !setup_max_cpus)
- return;
-
twd_ppi = irq_of_parse_and_map(np, 0);
if (!twd_ppi) {
err = -EINVAL;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 969f9d9e665f..bc698383e822 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -625,58 +625,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
set_tls(regs->ARM_r0);
return 0;
-#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
- /*
- * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
- * Return zero in r0 if *MEM was changed or non-zero if no exchange
- * happened. Also set the user C flag accordingly.
- * If access permissions have to be fixed up then non-zero is
- * returned and the operation has to be re-attempted.
- *
- * *NOTE*: This is a ghost syscall private to the kernel. Only the
- * __kuser_cmpxchg code in entry-armv.S should be aware of its
- * existence. Don't ever use this from user code.
- */
- case NR(cmpxchg):
- for (;;) {
- extern void do_DataAbort(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs);
- unsigned long val;
- unsigned long addr = regs->ARM_r2;
- struct mm_struct *mm = current->mm;
- pgd_t *pgd; pmd_t *pmd; pte_t *pte;
- spinlock_t *ptl;
-
- regs->ARM_cpsr &= ~PSR_C_BIT;
- down_read(&mm->mmap_sem);
- pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
- goto bad_access;
- pmd = pmd_offset(pgd, addr);
- if (!pmd_present(*pmd))
- goto bad_access;
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
- pte_unmap_unlock(pte, ptl);
- goto bad_access;
- }
- val = *(unsigned long *)addr;
- val -= regs->ARM_r0;
- if (val == 0) {
- *(unsigned long *)addr = regs->ARM_r1;
- regs->ARM_cpsr |= PSR_C_BIT;
- }
- pte_unmap_unlock(pte, ptl);
- up_read(&mm->mmap_sem);
- return val;
-
- bad_access:
- up_read(&mm->mmap_sem);
- /* simulate a write access fault */
- do_DataAbort(addr, 15 + (1 << 11), regs);
- }
-#endif
-
default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 970d6c043774..e936352ccb00 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/unwind.h>
.text
@@ -20,6 +21,8 @@
*/
ENTRY(__clear_user_std)
WEAK(arm_clear_user)
+UNWIND(.fnstart)
+UNWIND(.save {r1, lr})
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -44,6 +47,7 @@ WEAK(arm_clear_user)
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
+UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index df7537f12469..c21941349b3e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -419,28 +419,24 @@ config CPU_THUMBONLY
config CPU_32v3
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
- select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select NEED_KUSER_HELPERS
select TLS_REG_EMUL if SMP || !MMU
@@ -805,14 +801,6 @@ config TLS_REG_EMUL
a few prototypes like that in existence) and therefore access to
that required register must be emulated.
-config NEEDS_SYSCALL_FOR_CMPXCHG
- bool
- select NEED_KUSER_HELPERS
- help
- SMP on a pre-ARMv6 processor? Well OK then.
- Forget about fast user space cmpxchg support.
- It is just not possible.
-
config NEED_KUSER_HELPERS
bool
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a7815e5421b..ad4eb2d26e16 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1407,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (!pages)
return -ENXIO;
+ if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+ return -ENXIO;
+
+ pages += off;
+
do {
int ret = vm_insert_page(vma, uaddr, *pages++);
if (ret) {
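
The added check validates the user-controlled vm_pgoff and mapping length against the buffer before indexing the pages array. Note the subtraction form: once off < nr_pages holds, nr_pages - off cannot underflow, whereas the naive off + len > nr_pages test could wrap and pass. Equivalent stand-alone logic (illustrative helper, not in the patch):

    #include <stdbool.h>

    /* true iff [off, off + len_pages) lies inside [0, nr_pages) */
    static bool mmap_window_valid(unsigned long off, unsigned long len_pages,
                                  unsigned long nr_pages)
    {
            return off < nr_pages && len_pages <= nr_pages - off;
    }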
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0d629b8f973f..daafcf121ce0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -593,6 +593,28 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, ifsr, 0);
}
+/*
+ * Abort handler to be used only during first unmasking of asynchronous aborts
+ * on the boot CPU. This makes sure that the machine will not die if the
+ * firmware/bootloader left an imprecise abort pending for us to trip over.
+ */
+static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
+ "first unmask, this is most likely caused by a "
+ "firmware/bootloader bug.\n", fsr);
+
+ return 0;
+}
+
+void __init early_abt_enable(void)
+{
+ fsr_info[22].fn = early_abort_handler;
+ local_abt_enable();
+ fsr_info[22].fn = do_bad;
+}
+
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
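
early_abt_enable() is a drain-pending-event idiom: install a forgiving handler in the fsr_info slot for asynchronous external aborts, unmask so anything the firmware left latched fires immediately, then restore the strict handler. The shape of the idiom, reduced to a self-contained sketch with hypothetical names:

    typedef int (*abt_handler_t)(unsigned int fsr);

    static int strict_handler(unsigned int fsr)   { return -1; /* would die  */ }
    static int tolerant_handler(unsigned int fsr) { return 0;  /* swallow it */ }

    static abt_handler_t async_abort_fn = strict_handler;

    static void early_abt_enable_pattern(void)
    {
            async_abort_fn = tolerant_handler;  /* 1: forgive               */
            /* 2: unmask; a pending abort fires right here, harmlessly      */
            async_abort_fn = strict_handler;    /* 3: normal policy resumes */
    }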
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index cf08bdfbe0d6..05ec5e0df32d 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -24,5 +24,6 @@ static inline int fsr_fs(unsigned int fsr)
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
+void early_abt_enable(void);
#endif /* __ARCH_ARM_FAULT_H */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7cd15143a507..4867f5daf82c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -38,6 +38,7 @@
#include <asm/mach/pci.h>
#include <asm/fixmap.h>
+#include "fault.h"
#include "mm.h"
#include "tcm.h"
@@ -1363,6 +1364,9 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
*/
local_flush_tlb_all();
flush_cache_all();
+
+ /* Enable asynchronous aborts */
+ early_abt_enable();
}
static void __init kmap_init(void)
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index aedec81d1198..f6455273b2f8 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,7 +45,6 @@
* it does.
*/
-#include <byteswap.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
@@ -59,6 +58,16 @@
#include <sys/types.h>
#include <unistd.h>
+#define swab16(x) \
+ ((((x) & 0x00ff) << 8) | \
+ (((x) & 0xff00) >> 8))
+
+#define swab32(x) \
+ ((((x) & 0x000000ff) << 24) | \
+ (((x) & 0x0000ff00) << 8) | \
+ (((x) & 0x00ff0000) >> 8) | \
+ (((x) & 0xff000000) >> 24))
+
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HOST_ORDER ELFDATA2LSB
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
@@ -104,17 +113,17 @@ static void cleanup(void)
static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
{
- return swap ? bswap_32(word) : word;
+ return swap ? swab32(word) : word;
}
static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
{
- return swap ? bswap_16(half) : half;
+ return swap ? swab16(half) : half;
}
static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
{
- *dst = swap ? bswap_32(val) : val;
+ *dst = swap ? swab32(val) : val;
}
int main(int argc, char **argv)
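
byteswap.h and its bswap_16()/bswap_32() are glibc extensions, and vdsomunge runs on the build host, which may be a BSD or macOS machine, so the tool now carries plain-C swab macros instead. A quick host-side sanity check for them (illustrative test, not part of the patch):

    #include <assert.h>

    #define swab16(x) \
            ((((x) & 0x00ff) << 8) | \
             (((x) & 0xff00) >> 8))

    #define swab32(x) \
            ((((x) & 0x000000ff) << 24) | \
             (((x) & 0x0000ff00) << 8) | \
             (((x) & 0x00ff0000) >> 8) | \
             (((x) & 0xff000000) >> 24))

    int main(void)
    {
            assert(swab16(0x1234) == 0x3412);
            assert(swab32(0x12345678u) == 0x78563412u);
            return 0;
    }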