author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 16:27:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-03 16:27:01 -0700
commit     c706c7eb0d08098f0d768aeef945d7cf1f8858b4 (patch)
tree       c629c9705514b70dd3e8f8dc67fa8d849871886c /arch/arm/include/asm
parent     79b0691d0c1d3d4a976e3304a1512e6eca9671ec (diff)
parent     3ff32a0def6e0d2e21a6c5ad1b00726592774018 (diff)
download   linux-c706c7eb0d08098f0d768aeef945d7cf1f8858b4.tar.bz2
Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM development updates from Russell King:
 "Included in this update:

   - moving PSCI code from ARM64/ARM to drivers/

   - removal of some architecture internals from global kernel view

   - addition of software based "privileged no access" support using
     the old domains register to turn off the ability for kernel
     loads/stores to access userspace.  Only the proper accessors will
     be usable.

   - addition of early fixup support for early console

   - re-addition (and reimplementation) of OMAP special interconnect
     barrier

   - removal of finish_arch_switch()

   - only expose cpuX/online in sysfs if hotpluggable

   - a number of code cleanups"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (41 commits)
  ARM: software-based priviledged-no-access support
  ARM: entry: provide uaccess assembly macro hooks
  ARM: entry: get rid of multiple macro definitions
  ARM: 8421/1: smp: Collapse arch_cpu_idle_dead() into cpu_die()
  ARM: uaccess: provide uaccess_save_and_enable() and uaccess_restore()
  ARM: mm: improve do_ldrd_abort macro
  ARM: entry: ensure that IRQs are enabled when calling syscall_trace_exit()
  ARM: entry: efficiency cleanups
  ARM: entry: get rid of asm_trace_hardirqs_on_cond
  ARM: uaccess: simplify user access assembly
  ARM: domains: remove DOMAIN_TABLE
  ARM: domains: keep vectors in separate domain
  ARM: domains: get rid of manager mode for user domain
  ARM: domains: move initial domain setting value to asm/domains.h
  ARM: domains: provide domain_mask()
  ARM: domains: switch to keeping domain value in register
  ARM: 8419/1: dma-mapping: harmonize definition of DMA_ERROR_CODE
  ARM: 8417/1: refactor bitops functions with BIT_MASK() and BIT_WORD()
  ARM: 8416/1: Feroceon: use of_iomap() to map register base
  ARM: 8415/1: early fixmap support for earlycon
  ...
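For orientation before the diffs: under the new software "privileged no access" (SW PAN) mode, DOMAIN_USER is left inaccessible while running in the kernel, and only the uaccess helpers introduced below may open it. A minimal sketch of the mechanism (pan_demo() is hypothetical, for illustration only; uaccess_save_and_enable()/uaccess_restore() are the real helpers added in uaccess.h later in this diff):

	/* Hypothetical illustration of software "privileged no access". */
	static u32 pan_demo(u32 __user *uptr)
	{
		unsigned int ua_flags;
		u32 val;

		/* A bare dereference of a user pointer now takes a domain
		 * fault, because DOMAIN_USER defaults to DOMAIN_NOACCESS:
		 *
		 *	val = *uptr;		-- would oops under SW PAN
		 */

		/* The sanctioned path opens the access window explicitly,
		 * which is exactly what get_user()/copy_from_user() now do
		 * internally: */
		ua_flags = uaccess_save_and_enable();
		val = *uptr;			/* permitted inside the window */
		uaccess_restore(ua_flags);

		return val;
	}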
Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/Kbuild                  |   1
-rw-r--r--  arch/arm/include/asm/assembler.h             |  69
-rw-r--r--  arch/arm/include/asm/barrier.h               |  13
-rw-r--r--  arch/arm/include/asm/bitops.h                |  24
-rw-r--r--  arch/arm/include/asm/cacheflush.h            |  21
-rw-r--r--  arch/arm/include/asm/dma-mapping.h           |   2
-rw-r--r--  arch/arm/include/asm/domain.h                |  53
-rw-r--r--  arch/arm/include/asm/fixmap.h                |  15
-rw-r--r--  arch/arm/include/asm/futex.h                 |  19
-rw-r--r--  arch/arm/include/asm/glue-cache.h            |   2
-rw-r--r--  arch/arm/include/asm/outercache.h            |  17
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h  |   1
-rw-r--r--  arch/arm/include/asm/pmu.h                   | 154
-rw-r--r--  arch/arm/include/asm/psci.h                  |  23
-rw-r--r--  arch/arm/include/asm/smp.h                   |   2
-rw-r--r--  arch/arm/include/asm/smp_plat.h              |   9
-rw-r--r--  arch/arm/include/asm/thread_info.h           |  23
-rw-r--r--  arch/arm/include/asm/uaccess.h               | 132
18 files changed, 285 insertions(+), 295 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1666d2..be648eb47cd9 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
-generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..7bbf325a4f31 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
.endm
#endif
- .macro asm_trace_hardirqs_off
+ .macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl trace_hardirqs_off
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on_cond, cond
+ .macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
/*
* actually the registers should be pushed and pop'd conditionally, but
* after bl the flags are certainly clobbered
*/
+ .if \save
stmdb sp!, {r0-r3, ip, lr}
+ .endif
bl\cond trace_hardirqs_on
+ .if \save
ldmia sp!, {r0-r3, ip, lr}
+ .endif
#endif
.endm
- .macro asm_trace_hardirqs_on
- asm_trace_hardirqs_on_cond al
- .endm
-
- .macro disable_irq
+ .macro disable_irq, save=1
disable_irq_notrace
- asm_trace_hardirqs_off
+ asm_trace_hardirqs_off \save
.endm
.macro enable_irq
@@ -173,7 +177,7 @@
.macro restore_irqs, oldcpsr
tst \oldcpsr, #PSR_I_BIT
- asm_trace_hardirqs_on_cond eq
+ asm_trace_hardirqs_on cond=eq
restore_irqs_notrace \oldcpsr
.endm
@@ -445,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ mrc p15, 0, \tmp, c3, c0, 0
+ str \tmp, [sp, #S_FRAME_SIZE]
+#endif
+ .endm
+
+ .macro uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r0, [sp, #S_FRAME_SIZE]
+ mcr p15, 0, r0, c3, c0, 0
+#endif
+ .endm
+
+ .macro uaccess_save_and_disable, tmp
+ uaccess_save \tmp
+ uaccess_disable \tmp
+ .endm
+
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 70393574e0fa..3ff5642d9788 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
-#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
@@ -37,12 +36,20 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb() do { dsb(); outer_sync(); } while (0)
+#define mb() __arm_heavy_mb()
#define rmb() dsb()
-#define wmb() do { dsb(st); outer_sync(); } while (0)
+#define wmb() __arm_heavy_mb(st)
#define dma_rmb() dmb(osh)
#define dma_wmb() dmb(oshst)
#else
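The outer_sync()-based heavy barrier is replaced by arm_heavy_mb(), whose body lives outside this diffstat (in arch/arm/mm). A plausible sketch of it, assuming it folds in both the outer-cache sync and the new per-SoC soc_mb hook declared above:

	/* Sketch only: arm_heavy_mb() as it plausibly looks in arch/arm/mm.
	 * It subsumes the old outer_sync() and adds the SoC-specific hook
	 * used for the reimplemented OMAP interconnect barrier. */
	void (*soc_mb)(void);

	void arm_heavy_mb(void)
	{
	#ifdef CONFIG_OUTER_CACHE_SYNC
		if (outer_cache.sync)
			outer_cache.sync();	/* drain outer cache buffers */
	#endif
		if (soc_mb)
			soc_mb();		/* SoC interconnect barrier */
	}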
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 56380995f4c3..e943e6cee254 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
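For reference, BIT_MASK() and BIT_WORD() come from include/linux/bitops.h; on 32-bit ARM (BITS_PER_LONG == 32) they reduce to exactly the open-coded expressions they replace, so the refactor above changes no behavior:

	/* include/linux/bitops.h (reproduced for reference, not part of
	 * this diff): */
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))	/* == 1UL << (nr & 31) */
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)		/* == nr >> 5 */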
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda8fd17..d5525bfc7e3e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side. This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+ phys_addr_t phys = __pa(addr);
+
+ __cpuc_flush_dcache_area((void *)addr, size);
+ outer_flush_range(phys, phys + size);
+}
+
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37ec7..a68b9d8a71fe 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~0)
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,11 +54,46 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain));
+
+ return domain;
+}
+
static inline void set_domain(unsigned val)
{
asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
isb();
}
+#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
+ unsigned int domain = get_domain(); \
+ domain &= ~domain_mask(dom); \
+ domain = domain | domain_val(dom, type); \
+ set_domain(domain); \
} while (0)
#else
-static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
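To make the new DACR macro values concrete, here is the arithmetic worked out for the default domain numbering; the values follow directly from the macros in the hunk above:

	/* Worked example, default (!CONFIG_IO_36) layout: DOMAIN_KERNEL=0,
	 * DOMAIN_USER=1, DOMAIN_IO=2, DOMAIN_VECTORS=3; each domain owns
	 * 2 bits of the DACR, with NOACCESS=0 and CLIENT=1:
	 *
	 *   domain_val(DOMAIN_IO, DOMAIN_CLIENT)  == 1 << (2 * 2) == 0x10
	 *
	 *   __DACR_DEFAULT       == 0x01 | 0x10 | 0x40 == 0x51
	 *   DACR_UACCESS_DISABLE == 0x51         (user field 00, no access)
	 *   DACR_UACCESS_ENABLE  == 0x51 | 0x04 == 0x55  (user field 01, client)
	 *
	 * so uaccess_enable/uaccess_disable toggle nothing but the two
	 * user-domain bits [3:2].
	 */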
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae1df27..58cfe9f1a687 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
enum fixed_addresses {
- FIX_KMAP_BEGIN,
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h>
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8..cab07f69382d 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92fc2f41..c2bf24f40177 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
#endif
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
- if (outer_cache.sync)
- outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87dfd77e6..000000000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * linux/arch/arm/include/asm/pmu.h
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-#include <asm/cputype.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- * interrupt and passed the address of the low level handler,
- * and can be used to implement any platform specific handling
- * before or after calling it.
- */
-struct arm_pmu_platdata {
- irqreturn_t (*handle_irq)(int irq, void *dev,
- irq_handler_t pmu_handler);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS 32
-
-#define HW_OP_UNSUPPORTED 0xFFFF
-#define C(_x) PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED 0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED \
- [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
-[0 ... C(MAX) - 1] = { \
- [0 ... C(OP_MAX) - 1] = { \
- [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
- }, \
-}
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
- /*
- * The events that are active on the PMU for the given index.
- */
- struct perf_event *events[ARMPMU_MAX_HWEVENTS];
-
- /*
- * A 1 bit for an index indicates that the counter is being used for
- * an event. A 0 means that the counter can be used.
- */
- DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
-
- /*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
- * When using percpu IRQs, we need a percpu dev_id. Place it here as we
- * already have to allocate this struct per cpu.
- */
- struct arm_pmu *percpu_pmu;
-};
-
-struct arm_pmu {
- struct pmu pmu;
- cpumask_t active_irqs;
- cpumask_t supported_cpus;
- int *irq_affinity;
- char *name;
- irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct perf_event *event);
- void (*disable)(struct perf_event *event);
- int (*get_event_idx)(struct pmu_hw_events *hw_events,
- struct perf_event *event);
- void (*clear_event_idx)(struct pmu_hw_events *hw_events,
- struct perf_event *event);
- int (*set_event_filter)(struct hw_perf_event *evt,
- struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
- void (*start)(struct arm_pmu *);
- void (*stop)(struct arm_pmu *);
- void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
- int (*map_event)(struct perf_event *event);
- int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
- u64 max_period;
- struct platform_device *plat_device;
- struct pmu_hw_events __percpu *hw_events;
- struct notifier_block hotplug_nb;
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
- const unsigned (*event_map)[PERF_COUNT_HW_MAX],
- const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX],
- u32 raw_event_mask);
-
-struct pmu_probe_info {
- unsigned int cpuid;
- unsigned int mask;
- int (*init)(struct arm_pmu *);
-};
-
-#define PMU_PROBE(_cpuid, _mask, _fn) \
-{ \
- .cpuid = (_cpuid), \
- .mask = (_mask), \
- .init = (_fn), \
-}
-
-#define ARM_PMU_PROBE(_cpuid, _fn) \
- PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
-
-#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
-
-#define XSCALE_PMU_PROBE(_version, _fn) \
- PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
-
-int arm_pmu_device_probe(struct platform_device *pdev,
- const struct of_device_id *of_table,
- const struct pmu_probe_info *probe_table);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3ec6d1f..68ee3ce17b82 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
#ifndef __ASM_ARM_PSCI_H
#define __ASM_ARM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
- int (*affinity_info)(unsigned long target_affinity,
- unsigned long lowest_affinity_level);
- int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops;
#ifdef CONFIG_ARM_PSCI
-int psci_init(void);
bool psci_smp_available(void);
#else
-static inline int psci_init(void) { return 0; }
static inline bool psci_smp_available(void) { return false; }
#endif
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1ba6fb4..ef356659b4f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e5224d8f7..f9080717fc88 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..d0a1119dcaf3 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* thread information flags:
- * TIF_SYSCALL_TRACE - syscall trace active
- * TIF_SYSCAL_AUDIT - syscall auditing active
- * TIF_SIGPENDING - signal pending
- * TIF_NEED_RESCHED - rescheduling necessary
- * TIF_NOTIFY_RESUME - callback before returning to user
* TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
-#define TIF_SIGPENDING 0
-#define TIF_NEED_RESCHED 1
+#define TIF_SIGPENDING 0 /* signal pending */
+#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_UPROBE 7
-#define TIF_SYSCALL_TRACE 8
-#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+#define TIF_UPROBE 3 /* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
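A likely motivation for the TIF renumbering (an assumption on my part; the diff does not state it): packing the syscall-work flags into one contiguous low byte lets the entry assembly test them all at once:

	/* Assumption/illustration only.  With the renumbering above, a
	 * combined mask such as
	 *
	 *	_TIF_SYSCALL_WORK = _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
	 *			    _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP
	 *
	 * is 0xf0 (bits 4-7), which fits an ARM 8-bit rotated immediate,
	 * so the entry code can test it with a single
	 * "tst rX, #_TIF_SYSCALL_WORK".
	 */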
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..8cc85a4ebec2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
+
+/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
*/
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \
default: __e = __put_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
__e; \
})
@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user_asm_byte(x, addr, err) \
+#define __get_user_asm(x, addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(ldrb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -329,6 +365,9 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
+#define __get_user_asm_byte(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrb)
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -348,22 +387,7 @@ do { \
#endif
#define __get_user_asm_word(x, addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(ldr) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " mov %1, #0\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err), "=&r" (x) \
- : "r" (addr), "i" (-EFAULT) \
- : "cc")
+ __get_user_asm(x, addr, err, ldr)
#define __put_user(x, ptr) \
({ \
@@ -381,9 +405,11 @@ do { \
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
+ unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -391,11 +417,12 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
} while (0)
-#define __put_user_asm_byte(x, __pu_addr, err) \
+#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
- "1: " TUSER(strb) " %1,[%2],#0\n" \
+ "1: " TUSER(instr) " %1, [%2], #0\n" \
"2:\n" \
" .pushsection .text.fixup,\"ax\"\n" \
" .align 2\n" \
@@ -410,6 +437,9 @@ do { \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
+#define __put_user_asm_byte(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strb)
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -427,21 +457,7 @@ do { \
#endif
#define __put_user_asm_word(x, __pu_addr, err) \
- __asm__ __volatile__( \
- "1: " TUSER(str) " %1,[%2],#0\n" \
- "2:\n" \
- " .pushsection .text.fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %0, %3\n" \
- " b 2b\n" \
- " .popsection\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 3b\n" \
- " .popsection" \
- : "+r" (err) \
- : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
- : "cc")
+ __put_user_asm(x, __pu_addr, err, str)
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
@@ -474,11 +490,46 @@ do { \
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);