-rw-r--r--   arch/arm/Kconfig              |   3
-rw-r--r--   arch/arm/include/asm/elf.h    |   4
-rw-r--r--   arch/arm/include/asm/mmu.h    |   1
-rw-r--r--   arch/arm/include/asm/page.h   |   2
-rw-r--r--   arch/arm/kernel/entry-armv.S  | 103
-rw-r--r--   arch/arm/kernel/fiq.c         |  19
-rw-r--r--   arch/arm/kernel/process.c     |  45
-rw-r--r--   arch/arm/kernel/signal.c      |  52
-rw-r--r--   arch/arm/kernel/signal.h      |  12
-rw-r--r--   arch/arm/kernel/traps.c       |  46
-rw-r--r--   arch/arm/kernel/vmlinux.lds.S |  17
-rw-r--r--   arch/arm/mm/Kconfig           |  34
-rw-r--r--   arch/arm/mm/mmu.c             |  14
13 files changed, 253 insertions, 99 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 82f069829ac0..995b55311c55 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -217,7 +217,8 @@ config VECTORS_BASE
         default DRAM_BASE if REMAP_VECTORS_TO_RAM
         default 0x00000000
         help
-          The base address of exception vectors.
+          The base address of exception vectors.  This must be two pages
+          in size.
 
 config ARM_PATCH_PHYS_VIRT
         bool "Patch physical to virtual translations at runtime" if EMBEDDED
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c4800..9c9b30717fda 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,8 @@ struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
 #endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index d1b4998e4f43..6f18da09668b 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -10,6 +10,7 @@ typedef struct {
         int             switch_pending;
 #endif
         unsigned int    vmalloc_seq;
+        unsigned long   sigpage;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d505..4355f0ec44d6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)        memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+#ifdef CONFIG_KUSER_HELPERS
 #define __HAVE_ARCH_GATE_AREA 1
+#endif
 
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f90..d40d0ef389db 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -742,6 +742,18 @@ ENDPROC(__switch_to)
 #endif
         .endm
 
+        .macro  kuser_pad, sym, size
+        .if     (. - \sym) & 3
+        .rept   4 - (. - \sym) & 3
+        .byte   0
+        .endr
+        .endif
+        .rept   (\size - (. - \sym)) / 4
+        .word   0xe7fddef1
+        .endr
+        .endm
+
+#ifdef CONFIG_KUSER_HELPERS
         .align  5
         .globl  __kuser_helper_start
 __kuser_helper_start:
@@ -832,18 +844,13 @@ kuser_cmpxchg64_fixup:
 #error "incoherent kernel configuration"
 #endif
 
-        /* pad to next slot */
-        .rept   (16 - (. - __kuser_cmpxchg64)/4)
-        .word   0
-        .endr
-
-        .align  5
+        kuser_pad __kuser_cmpxchg64, 64
 
 __kuser_memory_barrier:                         @ 0xffff0fa0
         smp_dmb arm
         usr_ret lr
 
-        .align  5
+        kuser_pad __kuser_memory_barrier, 32
 
 __kuser_cmpxchg:                                @ 0xffff0fc0
 
@@ -916,13 +923,14 @@ kuser_cmpxchg32_fixup:
 
 #endif
 
-        .align  5
+        kuser_pad __kuser_cmpxchg, 32
 
 __kuser_get_tls:                                @ 0xffff0fe0
         ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
         usr_ret lr
         mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
-        .rep    4
+        kuser_pad __kuser_get_tls, 16
+        .rep    3
         .word   0                       @ 0xffff0ff0 software TLS value, then
         .endr                           @ pad up to __kuser_helper_version
 
@@ -932,14 +940,16 @@ __kuser_helper_version:                         @ 0xffff0ffc
         .globl  __kuser_helper_end
 __kuser_helper_end:
 
+#endif
+
         THUMB(  .thumb  )
 
 /*
  * Vector stubs.
  *
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.  Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not exceed
+ * a page size.
  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +996,17 @@ ENDPROC(vector_\name)
 1:
         .endm
 
-        .globl  __stubs_start
+        .section .stubs, "ax", %progbits
 __stubs_start:
+        @ This must be the first word
+        .word   vector_swi
+
+vector_rst:
+ ARM(   swi     SYS_ERROR0      )
+ THUMB( svc     #0              )
+ THUMB( nop                     )
+        b       vector_und
+
 /*
  * Interrupt dispatcher
  */
@@ -1082,6 +1101,16 @@ __stubs_start:
         .align  5
 
 /*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+        b       vector_addrexcptn
+
+/*=============================================================================
  * Undefined FIQs
  *-----------------------------------------------------------------------------
  * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1123,19 @@ __stubs_start:
 vector_fiq:
         subs    pc, lr, #4
 
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
-        b       vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
-        .align  5
-
-.LCvswi:
-        .word   vector_swi
-
-        .globl  __stubs_end
-__stubs_end:
-
-        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
+        .globl  vector_fiq_offset
+        .equ    vector_fiq_offset, vector_fiq
 
-        .globl  __vectors_start
+        .section .vectors, "ax", %progbits
 __vectors_start:
- ARM(   swi     SYS_ERROR0      )
- THUMB( svc     #0              )
- THUMB( nop                     )
-        W(b)    vector_und + stubs_offset
-        W(ldr)  pc, .LCvswi + stubs_offset
-        W(b)    vector_pabt + stubs_offset
-        W(b)    vector_dabt + stubs_offset
-        W(b)    vector_addrexcptn + stubs_offset
-        W(b)    vector_irq + stubs_offset
-        W(b)    vector_fiq + stubs_offset
-
-        .globl  __vectors_end
-__vectors_end:
+        W(b)    vector_rst
+        W(b)    vector_und
+        W(ldr)  pc, __vectors_start + 0x1000
+        W(b)    vector_pabt
+        W(b)    vector_dabt
+        W(b)    vector_addrexcptn
+        W(b)    vector_irq
+        W(b)    vector_fiq
 
         .data
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
 #include <asm/irq.h>
 #include <asm/traps.h>
 
+#define FIQ_OFFSET ({                                   \
+                extern void *vector_fiq_offset;         \
+                (unsigned)&vector_fiq_offset;           \
+        })
+
 static unsigned long no_fiq_insn;
 
 /* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
 void set_fiq_handler(void *start, unsigned int length)
 {
 #if defined(CONFIG_CPU_USE_DOMAINS)
-        memcpy((void *)0xffff001c, start, length);
+        void *base = (void *)0xffff0000;
 #else
-        memcpy(vectors_page + 0x1c, start, length);
+        void *base = vectors_page;
 #endif
-        flush_icache_range(0xffff001c, 0xffff001c + length);
+        unsigned offset = FIQ_OFFSET;
+
+        memcpy(base + offset, start, length);
+        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
         if (!vectors_high())
-                flush_icache_range(0x1c, 0x1c + length);
+                flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
 
 void __init init_FIQ(int start)
 {
-        no_fiq_insn = *(unsigned long *)0xffff001c;
+        unsigned offset = FIQ_OFFSET;
+        no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
         fiq_start = start;
 }
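The fiq.c hunks above only change where set_fiq_handler() copies the handler blob; the driver-facing API in this file stays the same. As a reminder of how that API is used, here is a minimal, hypothetical sketch (the handler symbols and the "example" name are invented for illustration; claim_fiq(), set_fiq_handler() and enable_fiq() are the real entry points from this file, and the FIQ number is whatever the platform assigns):

    #include <asm/fiq.h>

    /* Hypothetical FIQ handler blob, assembled elsewhere; it is copied into
     * the FIQ vector slot by set_fiq_handler(). */
    extern unsigned char example_fiq_start[], example_fiq_end[];

    static struct fiq_handler example_fh = {
            .name = "example",
    };

    static int example_fiq_setup(int fiq_number)
    {
            if (claim_fiq(&example_fh))
                    return -EBUSY;

            set_fiq_handler(example_fiq_start,
                            example_fiq_end - example_fiq_start);
            enable_fiq(fiq_number);         /* platform-specific FIQ number */
            return 0;
    }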
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 08b47ebd3144..16ed3f7c4980 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -429,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
 /*
  * The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code.  Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers.  Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
  */
 static struct vm_area_struct gate_vma = {
         .vm_start       = 0xffff0000,
@@ -461,9 +462,47 @@ int in_gate_area_no_mm(unsigned long addr)
 {
         return in_gate_area(NULL, addr);
 }
+#define is_gate_vma(vma)        ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma)        0
+#endif
 
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
-        return (vma == &gate_vma) ? "[vectors]" : NULL;
+        return is_gate_vma(vma) ? "[vectors]" :
+                (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+                 "[sigpage]" : NULL;
+}
+
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+        struct mm_struct *mm = current->mm;
+        struct page *page;
+        unsigned long addr;
+        int ret;
+
+        page = get_signal_page();
+        if (!page)
+                return -ENOMEM;
+
+        down_write(&mm->mmap_sem);
+        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+        if (IS_ERR_VALUE(addr)) {
+                ret = addr;
+                goto up_fail;
+        }
+
+        ret = install_special_mapping(mm, addr, PAGE_SIZE,
+                VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+                &page);
+
+        if (ret == 0)
+                mm->context.sigpage = addr;
+
+ up_fail:
+        up_write(&mm->mmap_sem);
+        return ret;
 }
 #endif
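One observable effect of arch_setup_additional_pages() and the new arch_vma_name() strings above is that, on a kernel with this patch, every process carries a "[sigpage]" mapping (and still a "[vectors]" mapping when CONFIG_KUSER_HELPERS=y). A small userspace check, assuming procfs is mounted; the VMA names come straight from the hunk above:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/maps", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (strstr(line, "[vectors]") || strstr(line, "[sigpage]"))
                            fputs(line, stdout);   /* print the special mappings */
            fclose(f);
            return 0;
    }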
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271a..0f17e06d51e6 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/errno.h>
+#include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/uaccess.h>
@@ -15,12 +16,11 @@
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
+#include <asm/traps.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-#include "signal.h"
-
 /*
  * For ARM syscalls, we encode the syscall number into the instruction.
  */
@@ -40,11 +40,13 @@
 #define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
 #define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
 
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
         MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
         MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
 };
 
+static unsigned long signal_return_offset;
+
 #ifdef CONFIG_CRUNCH
 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
 {
@@ -401,12 +403,15 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
                         return 1;
 
                 if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+                        struct mm_struct *mm = current->mm;
+
                         /*
-                         * 32-bit code can use the new high-page
-                         * signal return code support except when the MPU has
-                         * protected the vectors page from PL0
+                         * 32-bit code can use the signal return page
+                         * except when the MPU has protected the vectors
+                         * page from PL0
                          */
-                        retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
+                        retcode = mm->context.sigpage + signal_return_offset +
+                                  (idx << 2) + thumb;
                 } else {
                         /*
                          * Ensure that the instruction cache sees
@@ -608,3 +613,36 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
         } while (thread_flags & _TIF_WORK_MASK);
         return 0;
 }
+
+static struct page *signal_page;
+
+struct page *get_signal_page(void)
+{
+        if (!signal_page) {
+                unsigned long ptr;
+                unsigned offset;
+                void *addr;
+
+                signal_page = alloc_pages(GFP_KERNEL, 0);
+
+                if (!signal_page)
+                        return NULL;
+
+                addr = page_address(signal_page);
+
+                /* Give the signal return code some randomness */
+                offset = 0x200 + (get_random_int() & 0x7fc);
+                signal_return_offset = offset;
+
+                /*
+                 * Copy signal return handlers into the vector page, and
+                 * set sigreturn to be a pointer to these.
+                 */
+                memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+                ptr = (unsigned long)addr + offset;
+                flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+        }
+
+        return signal_page;
+}
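A quick sanity check of the offset arithmetic in get_signal_page() above: get_random_int() & 0x7fc yields a word-aligned value in 0..0x7fc, so the copied sigreturn_codes always start between 0x200 and 0x9fc (512 possible slots) and never run off the end of the page. A standalone sketch of that bound check, with PAGE_SIZE and the array size assumed to match the 32-bit ARM values:

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE    4096u
    #define CODES_BYTES  (7 * 4)    /* sizeof(sigreturn_codes) on 32-bit ARM */

    int main(void)
    {
            unsigned r;

            for (r = 0; r <= 0xffffu; r++) {
                    unsigned offset = 0x200 + (r & 0x7fc);  /* as in the patch */

                    assert((offset & 3) == 0);                   /* word aligned */
                    assert(offset >= 0x200 && offset <= 0x9fc);  /* 512 slots    */
                    assert(offset + CODES_BYTES <= PAGE_SIZE);   /* fits in page */
            }
            printf("ok\n");
            return 0;
    }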
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE     (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234ee..ab517fcce21b 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
 #include <asm/tls.h>
 #include <asm/system_misc.h>
 
-#include "signal.h"
-
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 
 void *vectors_page;
@@ -800,15 +798,26 @@ void __init trap_init(void)
         return;
 }
 
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
 {
+        extern char __kuser_helper_start[], __kuser_helper_end[];
+        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
         /*
          * vectors + 0xfe0 = __kuser_get_tls
          * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
          */
         if (tls_emu || has_tls_reg)
-                memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 }
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
 
 void __init early_trap_init(void *vectors_base)
 {
@@ -816,33 +825,30 @@ void __init early_trap_init(void *vectors_base)
         unsigned long vectors = (unsigned long)vectors_base;
         extern char __stubs_start[], __stubs_end[];
         extern char __vectors_start[], __vectors_end[];
-        extern char __kuser_helper_start[], __kuser_helper_end[];
-        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+        unsigned i;
 
         vectors_page = vectors_base;
 
         /*
+         * Poison the vectors page with an undefined instruction.  This
+         * instruction is chosen to be undefined for both ARM and Thumb
+         * ISAs.  The Thumb version is an undefined instruction with a
+         * branch back to the undefined instruction.
+         */
+        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+                ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+        /*
          * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
          * into the vector page, mapped at 0xffff0000, and ensure these
          * are visible to the instruction stream.
          */
         memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
-        memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
-        memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 
-        /*
-         * Do processor specific fixups for the kuser helpers
-         */
-        kuser_get_tls_init(vectors);
-
-        /*
-         * Copy signal return handlers into the vector page, and
-         * set sigreturn to be a pointer to these.
-         */
-        memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
-               sigreturn_codes, sizeof(sigreturn_codes));
+        kuser_init(vectors_base);
 
-        flush_icache_range(vectors, vectors + PAGE_SIZE);
+        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
         modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
         /*
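The poison value written by early_trap_init() above is chosen so that stray execution anywhere in the vectors page faults in either instruction set. A small host-side sketch of the comment's claim, as I read the encodings (0xe7fd.... sits in the ARM undefined space; fetched as Thumb, the little-endian halfwords are a permanently undefined instruction followed by a branch back to it):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t poison = 0xe7fddef1;   /* fill value used in early_trap_init() */

            /* Stored little-endian, a Thumb fetch sees the low halfword first. */
            printf("first Thumb halfword:  0x%04x (0xdexx = permanently undefined)\n",
                   poison & 0xffff);
            printf("second Thumb halfword: 0x%04x (short backwards branch to the udf)\n",
                   poison >> 16);
            return 0;
    }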
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f6..7bcee5c9b604 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
         . = ALIGN(PAGE_SIZE);
         __init_begin = .;
 #endif
+        /*
+         * The vectors and stubs are relocatable code, and the
+         * only thing that matters is their relative offsets
+         */
+        __vectors_start = .;
+        .vectors 0 : AT(__vectors_start) {
+                *(.vectors)
+        }
+        . = __vectors_start + SIZEOF(.vectors);
+        __vectors_end = .;
+
+        __stubs_start = .;
+        .stubs 0x1000 : AT(__stubs_start) {
+                *(.stubs)
+        }
+        . = __stubs_start + SIZEOF(.stubs);
+        __stubs_end = .;
 
         INIT_TEXT_SECTION(8)
         .exit.text : {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..db5c2cab8fda 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
         select CPU_USE_DOMAINS if MMU
         select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
         select TLS_REG_EMUL if SMP || !MMU
+        select NEED_KUSER_HELPERS
 
 config CPU_32v4
         bool
         select CPU_USE_DOMAINS if MMU
         select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
         select TLS_REG_EMUL if SMP || !MMU
+        select NEED_KUSER_HELPERS
 
 config CPU_32v4T
         bool
         select CPU_USE_DOMAINS if MMU
         select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
         select TLS_REG_EMUL if SMP || !MMU
+        select NEED_KUSER_HELPERS
 
 config CPU_32v5
         bool
         select CPU_USE_DOMAINS if MMU
         select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
         select TLS_REG_EMUL if SMP || !MMU
+        select NEED_KUSER_HELPERS
 
 config CPU_32v6
         bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
         bool
+        select NEED_KUSER_HELPERS
         help
           An SMP system using a pre-ARMv6 processor (there are apparently
           a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,40 @@
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
         bool
+        select NEED_KUSER_HELPERS
         help
           SMP on a pre-ARMv6 processor?  Well OK then.
           Forget about fast user space cmpxchg support.
           It is just not possible.
 
+config NEED_KUSER_HELPERS
+        bool
+
+config KUSER_HELPERS
+        bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+        default y
+        help
+          Warning: disabling this option may break user programs.
+
+          Provide kuser helpers in the vector page.  The kernel provides
+          helper code to userspace in read only form at a fixed location
+          in the high vector page to allow userspace to be independent of
+          the CPU type fitted to the system.  This permits binaries to be
+          run on ARMv4 through to ARMv7 without modification.
+
+          However, the fixed address nature of these helpers can be used
+          by ROP (return orientated programming) authors when creating
+          exploits.
+
+          If all of the binaries and libraries which run on your platform
+          are built specifically for your platform, and make no use of
+          these helpers, then you can turn this option off.  However,
+          when such an binary or library is run, it will receive a SIGILL
+          signal, which will terminate the program.
+
+          Say N here only if you are absolutely certain that you do not
+          need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
         bool "Enable read/write for ownership DMA cache maintenance"
         depends on CPU_V6K && SMP
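The fixed addresses the KUSER_HELPERS help text is worried about are the entry points visible in the entry-armv.S hunks (0xffff0fa0 memory barrier, 0xffff0fc0 cmpxchg, 0xffff0fe0 get_tls). A hedged sketch of how userspace conventionally reaches them; this follows the long-standing kuser ABI rather than code in this patch, and it only works when CONFIG_KUSER_HELPERS=y:

    /* Fixed helper entry points in the high vectors page (kuser ABI). */
    #define __kuser_get_tls  ((unsigned long (*)(void))0xffff0fe0)
    #define __kuser_cmpxchg  ((int (*)(int oldval, int newval, volatile int *ptr))0xffff0fc0)

    /* Returns 0 if *flag was atomically changed from 0 to 1 by this caller. */
    static int take_flag(volatile int *flag)
    {
            return __kuser_cmpxchg(0, 1, flag);
    }

    static unsigned long current_tls(void)
    {
            return __kuser_get_tls();
    }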
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b3fdb63783e3..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1195,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         /*
          * Allocate the vector page early.
          */
-        vectors = early_alloc(PAGE_SIZE);
+        vectors = early_alloc(PAGE_SIZE * 2);
 
         early_trap_init(vectors);
 
@@ -1240,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
         map.pfn = __phys_to_pfn(virt_to_phys(vectors));
         map.virtual = 0xffff0000;
         map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
         map.type = MT_HIGH_VECTORS;
+#else
+        map.type = MT_LOW_VECTORS;
+#endif
         create_mapping(&map);
 
         if (!vectors_high()) {
                 map.virtual = 0;
+                map.length = PAGE_SIZE * 2;
                 map.type = MT_LOW_VECTORS;
                 create_mapping(&map);
         }
 
+        /* Now create a kernel read-only mapping */
+        map.pfn += 1;
+        map.virtual = 0xffff0000 + PAGE_SIZE;
+        map.length = PAGE_SIZE;
+        map.type = MT_LOW_VECTORS;
+        create_mapping(&map);
+
         /*
          * Ask the machine support to map in the statically mapped devices.
          */
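Taken together, the traps.c, vmlinux.lds.S and mmu.c changes leave the high-vectors area laid out roughly as below. This is a summary of the offsets used in the hunks above, not code from the patch, and the two #define names are invented for the sketch:

    /*
     * 0xffff0000 - 0xffff0fff: vectors page
     *     +0x000       exception vector branches (.vectors, copied by early_trap_init)
     *     top of page  kuser helpers, only with CONFIG_KUSER_HELPERS
     *                  (user-readable MT_HIGH_VECTORS in that case,
     *                   kernel-only MT_LOW_VECTORS otherwise)
     * 0xffff1000 - 0xffff1fff: stubs page, always a kernel-only mapping
     *     +0x000       vector stubs (.stubs); the first word is the address of
     *                  vector_swi, loaded by "W(ldr) pc, __vectors_start + 0x1000"
     */
    #define HIVECS_VECTORS_BASE  0xffff0000UL
    #define HIVECS_STUBS_BASE    (HIVECS_VECTORS_BASE + 0x1000)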