From b60d5db690f72a7cfe5e39f191f0719a9ef3a096 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 18 Jul 2013 17:20:32 +0100 Subject: ARM: 7786/1: hyp: fix macro parameterisation Currently, compare_cpu_mode_with_primary uses a mixture of macro arguments and hardcoded registers, and does so incorrectly, as it stores (__boot_cpu_mode_offset | BOOT_CPU_MODE_MISMATCH) to (__boot_cpu_mode + &__boot_cpu_mode_offset), which could corrupt an arbitrary portion of memory. This patch fixes up compare_cpu_mode_with_primary to use the macro arguments, correctly updating __boot_cpu_mode. Signed-off-by: Mark Rutland Acked-by: Dave Martin Acked-by: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Russell King --- arch/arm/kernel/hyp-stub.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 4910232c4833..797b1a6a4906 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode) ldr \reg3, [\reg2] ldr \reg1, [\reg2, \reg3] cmp \mode, \reg1 @ matches primary CPU boot mode? - orrne r7, r7, #BOOT_CPU_MODE_MISMATCH - strne r7, [r5, r6] @ record what happened and give up + orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH + strne \reg1, [\reg2, \reg3] @ record what happened and give up .endm #else /* ZIMAGE */ -- cgit v1.2.3 From ab8d46c0609843a83aef3f486365ca5e7c21d537 Mon Sep 17 00:00:00 2001 From: Tetsuyuki Kobayashi Date: Mon, 22 Jul 2013 14:58:17 +0100 Subject: ARM: 7788/1: elf: fix lpae hwcap feature reporting in proc/cpuinfo Commit a469abd0f868 ("ARM: elf: add new hwcap for identifying atomic ldrd/strd instructions") added a new hwcap to identify LPAE on CPUs which support it. Whilst the hwcap data is correct, the string reported in /proc/cpuinfo actually matches on HWCAP_VFPD32, which was missing an entry in the string table. This patch fixes this problem by adding a "vfpd32" string at the correct offset, preventing us from falsely advertising LPAE on CPUs which do not support it. [will: added commit message] Acked-by: Will Deacon Tested-by: Will Deacon Signed-off-by: Tetsuyuki Kobayashi Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/setup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 63af9a7ae512..96286cb383bd 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -971,6 +971,7 @@ static const char *hwcap_str[] = { "vfpv4", "idiva", "idivt", + "vfpd32", "lpae", NULL }; -- cgit v1.2.3 From 8fbac214e5c594a0c2fe78c14adf2cdbb1febc92 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 18 Jul 2013 17:20:33 +0100 Subject: ARM: 7787/1: virt: ensure visibility of __boot_cpu_mode Secondary CPUs write to __boot_cpu_mode with caches disabled, and thus a cached value of __boot_cpu_mode may be incoherent with that in memory. This could lead to a failure to detect mismatched boot modes. This patch adds flushing to ensure that writes by secondaries to __boot_cpu_mode are made visible before we test against it. 
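A minimal sketch of the ordering problem this patch solves (illustrative only: the function name is hypothetical, while sync_cache_r(), __boot_cpu_mode and BOOT_CPU_MODE_MISMATCH are the real symbols used in the diff below):

	/* Secondary CPUs store to __boot_cpu_mode while their caches are
	 * still off, so the store goes straight to RAM and this CPU's
	 * cached copy of that line may be stale. Flush the line before
	 * reading so the comparison sees the secondaries' writes. */
	static void boot_mode_check_sketch(void)	/* hypothetical name */
	{
		sync_cache_r(&__boot_cpu_mode);	/* discard stale cached copy */
		if (__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH)
			pr_warn("CPUs booted in inconsistent modes\n");
	}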
Signed-off-by: Mark Rutland Acked-by: Dave Martin Acked-by: Marc Zyngier Cc: Christoffer Dall Signed-off-by: Russell King --- arch/arm/include/asm/virt.h | 12 ++++++++++++ arch/arm/kernel/setup.c | 2 ++ 2 files changed, 14 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 50af92bac737..4371f45c5784 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -29,6 +29,7 @@ #define BOOT_CPU_MODE_MISMATCH PSR_N_BIT #ifndef __ASSEMBLY__ +#include #ifdef CONFIG_ARM_VIRT_EXT /* @@ -41,10 +42,21 @@ */ extern int __boot_cpu_mode; +static inline void sync_boot_mode(void) +{ + /* + * As secondaries write to __boot_cpu_mode with caches disabled, we + * must flush the corresponding cache entries to ensure the visibility + * of their writes. + */ + sync_cache_r(&__boot_cpu_mode); +} + void __hyp_set_vectors(unsigned long phys_vector_base); unsigned long __hyp_get_vectors(void); #else #define __boot_cpu_mode (SVC_MODE) +#define sync_boot_mode() #endif #ifndef ZIMAGE diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 96286cb383bd..afc2489ee13b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b) void __init hyp_mode_check(void) { #ifdef CONFIG_ARM_VIRT_EXT + sync_boot_mode(); + if (is_hyp_mode_available()) { pr_info("CPU: All CPU(s) started in HYP mode.\n"); pr_info("CPU: Virtualization extensions available.\n"); -- cgit v1.2.3 From 1f49856bb029779d8f1b63517a3a3b34ffe672c7 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 23 Jul 2013 15:13:06 +0100 Subject: ARM: 7789/1: Do not run dummy_flush_tlb_a15_erratum() on non-Cortex-A15 Commit 93dc688 (ARM: 7684/1: errata: Workaround for Cortex-A15 erratum 798181 (TLBI/DSB operations)) causes the following undefined instruction error on a mx53 (Cortex-A8): Internal error: Oops - undefined instruction: 0 [#1] SMP ARM CPU: 0 PID: 275 Comm: modprobe Not tainted 3.11.0-rc2-next-20130722-00009-g9b0f371 #881 task: df46cc00 ti: df48e000 task.ti: df48e000 PC is at check_and_switch_context+0x17c/0x4d0 LR is at check_and_switch_context+0xdc/0x4d0 This problem happens because check_and_switch_context() calls dummy_flush_tlb_a15_erratum() without checking if we are really running on a Cortex-A15 or not. To avoid this issue, only call dummy_flush_tlb_a15_erratum() inside check_and_switch_context() if erratum_a15_798181() returns true, which means that we are really running on a Cortex-A15. 
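The single-mask MIDR test in erratum_a15_798181() (diff below) is compact; an equivalent field-by-field version, as a hypothetical illustrative helper, reads:

	/* MIDR fields: [31:24] implementer, [23:20] variant (rN),
	 * [19:16] architecture, [15:4] part number, [3:0] revision (pN).
	 * The kernel's mask 0xff0ffff0 additionally pins the architecture
	 * field; only the variant/revision comparison is spelled out here. */
	static int is_a15_r0p0_to_r3p2(unsigned int midr)
	{
		if (((midr >> 24) & 0xff) != 0x41)	/* implementer: ARM */
			return 0;
		if (((midr >> 4) & 0xfff) != 0xc0f)	/* part: Cortex-A15 */
			return 0;
		/* r0p0..r3p2: variant below 3, or variant 3 up to p2 */
		return ((midr >> 20) & 0xf) < 3 ||
		       (((midr >> 20) & 0xf) == 3 && (midr & 0xf) <= 2);
	}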
Signed-off-by: Fabio Estevam Acked-by: Catalin Marinas Reviewed-by: Roger Quadros Signed-off-by: Russell King --- arch/arm/include/asm/tlbflush.h | 16 ++++++++++++++++ arch/arm/kernel/smp_tlb.c | 17 ----------------- arch/arm/mm/context.c | 3 ++- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index fdbb9e369745..f467e9b3f8d5 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -443,7 +443,18 @@ static inline void local_flush_bp_all(void) isb(); } +#include #ifdef CONFIG_ARM_ERRATA_798181 +static inline int erratum_a15_798181(void) +{ + unsigned int midr = read_cpuid_id(); + + /* Cortex-A15 r0p0..r3p2 affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) + return 0; + return 1; +} + static inline void dummy_flush_tlb_a15_erratum(void) { /* @@ -453,6 +464,11 @@ static inline void dummy_flush_tlb_a15_erratum(void) dsb(); } #else +static inline int erratum_a15_798181(void) +{ + return 0; +} + static inline void dummy_flush_tlb_a15_erratum(void) { } diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index a98b62dca2fa..c2edfff573c2 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } -#ifdef CONFIG_ARM_ERRATA_798181 -static int erratum_a15_798181(void) -{ - unsigned int midr = read_cpuid_id(); - - /* Cortex-A15 r0p0..r3p2 affected */ - if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) - return 0; - return 1; -} -#else -static int erratum_a15_798181(void) -{ - return 0; -} -#endif - static void ipi_flush_tlb_a15_erratum(void *arg) { dmb(); diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index b55b1015724b..4a0544492f10 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -245,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { local_flush_bp_all(); local_flush_tlb_all(); - dummy_flush_tlb_a15_erratum(); + if (erratum_a15_798181()) + dummy_flush_tlb_a15_erratum(); } atomic64_set(&per_cpu(active_asids, cpu), asid); -- cgit v1.2.3 From bed859c1eea89feaba2acbb57d967a6d6f68af09 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Tue, 30 Jul 2013 11:33:00 +0100 Subject: ARM: 7800/1: ARMv7-M: Fix name of NVIC handler function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The name changed in response to review comments for the nvic irqchip driver when the original name was already accepted into Russell King's tree. Signed-off-by: Uwe Kleine-König Signed-off-by: Russell King --- arch/arm/kernel/entry-v7m.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index e00621f1403f..52b26432c9a9 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -49,7 +49,7 @@ __irq_entry: mov r1, sp stmdb sp!, {lr} @ routine called with r0 = irq number, r1 = struct pt_regs * - bl nvic_do_IRQ + bl nvic_handle_irq pop {lr} @ -- cgit v1.2.3 From f928d4f2a86f46b030fa0850385b4391fc2b5918 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Jul 2013 11:00:23 +0100 Subject: ARM: poison the vectors page Fill the empty regions of the vectors page with an exception generating instruction. 
This ensures that any inappropriate branch to the vector page is appropriately trapped, rather than just encountering some code to execute. (The vectors page was filled with zero before, which corresponds with the "andeq r0, r0, r0" instruction - a no-op.) Cc: Acked-by Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/traps.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index cab094c234ee..9433e8a12b5e 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -818,9 +818,19 @@ void __init early_trap_init(void *vectors_base) extern char __vectors_start[], __vectors_end[]; extern char __kuser_helper_start[], __kuser_helper_end[]; int kuser_sz = __kuser_helper_end - __kuser_helper_start; + unsigned i; vectors_page = vectors_base; + /* + * Poison the vectors page with an undefined instruction. This + * instruction is chosen to be undefined for both ARM and Thumb + * ISAs. The Thumb version is an undefined instruction with a + * branch back to the undefined instruction. + */ + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) + ((u32 *)vectors_base)[i] = 0xe7fddef1; + /* * Copy the vectors, stubs and kuser helpers (in entry-armv.S) * into the vector page, mapped at 0xffff0000, and ensure these -- cgit v1.2.3 From 5b43e7a383d69381ffe53423e46dd0fafae07da3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Jul 2013 11:32:04 +0100 Subject: ARM: poison memory between kuser helpers Poison the memory between each kuser helper. This ensures that any branch between the kuser helpers will be appropriately trapped. Cc: Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index a39cfc2a1f90..02437345289a 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -742,6 +742,17 @@ ENDPROC(__switch_to) #endif .endm + .macro kuser_pad, sym, size + .if (. - \sym) & 3 + .rept 4 - (. - \sym) & 3 + .byte 0 + .endr + .endif + .rept (\size - (. - \sym)) / 4 + .word 0xe7fddef1 + .endr + .endm + .align 5 .globl __kuser_helper_start __kuser_helper_start: @@ -832,18 +843,13 @@ kuser_cmpxchg64_fixup: #error "incoherent kernel configuration" #endif - /* pad to next slot */ - .rept (16 - (. - __kuser_cmpxchg64)/4) - .word 0 - .endr - - .align 5 + kuser_pad __kuser_cmpxchg64, 64 __kuser_memory_barrier: @ 0xffff0fa0 smp_dmb arm usr_ret lr - .align 5 + kuser_pad __kuser_memory_barrier, 32 __kuser_cmpxchg: @ 0xffff0fc0 @@ -916,13 +922,14 @@ kuser_cmpxchg32_fixup: #endif - .align 5 + kuser_pad __kuser_cmpxchg, 32 __kuser_get_tls: @ 0xffff0fe0 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init usr_ret lr mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code - .rep 4 + kuser_pad __kuser_get_tls, 16 + .rep 3 .word 0 @ 0xffff0ff0 software TLS value, then .endr @ pad up to __kuser_helper_version -- cgit v1.2.3 From 19accfd373847ac3d10623c5d20f948846299741 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Jul 2013 11:40:32 +0100 Subject: ARM: move vector stubs Move the machine vector stubs into the page above the vector page, which we can prevent from being visible to userspace. Also move the reset stub, and place the swi vector at a location that the 'ldr' can get to it. 
This hides pointers into the kernel which could give valuable information to attackers, and reduces the number of exploitable instructions at a fixed address. Cc: Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/Kconfig | 3 ++- arch/arm/kernel/entry-armv.S | 50 +++++++++++++++++++++----------------------- arch/arm/kernel/traps.c | 4 ++-- arch/arm/mm/mmu.c | 10 ++++++++- 4 files changed, 37 insertions(+), 30 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ba412e02ec0c..123b7924904f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -218,7 +218,8 @@ config VECTORS_BASE default DRAM_BASE if REMAP_VECTORS_TO_RAM default 0x00000000 help - The base address of exception vectors. + The base address of exception vectors. This must be two pages + in size. config ARM_PATCH_PHYS_VIRT bool "Patch physical to virtual translations at runtime" if EMBEDDED diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 02437345289a..79a41fa978f5 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -944,9 +944,9 @@ __kuser_helper_end: /* * Vector stubs. * - * This code is copied to 0xffff0200 so we can use branches in the - * vectors, rather than ldr's. Note that this code must not - * exceed 0x300 bytes. + * This code is copied to 0xffff1000 so we can use branches in the + * vectors, rather than ldr's. Note that this code must not exceed + * a page size. * * Common stub entry macro: * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC @@ -995,6 +995,15 @@ ENDPROC(vector_\name) .globl __stubs_start __stubs_start: + @ This must be the first word + .word vector_swi + +vector_rst: + ARM( swi SYS_ERROR0 ) + THUMB( svc #0 ) + THUMB( nop ) + b vector_und + /* * Interrupt dispatcher */ @@ -1088,6 +1097,16 @@ __stubs_start: .align 5 +/*============================================================================= + * Address exception handler + *----------------------------------------------------------------------------- + * These aren't too critical. + * (they're not supposed to happen, and won't happen in 32-bit data mode). + */ + +vector_addrexcptn: + b vector_addrexcptn + /*============================================================================= * Undefined FIQs *----------------------------------------------------------------------------- @@ -1101,35 +1120,14 @@ __stubs_start: vector_fiq: subs pc, lr, #4 -/*============================================================================= - * Address exception handler - *----------------------------------------------------------------------------- - * These aren't too critical. - * (they're not supposed to happen, and won't happen in 32-bit data mode). - */ - -vector_addrexcptn: - b vector_addrexcptn - -/* - * We group all the following data together to optimise - * for CPUs with separate I & D caches. 
- */ - .align 5 - -.LCvswi: - .word vector_swi - .globl __stubs_end __stubs_end: - .equ stubs_offset, __vectors_start + 0x200 - __stubs_start + .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start .globl __vectors_start __vectors_start: - ARM( swi SYS_ERROR0 ) - THUMB( svc #0 ) - THUMB( nop ) + W(b) vector_rst + stubs_offset W(b) vector_und + stubs_offset W(ldr) pc, .LCvswi + stubs_offset W(b) vector_pabt + stubs_offset diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 9433e8a12b5e..2c8c7fa78b8c 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -837,7 +837,7 @@ void __init early_trap_init(void *vectors_base) * are visible to the instruction stream. */ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); - memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); + memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); /* @@ -852,7 +852,7 @@ void __init early_trap_init(void *vectors_base) memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), sigreturn_codes, sizeof(sigreturn_codes)); - flush_icache_range(vectors, vectors + PAGE_SIZE); + flush_icache_range(vectors, vectors + PAGE_SIZE * 2); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); #else /* ifndef CONFIG_CPU_V7M */ /* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4f56617a2392..9ea274d1af69 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1160,7 +1160,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) /* * Allocate the vector page early. */ - vectors = early_alloc(PAGE_SIZE); + vectors = early_alloc(PAGE_SIZE * 2); early_trap_init(vectors); @@ -1210,10 +1210,18 @@ static void __init devicemaps_init(struct machine_desc *mdesc) if (!vectors_high()) { map.virtual = 0; + map.length = PAGE_SIZE * 2; map.type = MT_LOW_VECTORS; create_mapping(&map); } + /* Now create a kernel read-only mapping */ + map.pfn += 1; + map.virtual = 0xffff0000 + PAGE_SIZE; + map.length = PAGE_SIZE; + map.type = MT_LOW_VECTORS; + create_mapping(&map); + /* * Ask the machine support to map in the statically mapped devices. */ -- cgit v1.2.3 From b9b32bf70f2fb710b07c94e13afbc729afe221da Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Jul 2013 12:03:31 +0100 Subject: ARM: use linker magic for vectors and vector stubs Use linker magic to create the vectors and vector stubs: we can tell the linker to place them at an appropriate VMA, but keep the LMA within the kernel. This gets rid of some unnecessary symbol manipulation, and have the linker calculate the relocations appropriately. 
Cc: Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 28 ++++++++++------------------ arch/arm/kernel/vmlinux.lds.S | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 18 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 79a41fa978f5..7e1ae91e5b1a 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -993,7 +993,7 @@ ENDPROC(vector_\name) 1: .endm - .globl __stubs_start + .section .stubs, "ax", %progbits __stubs_start: @ This must be the first word .word vector_swi @@ -1120,24 +1120,16 @@ vector_addrexcptn: vector_fiq: subs pc, lr, #4 - .globl __stubs_end -__stubs_end: - - .equ stubs_offset, __vectors_start + 0x1000 - __stubs_start - - .globl __vectors_start + .section .vectors, "ax", %progbits __vectors_start: - W(b) vector_rst + stubs_offset - W(b) vector_und + stubs_offset - W(ldr) pc, .LCvswi + stubs_offset - W(b) vector_pabt + stubs_offset - W(b) vector_dabt + stubs_offset - W(b) vector_addrexcptn + stubs_offset - W(b) vector_irq + stubs_offset - W(b) vector_fiq + stubs_offset - - .globl __vectors_end -__vectors_end: + W(b) vector_rst + W(b) vector_und + W(ldr) pc, __vectors_start + 0x1000 + W(b) vector_pabt + W(b) vector_dabt + W(b) vector_addrexcptn + W(b) vector_irq + W(b) vector_fiq .data diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index fa25e4e425f6..7bcee5c9b604 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -148,6 +148,23 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; #endif + /* + * The vectors and stubs are relocatable code, and the + * only thing that matters is their relative offsets + */ + __vectors_start = .; + .vectors 0 : AT(__vectors_start) { + *(.vectors) + } + . = __vectors_start + SIZEOF(.vectors); + __vectors_end = .; + + __stubs_start = .; + .stubs 0x1000 : AT(__stubs_start) { + *(.stubs) + } + . = __stubs_start + SIZEOF(.stubs); + __stubs_end = .; INIT_TEXT_SECTION(8) .exit.text : { -- cgit v1.2.3 From e39e3f3ebfef03450cf7bfa7a974a8c61f7980c8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 9 Jul 2013 01:03:17 +0100 Subject: ARM: update FIQ support for relocation of vectors FIQ should no longer copy the FIQ code into the user visible vector page. Instead, it should use the hidden page. This change makes that happen. 
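The FIQ_OFFSET trick introduced below can be puzzling at first sight; a short sketch of why it works (symbol names are the patch's own, fiq_insn is hypothetical):

	/* vector_fiq_offset is an absolute assembler symbol whose value is
	 * vector_fiq's link address inside the .stubs section, so taking
	 * its "address" in C yields a small offset, not a real pointer: */
	extern void *vector_fiq_offset;
	unsigned offset = (unsigned)&vector_fiq_offset;
	/* The stub itself now lives in the kernel-only second vector page: */
	void *fiq_insn = (void *)0xffff0000 + offset;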
Cc: Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 3 +++ arch/arm/kernel/fiq.c | 19 ++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 7e1ae91e5b1a..94caefb550bf 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -1120,6 +1120,9 @@ vector_addrexcptn: vector_fiq: subs pc, lr, #4 + .globl vector_fiq_offset + .equ vector_fiq_offset, vector_fiq + .section .vectors, "ax", %progbits __vectors_start: W(b) vector_rst diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 2adda11f712f..25442f451148 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -47,6 +47,11 @@ #include #include +#define FIQ_OFFSET ({ \ + extern void *vector_fiq_offset; \ + (unsigned)&vector_fiq_offset; \ + }) + static unsigned long no_fiq_insn; /* Default reacquire function @@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec) void set_fiq_handler(void *start, unsigned int length) { #if defined(CONFIG_CPU_USE_DOMAINS) - memcpy((void *)0xffff001c, start, length); + void *base = (void *)0xffff0000; #else - memcpy(vectors_page + 0x1c, start, length); + void *base = vectors_page; #endif - flush_icache_range(0xffff001c, 0xffff001c + length); + unsigned offset = FIQ_OFFSET; + + memcpy(base + offset, start, length); + flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); if (!vectors_high()) - flush_icache_range(0x1c, 0x1c + length); + flush_icache_range(offset, offset + length); } int claim_fiq(struct fiq_handler *f) @@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq); void __init init_FIQ(int start) { - no_fiq_insn = *(unsigned long *)0xffff001c; + unsigned offset = FIQ_OFFSET; + no_fiq_insn = *(unsigned long *)(0xffff0000 + offset); fiq_start = start; } -- cgit v1.2.3 From f6f91b0d9fd971c630cef908dde8fe8795aefbf8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 23 Jul 2013 18:37:00 +0100 Subject: ARM: allow kuser helpers to be removed from the vector page Provide a kernel configuration option to allow the kernel user helpers to be removed from the vector page, thereby preventing their use with ROP (return orientated programming) attacks. This option is only visible for CPU architectures which natively support all the operations which kernel user helpers would normally provide, and must be enabled with caution. 
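For background, the helpers sit at the fixed addresses visible in entry-armv.S above (version word at 0xffff0ffc, memory barrier at 0xffff0fa0) and are meant to be probed, not assumed. A hypothetical userspace sketch following the documented kuser ABI; with this option disabled, any such access simply faults:

	#include <stdint.h>

	#define KUSER_HELPER_VERSION	(*(int32_t *)0xffff0ffc)
	#define KUSER_MEMORY_BARRIER	((void (*)(void))0xffff0fa0)

	static void barrier(void)
	{
		/* per the kuser helper docs, the barrier helper appeared
		 * in helper version 3 */
		if (KUSER_HELPER_VERSION >= 3)
			KUSER_MEMORY_BARRIER();
	}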
Cc: Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 3 +++ arch/arm/kernel/traps.c | 23 ++++++++++++++--------- arch/arm/mm/Kconfig | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 9 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 94caefb550bf..d40d0ef389db 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -753,6 +753,7 @@ ENDPROC(__switch_to) .endr .endm +#ifdef CONFIG_KUSER_HELPERS .align 5 .globl __kuser_helper_start __kuser_helper_start: @@ -939,6 +940,8 @@ __kuser_helper_version: @ 0xffff0ffc .globl __kuser_helper_end __kuser_helper_end: +#endif + THUMB( .thumb ) /* diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 2c8c7fa78b8c..e3ca35ccd38e 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -800,15 +800,26 @@ void __init trap_init(void) return; } -static void __init kuser_get_tls_init(unsigned long vectors) +#ifdef CONFIG_KUSER_HELPERS +static void __init kuser_init(void *vectors) { + extern char __kuser_helper_start[], __kuser_helper_end[]; + int kuser_sz = __kuser_helper_end - __kuser_helper_start; + + memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); + /* * vectors + 0xfe0 = __kuser_get_tls * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 */ if (tls_emu || has_tls_reg) - memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); + memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); +} +#else +static void __init kuser_init(void *vectors) +{ } +#endif void __init early_trap_init(void *vectors_base) { @@ -816,8 +827,6 @@ void __init early_trap_init(void *vectors_base) unsigned long vectors = (unsigned long)vectors_base; extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; - extern char __kuser_helper_start[], __kuser_helper_end[]; - int kuser_sz = __kuser_helper_end - __kuser_helper_start; unsigned i; vectors_page = vectors_base; @@ -838,12 +847,8 @@ void __init early_trap_init(void *vectors_base) */ memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); - memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); - /* - * Do processor specific fixups for the kuser helpers - */ - kuser_get_tls_init(vectors); + kuser_init(vectors_base); /* * Copy signal return handlers into the vector page, and diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 6cacdc8dd654..db5c2cab8fda 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -421,24 +421,28 @@ config CPU_32v3 select CPU_USE_DOMAINS if MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select TLS_REG_EMUL if SMP || !MMU + select NEED_KUSER_HELPERS config CPU_32v4 bool select CPU_USE_DOMAINS if MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select TLS_REG_EMUL if SMP || !MMU + select NEED_KUSER_HELPERS config CPU_32v4T bool select CPU_USE_DOMAINS if MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select TLS_REG_EMUL if SMP || !MMU + select NEED_KUSER_HELPERS config CPU_32v5 bool select CPU_USE_DOMAINS if MMU select NEEDS_SYSCALL_FOR_CMPXCHG if SMP select TLS_REG_EMUL if SMP || !MMU + select NEED_KUSER_HELPERS config CPU_32v6 bool @@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE config TLS_REG_EMUL bool + select NEED_KUSER_HELPERS help An SMP system using a pre-ARMv6 processor (there are apparently a few prototypes like that in existence) and therefore 
access to @@ -783,11 +788,40 @@ config TLS_REG_EMUL config NEEDS_SYSCALL_FOR_CMPXCHG bool + select NEED_KUSER_HELPERS help SMP on a pre-ARMv6 processor? Well OK then. Forget about fast user space cmpxchg support. It is just not possible. +config NEED_KUSER_HELPERS + bool + +config KUSER_HELPERS + bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS + default y + help + Warning: disabling this option may break user programs. + + Provide kuser helpers in the vector page. The kernel provides + helper code to userspace in read only form at a fixed location + in the high vector page to allow userspace to be independent of + the CPU type fitted to the system. This permits binaries to be + run on ARMv4 through to ARMv7 without modification. + + However, the fixed address nature of these helpers can be used + by ROP (return orientated programming) authors when creating + exploits. + + If all of the binaries and libraries which run on your platform + are built specifically for your platform, and make no use of + these helpers, then you can turn this option off. However, + when such an binary or library is run, it will receive a SIGILL + signal, which will terminate the program. + + Say N here only if you are absolutely certain that you do not + need these helpers; otherwise, the safe option is to say Y. + config DMA_CACHE_RWFO bool "Enable read/write for ownership DMA cache maintenance" depends on CPU_V6K && SMP -- cgit v1.2.3 From 48be69a026b2c17350a5ef18a1959a919f60be7d Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jul 2013 00:29:18 +0100 Subject: ARM: move signal handlers into a vdso-like page Move the signal handlers into a VDSO page rather than keeping them in the vectors page. This allows us to place them randomly within this page, and also map the page at a random location within userspace further protecting these code fragments from ROP attacks. The new VDSO page is also poisoned in the same way as the vector page. 
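A note on the randomisation used by get_signal_page() in the diff below (the constants are the patch's own):

	/* get_random_int() & 0x7fc yields a 4-byte-aligned value in
	 * [0, 0x7fc], so the 7-word sigreturn trampoline starts at a
	 * random aligned offset in [0x200, 0x9fc]; worst case it ends at
	 * 0x9fc + 7*4 = 0xa18, safely inside the 4 KiB page. */
	unsigned offset = 0x200 + (get_random_int() & 0x7fc);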
Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 4 ++++ arch/arm/include/asm/mmu.h | 1 + arch/arm/kernel/process.c | 40 ++++++++++++++++++++++++++++++++--- arch/arm/kernel/signal.c | 52 +++++++++++++++++++++++++++++++++++++++------- arch/arm/kernel/signal.h | 12 ----------- arch/arm/kernel/traps.c | 9 -------- 6 files changed, 87 insertions(+), 31 deletions(-) delete mode 100644 arch/arm/kernel/signal.h (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 38050b1c4800..9c9b30717fda 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,4 +130,8 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +int arch_setup_additional_pages(struct linux_binprm *, int); + #endif diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index e3d55547e755..7345e37155d5 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -8,6 +8,7 @@ typedef struct { atomic64_t id; #endif unsigned int vmalloc_seq; + unsigned long sigpage; } mm_context_t; #ifdef CONFIG_CPU_HAS_ASID diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d3ca4f6915af..566d0d71a1e7 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -428,8 +428,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the - * atomic helpers and the signal restart code. Insert it into the - * gate_vma so that it is visible through ptrace and /proc//mem. + * atomic helpers. Insert it into the gate_vma so that it is visible + * through ptrace and /proc//mem. */ static struct vm_area_struct gate_vma = { .vm_start = 0xffff0000, @@ -461,6 +461,40 @@ int in_gate_area_no_mm(unsigned long addr) const char *arch_vma_name(struct vm_area_struct *vma) { - return (vma == &gate_vma) ? "[vectors]" : NULL; + return (vma == &gate_vma) ? "[vectors]" : + (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? + "[sigpage]" : NULL; +} + +extern struct page *get_signal_page(void); + +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + struct page *page; + unsigned long addr; + int ret; + + page = get_signal_page(); + if (!page) + return -ENOMEM; + + down_write(&mm->mmap_sem); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + &page); + + if (ret == 0) + mm->context.sigpage = addr; + + up_fail: + up_write(&mm->mmap_sem); + return ret; } #endif diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 1c16c35c271a..0f17e06d51e6 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ #include +#include #include #include #include @@ -15,12 +16,11 @@ #include #include +#include #include #include #include -#include "signal.h" - /* * For ARM syscalls, we encode the syscall number into the instruction. 
*/ @@ -40,11 +40,13 @@ #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) -const unsigned long sigreturn_codes[7] = { +static const unsigned long sigreturn_codes[7] = { MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, }; +static unsigned long signal_return_offset; + #ifdef CONFIG_CRUNCH static int preserve_crunch_context(struct crunch_sigframe __user *frame) { @@ -401,12 +403,15 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, return 1; if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { + struct mm_struct *mm = current->mm; + /* - * 32-bit code can use the new high-page - * signal return code support except when the MPU has - * protected the vectors page from PL0 + * 32-bit code can use the signal return page + * except when the MPU has protected the vectors + * page from PL0 */ - retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; + retcode = mm->context.sigpage + signal_return_offset + + (idx << 2) + thumb; } else { /* * Ensure that the instruction cache sees @@ -608,3 +613,36 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } while (thread_flags & _TIF_WORK_MASK); return 0; } + +static struct page *signal_page; + +struct page *get_signal_page(void) +{ + if (!signal_page) { + unsigned long ptr; + unsigned offset; + void *addr; + + signal_page = alloc_pages(GFP_KERNEL, 0); + + if (!signal_page) + return NULL; + + addr = page_address(signal_page); + + /* Give the signal return code some randomness */ + offset = 0x200 + (get_random_int() & 0x7fc); + signal_return_offset = offset; + + /* + * Copy signal return handlers into the vector page, and + * set sigreturn to be a pointer to these. + */ + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); + + ptr = (unsigned long)addr + offset; + flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); + } + + return signal_page; +} diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h deleted file mode 100644 index 5ff067b7c752..000000000000 --- a/arch/arm/kernel/signal.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * linux/arch/arm/kernel/signal.h - * - * Copyright (C) 2005-2009 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) - -extern const unsigned long sigreturn_codes[7]; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index e3ca35ccd38e..ab517fcce21b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -35,8 +35,6 @@ #include #include -#include "signal.h" - static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; void *vectors_page; @@ -850,13 +848,6 @@ void __init early_trap_init(void *vectors_base) kuser_init(vectors_base); - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. 
- */ - memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), - sigreturn_codes, sizeof(sigreturn_codes)); - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); #else /* ifndef CONFIG_CPU_V7M */ -- cgit v1.2.3 From a5463cd3435475386cbbe7b06e01292ac169d36f Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 31 Jul 2013 21:58:56 +0100 Subject: ARM: make vectors page inaccessible from userspace If kuser helpers are not provided by the kernel, disable user access to the vectors page. With the kuser helpers gone, there is no reason for this page to be visible to userspace. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 2 ++ arch/arm/kernel/process.c | 7 ++++++- arch/arm/mm/mmu.c | 4 ++++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 6363f3d1d505..4355f0ec44d6 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); +#ifdef CONFIG_KUSER_HELPERS #define __HAVE_ARCH_GATE_AREA 1 +#endif #ifdef CONFIG_ARM_LPAE #include diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 566d0d71a1e7..1e6c33d01c05 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -426,6 +426,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) } #ifdef CONFIG_MMU +#ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the * atomic helpers. Insert it into the gate_vma so that it is visible @@ -458,10 +459,14 @@ int in_gate_area_no_mm(unsigned long addr) { return in_gate_area(NULL, addr); } +#define is_gate_vma(vma) ((vma) = &gate_vma) +#else +#define is_gate_vma(vma) 0 +#endif const char *arch_vma_name(struct vm_area_struct *vma) { - return (vma == &gate_vma) ? "[vectors]" : + return is_gate_vma(vma) ? "[vectors]" : (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? "[sigpage]" : NULL; } diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9ea274d1af69..ca46f413d867 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1205,7 +1205,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc) map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = 0xffff0000; map.length = PAGE_SIZE; +#ifdef CONFIG_KUSER_HELPERS map.type = MT_HIGH_VECTORS; +#else + map.type = MT_LOW_VECTORS; +#endif create_mapping(&map); if (!vectors_high()) { -- cgit v1.2.3 From 44424c34049f41123a3a8b4853822f47f4ff03a2 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 30 Jul 2013 23:09:46 +0100 Subject: ARM: 7803/1: Fix deadlock scenario with smp_send_stop() If one process calls sys_reboot and that process then stops other CPUs while those CPUs are within a spin_lock() region we can potentially encounter a deadlock scenario like below. CPU 0 CPU 1 ----- ----- spin_lock(my_lock) smp_send_stop() handle_IPI() disable_preemption/irqs while(1); spin_lock(my_lock) <--- Waits forever We shouldn't attempt to run any other tasks after we send a stop IPI to a CPU so disable preemption so that this task runs to completion. We use local_irq_disable() here for cross-arch consistency with x86. 
Reported-by: Sundarajan Srinivasan Signed-off-by: Stephen Boyd Signed-off-by: Russell King --- arch/arm/kernel/process.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index d3ca4f6915af..08b47ebd3144 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -197,6 +197,7 @@ void machine_shutdown(void) */ void machine_halt(void) { + local_irq_disable(); smp_send_stop(); local_irq_disable(); @@ -211,6 +212,7 @@ void machine_halt(void) */ void machine_power_off(void) { + local_irq_disable(); smp_send_stop(); if (pm_power_off) @@ -230,6 +232,7 @@ void machine_power_off(void) */ void machine_restart(char *cmd) { + local_irq_disable(); smp_send_stop(); arm_pm_restart(reboot_mode, cmd); -- cgit v1.2.3 From 2449189bb7c73b5fe55a18bc0d289e39bdcd4998 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 31 Jul 2013 11:37:17 +0100 Subject: ARM: Add .text annotations where required after __CPUINIT removal Commit 8bd26e3a7 (arm: delete __cpuinit/__CPUINIT usage from all ARM users) caused some code to leak into sections which are discarded through the removal of __CPUINIT annotations. Add appropriate .text annotations to bring these back into the kernel text. Signed-off-by: Russell King --- arch/arm/kernel/head-nommu.S | 1 + arch/arm/kernel/head.S | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index b361de143756..14235ba64a90 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -87,6 +87,7 @@ ENTRY(stext) ENDPROC(stext) #ifdef CONFIG_SMP + .text ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 9cf6063020ae..2c7cc1e03473 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -343,6 +343,7 @@ __turn_mmu_on_loc: .long __turn_mmu_on_end #if defined(CONFIG_SMP) + .text ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. -- cgit v1.2.3 From e0d407564b532d978b03ceccebd224a05d02f111 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 3 Aug 2013 10:30:05 +0100 Subject: ARM: fix a cockup in 48be69a02 (ARM: move signal handlers into a vdso-like page) Unfortunately, I never committed the fix to a nasty oops which can occur as a result of that commit: ------------[ cut here ]------------ kernel BUG at /home/olof/work/batch/include/linux/mm.h:414! Internal error: Oops - BUG: 0 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 490 Comm: killall5 Not tainted 3.11.0-rc3-00288-gabe0308 #53 task: e90acac0 ti: e9be8000 task.ti: e9be8000 PC is at special_mapping_fault+0xa4/0xc4 LR is at __do_fault+0x68/0x48c This doesn't show up unless you do quite a bit of testing; a simple boot test does not do this, so all my nightly tests were passing fine. The reason for this is that install_special_mapping() expects the page array to stick around, and as this was only inserting one page which was stored on the kernel stack, that's why this was blowing up. 
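The rule being violated, sketched (illustrative): install_special_mapping() records the caller's page array rather than copying it, so the array must outlive the call:

	static struct page *signal_page;	/* static: lives as long as the VMA */

	/* The bug: passing &page, where page was a local variable, left the
	 * VMA referring to a dead stack slot that the fault handler later
	 * walked. Passing the address of static storage, as the fix below
	 * does, keeps the array valid for the lifetime of the mapping. */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
			VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
			&signal_page);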
Reported-by: Olof Johansson Tested-by: Olof Johansson Signed-off-by: Russell King --- arch/arm/kernel/process.c | 9 +++++---- arch/arm/kernel/signal.c | 41 +++++++++++++++++++---------------------- 2 files changed, 24 insertions(+), 26 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 1e6c33d01c05..d03b5bd889c5 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -471,17 +471,18 @@ const char *arch_vma_name(struct vm_area_struct *vma) "[sigpage]" : NULL; } +static struct page *signal_page; extern struct page *get_signal_page(void); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; - struct page *page; unsigned long addr; int ret; - page = get_signal_page(); - if (!page) + if (!signal_page) + signal_page = get_signal_page(); + if (!signal_page) return -ENOMEM; down_write(&mm->mmap_sem); @@ -493,7 +494,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ret = install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - &page); + &signal_page); if (ret == 0) mm->context.sigpage = addr; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 0f17e06d51e6..39e7105a9b70 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -614,35 +614,32 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) return 0; } -static struct page *signal_page; - struct page *get_signal_page(void) { - if (!signal_page) { - unsigned long ptr; - unsigned offset; - void *addr; + unsigned long ptr; + unsigned offset; + struct page *page; + void *addr; - signal_page = alloc_pages(GFP_KERNEL, 0); + page = alloc_pages(GFP_KERNEL, 0); - if (!signal_page) - return NULL; + if (!page) + return NULL; - addr = page_address(signal_page); + addr = page_address(page); - /* Give the signal return code some randomness */ - offset = 0x200 + (get_random_int() & 0x7fc); - signal_return_offset = offset; + /* Give the signal return code some randomness */ + offset = 0x200 + (get_random_int() & 0x7fc); + signal_return_offset = offset; - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. - */ - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); + /* + * Copy signal return handlers into the vector page, and + * set sigreturn to be a pointer to these. + */ + memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); - ptr = (unsigned long)addr + offset; - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); - } + ptr = (unsigned long)addr + offset; + flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); - return signal_page; + return page; } -- cgit v1.2.3 From 8c0cc8a5d90bc7373a7a9e7f7a40eb41f51e03fc Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 3 Aug 2013 10:39:51 +0100 Subject: ARM: fix nommu builds with 48be69a02 (ARM: move signal handlers into a vdso-like page) Olof reports that noMMU builds error out with: arch/arm/kernel/signal.c: In function 'setup_return': arch/arm/kernel/signal.c:413:25: error: 'mm_context_t' has no member named 'sigpage' This shows one of the evilnesses of IS_ENABLED(). Get rid of it here and replace it with #ifdef's - and as no noMMU platform can make use of sigpage, depend on CONIFG_MMU not CONFIG_ARM_MPU. 
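The "evilness" of IS_ENABLED() referred to above: it is an ordinary C expression, so the statically dead branch must still parse and compile. A minimal sketch of the difference (sigpage is the real field, present in mm_context_t only on MMU kernels):

	/* IS_ENABLED() keeps the branch in the parse tree - on a !MMU build
	 * this still fails to compile, since mm_context_t has no sigpage: */
	if (IS_ENABLED(CONFIG_MMU))
		addr = mm->context.sigpage;

	/* With #ifdef the reference disappears from !MMU builds entirely: */
	#ifdef CONFIG_MMU
	addr = mm->context.sigpage;
	#endif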
Reported-by: Olof Johansson Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 2 ++ arch/arm/kernel/signal.c | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 9c9b30717fda..56211f2084ef 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -130,8 +130,10 @@ struct mm_struct; extern unsigned long arch_randomize_brk(struct mm_struct *mm); #define arch_randomize_brk arch_randomize_brk +#ifdef CONFIG_MMU #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 struct linux_binprm; int arch_setup_additional_pages(struct linux_binprm *, int); +#endif #endif diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 39e7105a9b70..ab3304225272 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -402,7 +402,8 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, __put_user(sigreturn_codes[idx+1], rc+1)) return 1; - if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) { +#ifdef CONFIG_MMU + if (cpsr & MODE32_BIT) { struct mm_struct *mm = current->mm; /* @@ -412,7 +413,9 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, */ retcode = mm->context.sigpage + signal_return_offset + (idx << 2) + thumb; - } else { + } else +#endif + { /* * Ensure that the instruction cache sees * the return code written onto the stack. -- cgit v1.2.3