From b615bbbff1c4d6fcd13007e75d75f6510aeb3808 Mon Sep 17 00:00:00 2001
From: Mark Salter
Date: Wed, 13 Aug 2014 09:04:49 -0700
Subject: arm: use generic fixmap.h

ARM is different from other architectures in that fixmap pages are
indexed with a positive offset from FIXADDR_START.  Other architectures
index with a negative offset from FIXADDR_TOP.  In order to use the
generic fixmap.h definitions, this patch redefines FIXADDR_TOP to be
inclusive of the usable range.  That is, FIXADDR_TOP is the virtual
address of the topmost fixed page.  The newly defined FIXADDR_END is
the first virtual address past the fixed mappings.

Signed-off-by: Mark Salter
Reviewed-by: Doug Anderson
[kees: update for a05e54c103b0 ("ARM: 8031/2: change fixmap ...")]
Signed-off-by: Kees Cook
Cc: Laura Abbott
Cc: Rob Herring
Acked-by: Nicolas Pitre
---
 arch/arm/mm/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm/init.c')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 659c75d808dc..ad82c05bfc3a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -570,7 +570,7 @@ void __init mem_init(void)
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
--
cgit v1.2.3

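To make the new layout concrete: with the generic definitions, fixmap slot
addresses are computed downward from FIXADDR_TOP, and FIXADDR_END
(FIXADDR_TOP + PAGE_SIZE) is the first address past the mappings. The
following is a minimal userspace sketch of that arithmetic only; the
constants are illustrative stand-ins rather than ARM's real layout values,
and __fix_to_virt merely mirrors the shape of the asm-generic/fixmap.h macro:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)

    /* Illustrative stand-ins, not the real ARM values. */
    #define FIXADDR_START   0xffc00000UL
    #define FIXADDR_END     0xfff00000UL                /* first address past the maps */
    #define FIXADDR_TOP     (FIXADDR_END - PAGE_SIZE)   /* topmost usable fixed page */

    /* Same shape as the asm-generic/fixmap.h helper: index down from the top. */
    #define __fix_to_virt(x)        (FIXADDR_TOP - ((x) << PAGE_SHIFT))

    int main(void)
    {
            unsigned long i;

            /* Slot 0 is the top page; higher indices move down toward FIXADDR_START. */
            for (i = 0; i < 3; i++)
                    printf("fixmap slot %lu -> %#lx\n", i, __fix_to_virt(i));
            printf("usable range: [%#lx, %#lx)\n", FIXADDR_START, FIXADDR_END);
            return 0;
    }
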
From 1e6b48116a95046ec51f3d40f83aff8b006674d7 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Thu, 3 Apr 2014 17:28:11 -0700
Subject: ARM: mm: allow non-text sections to be non-executable

Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
into section-sized areas that can have different permissions.  Performs
the NX permission changes during free_initmem, so that init memory can
be reclaimed.

This uses section size instead of PMD size to reduce memory lost to
padding on non-LPAE systems.

Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.

Signed-off-by: Kees Cook
Tested-by: Laura Abbott
Acked-by: Nicolas Pitre
---
 arch/arm/kernel/vmlinux.lds.S |  16 +++++++
 arch/arm/mm/Kconfig           |   9 ++++
 arch/arm/mm/init.c            | 101 +++++++++++++++++++++++++++++++++++++++++-
 arch/arm/mm/mmu.c             |   9 +++-
 4 files changed, 133 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm/init.c')

diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6f57cb94367f..18fd68a295ea 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+#include <asm/pgtable.h>
+#endif
 
 #define PROC_INFO						\
 	. = ALIGN(4);						\
@@ -90,6 +93,11 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
+
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -615,7 +616,99 @@ void __init mem_init(void)
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+};
+
+struct section_perm nx_perms[] = {
+	/* Make page tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+};
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. It is
+ * only safe to call this with preemption disabled, as under
+ * stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
@@ -623,6 +716,12 @@ void free_initmem(void)
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index bdf5c94f7c36..1c52c8e94372 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1373,12 +1373,19 @@ static void __init map_lowmem(void)
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
 
+			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
+
+			create_mapping(&map);
 		} else {
 			/* This better cover the entire kernel */
--
cgit v1.2.3

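At its core, section_update() is a read-modify-write of a section
descriptor: keep the bits selected by .mask, then OR in .prot. Below is a
minimal sketch of just that update, with pmdval_t modeled as a plain 32-bit
integer; the XN bit position and the sample descriptor value are assumptions
for illustration, not taken from the patch:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t pmdval_t;

    #define PMD_SECT_XN     ((pmdval_t)1 << 4)      /* illustrative XN bit position */

    struct section_perm {
            pmdval_t mask;  /* bits to keep from the old descriptor */
            pmdval_t prot;  /* bits to set on top */
    };

    /* Mirrors the update in section_update(): new = (old & mask) | prot. */
    static pmdval_t section_apply(pmdval_t pmd, const struct section_perm *p)
    {
            return (pmd & p->mask) | p->prot;
    }

    int main(void)
    {
            /* Same recipe as nx_perms[]: preserve everything except XN, then set XN. */
            struct section_perm nx = { .mask = ~PMD_SECT_XN, .prot = PMD_SECT_XN };
            pmdval_t pmd = 0x0040140e;      /* made-up section descriptor */

            printf("before: %#010x\n", (unsigned)pmd);
            pmd = section_apply(pmd, &nx);
            printf("after:  %#010x (XN set)\n", (unsigned)pmd);
            return 0;
    }

Because the mask preserves every other attribute bit, repeated applications
are idempotent: only the execute-never bit changes.
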
From 80d6b0c2eed2a504f6740cd1f5ea76dc50abfc4d Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Thu, 3 Apr 2014 13:29:50 -0700
Subject: ARM: mm: allow text and rodata sections to be read-only

This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
read-only.  Additionally, this splits rodata from text so that rodata
can also be NX, which may lead to wasted memory when aligning to
SECTION_SIZE.

The read-only areas are made writable during ftrace updates and kexec.

Signed-off-by: Kees Cook
Tested-by: Laura Abbott
Acked-by: Nicolas Pitre
---
 arch/arm/include/asm/cacheflush.h | 10 ++++++++
 arch/arm/kernel/ftrace.c          | 19 ++++++++++++++++
 arch/arm/kernel/machine_kexec.c   |  1 +
 arch/arm/kernel/vmlinux.lds.S     |  3 +++
 arch/arm/mm/Kconfig               | 12 ++++++++++
 arch/arm/mm/init.c                | 48 ++++++++++++++++++++++++++++++++++++++-
 6 files changed, 92 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm/init.c')

diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..2d46862e7bef 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
+
 #endif
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e..b8c75e45a950 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
@@ -35,6 +36,22 @@
 
 #define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
 
+static int __ftrace_modify_code(void *data)
+{
+	int *command = data;
+
+	set_kernel_text_rw();
+	ftrace_modify_all_code(*command);
+	set_kernel_text_ro();
+
+	return 0;
+}
+
+void arch_ftrace_update_code(int command)
+{
+	stop_machine(__ftrace_modify_code, &command, NULL);
+}
+
 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
 {
 	return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
 int ftrace_arch_code_modify_post_process(void)
 {
 	set_all_modules_text_ro();
+	/* Make sure any TLB misses during machine stop are cleared. */
+	flush_tlb_all();
 	return 0;
 }
 
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8f75250cbe30..4423a565ef6f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -164,6 +164,7 @@ void machine_kexec(struct kimage *image)
 	reboot_code_buffer = page_address(image->control_code_page);
 
 	/* Prepare parameters for reboot_code_buffer*/
+	set_kernel_text_rw();
 	kexec_start_address = image->start;
 	kexec_indirection_page = page_list;
 	kexec_mach_type = machine_arch_type;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 18fd68a295ea..3afcb6c2cf06 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -120,6 +120,9 @@ SECTIONS
 		ARM_CPU_KEEP(PROC_INFO)
 	}
 
+#ifdef CONFIG_DEBUG_RODATA
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
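
The ftrace and kexec changes above both follow the same write-window
discipline: open kernel text for writing, patch it, and close it again.
That discipline can be sketched in userspace with mprotect(), which here
merely stands in for set_kernel_text_rw()/set_kernel_text_ro(); this is an
analogy to illustrate the pattern, not the kernel mechanism:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            unsigned char *text;

            /* A page standing in for kernel text. */
            text = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (text == MAP_FAILED)
                    return 1;

            memset(text, 0x00, pagesz);
            mprotect(text, pagesz, PROT_READ);              /* like mark_rodata_ro() */

            mprotect(text, pagesz, PROT_READ | PROT_WRITE); /* set_kernel_text_rw() analogue */
            text[0] = 0xcc;                                 /* patch one byte */
            mprotect(text, pagesz, PROT_READ);              /* set_kernel_text_ro() analogue */

            printf("patched byte: %#x\n", text[0]);
            munmap(text, pagesz);
            return 0;
    }

In the kernel the window is additionally serialized by stop_machine(), so
no other CPU can execute the text while it is writable.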