| author | Russell King <rmk+kernel@armlinux.org.uk> | 2017-07-05 11:06:59 +0100 |
|---|---|---|
| committer | Russell King <rmk+kernel@armlinux.org.uk> | 2017-07-05 11:06:59 +0100 |
| commit | 98becb781e3e27d74efe5f3653b948d39f694cfb (patch) | |
| tree | 63d7bcd1ab22c73fb2ecd1670e6bc5c67f6ab560 | |
| parent | 9e25ebfe56ece7541cd10a20d715cbdd148a2e06 (diff) | |
| parent | cd83935be8f5c28ea099ad1efa6321c737e2e12a (diff) | |
| download | linux-98becb781e3e27d74efe5f3653b948d39f694cfb.tar.bz2 | |
Merge branches 'fixes' and 'misc' into for-linus
| -rw-r--r-- | arch/arm/Kconfig | 1 |
| -rw-r--r-- | arch/arm/include/asm/bitops.h | 8 |
| -rw-r--r-- | arch/arm/include/asm/ftrace.h | 4 |
| -rw-r--r-- | arch/arm/include/asm/page-nommu.h | 6 |
| -rw-r--r-- | arch/arm/kernel/entry-ftrace.S | 100 |
| -rw-r--r-- | arch/arm/kernel/ftrace.c | 37 |
| -rw-r--r-- | arch/arm/kernel/process.c | 16 |
| -rw-r--r-- | arch/arm/kernel/vdso.c | 18 |
| -rw-r--r-- | arch/arm/mm/Kconfig | 6 |
| -rw-r--r-- | arch/x86/entry/vdso/vma.c | 3 |
| -rw-r--r-- | mm/mmap.c | 4 |
11 files changed, 185 insertions, 18 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c0fcab6a5504..6900aab4f757 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -56,6 +56,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e943e6cee254..f308c8c40cb9 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -159,16 +159,16 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
 /*
  * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
 extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 
 /*
  * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
 extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 22b73112b75f..f379881d5cc3 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,10 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 503f488053de..8f2c47bec375 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -11,12 +11,6 @@
 #ifndef _ASMARM_PAGE_NOMMU_H
 #define _ASMARM_PAGE_NOMMU_H
 
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
 #define clear_page(page)	memset((page), 0, PAGE_SIZE)
 #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5d..efcd9f25a14b 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -92,12 +92,95 @@
 2:	mcount_exit
 .endm
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
+				@ OLD_R0 will overwrite previous LR
+
+	add	ip, sp, #12	@ move in IP the value of SP as it was
+				@ before the push {lr} of the mcount mechanism
+
+	str	lr, [sp, #0]	@ store LR instead of PC
+
+	ldr	lr, [sp, #8]	@ get previous LR
+
+	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR
+
+	stmdb	sp!, {ip, lr}
+	stmdb	sp!, {r0-r11, lr}
+
+	@ stack content at this point:
+	@ 0  4          48   52       56            60  64    68       72
+	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+
+	mov	r3, sp				@ struct pt_regs*
+
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+
+	ldr	r1, [sp, #S_LR]			@ lr of instrumented func
+
+	ldr	lr, [sp, #S_PC]			@ get LR
+
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_regs_call
+ftrace_regs_call:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+	mov	r0, r0
+#endif
+
+	@ pop saved regs
+	ldmia	sp!, {r0-r12}			@ restore r0 through r12
+	ldr	ip, [sp, #8]			@ restore PC
+	ldr	lr, [sp, #4]			@ restore LR
+	ldr	sp, [sp, #0]			@ restore SP
+	mov	pc, ip				@ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+	sub	r0, fp, #4		@ lr of instrumented routine (parent)
+
+	@ called from __ftrace_regs_caller
+	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+
+	@ pop registers saved in ftrace_regs_caller
+	ldmia	sp!, {r0-r12}		@ restore r0 through r12
+	ldr	ip, [sp, #8]		@ restore PC
+	ldr	lr, [sp, #4]		@ restore LR
+	ldr	sp, [sp, #0]		@ restore SP
+	mov	pc, ip			@ return
+
+.endm
+#endif
+#endif
+
 .macro __ftrace_caller suffix
 	mcount_enter
 
 	mcount_get_lr	r1			@ lr of instrumented func
 	mcount_adjust_addr	r0, lr		@ instrumented function
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]	@ pointer to the current
+				@ function tracing op
+	mov	r3, #0		@ regs is NULL
+#endif
+
 	.globl ftrace_call\suffix
ftrace_call\suffix:
 	bl	ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
 	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
 	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
 #endif
 
 .purgem mcount_enter
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 833c991075a1..5617932a83df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
 	ret = ftrace_modify_code(pc, 0, new, false);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret) {
+		pc = (unsigned long)&ftrace_regs_call;
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret) {
 		pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned long ip = rec->ip;
 
 	old = ftrace_nop_replace(rec);
+
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
 	new = ftrace_call_replace(ip, adjust_address(rec, addr));
 
 	return ftrace_modify_code(rec->ip, old, new, true);
 }
 
+#endif
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 extern unsigned long ftrace_graph_call;
 extern unsigned long ftrace_graph_call_old;
 extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);
 
 static int __ftrace_modify_caller(unsigned long *callsite,
 				  void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
 				     ftrace_graph_caller,
 				     enable);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+					     ftrace_graph_regs_caller,
+					     enable);
+#endif
+
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret)
 		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 939e8b58c59d..d96714e1858c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->ARM_lr);
-	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
-	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
-		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n",
+	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
+	printk("sp : %08lx  ip : %08lx  fp : %08lx\n",
+	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
 	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
 		regs->ARM_r10, regs->ARM_r9, regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
 static struct page *signal_page;
 extern struct page *get_signal_page(void);
 
+static int sigpage_mremap(const struct vm_special_mapping *sm,
+			  struct vm_area_struct *new_vma)
+{
+	current->mm->context.sigpage = new_vma->vm_start;
+	return 0;
+}
+
 static const struct vm_special_mapping sigpage_mapping = {
 	.name = "[sigpage]",
 	.pages = &signal_page,
+	.mremap = sigpage_mremap,
 };
 
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86cf2d1a..a4d6dc0f2427 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
 	.pages = &vdso_data_page,
 };
 
+static int vdso_mremap(const struct vm_special_mapping *sm,
+		       struct vm_area_struct *new_vma)
+{
+	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+	unsigned long vdso_size;
+
+	/* without VVAR page */
+	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+	if (vdso_size != new_size)
+		return -EINVAL;
+
+	current->mm->context.vdso = new_vma->vm_start;
+
+	return 0;
+}
+
 static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
 	.name = "[vdso]",
+	.mremap = vdso_mremap,
 };
 
 struct elfinfo {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c6c4c9c8824b..4f68659abe6c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -679,7 +679,7 @@ config ARCH_DMA_ADDR_T_64BIT
 	bool
 
 config ARM_THUMB
-	bool "Support Thumb user binaries" if !CPU_THUMBONLY
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
 	depends on CPU_THUMB_CAPABLE
 	default y
 	help
@@ -690,6 +690,10 @@ config ARM_THUMB
 	  instruction set resulting in smaller binaries at the expense of
 	  slightly less efficient code.
 
+	  If this option is disabled, and you run userspace that switches to
+	  Thumb mode, signal handling will not work correctly, resulting in
+	  segmentation faults or illegal instruction aborts.
+
 	  If you don't know what this all is, saying Y is a safe choice.
 
 config ARM_THUMBEE
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 139ad7726e10..726355ce8497 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -78,9 +78,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 	if (image->size != new_size)
 		return -EINVAL;
 
-	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
-		return -EFAULT;
-
 	vdso_fix_landing(image, new_vma);
 	current->mm->context.vdso = (void __user *)new_vma->vm_start;
diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e199c0..c0a8bf1df665 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3152,8 +3152,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
 {
 	struct vm_special_mapping *sm = new_vma->vm_private_data;
 
+	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+		return -EFAULT;
+
 	if (sm->mremap)
 		return sm->mremap(sm, new_vma);
+
 	return 0;
 }
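For orientation on the `.mremap` hook that the sigpage, ARM vDSO and mm/mmap.c hunks above revolve around, here is a minimal sketch (not part of this merge) of how an architecture registers a `vm_special_mapping` so that `special_mapping_mremap()` can keep the kernel's cached user-space address in sync when userspace moves the mapping with mremap(). The `example_*` names are invented for illustration; the callback and `_install_special_mapping()` follow the pattern visible in `arch/arm/kernel/process.c` above.

```c
/*
 * Illustrative sketch only -- not part of this merge.  It shows the general
 * pattern behind the ARM sigpage/vDSO hunks above: a vm_special_mapping
 * with a .mremap callback, invoked from special_mapping_mremap() in
 * mm/mmap.c when userspace relocates the VMA.  The example_* names are
 * made up; helpers mirror arch/arm/kernel/process.c.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

static struct page *example_page;	/* page backing the special mapping */

/* Called via sm->mremap() after userspace mremap()s the VMA. */
static int example_mremap(const struct vm_special_mapping *sm,
			  struct vm_area_struct *new_vma)
{
	/* Keep the kernel's cached user address pointing at the new home. */
	current->mm->context.sigpage = new_vma->vm_start;
	return 0;
}

static const struct vm_special_mapping example_mapping = {
	.name	= "[sigpage]",
	.pages	= &example_page,
	.mremap	= example_mremap,
};

/* Typically called from arch_setup_additional_pages() at exec time. */
static int example_install(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &example_mapping);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	mm->context.sigpage = addr;
	return 0;
}
```

Because the mm/mmap.c hunk moves the `WARN_ON_ONCE(current->mm != new_vma->vm_mm)` check into the generic `special_mapping_mremap()`, a callback like the one sketched here can assume it runs on behalf of `current`, which is why the x86 vDSO copy of that check is dropped in the same series.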