diff options
| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-05-29 08:18:15 +0200 | 
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-05-29 08:18:15 +0200 | 
| commit | aa668632ae8c25ffc2d94c865af099cca15944b4 (patch) | |
| tree | f490dfa51fff3f1cd3b7de1ba4783d0abf37172b /arch/x86/kernel | |
| parent | d087e7a991f1f61ee2c07db1be7c5cc2aa373f5d (diff) | |
| parent | 5ed02dbb497422bf225783f46e6eadd237d23d6b (diff) | |
| download | linux-aa668632ae8c25ffc2d94c865af099cca15944b4.tar.bz2 | |
Merge 4.12-rc3 into tty-next
We need the tty fixes/changes here to handle future work.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/x86/kernel')
| -rw-r--r-- | arch/x86/kernel/alternative.c | 9 |
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 13 |
| -rw-r--r-- | arch/x86/kernel/fpu/init.c | 1 |
| -rw-r--r-- | arch/x86/kernel/ftrace.c | 20 |
| -rw-r--r-- | arch/x86/kernel/kprobes/core.c | 9 |
| -rw-r--r-- | arch/x86/kernel/setup.c | 4 |
| -rw-r--r-- | arch/x86/kernel/unwind_frame.c | 49 |
7 files changed, 79 insertions(+), 26 deletions(-)
| diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c5b8f760473c..32e14d137416 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,  		memcpy(insnbuf, replacement, a->replacementlen);  		insnbuf_sz = a->replacementlen; -		/* 0xe8 is a relative jump; fix the offset. */ -		if (*insnbuf == 0xe8 && a->replacementlen == 5) { +		/* +		 * 0xe8 is a relative jump; fix the offset. +		 * +		 * Instruction length is checked before the opcode to avoid +		 * accessing uninitialized bytes for zero-length replacements. +		 */ +		if (a->replacementlen == 5 && *insnbuf == 0xe8) {  			*(s32 *)(insnbuf + 1) += replacement - instr;  			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",  				*(s32 *)(insnbuf + 1), diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5abd4bf73d6e..5cfbaeb6529a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)  	return 1;  } -static bool memory_error(struct mce *m) +bool mce_is_memory_error(struct mce *m)  { -	struct cpuinfo_x86 *c = &boot_cpu_data; - -	if (c->x86_vendor == X86_VENDOR_AMD) { +	if (m->cpuvendor == X86_VENDOR_AMD) {  		/* ErrCodeExt[20:16] */  		u8 xec = (m->status >> 16) & 0x1f;  		return (xec == 0x0 || xec == 0x8); -	} else if (c->x86_vendor == X86_VENDOR_INTEL) { +	} else if (m->cpuvendor == X86_VENDOR_INTEL) {  		/*  		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes  		 * @@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)  	return false;  } +EXPORT_SYMBOL_GPL(mce_is_memory_error);  static bool cec_add_mce(struct mce *m)  { @@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)  		return false;  	/* We eat only correctable DRAM errors with usable addresses. 
*/ -	if (memory_error(m) && +	if (mce_is_memory_error(m) &&  	    !(m->status & MCI_STATUS_UC) &&  	    mce_usable_address(m))  		if (!cec_add_elem(m->addr >> PAGE_SHIFT)) @@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)  		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); -		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) +		if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))  			if (m.status & MCI_STATUS_ADDRV)  				m.severity = severity; diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index c2f8dde3255c..d5d44c452624 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)   * Boot time FPU feature detection code:   */  unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; +EXPORT_SYMBOL_GPL(mxcsr_feature_mask);  static void __init fpu__init_system_mxcsr(void)  { diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 0651e974dcb3..9bef1bbeba63 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)  {  	return module_alloc(size);  } -static inline void tramp_free(void *tramp) +static inline void tramp_free(void *tramp, int size)  { +	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT; + +	set_memory_nx((unsigned long)tramp, npages); +	set_memory_rw((unsigned long)tramp, npages);  	module_memfree(tramp);  }  #else @@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)  {  	return NULL;  } -static inline void tramp_free(void *tramp) { } +static inline void tramp_free(void *tramp, int size) { }  #endif  /* Defined as markers to the end of the ftrace default trampolines */ @@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)  	/* Copy ftrace_caller onto the trampoline memory */  	ret = probe_kernel_read(trampoline, (void 
*)start_offset, size);  	if (WARN_ON(ret < 0)) { -		tramp_free(trampoline); +		tramp_free(trampoline, *tramp_size);  		return 0;  	} @@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)  	/* Are we pointing to the reference? */  	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { -		tramp_free(trampoline); +		tramp_free(trampoline, *tramp_size);  		return 0;  	} @@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)  	unsigned long offset;  	unsigned long ip;  	unsigned int size; -	int ret; +	int ret, npages;  	if (ops->trampoline) {  		/* @@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)  		 */  		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))  			return; +		npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT; +		set_memory_rw(ops->trampoline, npages);  	} else {  		ops->trampoline = create_trampoline(ops, &size);  		if (!ops->trampoline)  			return;  		ops->trampoline_size = size; +		npages = PAGE_ALIGN(size) >> PAGE_SHIFT;  	}  	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); @@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)  	/* Do a safe modify in case the trampoline is executing */  	new = ftrace_call_replace(ip, (unsigned long)func);  	ret = update_ftrace_func(ip, new); +	set_memory_ro(ops->trampoline, npages);  	/* The update should never fail */  	WARN_ON(ret); @@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)  	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))  		return; -	tramp_free((void *)ops->trampoline); +	tramp_free((void *)ops->trampoline, ops->trampoline_size);  	ops->trampoline = 0;  } diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 5b2bbfbb3712..6b877807598b 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -52,6 +52,7 @@  #include <linux/ftrace.h>  #include <linux/frame.h>  #include <linux/kasan.h> 
+#include <linux/moduleloader.h>  #include <asm/text-patching.h>  #include <asm/cacheflush.h> @@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)  	}  } +/* Recover page to RW mode before releasing it */ +void free_insn_page(void *page) +{ +	set_memory_nx((unsigned long)page & PAGE_MASK, 1); +	set_memory_rw((unsigned long)page & PAGE_MASK, 1); +	module_memfree(page); +} +  static int arch_copy_kprobe(struct kprobe *p)  {  	struct insn insn; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0b4d3c686b1e..f81823695014 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)  	 */  	x86_configure_nx(); -	simple_udelay_calibration(); -  	parse_early_param();  #ifdef CONFIG_MEMORY_HOTPLUG @@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)  	 */  	init_hypervisor_platform(); +	simple_udelay_calibration(); +  	x86_init.resources.probe_roms();  	/* after parse_early_param, so could debug it */ diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 82c6d7f1fd73..b9389d72b2f7 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)  	return (unsigned long *)task_pt_regs(state->task) - 2;  } +static bool is_last_frame(struct unwind_state *state) +{ +	return state->bp == last_frame(state); +} +  #ifdef CONFIG_X86_32  #define GCC_REALIGN_WORDS 3  #else @@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)  	return last_frame(state) - GCC_REALIGN_WORDS;  } -static bool is_last_task_frame(struct unwind_state *state) +static bool is_last_aligned_frame(struct unwind_state *state)  {  	unsigned long *last_bp = last_frame(state);  	unsigned long *aligned_bp = last_aligned_frame(state);  	/* -	 * We have to check for the last task frame at two different locations -	 * 
because gcc can occasionally decide to realign the stack pointer and -	 * change the offset of the stack frame in the prologue of a function -	 * called by head/entry code.  Examples: +	 * GCC can occasionally decide to realign the stack pointer and change +	 * the offset of the stack frame in the prologue of a function called +	 * by head/entry code.  Examples:  	 *  	 * <start_secondary>:  	 *      push   %edi @@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)  	 *      push   %rbp  	 *      mov    %rsp,%rbp  	 * -	 * Note that after aligning the stack, it pushes a duplicate copy of -	 * the return address before pushing the frame pointer. +	 * After aligning the stack, it pushes a duplicate copy of the return +	 * address before pushing the frame pointer. +	 */ +	return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1)); +} + +static bool is_last_ftrace_frame(struct unwind_state *state) +{ +	unsigned long *last_bp = last_frame(state); +	unsigned long *last_ftrace_bp = last_bp - 3; + +	/* +	 * When unwinding from an ftrace handler of a function called by entry +	 * code, the stack layout of the last frame is: +	 * +	 *   bp +	 *   parent ret addr +	 *   bp +	 *   function ret addr +	 *   parent ret addr +	 *   pt_regs +	 *   -----------------  	 */ -	return (state->bp == last_bp || -		(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); +	return (state->bp == last_ftrace_bp && +		*state->bp == *(state->bp + 2) && +		*(state->bp + 1) == *(state->bp + 4)); +} + +static bool is_last_task_frame(struct unwind_state *state) +{ +	return is_last_frame(state) || is_last_aligned_frame(state) || +	       is_last_ftrace_frame(state);  }  /* |