41 files changed, 602 insertions(+), 219 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d309f30cf7af..5fc76b755510 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
 #endif
 
 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 321fe5f5d0e9..4d5fcd47ab75 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -61,9 +61,8 @@
 } while (0)
 
 #define RELOAD_SEG(seg)		{		\
-	unsigned int pre = GET_SEG(seg);	\
+	unsigned int pre = (seg) | 3;		\
 	unsigned int cur = get_user_seg(seg);	\
-	pre |= 3;				\
 	if (pre != cur)				\
 		set_user_seg(seg, pre);		\
 }
@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 				   struct sigcontext_32 __user *sc)
 {
 	unsigned int tmpflags, err = 0;
+	u16 gs, fs, es, ds;
 	void __user *buf;
 	u32 tmp;
 
@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 	current->restart_block.fn = do_no_restart_syscall;
 
 	get_user_try {
-		/*
-		 * Reload fs and gs if they have changed in the signal
-		 * handler.  This does not handle long fs/gs base changes in
-		 * the handler, but does not clobber them at least in the
-		 * normal case.
-		 */
-		RELOAD_SEG(gs);
-		RELOAD_SEG(fs);
-		RELOAD_SEG(ds);
-		RELOAD_SEG(es);
+		gs = GET_SEG(gs);
+		fs = GET_SEG(fs);
+		ds = GET_SEG(ds);
+		es = GET_SEG(es);
 
 		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
 		COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		buf = compat_ptr(tmp);
 	} get_user_catch(err);
 
+	/*
+	 * Reload fs and gs if they have changed in the signal
+	 * handler.  This does not handle long fs/gs base changes in
+	 * the handler, but does not clobber them at least in the
+	 * normal case.
+	 */
+	RELOAD_SEG(gs);
+	RELOAD_SEG(fs);
+	RELOAD_SEG(ds);
+	RELOAD_SEG(es);
+
 	err |= fpu__restore_sig(buf, 1);
 
 	force_iret();
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 31b627b43a8e..464034db299f 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -20,6 +20,17 @@
 #endif
 
 /*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+	.Lannotate_\@:
+	.pushsection .discard.ignore_alts
+	.long .Lannotate_\@ - .
+	.popsection
+.endm
+
+/*
  * Issue one struct alt_instr descriptor entry (need to put it into
  * the section .altinstructions, see below). This entry contains
  * enough information for the alternatives patching code to patch an
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4c74073a19cc..094fbc9c0b1c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -45,6 +45,16 @@
 #define LOCK_PREFIX ""
 #endif
 
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE				\
+	"999:\n\t"						\
+	".pushsection .discard.ignore_alts\n\t"			\
+	".long 999b - .\n\t"					\
+	".popsection\n\t"
+
 struct alt_instr {
 	s32 instr_offset;	/* original instruction */
 	s32 repl_offset;	/* offset to replacement instruction */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 6467757bb39f..3ff577c0b102 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -148,30 +148,6 @@
 	_ASM_PTR (entry);					\
 	.popsection
 
-.macro ALIGN_DESTINATION
-	/* check for bad alignment of destination */
-	movl %edi,%ecx
-	andl $7,%ecx
-	jz 102f				/* already aligned */
-	subl $8,%ecx
-	negl %ecx
-	subl %ecx,%edx
-100:	movb (%rsi),%al
-101:	movb %al,(%rdi)
-	incq %rsi
-	incq %rdi
-	decl %ecx
-	jnz 100b
-102:
-	.section .fixup,"ax"
-103:	addl %ecx,%edx			/* ecx is zerorest also */
-	jmp copy_user_handle_tail
-	.previous
-
-	_ASM_EXTABLE_UA(100b, 103b)
-	_ASM_EXTABLE_UA(101b, 103b)
-	.endm
-
 #else
 # define _EXPAND_EXTABLE_HANDLE(x) #x
 # define _ASM_EXTABLE_HANDLE(from, to, handler)			\
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index dad12b767ba0..daf25b60c9e3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -11,6 +11,15 @@
 #include <asm/msr-index.h>
 
 /*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+	ANNOTATE_IGNORE_ALTERNATIVE
+
+/*
  * Fill the CPU return stack buffer.
  *
  * Each entry in the RSB, if used for a speculative 'ret', contains an
@@ -57,19 +66,6 @@
 #ifdef __ASSEMBLY__
 
 /*
- * This should be used immediately before a retpoline alternative. It tells
- * objtool where the retpolines are so that it can make sense of the control
- * flow by just reading the original instruction(s) and ignoring the
- * alternatives.
- */
-.macro ANNOTATE_NOSPEC_ALTERNATIVE
-	.Lannotate_\@:
-	.pushsection .discard.nospec
-	.long .Lannotate_\@ - .
-	.popsection
-.endm
-
-/*
  * This should be used immediately before an indirect jump/call. It tells
  * objtool the subsequent indirect jump/call is vouched safe for retpoline
  * builds.
@@ -152,12 +148,6 @@
 
 #else /* __ASSEMBLY__ */
 
-#define ANNOTATE_NOSPEC_ALTERNATIVE				\
-	"999:\n\t"						\
-	".pushsection .discard.nospec\n\t"			\
-	".long 999b - .\n\t"					\
-	".popsection\n\t"
-
 #define ANNOTATE_RETPOLINE_SAFE					\
 	"999:\n\t"						\
 	".pushsection .discard.retpoline_safe\n\t"		\
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index db333300bd4b..f94a7d0ddd49 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
 #ifndef _ASM_X86_SMAP_H
 #define _ASM_X86_SMAP_H
 
-#include <linux/stringify.h>
 #include <asm/nops.h>
 #include <asm/cpufeatures.h>
 
 /* "Raw" instruction opcodes */
-#define __ASM_CLAC	.byte 0x0f,0x01,0xca
-#define __ASM_STAC	.byte 0x0f,0x01,0xcb
+#define __ASM_CLAC	".byte 0x0f,0x01,0xca"
+#define __ASM_STAC	".byte 0x0f,0x01,0xcb"
 
 #ifdef __ASSEMBLY__
 
@@ -28,10 +27,10 @@
 #ifdef CONFIG_X86_SMAP
 
 #define ASM_CLAC \
-	ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+	ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
 
 #define ASM_STAC \
-	ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+	ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
 
 #else /* CONFIG_X86_SMAP */
 
@@ -49,26 +48,46 @@ static __always_inline void clac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
-	alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
 }
 
 static __always_inline void stac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
-	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
+}
+
+static __always_inline unsigned long smap_save(void)
+{
+	unsigned long flags;
+
+	asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
+				  X86_FEATURE_SMAP)
+		      : "=rm" (flags) : : "memory", "cc");
+
+	return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+	asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+		      : : "g" (flags) : "memory", "cc");
 }
 
 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
-	ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+	ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
 #define ASM_STAC \
-	ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+	ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
 
 #else /* CONFIG_X86_SMAP */
 
 static inline void clac(void) { }
 static inline void stac(void) { }
 
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
 #define ASM_CLAC
 #define ASM_STAC
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 7cf1a270d891..18a4b6890fa8 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -46,6 +46,7 @@ struct inactive_task_frame {
 	unsigned long r13;
 	unsigned long r12;
 #else
+	unsigned long flags;
 	unsigned long si;
 	unsigned long di;
 #endif
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1954dd5552a2..bb21913885a3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -427,10 +427,11 @@ do {									\
 ({								\
 	__label__ __pu_label;					\
 	int __pu_err = -EFAULT;					\
-	__typeof__(*(ptr)) __pu_val;				\
-	__pu_val = x;						\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(ptr) __pu_ptr = (ptr);			\
+	__typeof__(size) __pu_size = (size);			\
 	__uaccess_begin();					\
-	__put_user_size(__pu_val, (ptr), (size), __pu_label);	\
+	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
 	__pu_err = 0;						\
 __pu_label:							\
 	__uaccess_end();					\
@@ -705,7 +706,7 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr,len)))
 		return 0;
@@ -715,6 +716,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
 #define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()
 
+#define user_access_save()	smap_save()
+#define user_access_restore(x)	smap_restore(x)
+
 #define unsafe_put_user(x, ptr, label)	\
 	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..5cd1caa8bc65 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 }
 
 unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
-unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len);
 
 #endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 2863c2026655..d50c7b747d8b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -217,6 +217,22 @@ xen_single_call(unsigned int call,
 	return (long)__res;
 }
 
+static __always_inline void __xen_stac(void)
+{
+	/*
+	 * Suppress objtool seeing the STAC/CLAC and getting confused about it
+	 * calling random code with AC=1.
+	 */
+	asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+		     ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+	asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+		     ASM_CLAC ::: "memory", "flags");
+}
+
 static inline long
 privcmd_call(unsigned int call,
 	     unsigned long a1, unsigned long a2,
@@ -225,9 +241,9 @@ privcmd_call(unsigned int call,
 {
 	long res;
 
-	stac();
+	__xen_stac();
 	res = xen_single_call(call, a1, a2, a3, a4, a5);
-	clac();
+	__xen_clac();
 
 	return res;
 }
@@ -424,9 +440,9 @@ HYPERVISOR_dm_op(
 	domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
 {
 	int ret;
-	stac();
+	__xen_stac();
 	ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
-	clac();
+	__xen_clac();
 	return ret;
 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index e471d8e6f0b2..70933193878c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6a62f4af9fcf..844a28b29967 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 08dfd4c1a4f9..c8aa58a2bab9 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
 		COPY_SEG_CPL3(cs);
 		COPY_SEG_CPL3(ss);
 
-#ifdef CONFIG_X86_64
-		/*
-		 * Fix up SS if needed for the benefit of old DOSEMU and
-		 * CRIU.
-		 */
-		if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
-			     user_64bit_mode(regs)))
-			force_valid_ss(regs);
-#endif
-
 		get_user_ex(tmpflags, &sc->flags);
 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
 		regs->orig_ax = -1;		/* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
 		buf = (void __user *)buf_val;
 	} get_user_catch(err);
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Fix up SS if needed for the benefit of old DOSEMU and
+	 * CRIU.
+	 */
+	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+		force_valid_ss(regs);
+#endif
+
 	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
 
 	force_iret();
@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	void __user *fp = NULL;
+	unsigned long uc_flags;
 	int err = 0;
 
 	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 		return -EFAULT;
 	}
 
+	uc_flags = frame_uc_flags(regs);
+
 	put_user_try {
 		/* Create the ucontext.  */
-		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+		put_user_ex(uc_flags, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
@@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
 {
 #ifdef CONFIG_X86_X32_ABI
 	struct rt_sigframe_x32 __user *frame;
+	unsigned long uc_flags;
 	void __user *restorer;
 	int err = 0;
 	void __user *fpstate = NULL;
@@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
 		return -EFAULT;
 	}
 
+	uc_flags = frame_uc_flags(regs);
+
 	put_user_try {
 		/* Create the ucontext.  */
-		put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+		put_user_ex(uc_flags, &frame->uc.uc_flags);
 		put_user_ex(0, &frame->uc.uc_link);
 		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 		put_user_ex(0, &frame->uc.uc__pad0);
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index db4e5aa0858b..b2f1822084ae 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,30 @@
 #include <asm/smap.h>
 #include <asm/export.h>
 
+.macro ALIGN_DESTINATION
+	/* check for bad alignment of destination */
+	movl %edi,%ecx
+	andl $7,%ecx
+	jz 102f				/* already aligned */
+	subl $8,%ecx
+	negl %ecx
+	subl %ecx,%edx
+100:	movb (%rsi),%al
+101:	movb %al,(%rdi)
+	incq %rsi
+	incq %rdi
+	decl %ecx
+	jnz 100b
+102:
+	.section .fixup,"ax"
+103:	addl %ecx,%edx			/* ecx is zerorest also */
+	jmp copy_user_handle_tail
+	.previous
+
+	_ASM_EXTABLE_UA(100b, 103b)
+	_ASM_EXTABLE_UA(101b, 103b)
+	.endm
+
 /*
  * copy_user_generic_unrolled - memory copy with exception handling.
  * This version is for CPUs like P4 that don't have efficient micro
@@ -194,6 +218,30 @@ ENDPROC(copy_user_enhanced_fast_string)
 EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
 /*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
+ALIGN;
+copy_user_handle_tail:
+	movl %edx,%ecx
+1:	rep movsb
+2:	mov %ecx,%eax
+	ASM_CLAC
+	ret
+
+	_ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
+/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination out of cache for more performance.
 *
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..9d05572370ed 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
 	/* Copy successful. Return zero */
 .L_done_memcpy_trap:
 	xorl %eax, %eax
+.L_done:
 	ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 	addl	%edx, %ecx
 .E_trailing_bytes:
 	mov	%ecx, %eax
-	ret
+	jmp	.L_done
 
 /*
  * For write fault handling, given the destination is unaligned,
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..9952a01cad24 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -55,26 +55,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 EXPORT_SYMBOL(clear_user);
 
 /*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
-	for (; len; --len, to++) {
-		char c;
-
-		if (__get_user_nocheck(c, from++, sizeof(char)))
-			break;
-		if (__put_user_nocheck(c, to, sizeof(char)))
-			break;
-	}
-	clac();
-	return len;
-}
-
-/*
  * Similar to copy_user_handle_tail, probe for the write fault point,
  * but reuse __memcpy_mcsafe in case a new read error is encountered.
  * clac() is handled in _copy_to_iter_mcsafe().
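Taken together, the uaccess.h and smap.h hunks above spell out the contract that the objtool changes later in this series enforce: a user_access_begin()/user_access_end() region may contain only the unsafe_*_user() accessors and straight-line code, and anything that can be reached from inside such a region (instrumentation, tracing) must bracket itself with user_access_save()/user_access_restore(), which the x86 hunks implement as pushf/pop/clac and push/popf. A minimal sketch of that usage, assuming a hypothetical read_user_flag() helper that is not part of the patch:

	/*
	 * Hypothetical illustration of the API defined above; the helper
	 * name and label are invented for the example.
	 */
	static int read_user_flag(int __user *uptr, int *dst)
	{
		int val;

		if (!user_access_begin(uptr, sizeof(*uptr)))	/* STAC on SMAP hardware */
			return -EFAULT;
		unsafe_get_user(val, uptr, efault);	/* no function calls in here */
		user_access_end();			/* CLAC */

		*dst = val;
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}

	/* Instrumentation reachable from inside such a region saves/restores AC: */
	static void instrumentation_hook(void)
	{
		unsigned long flags = user_access_save();	/* pushf; pop; clac */

		/* ... arbitrary C code runs here with AC known to be clear ... */

		user_access_restore(flags);			/* push; popf */
	}

The second half of the sketch is exactly the shape of the ftrace_likely_update(), ubsan and kasan_report() changes further down in this diff.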
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..16f80a448820 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1667,6 +1667,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
 					     len)) {
 end_user:
 				user_access_end();
+end:
 				kvfree(relocs);
 				err = -EFAULT;
 				goto err;
@@ -1686,7 +1687,7 @@ end_user:
 		 * relocations were valid.
 		 */
 		if (!user_access_begin(urelocs, size))
-			goto end_user;
+			goto end;
 
 		for (copied = 0; copied < nreloc; copied++)
 			unsafe_put_user(-1,
@@ -2695,7 +2696,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		 * when we did the "copy_from_user()" above.
 		 */
 		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
-			goto end_user;
+			goto end;
 
 		for (i = 0; i < args->buffer_count; i++) {
 			if (!(exec2_list[i].offset & UPDATE))
@@ -2709,6 +2710,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		}
 end_user:
 		user_access_end();
+end:;
 	}
 
 	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 445348facea9..d58aa0db05f9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 				.line = __LINE__,	\
 			};				\
 		______r = !!(cond);			\
-		______f.miss_hit[______r]++;					\
+		______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\
 		______r;				\
 	}))
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 37b226e8df13..2b70130af585 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
 #endif
 
 #ifdef CONFIG_HARDENED_USERCOPY
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c57e78817da..62471e75a2b0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n
 # Don't self-instrument.
 KCOV_INSTRUMENT_kcov.o := n
 KASAN_SANITIZE_kcov.o := n
+CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 # cond_syscall is currently not LTO compatible
 CFLAGS_sys_ni.o = $(DISABLE_LTO)
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 4ad967453b6f..3ea65cdff30d 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 			  int expect, int is_constant)
 {
+	unsigned long flags = user_access_save();
+
 	/* A constant is always correct */
 	if (is_constant) {
 		f->constant++;
@@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 		f->data.correct++;
 	else
 		f->data.incorrect++;
+
+	user_access_restore(flags);
 }
 EXPORT_SYMBOL(ftrace_likely_update);
diff --git a/lib/Makefile b/lib/Makefile
index 18c2be516ab4..e16e7aadc41a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -279,6 +279,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
 obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 58eacd41526c..023ba9f3b99f 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -23,10 +23,11 @@
 * hit it), 'max' is the address space maximum (and we return
 * -EFAULT if we hit it).
 */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+					unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long res = 0;
+	unsigned long res = 0;
 
 	/*
	 * Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 1c1a1b0e38a5..7f2db3fe311f 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -28,7 +28,7 @@
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long align, res = 0;
+	unsigned long align, res = 0;
 	unsigned long c;
 
 	/*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	 * Do everything aligned. But that means that we
	 * need to also expand the maximum..
	 */
-	align = (sizeof(long) - 1) & (unsigned long)src;
+	align = (sizeof(unsigned long) - 1) & (unsigned long)src;
 	src -= align;
 	max += align;
diff --git a/lib/ubsan.c b/lib/ubsan.c
index c4859c2d2f1b..ecc179338094 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/uaccess.h>
 
 #include "ubsan.h"
 
@@ -311,6 +312,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
 static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
 				       unsigned long ptr)
 {
+	unsigned long flags = user_access_save();
 
 	if (!ptr)
 		handle_null_ptr_deref(data);
@@ -318,6 +320,8 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
 		handle_misaligned_access(data, ptr);
 	else
 		handle_object_size_mismatch(data, ptr);
+
+	user_access_restore(flags);
 }
 
 void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 5d1065efbd47..613dfe681e9f 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -2,11 +2,13 @@
 KASAN_SANITIZE := n
 UBSAN_SANITIZE_common.o := n
 UBSAN_SANITIZE_generic.o := n
+UBSAN_SANITIZE_generic_report.o := n
 UBSAN_SANITIZE_tags.o := n
 KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_generic_report.o = -pg
 CFLAGS_REMOVE_tags.o = -pg
 
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
@@ -14,6 +16,7 @@ CFLAGS_REMOVE_tags.o = -pg
 
 CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
 obj-$(CONFIG_KASAN) := common.o init.o report.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 80bbe62b16cd..09c586474511 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -614,6 +615,15 @@ void kasan_free_shadow(const struct vm_struct *vm)
 		vfree(kasan_mem_to_shadow(vm->addr));
 }
 
+extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
+
+void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
+{
+	unsigned long flags = user_access_save();
+	__kasan_report(addr, size, is_write, ip);
+	user_access_restore(flags);
+}
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 static bool shadow_mapped(unsigned long addr)
 {
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca9418fe9232..0772820ad098 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -281,8 +281,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip)
 	end_report(&flags);
 }
 
-void kasan_report(unsigned long addr, size_t size,
-		bool is_write, unsigned long ip)
+void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
 {
 	struct kasan_access_info info;
 	void *tagged_addr;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 76ca30cc4791..0c5969fa795f 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -222,6 +222,9 @@ endif
 ifdef CONFIG_RETPOLINE
   objtool_args += --retpoline
 endif
+ifdef CONFIG_X86_SMAP
+  objtool_args += --uaccess
+endif
 
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index b0d7dc3d71b5..7a111a77b7aa 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -33,7 +33,11 @@
 #define INSN_STACK		8
 #define INSN_BUG		9
 #define INSN_NOP		10
-#define INSN_OTHER		11
+#define INSN_STAC		11
+#define INSN_CLAC		12
+#define INSN_STD		13
+#define INSN_CLD		14
+#define INSN_OTHER		15
 #define INSN_LAST		INSN_OTHER
 
 enum op_dest_type {
@@ -41,6 +45,7 @@ enum op_dest_type {
 	OP_DEST_REG_INDIRECT,
 	OP_DEST_MEM,
 	OP_DEST_PUSH,
+	OP_DEST_PUSHF,
 	OP_DEST_LEAVE,
 };
 
@@ -55,6 +60,7 @@ enum op_src_type {
 	OP_SRC_REG_INDIRECT,
 	OP_SRC_CONST,
 	OP_SRC_POP,
+	OP_SRC_POPF,
 	OP_SRC_ADD,
 	OP_SRC_AND,
 };
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 540a209b78ab..472e991f6512 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -357,19 +357,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		/* pushf */
 		*type = INSN_STACK;
 		op->src.type = OP_SRC_CONST;
-		op->dest.type = OP_DEST_PUSH;
+		op->dest.type = OP_DEST_PUSHF;
 		break;
 
 	case 0x9d:
 		/* popf */
 		*type = INSN_STACK;
-		op->src.type = OP_SRC_POP;
+		op->src.type = OP_SRC_POPF;
 		op->dest.type = OP_DEST_MEM;
 		break;
 
 	case 0x0f:
-		if (op2 >= 0x80 && op2 <= 0x8f) {
+		if (op2 == 0x01) {
+
+			if (modrm == 0xca)
+				*type = INSN_CLAC;
+			else if (modrm == 0xcb)
+				*type = INSN_STAC;
+
+		} else if (op2 >= 0x80 && op2 <= 0x8f) {
 
 			*type = INSN_JUMP_CONDITIONAL;
 
@@ -444,6 +451,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_CALL;
 		break;
 
+	case 0xfc:
+		*type = INSN_CLD;
+		break;
+
+	case 0xfd:
+		*type = INSN_STD;
+		break;
+
 	case 0xff:
 		if (modrm_reg == 2 || modrm_reg == 3)
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 694abc628e9b..f3b378126011 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -29,7 +29,7 @@
 #include "builtin.h"
 #include "check.h"
 
-bool no_fp, no_unreachable, retpoline, module;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 static const char * const check_usage[] = {
 	"objtool check [<options>] file.o",
@@ -41,6 +41,8 @@ const struct option check_options[] = {
 	OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
 	OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
 	OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
+	OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
+	OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
 	OPT_END(),
 };
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 28ff40e19a14..69762f9c5602 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -20,7 +20,7 @@
 #include <subcmd/parse-options.h>
 
 extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 extern int cmd_check(int argc, const char **argv);
 extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 479196aeb409..ac743a1d53ab 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -31,6 +31,7 @@
 struct alternative {
 	struct list_head list;
 	struct instruction *insn;
+	bool skip_orig;
 };
 
 const char *objname;
@@ -105,29 +106,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
 	     insn = next_insn_same_sec(file, insn))
 
 /*
- * Check if the function has been manually whitelisted with the
- * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted
- * due to its use of a context switching instruction.
- */
-static bool ignore_func(struct objtool_file *file, struct symbol *func)
-{
-	struct rela *rela;
-
-	/* check for STACK_FRAME_NON_STANDARD */
-	if (file->whitelist && file->whitelist->rela)
-		list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
-			if (rela->sym->type == STT_SECTION &&
-			    rela->sym->sec == func->sec &&
-			    rela->addend == func->offset)
-				return true;
-			if (rela->sym->type == STT_FUNC && rela->sym == func)
-				return true;
-		}
-
-	return false;
-}
-
-/*
  * This checks to see if the given function is a "noreturn" function.
  *
  * For global functions which are outside the scope of this object file, we
@@ -437,18 +415,107 @@ static void add_ignores(struct objtool_file *file)
 	struct instruction *insn;
 	struct section *sec;
 	struct symbol *func;
+	struct rela *rela;
 
-	for_each_sec(file, sec) {
-		list_for_each_entry(func, &sec->symbol_list, list) {
-			if (func->type != STT_FUNC)
-				continue;
+	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
+	if (!sec)
+		return;
 
-			if (!ignore_func(file, func))
+	list_for_each_entry(rela, &sec->rela_list, list) {
+		switch (rela->sym->type) {
+		case STT_FUNC:
+			func = rela->sym;
+			break;
+
+		case STT_SECTION:
+			func = find_symbol_by_offset(rela->sym->sec, rela->addend);
+			if (!func || func->type != STT_FUNC)
 				continue;
+			break;
 
-			func_for_each_insn_all(file, func, insn)
-				insn->ignore = true;
+		default:
+			WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
+			continue;
 		}
+
+		func_for_each_insn_all(file, func, insn)
+			insn->ignore = true;
	}
 }
 
+/*
+ * This is a whitelist of functions that is allowed to be called with AC set.
+ * The list is meant to be minimal and only contains compiler instrumentation
+ * ABI and a few functions used to implement *_{to,from}_user() functions.
+ *
+ * These functions must not directly change AC, but may PUSHF/POPF.
+ */
+static const char *uaccess_safe_builtin[] = {
+	/* KASAN */
+	"kasan_report",
+	"check_memory_region",
+	/* KASAN out-of-line */
+	"__asan_loadN_noabort",
+	"__asan_load1_noabort",
+	"__asan_load2_noabort",
+	"__asan_load4_noabort",
+	"__asan_load8_noabort",
+	"__asan_load16_noabort",
+	"__asan_storeN_noabort",
+	"__asan_store1_noabort",
+	"__asan_store2_noabort",
+	"__asan_store4_noabort",
+	"__asan_store8_noabort",
+	"__asan_store16_noabort",
+	/* KASAN in-line */
+	"__asan_report_load_n_noabort",
+	"__asan_report_load1_noabort",
+	"__asan_report_load2_noabort",
+	"__asan_report_load4_noabort",
+	"__asan_report_load8_noabort",
+	"__asan_report_load16_noabort",
+	"__asan_report_store_n_noabort",
+	"__asan_report_store1_noabort",
+	"__asan_report_store2_noabort",
+	"__asan_report_store4_noabort",
+	"__asan_report_store8_noabort",
+	"__asan_report_store16_noabort",
+	/* KCOV */
+	"write_comp_data",
+	"__sanitizer_cov_trace_pc",
+	"__sanitizer_cov_trace_const_cmp1",
+	"__sanitizer_cov_trace_const_cmp2",
+	"__sanitizer_cov_trace_const_cmp4",
+	"__sanitizer_cov_trace_const_cmp8",
+	"__sanitizer_cov_trace_cmp1",
+	"__sanitizer_cov_trace_cmp2",
+	"__sanitizer_cov_trace_cmp4",
+	"__sanitizer_cov_trace_cmp8",
+	/* UBSAN */
+	"ubsan_type_mismatch_common",
+	"__ubsan_handle_type_mismatch",
+	"__ubsan_handle_type_mismatch_v1",
+	/* misc */
+	"csum_partial_copy_generic",
+	"__memcpy_mcsafe",
+	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+	NULL
+};
+
+static void add_uaccess_safe(struct objtool_file *file)
+{
+	struct symbol *func;
+	const char **name;
+
+	if (!uaccess)
+		return;
+
+	for (name = uaccess_safe_builtin; *name; name++) {
+		func = find_symbol_by_name(file->elf, *name);
+		if (!func)
+			continue;
+
+		func->alias->uaccess_safe = true;
+	}
+}
+
@@ -458,13 +525,13 @@ static void add_ignores(struct objtool_file *file)
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
-static int add_nospec_ignores(struct objtool_file *file)
+static int add_ignore_alternatives(struct objtool_file *file)
 {
 	struct section *sec;
 	struct rela *rela;
 	struct instruction *insn;
 
-	sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
 	if (!sec)
 		return 0;
 
@@ -476,7 +543,7 @@ static int add_nospec_ignores(struct objtool_file *file)
 
 		insn = find_insn(file, rela->sym->sec, rela->addend);
 		if (!insn) {
-			WARN("bad .discard.nospec entry");
+			WARN("bad .discard.ignore_alts entry");
 			return -1;
 		}
 
@@ -525,7 +592,8 @@ static int add_jump_destinations(struct objtool_file *file)
 			continue;
 		} else {
 			/* sibling call */
-			insn->jump_dest = 0;
+			insn->call_dest = rela->sym;
+			insn->jump_dest = NULL;
 			continue;
 		}
 
@@ -547,25 +615,38 @@ static int add_jump_destinations(struct objtool_file *file)
 		}
 
 		/*
-		 * For GCC 8+, create parent/child links for any cold
-		 * subfunctions. This is _mostly_ redundant with a similar
-		 * initialization in read_symbols().
-		 *
-		 * If a function has aliases, we want the *first* such function
-		 * in the symbol table to be the subfunction's parent. In that
-		 * case we overwrite the initialization done in read_symbols().
-		 *
-		 * However this code can't completely replace the
-		 * read_symbols() code because this doesn't detect the case
-		 * where the parent function's only reference to a subfunction
-		 * is through a switch table.
+		 * Cross-function jump.
 		 */
 		if (insn->func && insn->jump_dest->func &&
-		    insn->func != insn->jump_dest->func &&
-		    !strstr(insn->func->name, ".cold.") &&
-		    strstr(insn->jump_dest->func->name, ".cold.")) {
-			insn->func->cfunc = insn->jump_dest->func;
-			insn->jump_dest->func->pfunc = insn->func;
+		    insn->func != insn->jump_dest->func) {
+
+			/*
+			 * For GCC 8+, create parent/child links for any cold
+			 * subfunctions. This is _mostly_ redundant with a
+			 * similar initialization in read_symbols().
+			 *
+			 * If a function has aliases, we want the *first* such
+			 * function in the symbol table to be the subfunction's
+			 * parent. In that case we overwrite the
+			 * initialization done in read_symbols().
+			 *
+			 * However this code can't completely replace the
+			 * read_symbols() code because this doesn't detect the
+			 * case where the parent function's only reference to a
+			 * subfunction is through a switch table.
+			 */
+			if (!strstr(insn->func->name, ".cold.") &&
+			    strstr(insn->jump_dest->func->name, ".cold.")) {
+				insn->func->cfunc = insn->jump_dest->func;
+				insn->jump_dest->func->pfunc = insn->func;
+
+			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
+				   insn->jump_dest->offset == insn->jump_dest->func->offset) {
+
+				/* sibling class */
+				insn->call_dest = insn->jump_dest->func;
+				insn->jump_dest = NULL;
+			}
 		}
 	}
 
@@ -634,9 +715,6 @@ static int add_call_destinations(struct objtool_file *file)
 * conditionally jumps to the _end_ of the entry.  We have to modify these
 * jumps' destinations to point back to .text rather than the end of the
 * entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
@@ -652,9 +730,6 @@ static int handle_group_alt(struct objtool_file *file,
 		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
 			break;
 
-		if (special_alt->skip_orig)
-			insn->type = INSN_NOP;
-
 		insn->alt_group = true;
 		last_orig_insn = insn;
 	}
@@ -696,6 +771,7 @@ static int handle_group_alt(struct objtool_file *file,
 		last_new_insn = insn;
 
 		insn->ignore = orig_insn->ignore_alts;
+		insn->func = orig_insn->func;
 
 		if (insn->type != INSN_JUMP_CONDITIONAL &&
 		    insn->type != INSN_JUMP_UNCONDITIONAL)
@@ -818,6 +894,8 @@ static int add_special_section_alts(struct objtool_file *file)
 		}
 
 		alt->insn = new_insn;
+		alt->skip_orig = special_alt->skip_orig;
+		orig_insn->ignore_alts |= special_alt->skip_alt;
 		list_add_tail(&alt->list, &orig_insn->alts);
 
 		list_del(&special_alt->list);
@@ -1239,8 +1317,9 @@ static int decode_sections(struct objtool_file *file)
 		return ret;
 
 	add_ignores(file);
+	add_uaccess_safe(file);
 
-	ret = add_nospec_ignores(file);
+	ret = add_ignore_alternatives(file);
 	if (ret)
 		return ret;
 
@@ -1320,11 +1399,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
 		return 0;
 
 	/* push */
-	if (op->dest.type == OP_DEST_PUSH)
+	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
 		cfa->offset += 8;
 
 	/* pop */
-	if (op->src.type == OP_SRC_POP)
+	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
 		cfa->offset -= 8;
 
 	/* add immediate to sp */
@@ -1581,6 +1660,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_SRC_POP:
+		case OP_SRC_POPF:
 			if (!state->drap && op->dest.type == OP_DEST_REG &&
 			    op->dest.reg == cfa->base) {
 
@@ -1645,6 +1725,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_DEST_PUSH:
+		case OP_DEST_PUSHF:
 			state->stack_size += 8;
 			if (cfa->base == CFI_SP)
 				cfa->offset += 8;
@@ -1735,7 +1816,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_DEST_MEM:
-			if (op->src.type != OP_SRC_POP) {
+			if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
 				WARN_FUNC("unknown stack-related memory operation",
 					  insn->sec, insn->offset);
 				return -1;
@@ -1799,6 +1880,50 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
 	return false;
 }
 
+static inline bool func_uaccess_safe(struct symbol *func)
+{
+	if (func)
+		return func->alias->uaccess_safe;
+
+	return false;
+}
+
+static inline const char *insn_dest_name(struct instruction *insn)
+{
+	if (insn->call_dest)
+		return insn->call_dest->name;
+
+	return "{dynamic}";
+}
+
+static int validate_call(struct instruction *insn, struct insn_state *state)
+{
+	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
+		WARN_FUNC("call to %s() with UACCESS enabled",
+			  insn->sec, insn->offset, insn_dest_name(insn));
+		return 1;
+	}
+
+	if (state->df) {
+		WARN_FUNC("call to %s() with DF set",
+			  insn->sec, insn->offset, insn_dest_name(insn));
+		return 1;
+	}
+
+	return 0;
+}
+
+static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+{
+	if (has_modified_stack_frame(state)) {
+		WARN_FUNC("sibling call from callable instruction with modified stack frame",
+			  insn->sec, insn->offset);
+		return 1;
+	}
+
+	return validate_call(insn, state);
+}
+
 /*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
@@ -1844,7 +1969,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			if (!insn->hint && !insn_state_match(insn, &state))
 				return 1;
 
-			return 0;
+			/* If we were here with AC=0, but now have AC=1, go again */
+			if (insn->state.uaccess || !state.uaccess)
+				return 0;
 		}
 
 		if (insn->hint) {
@@ -1893,16 +2020,42 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 		insn->visited = true;
 
 		if (!insn->ignore_alts) {
+			bool skip_orig = false;
+
 			list_for_each_entry(alt, &insn->alts, list) {
+				if (alt->skip_orig)
+					skip_orig = true;
+
 				ret = validate_branch(file, alt->insn, state);
-				if (ret)
-					return 1;
+				if (ret) {
+					if (backtrace)
+						BT_FUNC("(alt)", insn);
+					return ret;
+				}
 			}
+
+			if (skip_orig)
+				return 0;
 		}
 
 		switch (insn->type) {
 
 		case INSN_RETURN:
+			if (state.uaccess && !func_uaccess_safe(func)) {
+				WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
+				return 1;
+			}
+
+			if (!state.uaccess && func_uaccess_safe(func)) {
+				WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
+				return 1;
+			}
+
+			if (state.df) {
+				WARN_FUNC("return with DF set", sec, insn->offset);
+				return 1;
+			}
+
 			if (func && has_modified_stack_frame(&state)) {
 				WARN_FUNC("return with modified stack frame",
 					  sec, insn->offset);
@@ -1918,17 +2071,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			return 0;
 
 		case INSN_CALL:
-			if (is_fentry_call(insn))
-				break;
+		case INSN_CALL_DYNAMIC:
+			ret = validate_call(insn, &state);
+			if (ret)
+				return ret;
 
-			ret = dead_end_function(file, insn->call_dest);
-			if (ret == 1)
-				return 0;
-			if (ret == -1)
-				return 1;
+			if (insn->type == INSN_CALL) {
+				if (is_fentry_call(insn))
+					break;
+
+				ret = dead_end_function(file, insn->call_dest);
+				if (ret == 1)
+					return 0;
+				if (ret == -1)
+					return 1;
+			}
 
-			/* fallthrough */
-		case INSN_CALL_DYNAMIC:
 			if (!no_fp && func && !has_valid_stack_frame(&state)) {
 				WARN_FUNC("call without frame pointer save/setup",
 					  sec, insn->offset);
@@ -1938,18 +2096,21 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 
 		case INSN_JUMP_CONDITIONAL:
 		case INSN_JUMP_UNCONDITIONAL:
-			if (insn->jump_dest &&
-			    (!func || !insn->jump_dest->func ||
-			     insn->jump_dest->func->pfunc == func)) {
-				ret = validate_branch(file, insn->jump_dest,
-						      state);
+			if (func && !insn->jump_dest) {
+				ret = validate_sibling_call(insn, &state);
 				if (ret)
-					return 1;
+					return ret;
 
-			} else if (func && has_modified_stack_frame(&state)) {
-				WARN_FUNC("sibling call from callable instruction with modified stack frame",
-					  sec, insn->offset);
-				return 1;
+			} else if (insn->jump_dest &&
+				   (!func || !insn->jump_dest->func ||
+				    insn->jump_dest->func->pfunc == func)) {
+				ret = validate_branch(file, insn->jump_dest,
+						      state);
+				if (ret) {
+					if (backtrace)
+						BT_FUNC("(branch)", insn);
+					return ret;
+				}
 			}
 
 			if (insn->type == INSN_JUMP_UNCONDITIONAL)
@@ -1958,11 +2119,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			break;
 
 		case INSN_JUMP_DYNAMIC:
-			if (func && list_empty(&insn->alts) &&
-			    has_modified_stack_frame(&state)) {
-				WARN_FUNC("sibling call from callable instruction with modified stack frame",
-					  sec, insn->offset);
-				return 1;
+			if (func && list_empty(&insn->alts)) {
+				ret = validate_sibling_call(insn, &state);
+				if (ret)
+					return ret;
 			}
 
 			return 0;
@@ -1979,6 +2139,63 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			if (update_insn_state(insn, &state))
 				return 1;
 
+			if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
+				if (!state.uaccess_stack) {
+					state.uaccess_stack = 1;
+				} else if (state.uaccess_stack >> 31) {
+					WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
+					return 1;
+				}
+				state.uaccess_stack <<= 1;
+				state.uaccess_stack |= state.uaccess;
+			}
+
+			if (insn->stack_op.src.type == OP_SRC_POPF) {
+				if (state.uaccess_stack) {
+					state.uaccess = state.uaccess_stack & 1;
+					state.uaccess_stack >>= 1;
+					if (state.uaccess_stack == 1)
+						state.uaccess_stack = 0;
+				}
+			}
+
+			break;
+
+		case INSN_STAC:
+			if (state.uaccess) {
+				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+				return 1;
+			}
+
+			state.uaccess = true;
+			break;
+
+		case INSN_CLAC:
+			if (!state.uaccess && insn->func) {
+				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+				return 1;
+			}
+
+			if (func_uaccess_safe(func) && !state.uaccess_stack) {
+				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+				return 1;
+			}
+
+			state.uaccess = false;
+			break;
+
+		case INSN_STD:
+			if (state.df)
+				WARN_FUNC("recursive STD", sec, insn->offset);
+
+			state.df = true;
+			break;
+
+		case INSN_CLD:
+			if (!state.df && insn->func)
+				WARN_FUNC("redundant CLD", sec, insn->offset);
+
+			state.df = false;
 			break;
 
 		default:
@@ -2015,6 +2232,8 @@ static int validate_unwind_hints(struct objtool_file *file)
 	for_each_insn(file, insn) {
 		if (insn->hint && !insn->visited) {
 			ret = validate_branch(file, insn, state);
+			if (ret && backtrace)
+				BT_FUNC("<=== (hint)", insn);
 			warnings += ret;
 		}
 	}
@@ -2142,7 +2361,11 @@ static int validate_functions(struct objtool_file *file)
 			if (!insn || insn->ignore)
 				continue;
 
+			state.uaccess = func->alias->uaccess_safe;
+
 			ret = validate_branch(file, insn, state);
+			if (ret && backtrace)
+				BT_FUNC("<=== (func)", insn);
 			warnings += ret;
 		}
 	}
@@ -2199,7 +2422,6 @@ int check(const char *_objname, bool orc)
 	INIT_LIST_HEAD(&file.insn_list);
 	hash_init(file.insn_hash);
-	file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
 	file.c_file = find_section_by_name(file.elf, ".comment");
 	file.ignore_unreachables = no_unreachable;
 	file.hints = false;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index e6e8a655b556..71e54f97dbcd 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,8 @@ struct insn_state {
 	int stack_size;
 	unsigned char type;
 	bool bp_scratch;
-	bool drap, end;
+	bool drap, end, uaccess, df;
+	unsigned int uaccess_stack;
 	int drap_reg, drap_offset;
 	struct cfi_reg vals[CFI_NUM_REGS];
 };
@@ -60,7 +61,6 @@ struct objtool_file {
 	struct elf *elf;
 	struct list_head insn_list;
 	DECLARE_HASHTABLE(insn_hash, 16);
-	struct section *whitelist;
 	bool ignore_unreachables, c_file, hints, rodata;
 };
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index b8f3cca8e58b..dd198d53387d 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -219,7 +219,7 @@ static int read_sections(struct elf *elf)
 static int read_symbols(struct elf *elf)
 {
 	struct section *symtab, *sec;
-	struct symbol *sym, *pfunc;
+	struct symbol *sym, *pfunc, *alias;
 	struct list_head *entry, *tmp;
 	int symbols_nr, i;
 	char *coldstr;
@@ -239,6 +239,7 @@ static int read_symbols(struct elf *elf)
 			return -1;
 		}
 		memset(sym, 0, sizeof(*sym));
+		alias = sym;
 
 		sym->idx = i;
 
@@ -288,11 +289,17 @@ static int read_symbols(struct elf *elf)
 				break;
 			}
 
-			if (sym->offset == s->offset && sym->len >= s->len) {
-				entry = tmp;
-				break;
+			if (sym->offset == s->offset) {
+				if (sym->len == s->len && alias == sym)
+					alias = s;
+
+				if (sym->len >= s->len) {
+					entry = tmp;
+					break;
+				}
 			}
 		}
+		sym->alias = alias;
 		list_add(&sym->list, entry);
 		hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
 	}
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index bc97ed86b9cd..2cc2ed49322d 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -61,7 +61,8 @@ struct symbol {
 	unsigned char bind, type;
 	unsigned long offset;
 	unsigned int len;
-	struct symbol *pfunc, *cfunc;
+	struct symbol *pfunc, *cfunc, *alias;
+	bool uaccess_safe;
 };
 
 struct rela {
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 50af4e1274b3..4e50563d87c6 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "builtin.h"
 #include "special.h"
 #include "warn.h"
 
@@ -42,6 +43,7 @@
 #define ALT_NEW_LEN_OFFSET	11
 
 #define X86_FEATURE_POPCNT (4*32+23)
+#define X86_FEATURE_SMAP   (9*32+20)
 
 struct special_entry {
 	const char *sec;
@@ -110,6 +112,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
 		 */
 		if (feature == X86_FEATURE_POPCNT)
 			alt->skip_orig = true;
+
+		/*
+		 * If UACCESS validation is enabled; force that alternative;
+		 * otherwise force it the other way.
+		 *
+		 * What we want to avoid is having both the original and the
+		 * alternative code flow at the same time, in that case we can
+		 * find paths that see the STAC but take the NOP instead of
+		 * CLAC and the other way around.
+		 */
+		if (feature == X86_FEATURE_SMAP) {
+			if (uaccess)
+				alt->skip_orig = true;
+			else
+				alt->skip_alt = true;
+		}
 	}
 
 	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
diff --git a/tools/objtool/special.h b/tools/objtool/special.h
index fad1d092f679..d5c062e718ef 100644
--- a/tools/objtool/special.h
+++ b/tools/objtool/special.h
@@ -26,6 +26,7 @@ struct special_alt {
 
 	bool group;
 	bool skip_orig;
+	bool skip_alt;
 	bool jump_or_nop;
 
 	struct section *orig_sec;
diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h
index afd9f7a05f6d..f4fbb972b611 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/warn.h
@@ -64,6 +64,14 @@ static inline char *offstr(struct section *sec, unsigned long offset)
 	free(_str);					\
 })
 
+#define BT_FUNC(format, insn, ...)			\
+({							\
+	struct instruction *_insn = (insn);		\
+	char *_str = offstr(_insn->sec, _insn->offset);	\
+	WARN("  %s: " format, _str, ##__VA_ARGS__);	\
+	free(_str);					\
+})
+
 #define WARN_ELF(format, ...)				\
 	WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
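The heart of the new validation is the AC state machine in validate_branch(): STAC and CLAC flip state.uaccess, calls are checked against the uaccess_safe_builtin whitelist, and PUSHF/POPF pairs are modelled through state.uaccess_stack, effectively a 31-deep one-bit shift register with a sentinel bit, so that smap_save()/smap_restore() sequences are followed correctly. A standalone model of that bookkeeping, written as a plain user-space program whose struct and function names are invented for the illustration:

	#include <stdio.h>

	/* Mirrors insn_state.uaccess / .uaccess_stack from check.h above. */
	struct ac_state {
		unsigned int uaccess;		/* current AC flag */
		unsigned int uaccess_stack;	/* one-bit shift register */
	};

	static void on_pushf(struct ac_state *st)
	{
		if (!st->uaccess_stack)
			st->uaccess_stack = 1;		/* plant empty-stack sentinel */
		else if (st->uaccess_stack >> 31)
			fprintf(stderr, "PUSHF stack exhausted\n");
		st->uaccess_stack <<= 1;
		st->uaccess_stack |= st->uaccess;	/* remember AC at push time */
	}

	static void on_popf(struct ac_state *st)
	{
		if (st->uaccess_stack) {
			st->uaccess = st->uaccess_stack & 1;	/* restore AC */
			st->uaccess_stack >>= 1;
			if (st->uaccess_stack == 1)		/* only sentinel left */
				st->uaccess_stack = 0;
		}
	}

	int main(void)
	{
		struct ac_state st = { .uaccess = 1 };	/* as after STAC */

		on_pushf(&st);		/* smap_save(): pushf */
		st.uaccess = 0;		/* smap_save(): clac */
		/* ... instrumentation runs here with AC clear ... */
		on_popf(&st);		/* smap_restore(): popf */
		printf("AC = %u\n", st.uaccess);	/* prints "AC = 1" */
		return 0;
	}

This model also explains why X86_FEATURE_SMAP alternatives are forced one way or the other in special.c: with --uaccess objtool follows only the STAC/CLAC flavour, without it only the NOP flavour, so a path can never see the STAC yet take the NOP in place of the CLAC.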