From b968e84b509da593c50dc3db679e1d33de701f78 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 17 Sep 2021 11:20:04 +0200
Subject: x86/iopl: Fake iopl(3) CLI/STI usage

Since commit c8137ace5638 ("x86/iopl: Restrict iopl() permission
scope") it's possible to emulate iopl(3) using ioperm(), except for
the CLI/STI usage.

Userspace CLI/STI usage is very dubious (read: broken), since any
exception taken during that window can lead to rescheduling anyway
(or worse). The iopl(2) manpage even states that usage of CLI/STI is
highly discouraged and might even crash the system.

Of course, that won't stop people, and HP has the dubious honour of
being the first vendor to be found using this in their hp-health
package.

In order to enable this 'software' to still 'work', have the #GP
handler treat the CLI/STI instructions as NOPs when iopl(3) is in
effect. Warn the user that their program is doing dubious things.

Fixes: a24ca9976843 ("x86/iopl: Remove legacy IOPL option")
Reported-by: Ondrej Zary
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
Cc: stable@kernel.org # v5.5+
Link: https://lkml.kernel.org/r/20210918090641.GD5106@worktop.programming.kicks-ass.net
---
 arch/x86/include/asm/insn-eval.h | 1 +
 arch/x86/include/asm/processor.h | 1 +
 2 files changed, 2 insertions(+)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
index 91d7182ad2d6..4ec3613551e3 100644
--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -21,6 +21,7 @@ int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
 int insn_get_modrm_reg_off(struct insn *insn, struct pt_regs *regs);
 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
 int insn_get_code_seg_params(struct pt_regs *regs);
+int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip);
 int insn_fetch_from_user(struct pt_regs *regs,
 			 unsigned char buf[MAX_INSN_SIZE]);
 int insn_fetch_from_user_inatomic(struct pt_regs *regs,
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 9ad2acaaae9b..577f342dbfb2 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -518,6 +518,7 @@ struct thread_struct {
 	 */
 	unsigned long		iopl_emul;
 
+	unsigned int		iopl_warn:1;
 	unsigned int		sig_on_uaccess_err:1;
 
 	/*
-- 
cgit v1.2.3

From 44b979fa302cab91bdd2cc982823e5c13202cd4e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 15 Sep 2021 17:12:59 +0200
Subject: x86/mm/64: Improve stack overflow warnings

Current code has an explicit check for hitting the task stack guard,
but overflowing any of the other stacks will get you a nondescript
general #DF warning.

Improve matters by using get_stack_info_noinstr() to determine if, and
which, stack guard page got hit, enabling a better stack warning.
Specifically, Michael Wang reported what turned out to be an NMI
exception stack overflow, which is now clearly reported as such:

  [] BUG: NMI stack guard page was hit at 0000000085fd977b (stack is 000000003a55b09e..00000000d8cce1a5)

Reported-by: Michael Wang
Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Michael Wang
Link: https://lkml.kernel.org/r/YUTE/NuqnaWbST8n@hirez.programming.kicks-ass.net
---
 arch/x86/include/asm/irq_stack.h  | 37 +++++++++++++++++++++++++------------
 arch/x86/include/asm/stacktrace.h | 10 ++++++++++
 arch/x86/include/asm/traps.h      |  6 +++---
 arch/x86/kernel/dumpstack_64.c    |  6 ++++++
 arch/x86/kernel/traps.c           | 25 +++++++++++++------------
 arch/x86/mm/fault.c               | 20 ++++++++++----------
 6 files changed, 67 insertions(+), 37 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 562854c60808..8d55bd11848c 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -77,11 +77,11 @@
  * Function calls can clobber anything except the callee-saved
  * registers. Tell the compiler.
  */
-#define call_on_irqstack(func, asm_call, argconstr...)			\
+#define call_on_stack(stack, func, asm_call, argconstr...)		\
 {									\
 	register void *tos asm("r11");					\
 									\
-	tos = ((void *)__this_cpu_read(hardirq_stack_ptr));		\
+	tos = ((void *)(stack));					\
 									\
 	asm_inline volatile(						\
 	"movq	%%rsp, (%[tos])				\n"		\
@@ -98,6 +98,25 @@
 	);								\
 }
 
+#define ASM_CALL_ARG0							\
+	"call %P[__func]				\n"
+
+#define ASM_CALL_ARG1							\
+	"movq	%[arg1], %%rdi				\n"		\
+	ASM_CALL_ARG0
+
+#define ASM_CALL_ARG2							\
+	"movq	%[arg2], %%rsi				\n"		\
+	ASM_CALL_ARG1
+
+#define ASM_CALL_ARG3							\
+	"movq	%[arg3], %%rdx				\n"		\
+	ASM_CALL_ARG2
+
+#define call_on_irqstack(func, asm_call, argconstr...)			\
+	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
+		      func, asm_call, argconstr)
+
 /* Macros to assert type correctness for run_*_on_irqstack macros */
 #define assert_function_type(func, proto)				\
 	static_assert(__builtin_types_compatible_p(typeof(&func), proto))
@@ -147,8 +166,7 @@
  */
 #define ASM_CALL_SYSVEC							\
 	"call irq_enter_rcu				\n"		\
-	"movq	%[arg1], %%rdi				\n"		\
-	"call %P[__func]				\n"		\
+	ASM_CALL_ARG1							\
 	"call irq_exit_rcu				\n"
 
 #define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)
@@ -168,12 +186,10 @@
  */
 #define ASM_CALL_IRQ							\
 	"call irq_enter_rcu				\n"		\
-	"movq	%[arg1], %%rdi				\n"		\
-	"movl	%[arg2], %%esi				\n"		\
-	"call %P[__func]				\n"		\
+	ASM_CALL_ARG2							\
 	"call irq_exit_rcu				\n"
 
-#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" (vector)
+#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)
 
 #define run_irq_on_irqstack_cond(func, regs, vector)			\
 {									\
@@ -185,9 +201,6 @@
 			 IRQ_CONSTRAINTS, regs, vector);		\
 }
 
-#define ASM_CALL_SOFTIRQ						\
-	"call %P[__func]				\n"
-
 /*
  * Macro to invoke __do_softirq on the irq stack. This is only called from
  * task context when bottom halves are about to be reenabled and soft
@@ -197,7 +210,7 @@
 #define do_softirq_own_stack()						\
 {									\
 	__this_cpu_write(hardirq_stack_inuse, true);			\
-	call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);		\
+	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
 	__this_cpu_write(hardirq_stack_inuse, false);			\
 }
 
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f248eb2ac2d4..3881b5333eb8 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -38,6 +38,16 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
 			    struct stack_info *info);
 
+static __always_inline
+bool get_stack_guard_info(unsigned long *stack, struct stack_info *info)
+{
+	/* make sure it's not in the stack proper */
+	if (get_stack_info_noinstr(stack, current, info))
+		return false;
+	/* but if it is in the page below it, we hit a guard */
+	return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info);
+}
+
 const char *stack_type_name(enum stack_type type);
 
 static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 7f7200021bd1..6221be7cafc3 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -40,9 +40,9 @@ void math_emulate(struct math_emu_info *);
 bool fault_in_kernel_space(unsigned long address);
 
 #ifdef CONFIG_VMAP_STACK
-void __noreturn handle_stack_overflow(const char *message,
-				      struct pt_regs *regs,
-				      unsigned long fault_address);
+void __noreturn handle_stack_overflow(struct pt_regs *regs,
+				      unsigned long fault_address,
+				      struct stack_info *info);
 #endif
 
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5601b95944fa..6c5defd6569a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -32,9 +32,15 @@ const char *stack_type_name(enum stack_type type)
 {
 	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
 
+	if (type == STACK_TYPE_TASK)
+		return "TASK";
+
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
 
+	if (type == STACK_TYPE_SOFTIRQ)
+		return "SOFTIRQ";
+
 	if (type == STACK_TYPE_ENTRY) {
 		/*
 		 * On 64-bit, we have a generic entry stack that we
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f3f3034b06f3..cc6de3a01293 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -313,17 +313,19 @@ out:
 }
 
 #ifdef CONFIG_VMAP_STACK
-__visible void __noreturn handle_stack_overflow(const char *message,
-						struct pt_regs *regs,
-						unsigned long fault_address)
+__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
+						unsigned long fault_address,
+						struct stack_info *info)
 {
-	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
-	       (void *)fault_address, current->stack,
-	       (char *)current->stack + THREAD_SIZE - 1);
-	die(message, regs, 0);
+	const char *name = stack_type_name(info->type);
+
+	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
+	       name, (void *)fault_address, info->begin, info->end);
+
+	die("stack guard page", regs, 0);
 
 	/* Be absolutely certain we don't return. */
-	panic("%s", message);
+	panic("%s stack guard hit", name);
 }
 #endif
 
@@ -353,6 +355,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
 
 #ifdef CONFIG_VMAP_STACK
 	unsigned long address = read_cr2();
+	struct stack_info info;
 #endif
 
 #ifdef CONFIG_X86_ESPFIX64
@@ -455,10 +458,8 @@
 	 * stack even if the actual trigger for the double fault was
 	 * something else.
 	 */
-	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
-		handle_stack_overflow("kernel stack overflow (double-fault)",
-				      regs, address);
-	}
+	if (get_stack_guard_info((void *)address, &info))
+		handle_stack_overflow(regs, address, &info);
 #endif
 
 	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b2eefdefc108..edb5152f0866 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -32,6 +32,7 @@
 #include <asm/pgtable_areas.h>		/* VMALLOC_START, ... */
 #include <asm/kvm_para.h>		/* kvm_handle_async_pf */
 #include <asm/vdso.h>			/* fixup_vdso_exception() */
+#include <asm/irq_stack.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -631,6 +632,9 @@ static noinline void
 page_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		unsigned long address)
 {
+#ifdef CONFIG_VMAP_STACK
+	struct stack_info info;
+#endif
 	unsigned long flags;
 	int sig;
 
@@ -649,9 +653,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
 	 * that we're in vmalloc space to avoid this.
 	 */
 	if (is_vmalloc_addr((void *)address) &&
-	    (((unsigned long)current->stack - 1 - address < PAGE_SIZE) ||
-	     address - ((unsigned long)current->stack + THREAD_SIZE) < PAGE_SIZE)) {
-		unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
+	    get_stack_guard_info((void *)address, &info)) {
 		/*
 		 * We're likely to be running with very little stack space
 		 * left. It's plausible that we'd hit this condition but
@@ -662,13 +664,11 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		 * and then double-fault, though, because we're likely to
 		 * break the console driver and lose most of the stack dump.
 		 */
-		asm volatile ("movq %[stack], %%rsp\n\t"
-			      "call handle_stack_overflow\n\t"
-			      "1: jmp 1b"
-			      : ASM_CALL_CONSTRAINT
-			      : "D" ("kernel stack overflow (page fault)"),
-				"S" (regs), "d" (address),
-				[stack] "rm" (stack));
+		call_on_stack(__this_cpu_ist_top_va(DF) - sizeof(void*),
+			      handle_stack_overflow,
+			      ASM_CALL_ARG3,
+			      , [arg1] "r" (regs), [arg2] "r" (address), [arg3] "r" (&info));
+
 		unreachable();
 	}
 #endif
-- 
cgit v1.2.3

From 7fae4c24a2b84a66c7be399727aca11e7a888462 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 15 Sep 2021 16:19:46 +0200
Subject: x86: Increase exception stack sizes

It turns out that a single page of stack is trivial to overflow with
all the tracing gunk enabled. Raise the exception stacks to 2 pages,
which is still half the size of the interrupt stacks at 4 pages.
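For reference, the order arithmetic works out as follows. This is a
minimal standalone sketch (plain userspace C, not kernel code) assuming
4K pages and CONFIG_KASAN=n, so that KASAN_STACK_ORDER is 0:

	#include <stdio.h>

	#define PAGE_SIZE		4096UL
	#define KASAN_STACK_ORDER	0	/* assumes CONFIG_KASAN=n */

	/* order 1 after this patch; was 0 */
	#define EXCEPTION_STACK_ORDER	(1 + KASAN_STACK_ORDER)
	#define EXCEPTION_STKSZ		(PAGE_SIZE << EXCEPTION_STACK_ORDER)

	#define IRQ_STACK_ORDER		(2 + KASAN_STACK_ORDER)
	#define IRQ_STACK_SIZE		(PAGE_SIZE << IRQ_STACK_ORDER)

	int main(void)
	{
		/* prints 8192 and 16384: 2-page exception stacks, 4-page IRQ stacks */
		printf("EXCEPTION_STKSZ = %lu\n", EXCEPTION_STKSZ);
		printf("IRQ_STACK_SIZE  = %lu\n", IRQ_STACK_SIZE);
		return 0;
	}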
Reported-by: Michael Wang
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/YUIO9Ye98S5Eb68w@hirez.programming.kicks-ass.net
---
 arch/x86/include/asm/page_64_types.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index a8d4ad856568..e9e2c3ba5923 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -15,7 +15,7 @@
 #define THREAD_SIZE_ORDER	(2 + KASAN_STACK_ORDER)
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 
-#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER (1 + KASAN_STACK_ORDER)
 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
 
 #define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
-- 
cgit v1.2.3

From 541ac97186d9ea88491961a46284de3603c914fd Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Fri, 1 Oct 2021 21:41:20 +0200
Subject: x86/sev: Make the #VC exception stacks part of the default stacks storage

The size of the exception stacks was increased by the commit in Fixes,
resulting in stack sizes greater than a page.

The #VC exception handling was only mapping the first (bottom) page,
resulting in an SEV-ES guest failing to boot.

Make the #VC exception stacks part of the default exception stacks
storage and allocate them only with a CONFIG_AMD_MEM_ENCRYPT=y .config.
Map them only when an SEV-ES guest has been detected.

Rip out the custom VC stacks mapping and storage code.

[ bp: Steal and adapt Tom's commit message. ]

Fixes: 7fae4c24a2b8 ("x86: Increase exception stack sizes")
Signed-off-by: Borislav Petkov
Tested-by: Tom Lendacky
Tested-by: Brijesh Singh
Link: https://lkml.kernel.org/r/YVt1IMjIs7pIZTRR@zn.tnic
---
 arch/x86/include/asm/cpu_entry_area.h |  8 +++++++-
 arch/x86/kernel/sev.c                 | 32 --------------------------------
 arch/x86/mm/cpu_entry_area.c          |  7 +++++++
 3 files changed, 14 insertions(+), 33 deletions(-)
(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 3d52b094850a..dd5ea1bdf04c 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -10,6 +10,12 @@
 
 #ifdef CONFIG_X86_64
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
+#else
+#define VC_EXCEPTION_STKSZ	0
+#endif
+
 /* Macro to enforce the same ordering and stack sizes */
 #define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
 	char	DF_stack_guard[guardsize];			\
 	char	DF_stack[EXCEPTION_STKSZ];			\
@@ -28,7 +34,7 @@
 
 /* The exception stacks' physical storage. No guard pages required */
 struct exception_stacks {
	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
 };
 
 /* The effective cpu entry area mapping with guard pages. */
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 53a6837d354b..4d0d1c2b65e1 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -46,16 +46,6 @@ static struct ghcb __initdata *boot_ghcb;
 
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
 
-	/* Physical storage for the per-CPU IST stack of the #VC handler */
-	char ist_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
-
-	/*
-	 * Physical storage for the per-CPU fall-back stack of the #VC handler.
-	 * The fall-back stack is used when it is not safe to switch back to the
-	 * interrupted stack in the #VC entry code.
-	 */
-	char fallback_stack[EXCEPTION_STKSZ] __aligned(PAGE_SIZE);
-
 	/*
 	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
 	 * It is needed when an NMI happens while the #VC handler uses the real
@@ -99,27 +89,6 @@ DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
 /* Needed in vc_early_forward_exception */
 void do_early_exception(struct pt_regs *regs, int trapnr);
 
-static void __init setup_vc_stacks(int cpu)
-{
-	struct sev_es_runtime_data *data;
-	struct cpu_entry_area *cea;
-	unsigned long vaddr;
-	phys_addr_t pa;
-
-	data = per_cpu(runtime_data, cpu);
-	cea = get_cpu_entry_area(cpu);
-
-	/* Map #VC IST stack */
-	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC);
-	pa = __pa(data->ist_stack);
-	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
-
-	/* Map VC fall-back stack */
-	vaddr = CEA_ESTACK_BOT(&cea->estacks, VC2);
-	pa = __pa(data->fallback_stack);
-	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
-}
-
 static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
 	unsigned long sp = regs->sp;
@@ -787,7 +756,6 @@ void __init sev_es_init_vc_handling(void)
 	for_each_possible_cpu(cpu) {
 		alloc_runtime_data(cpu);
 		init_ghcb(cpu);
-		setup_vc_stacks(cpu);
 	}
 
 	sev_es_setup_play_dead();
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index f5e1e60c9095..6c2f1b76a0b6 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -110,6 +110,13 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 	cea_map_stack(NMI);
 	cea_map_stack(DB);
 	cea_map_stack(MCE);
+
+	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
+		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
+			cea_map_stack(VC);
+			cea_map_stack(VC2);
+		}
+	}
 }
 #else
 static inline void percpu_setup_exception_stacks(unsigned int cpu)
-- 
cgit v1.2.3
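A closing note on the ESTACKS_MEMBERS() trick used above: the same
member list is expanded twice, once with guardsize=0 and the optional
#VC size (the physical storage), and once with a guard page and the
full size (the virtual cpu_entry_area layout). The sketch below is a
trimmed-down, userspace-compilable illustration of that pattern, not
the kernel's actual struct: the member list is reduced to DF/NMI/VC/VC2
and the sizes are hard-coded. Zero-length arrays are a GCC extension,
which the kernel itself relies on here.

	#include <stdio.h>

	#define PAGE_SIZE	4096
	#define EXCEPTION_STKSZ	(2 * PAGE_SIZE)

	/* assume CONFIG_AMD_MEM_ENCRYPT=n: the #VC stacks occupy no storage */
	#define VC_EXCEPTION_STKSZ	0

	/* same ordering for both expansions; member list reduced for brevity */
	#define ESTACKS_MEMBERS(guardsize, optional_stack_size)	\
		char DF_stack_guard[guardsize];			\
		char DF_stack[EXCEPTION_STKSZ];			\
		char NMI_stack_guard[guardsize];		\
		char NMI_stack[EXCEPTION_STKSZ];		\
		char VC_stack_guard[guardsize];			\
		char VC_stack[optional_stack_size];		\
		char VC2_stack_guard[guardsize];		\
		char VC2_stack[optional_stack_size];

	/* physical storage: no guard pages, VC stacks compiled away */
	struct exception_stacks {
		ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
	};

	/* effective mapping: guard pages, VC slots always in the layout */
	struct cea_exception_stacks {
		ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
	};

	int main(void)
	{
		/* prints 16384 and 49152 with the sizes assumed above */
		printf("storage: %zu bytes\n", sizeof(struct exception_stacks));
		printf("mapping: %zu bytes\n", sizeof(struct cea_exception_stacks));
		return 0;
	}

Keeping the VC slots in the virtual layout regardless of config is what
lets the final hunk map them conditionally, via cc_platform_has(),
without the cpu_entry_area layout changing between configurations.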