From db4e919d9a119b7c54eb5e5ac9bee5d3eb4cb859 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 28 Aug 2019 17:39:46 +0200 Subject: x86/math64: Provide a sane mul_u64_u32_div() implementation for x86_64 On x86_64 we can do a u64 * u64 -> u128 widening multiply followed by a u128 / u64 -> u64 division to implement a sane version of mul_u64_u32_div(). Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/div64.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h index 20a46150e0a8..9b8cb50768c2 100644 --- a/arch/x86/include/asm/div64.h +++ b/arch/x86/include/asm/div64.h @@ -73,6 +73,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b) #else # include + +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div) +{ + u64 q; + + asm ("mulq %2; divq %3" : "=a" (q) + : "a" (a), "rm" ((u64)mul), "rm" ((u64)div) + : "rdx"); + + return q; +} +#define mul_u64_u32_div mul_u64_u32_div + #endif /* CONFIG_X86_32 */ #endif /* _ASM_X86_DIV64_H */ -- cgit v1.2.3 From 559ceeed62a5121783a8955c63aeb18aaa0ef224 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 6 Sep 2019 09:55:49 +0200 Subject: x86/asm/suspend: Get rid of bogus_64_magic bogus_64_magic is only a dead-end loop. There is no need for an out-of-order function (and unannotated local label), so just handle it in-place and also store 0xbad-m-a-g-i-c to %rcx beforehand, in case someone is inspecting registers. Here a qemu+gdb example: Remote debugging using localhost:1235 wakeup_long64 () at arch/x86/kernel/acpi/wakeup_64.S:26 26 jmp 1b (gdb) info registers rax 0x123456789abcdef0 1311768467463790320 rbx 0x0 0 rcx 0xbad6d61676963 3286910041024867 ^^^^^^^^^^^^^^^ [ bp: Add the gdb example. ] Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Len Brown Cc: linux-pm@vger.kernel.org Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20190906075550.23435-1-jslaby@suse.cz --- arch/x86/kernel/acpi/wakeup_64.S | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index b0715c3ac18d..7f9ade13bbcf 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -18,8 +18,13 @@ ENTRY(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax - jne bogus_64_magic + je 2f + /* stop here on a saved_magic mismatch */ + movq $0xbad6d61676963, %rcx +1: + jmp 1b +2: movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %ds @@ -37,9 +42,6 @@ ENTRY(wakeup_long64) jmp *%rax ENDPROC(wakeup_long64) -bogus_64_magic: - jmp bogus_64_magic - ENTRY(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp -- cgit v1.2.3 From 98ededb61fafd303f2337f68b0326a4b95e3cebe Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 6 Sep 2019 09:55:50 +0200 Subject: x86/asm: Make some functions local labels Boris suggests to make a local label (prepend ".L") to these functions to eliminate them from the symbol table. These are functions with very local names and really should not be visible anywhere. Note that objtool won't see these functions anymore (to generate ORC debug info). But all the functions are not annotated with ENDPROC, so they won't have objtool's attention anyway. 
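For illustration, a minimal assembler sketch (assumed, not taken from this patch) of what the ".L" prefix buys: the first label below is emitted into the object file's symbol table, the second is assembler-local and never appears there (so neither nm nor objtool will see it):

    	.text
    visible_helper:		/* local binding, but still lands in the symbol table */
    	ret
    .Lhidden_helper:		/* ".L" prefix: not emitted as a symbol at all */
    	ret
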
Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Cao jin Cc: Greg Kroah-Hartman Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Steve Winslow Cc: Thomas Gleixner Cc: Wei Huang Cc: x86-ml Cc: Xiaoyao Li Link: https://lkml.kernel.org/r/20190906075550.23435-2-jslaby@suse.cz --- arch/x86/boot/compressed/head_32.S | 4 ++-- arch/x86/boot/compressed/head_64.S | 18 +++++++++--------- arch/x86/entry/entry_64.S | 4 ++-- arch/x86/lib/copy_user_64.S | 14 +++++++------- arch/x86/lib/getuser.S | 16 ++++++++-------- arch/x86/lib/putuser.S | 22 +++++++++++----------- 6 files changed, 39 insertions(+), 39 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 37380c0d5999..5e30eaaf8576 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -140,7 +140,7 @@ ENTRY(startup_32) /* * Jump to the relocated address. */ - leal relocated(%ebx), %eax + leal .Lrelocated(%ebx), %eax jmp *%eax ENDPROC(startup_32) @@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry) #endif .text -relocated: +.Lrelocated: /* * Clear BSS (stack is currently empty) diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 6233ae35d0d9..d98cd483377e 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -87,7 +87,7 @@ ENTRY(startup_32) call verify_cpu testl %eax, %eax - jnz no_longmode + jnz .Lno_longmode /* * Compute the delta between where we were compiled to run at @@ -322,7 +322,7 @@ ENTRY(startup_64) 1: popq %rdi subq $1b, %rdi - call adjust_got + call .Ladjust_got /* * At this point we are in long mode with 4-level paging enabled, @@ -421,7 +421,7 @@ trampoline_return: /* The new adjustment is the relocation address */ movq %rbx, %rdi - call adjust_got + call .Ladjust_got /* * Copy the compressed kernel to the end of our buffer @@ -440,7 +440,7 @@ trampoline_return: /* * Jump to the relocated address. */ - leaq relocated(%rbx), %rax + leaq .Lrelocated(%rbx), %rax jmp *%rax #ifdef CONFIG_EFI_STUB @@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry) #endif .text -relocated: +.Lrelocated: /* * Clear BSS (stack is currently empty) @@ -548,7 +548,7 @@ relocated: * first time we touch GOT). * RDI is the new adjustment to apply. 
*/ -adjust_got: +.Ladjust_got: /* Walk through the GOT adding the address to the entries */ leaq _got(%rip), %rdx leaq _egot(%rip), %rcx @@ -622,7 +622,7 @@ ENTRY(trampoline_32bit_src) movl %eax, %cr4 /* Calculate address of paging_enabled() once we are executing in the trampoline */ - leal paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax + leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax /* Prepare the stack for far return to Long Mode */ pushl $__KERNEL_CS @@ -635,7 +635,7 @@ ENTRY(trampoline_32bit_src) lret .code64 -paging_enabled: +.Lpaging_enabled: /* Return from the trampoline */ jmp *%rdi @@ -647,7 +647,7 @@ paging_enabled: .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE .code32 -no_longmode: +.Lno_longmode: /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ 1: hlt diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index be9ca198c581..cf273242691b 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1058,10 +1058,10 @@ ENTRY(native_load_gs_index) ENDPROC(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) - _ASM_EXTABLE(.Lgs_change, bad_gs) + _ASM_EXTABLE(.Lgs_change, .Lbad_gs) .section .fixup, "ax" /* running with kernelgs */ -bad_gs: +.Lbad_gs: SWAPGS /* switch back to user gs */ .macro ZAP_GS /* This can't be a string because the preprocessor needs to see it. */ diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 4fe1601dbc5d..86976b55ae74 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -33,7 +33,7 @@ 102: .section .fixup,"ax" 103: addl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail + jmp .Lcopy_user_handle_tail .previous _ASM_EXTABLE_UA(100b, 103b) @@ -113,7 +113,7 @@ ENTRY(copy_user_generic_unrolled) 40: leal (%rdx,%rcx,8),%edx jmp 60f 50: movl %ecx,%edx -60: jmp copy_user_handle_tail /* ecx is zerorest also */ +60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */ .previous _ASM_EXTABLE_UA(1b, 30b) @@ -177,7 +177,7 @@ ENTRY(copy_user_generic_string) .section .fixup,"ax" 11: leal (%rdx,%rcx,8),%ecx 12: movl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail + jmp .Lcopy_user_handle_tail .previous _ASM_EXTABLE_UA(1b, 11b) @@ -210,7 +210,7 @@ ENTRY(copy_user_enhanced_fast_string) .section .fixup,"ax" 12: movl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail + jmp .Lcopy_user_handle_tail .previous _ASM_EXTABLE_UA(1b, 12b) @@ -231,7 +231,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * eax uncopied bytes or 0 if successful. 
*/ ALIGN; -copy_user_handle_tail: +.Lcopy_user_handle_tail: movl %edx,%ecx 1: rep movsb 2: mov %ecx,%eax @@ -239,7 +239,7 @@ copy_user_handle_tail: ret _ASM_EXTABLE_UA(1b, 2b) -END(copy_user_handle_tail) +END(.Lcopy_user_handle_tail) /* * copy_user_nocache - Uncached memory copy with exception handling @@ -364,7 +364,7 @@ ENTRY(__copy_user_nocache) movl %ecx,%edx .L_fixup_handle_tail: sfence - jmp copy_user_handle_tail + jmp .Lcopy_user_handle_tail .previous _ASM_EXTABLE_UA(1b, .L_fixup_4x8b_copy) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 304f958c27b2..9578eb88fc87 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -115,7 +115,7 @@ ENDPROC(__get_user_8) EXPORT_SYMBOL(__get_user_8) -bad_get_user_clac: +.Lbad_get_user_clac: ASM_CLAC bad_get_user: xor %edx,%edx @@ -123,7 +123,7 @@ bad_get_user: ret #ifdef CONFIG_X86_32 -bad_get_user_8_clac: +.Lbad_get_user_8_clac: ASM_CLAC bad_get_user_8: xor %edx,%edx @@ -132,12 +132,12 @@ bad_get_user_8: ret #endif - _ASM_EXTABLE_UA(1b, bad_get_user_clac) - _ASM_EXTABLE_UA(2b, bad_get_user_clac) - _ASM_EXTABLE_UA(3b, bad_get_user_clac) + _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(2b, .Lbad_get_user_clac) + _ASM_EXTABLE_UA(3b, .Lbad_get_user_clac) #ifdef CONFIG_X86_64 - _ASM_EXTABLE_UA(4b, bad_get_user_clac) + _ASM_EXTABLE_UA(4b, .Lbad_get_user_clac) #else - _ASM_EXTABLE_UA(4b, bad_get_user_8_clac) - _ASM_EXTABLE_UA(5b, bad_get_user_8_clac) + _ASM_EXTABLE_UA(4b, .Lbad_get_user_8_clac) + _ASM_EXTABLE_UA(5b, .Lbad_get_user_8_clac) #endif diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 14bf78341d3c..126dd6a9ec9b 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -37,7 +37,7 @@ ENTRY(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX - jae bad_put_user + jae .Lbad_put_user ASM_STAC 1: movb %al,(%_ASM_CX) xor %eax,%eax @@ -51,7 +51,7 @@ ENTRY(__put_user_2) mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX - jae bad_put_user + jae .Lbad_put_user ASM_STAC 2: movw %ax,(%_ASM_CX) xor %eax,%eax @@ -65,7 +65,7 @@ ENTRY(__put_user_4) mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX - jae bad_put_user + jae .Lbad_put_user ASM_STAC 3: movl %eax,(%_ASM_CX) xor %eax,%eax @@ -79,7 +79,7 @@ ENTRY(__put_user_8) mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX - jae bad_put_user + jae .Lbad_put_user ASM_STAC 4: mov %_ASM_AX,(%_ASM_CX) #ifdef CONFIG_X86_32 @@ -91,16 +91,16 @@ ENTRY(__put_user_8) ENDPROC(__put_user_8) EXPORT_SYMBOL(__put_user_8) -bad_put_user_clac: +.Lbad_put_user_clac: ASM_CLAC -bad_put_user: +.Lbad_put_user: movl $-EFAULT,%eax RET - _ASM_EXTABLE_UA(1b, bad_put_user_clac) - _ASM_EXTABLE_UA(2b, bad_put_user_clac) - _ASM_EXTABLE_UA(3b, bad_put_user_clac) - _ASM_EXTABLE_UA(4b, bad_put_user_clac) + _ASM_EXTABLE_UA(1b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(3b, .Lbad_put_user_clac) + _ASM_EXTABLE_UA(4b, .Lbad_put_user_clac) #ifdef CONFIG_X86_32 - _ASM_EXTABLE_UA(5b, bad_put_user_clac) + _ASM_EXTABLE_UA(5b, .Lbad_put_user_clac) #endif -- cgit v1.2.3 From e86c2c8b9380440bbe761b8e2f63ab6b04a45ac2 Mon Sep 17 00:00:00 2001 From: Brendan Shanks Date: Thu, 5 Sep 2019 16:22:21 -0700 Subject: x86/umip: Add emulation (spoofing) for UMIP covered instructions in 64-bit processes as well Add emulation (spoofing) of the SGDT, SIDT, and SMSW instructions for 64-bit processes. 
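For illustration, a minimal 64-bit user-space sketch (assumed, not part of this patch) of the kind of SGDT use that is now spoofed instead of faulting; in long mode the destination operand is 10 bytes, a 2-byte limit followed by an 8-byte base:

    	.bss
    gdt_desc:
    	.skip	10		/* 2-byte limit + 8-byte base in 64-bit mode */

    	.text
    	.globl	_start
    _start:
    	sgdt	gdt_desc(%rip)	/* #GPs under UMIP; the kernel now emulates the
    				   store instead of delivering SIGSEGV */
    	movl	$60, %eax	/* exit(0) */
    	xorl	%edi, %edi
    	syscall

With this change such a process finds the dummy GDT base 0xfffffffffffe0000 and a zero limit in gdt_desc rather than being killed.
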
Wine users have encountered a number of 64-bit Windows games that use these instructions (particularly SGDT), and were crashing when run on UMIP-enabled systems. Originally-by: Ricardo Neri Signed-off-by: Brendan Shanks Reviewed-by: Ricardo Neri Reviewed-by: H. Peter Anvin (Intel) Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Eric W. Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lkml.kernel.org/r/20190905232222.14900-1-bshanks@codeweavers.com [ Minor edits: capitalization, added 'spoofing' wording. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/umip.c | 65 +++++++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 27 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 5b345add550f..548fefed71ee 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -19,7 +19,7 @@ /** DOC: Emulation for User-Mode Instruction Prevention (UMIP) * * The feature User-Mode Instruction Prevention present in recent Intel - * processor prevents a group of instructions (sgdt, sidt, sldt, smsw, and str) + * processor prevents a group of instructions (SGDT, SIDT, SLDT, SMSW and STR) * from being executed with CPL > 0. Otherwise, a general protection fault is * issued. * @@ -36,8 +36,8 @@ * DOSEMU2) rely on this subset of instructions to function. * * The instructions protected by UMIP can be split in two groups. Those which - * return a kernel memory address (sgdt and sidt) and those which return a - * value (sldt, str and smsw). + * return a kernel memory address (SGDT and SIDT) and those which return a + * value (SLDT, STR and SMSW). * * For the instructions that return a kernel memory address, applications * such as WineHQ rely on the result being located in the kernel memory space, @@ -45,15 +45,13 @@ * value that, lies close to the top of the kernel memory. The limit for the GDT * and the IDT are set to zero. * - * Given that sldt and str are not commonly used in programs that run on WineHQ + * Given that SLDT and STR are not commonly used in programs that run on WineHQ * or DOSEMU2, they are not emulated. * * The instruction smsw is emulated to return the value that the register CR0 * has at boot time as set in the head_32. * - * Also, emulation is provided only for 32-bit processes; 64-bit processes - * that attempt to use the instructions that UMIP protects will receive the - * SIGSEGV signal issued as a consequence of the general protection fault. + * Emulation is provided for both 32-bit and 64-bit processes. * * Care is taken to appropriately emulate the results when segmentation is * used. That is, rather than relying on USER_DS and USER_CS, the function @@ -63,17 +61,18 @@ * application uses a local descriptor table. */ -#define UMIP_DUMMY_GDT_BASE 0xfffe0000 -#define UMIP_DUMMY_IDT_BASE 0xffff0000 +#define UMIP_DUMMY_GDT_BASE 0xfffffffffffe0000ULL +#define UMIP_DUMMY_IDT_BASE 0xffffffffffff0000ULL /* * The SGDT and SIDT instructions store the contents of the global descriptor * table and interrupt table registers, respectively. The destination is a * memory operand of X+2 bytes. X bytes are used to store the base address of - * the table and 2 bytes are used to store the limit. In 32-bit processes, the - * only processes for which emulation is provided, X has a value of 4. + * the table and 2 bytes are used to store the limit. In 32-bit processes X + * has a value of 4, in 64-bit processes X has a value of 8. 
*/ -#define UMIP_GDT_IDT_BASE_SIZE 4 +#define UMIP_GDT_IDT_BASE_SIZE_64BIT 8 +#define UMIP_GDT_IDT_BASE_SIZE_32BIT 4 #define UMIP_GDT_IDT_LIMIT_SIZE 2 #define UMIP_INST_SGDT 0 /* 0F 01 /0 */ @@ -189,6 +188,7 @@ static int identify_insn(struct insn *insn) * @umip_inst: A constant indicating the instruction to emulate * @data: Buffer into which the dummy result is stored * @data_size: Size of the emulated result + * @x86_64: true if process is 64-bit, false otherwise * * Emulate an instruction protected by UMIP and provide a dummy result. The * result of the emulation is saved in @data. The size of the results depends @@ -202,11 +202,8 @@ static int identify_insn(struct insn *insn) * 0 on success, -EINVAL on error while emulating. */ static int emulate_umip_insn(struct insn *insn, int umip_inst, - unsigned char *data, int *data_size) + unsigned char *data, int *data_size, bool x86_64) { - unsigned long dummy_base_addr, dummy_value; - unsigned short dummy_limit = 0; - if (!data || !data_size || !insn) return -EINVAL; /* @@ -219,6 +216,9 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, * is always returned irrespective of the operand size. */ if (umip_inst == UMIP_INST_SGDT || umip_inst == UMIP_INST_SIDT) { + u64 dummy_base_addr; + u16 dummy_limit = 0; + /* SGDT and SIDT do not use registers operands. */ if (X86_MODRM_MOD(insn->modrm.value) == 3) return -EINVAL; @@ -228,13 +228,24 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst, else dummy_base_addr = UMIP_DUMMY_IDT_BASE; - *data_size = UMIP_GDT_IDT_LIMIT_SIZE + UMIP_GDT_IDT_BASE_SIZE; + /* + * 64-bit processes use the entire dummy base address. + * 32-bit processes use the lower 32 bits of the base address. + * dummy_base_addr is always 64 bits, but we memcpy the correct + * number of bytes from it to the destination. + */ + if (x86_64) + *data_size = UMIP_GDT_IDT_BASE_SIZE_64BIT; + else + *data_size = UMIP_GDT_IDT_BASE_SIZE_32BIT; + + memcpy(data + 2, &dummy_base_addr, *data_size); - memcpy(data + 2, &dummy_base_addr, UMIP_GDT_IDT_BASE_SIZE); + *data_size += UMIP_GDT_IDT_LIMIT_SIZE; memcpy(data, &dummy_limit, UMIP_GDT_IDT_LIMIT_SIZE); } else if (umip_inst == UMIP_INST_SMSW) { - dummy_value = CR0_STATE; + unsigned long dummy_value = CR0_STATE; /* * Even though the CR0 register has 4 bytes, the number @@ -290,11 +301,10 @@ static void force_sig_info_umip_fault(void __user *addr, struct pt_regs *regs) * fixup_umip_exception() - Fixup a general protection fault caused by UMIP * @regs: Registers as saved when entering the #GP handler * - * The instructions sgdt, sidt, str, smsw, sldt cause a general protection - * fault if executed with CPL > 0 (i.e., from user space). If the offending - * user-space process is not in long mode, this function fixes the exception - * up and provides dummy results for sgdt, sidt and smsw; str and sldt are not - * fixed up. Also long mode user-space processes are not fixed up. + * The instructions SGDT, SIDT, STR, SMSW and SLDT cause a general protection + * fault if executed with CPL > 0 (i.e., from user space). This function fixes + * the exception up and provides dummy results for SGDT, SIDT and SMSW; STR + * and SLDT are not fixed up. 
* * If operands are memory addresses, results are copied to user-space memory as * indicated by the instruction pointed by eIP using the registers indicated in @@ -373,13 +383,14 @@ bool fixup_umip_exception(struct pt_regs *regs) umip_pr_warning(regs, "%s instruction cannot be used by applications.\n", umip_insns[umip_inst]); - /* Do not emulate SLDT, STR or user long mode processes. */ - if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs)) + /* Do not emulate (spoof) SLDT or STR. */ + if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT) return false; umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n"); - if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size)) + if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size, + user_64bit_mode(regs))) return false; /* -- cgit v1.2.3