Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx/vmx.c  |  35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 79b42197ed7e..1dcd3f157a70 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6416,6 +6416,7 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* Enter guest mode */
"call vmx_vmenter\n\t"
+ "jbe 2f \n\t"
/* Temporarily save guest's RCX. */
"push %%" _ASM_CX " \n\t"
@@ -6423,9 +6424,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* Reload the vcpu_vmx pointer to RCX. */
"mov %c[wordsize](%%" _ASM_SP "), %%" _ASM_CX " \n\t"
- /* Set vmx->fail based on EFLAGS.{CF,ZF} */
- "setbe %c[fail](%%" _ASM_CX ")\n\t"
-
/* Save all guest registers, including RCX from the stack */
"mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
"mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
@@ -6443,15 +6441,22 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
"mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
"mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
"mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
+#endif
+
+ /* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
+ "xor %%ebx, %%ebx \n\t"
/*
- * Clear all general purpose registers (except RSP, which is loaded by
- * the CPU during VM-Exit) to prevent speculative use of the guest's
- * values, even those that are saved/loaded via the stack. In theory,
- * an L1 cache miss when restoring registers could lead to speculative
- * execution with the guest's values. Zeroing XORs are dirt cheap,
- * i.e. the extra paranoia is essentially free.
+ * Clear all general purpose registers except RSP and RBX to prevent
+ * speculative use of the guest's values, even those that are reloaded
+ * via the stack. In theory, an L1 cache miss when restoring registers
+ * could lead to speculative execution with the guest's values.
+ * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
+ * free. RSP and RBX are exempt as RSP is restored by hardware during
+ * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
*/
+ "1: \n\t"
+#ifdef CONFIG_X86_64
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -6462,7 +6467,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
"xor %%r15d, %%r15d \n\t"
#endif
"xor %%eax, %%eax \n\t"
- "xor %%ebx, %%ebx \n\t"
"xor %%ecx, %%ecx \n\t"
"xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
@@ -6472,7 +6476,15 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
/* "POP" the vcpu_vmx pointer. */
"add $%c[wordsize], %%" _ASM_SP " \n\t"
"pop %%" _ASM_BP " \n\t"
- : ASM_CALL_CONSTRAINT, "=b"((int){0}),
+ "jmp 3f \n\t"
+
+ /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
+ "2: \n\t"
+ "mov $1, %%ebx \n\t"
+ "jmp 1b \n\t"
+ "3: \n\t"
+
+ : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
#ifdef CONFIG_X86_64
"=D"((int){0})
: "D"(vmx),
@@ -6481,7 +6493,6 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
: "a"(vmx),
#endif
"b"(vmx->loaded_vmcs->launched),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),