commit 9de6fe3c28d6d8feadfad907961f1f31b85c6985
Author:    Xiaoyao Li <xiaoyao.li@intel.com>	2020-04-10 13:54:01 +0200
Committer: Borislav Petkov <bp@suse.de>	2020-04-11 16:40:55 +0200
tree   7f2d97a3a8e88b134b5309f72275c092fb454c2d
parent d7e94dbdac1a40924626b0efc7ff530c8baf5e4a
KVM: x86: Emulate split-lock access as a write in emulator
Emulate split-lock accesses as writes if split lock detection is on to avoid #AC during emulation, which will result in a panic(). This should never occur for a well-behaved guest, but a malicious guest can manipulate the TLB to trigger emulation of a locked instruction [1].

More discussion can be found at [2][3].

[1] https://lkml.kernel.org/r/8c5b11c9-58df-38e7-a514-dc12d687b198@redhat.com
[2] https://lkml.kernel.org/r/20200131200134.GD18946@linux.intel.com
[3] https://lkml.kernel.org/r/20200227001117.GX9940@linux.intel.com

Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lkml.kernel.org/r/20200410115517.084300242@linutronix.de
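To illustrate the boundary check this patch introduces, here is a minimal userspace sketch of the same test, assuming 4 KiB pages and a 64-byte cache line; must_emulate_as_write() is a hypothetical stand-in for the logic inside emulator_cmpxchg_emulated(), not a kernel function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_MASK  (~(uint64_t)(4096 - 1))  /* assumes 4 KiB pages */
#define CACHE_LINE_BYTES  64                       /* typical x86 line size */

/*
 * Mirrors the patched check: with split lock detection (SLD) enabled,
 * any access that splits a cache line falls back to a plain write;
 * without SLD, only accesses that cross a page boundary do.
 */
static bool must_emulate_as_write(uint64_t gpa, unsigned int bytes, bool sld_on)
{
	uint64_t mask = sld_on ? ~(uint64_t)(CACHE_LINE_BYTES - 1)
			       : SKETCH_PAGE_MASK;

	return ((gpa + bytes - 1) & mask) != (gpa & mask);
}

int main(void)
{
	/* An 8-byte cmpxchg starting 4 bytes before a cache-line boundary. */
	uint64_t gpa = 0x1003c;

	printf("SLD off: %d\n", must_emulate_as_write(gpa, 8, false)); /* 0: stays in one page */
	printf("SLD on:  %d\n", must_emulate_as_write(gpa, 8, true));  /* 1: splits a cache line */
	return 0;
}

With SLD off, this access stays within one page, so the atomic cmpxchg would still be attempted; with SLD on, the finer cache-line mask catches the split and forces the write fallback.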
 arch/x86/kvm/x86.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 027dfd278a97..3bf2ecafd027 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5839,6 +5839,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_host_map map;
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	u64 page_line_mask;
 	gpa_t gpa;
 	char *kaddr;
 	bool exchanged;
@@ -5853,7 +5854,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto emul_write;
 
-	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
+	/*
+	 * Emulate the atomic as a straight write to avoid #AC if SLD is
+	 * enabled in the host and the access splits a cache line.
+	 */
+	if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		page_line_mask = ~(cache_line_size() - 1);
+	else
+		page_line_mask = PAGE_MASK;
+
+	if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
 		goto emul_write;
 
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
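Note on the fallback path: when the check fires, control jumps to the function's emul_write label, which emulates the access as a plain (non-atomic) write, the same degraded-but-safe behavior already used for page-crossing cmpxchg, which is exactly what the commit message describes for split-lock accesses.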