author	Paolo Bonzini <pbonzini@redhat.com>	2018-07-02 13:03:48 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2018-07-04 20:49:39 +0200
commit	3fa045be4c720146b18a19cea7a767dc6ad5df94 (patch)
tree	039611f64fd9ffb68469713fa3481330b67a9d0b /arch/x86/kvm
parent	a47dd5f06714c844b33f3b5f517b6f3e81ce57b5 (diff)
download	linux-3fa045be4c720146b18a19cea7a767dc6ad5df94.tar.bz2
x86/KVM/VMX: Add L1D MSR based flush
336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
MSRs defined in the document.

The semantics of this MSR are to allow "finer granularity invalidation of
caching structures than existing mechanisms like WBINVD. It will writeback
and invalidate the L1 data cache, including all cachelines brought in by
preceding instructions, without invalidating all caches (eg. L2 or LLC).
Some processors may also invalidate the first level instruction cache on a
L1D_FLUSH command. The L1 data and instruction caches may be shared across
the logical processors of a core."

Use it instead of the loop based L1 flush algorithm.

A copy of this document is available at
    https://bugzilla.kernel.org/show_bug.cgi?id=199511

[ tglx: Avoid allocating pages when the MSR is available ]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
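For reference, the new flush path reduces to a single write of the L1D_FLUSH
command bit to the IA32_FLUSH_CMD MSR. A minimal sketch, assuming the
write-only semantics quoted above; the in-tree constants live in
arch/x86/include/asm/msr-index.h, and the helper name here is illustrative:

	/* Sketch of the MSR based flush path. MSR number and command bit
	 * are taken from the document quoted above. Needs <linux/bits.h>
	 * for BIT() and <asm/msr.h> for wrmsrl().
	 */
	#define MSR_IA32_FLUSH_CMD	0x0000010b
	#define L1D_FLUSH		BIT(0)	/* writeback + invalidate L1D */

	static inline void l1d_flush_via_msr(void)
	{
		/* Write-only command MSR: writing bit 0 triggers the flush */
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
	}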
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/vmx.c	15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b072264eb464..a1dbc17c7a03 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9580,6 +9580,11 @@ static void __maybe_unused vmx_l1d_flush(void)
 {
 	int size = PAGE_SIZE << L1D_CACHE_ORDER;
 
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+		return;
+	}
+
 	asm volatile(
 		/* First ensure the pages are in the TLB */
 		"xorl %%eax, %%eax\n"
@@ -13158,11 +13163,13 @@ static int __init vmx_setup_l1d_flush(void)
 	    !boot_cpu_has_bug(X86_BUG_L1TF))
 		return 0;
 
-	page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-	if (!page)
-		return -ENOMEM;
+	if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+		if (!page)
+			return -ENOMEM;
+		vmx_l1d_flush_pages = page_address(page);
+	}
 
-	vmx_l1d_flush_pages = page_address(page);
 	static_branch_enable(&vmx_l1d_should_flush);
 	return 0;
 }
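Taken together, the two hunks make the MSR write the fast path and demote the
loop based flush to a fallback, so the flush pages are only allocated when
X86_FEATURE_FLUSH_L1D is absent. As a reading aid, a sketch of how
vmx_setup_l1d_flush() reads with the hunk applied, assembled from the hunk
and its context lines; the enablement checks the hunk truncates are
abbreviated to the one condition it shows:

	static int __init vmx_setup_l1d_flush(void)
	{
		struct page *page;

		/* Sketch: earlier conditions in this check are omitted,
		 * the hunk shows only the tail of it.
		 */
		if (!boot_cpu_has_bug(X86_BUG_L1TF))
			return 0;

		/* The flush pages back only the software loop; with the
		 * MSR available, vmx_l1d_flush() returns before touching
		 * vmx_l1d_flush_pages, so skip the allocation entirely.
		 */
		if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
			page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
			if (!page)
				return -ENOMEM;
			vmx_l1d_flush_pages = page_address(page);
		}

		static_branch_enable(&vmx_l1d_should_flush);
		return 0;
	}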