Diffstat (limited to 'arch/x86/kvm/mmu/spte.h')
-rw-r--r--	arch/x86/kvm/mmu/spte.h	21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 398fd1bb13a7..6de3950fd704 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -131,6 +131,25 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT
/*
+ * If a thread running without exclusive control of the MMU lock must perform a
+ * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
+ * non-present intermediate value. Other threads which encounter this value
+ * should not modify the SPTE.
+ *
+ * This constant works because it is considered non-present on both AMD and
+ * Intel CPUs and does not create an L1TF vulnerability because the PFN bits
+ * are zeroed out.
+ *
+ * Only used by the TDP MMU.
+ */
+#define REMOVED_SPTE (1ull << 59)
+
+static inline bool is_removed_spte(u64 spte)
+{
+ return spte == REMOVED_SPTE;
+}
+
+/*
* In some cases, we need to preserve the GFN of a non-present or reserved
* SPTE when we usurp the upper five bits of the physical address space to
* defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
@@ -187,7 +206,7 @@ static inline bool is_access_track_spte(u64 spte)
static inline bool is_shadow_present_pte(u64 pte)
{
- return (pte != 0) && !is_mmio_spte(pte);
+ return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte);
}
static inline bool is_large_pte(u64 pte)
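
For illustration, the freeze protocol the new constant enables can be sketched
in userspace C. This is a minimal sketch under stated assumptions, not KVM
code: C11 atomics stand in for the kernel's cmpxchg64(), and every identifier
except REMOVED_SPTE and is_removed_spte() is a hypothetical stand-in.

/*
 * Sketch of the REMOVED_SPTE freeze protocol. A thread running without
 * exclusive control of the MMU lock freezes an SPTE before a multi-part
 * operation; other threads that see the frozen value leave it alone.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REMOVED_SPTE (1ull << 59)

static bool is_removed_spte(uint64_t spte)
{
	return spte == REMOVED_SPTE;
}

/*
 * Atomically replace an SPTE with REMOVED_SPTE. Fails if another thread
 * changed the SPTE first, in which case the caller must retry or back off.
 */
static bool try_freeze_spte(_Atomic uint64_t *sptep, uint64_t old_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, REMOVED_SPTE);
}

int main(void)
{
	/* A made-up present SPTE value, purely for the demonstration. */
	_Atomic uint64_t spte = 0x123000ull | 0x7;
	uint64_t old = atomic_load(&spte);

	if (!try_freeze_spte(&spte, old))
		return 1; /* lost the race; a real caller would retry */

	/*
	 * The SPTE now reads as REMOVED_SPTE: non-present on both AMD and
	 * Intel, with zeroed PFN bits, so it is safe to leave visible while
	 * the multi-part operation (e.g. tearing down a child page table)
	 * completes.
	 */
	printf("frozen: %d\n", is_removed_spte(atomic_load(&spte)));

	/* Finally, install the real new value; here, a cleared SPTE. */
	atomic_store(&spte, 0);
	return 0;
}

The second hunk is the other half of the protocol: by making
is_shadow_present_pte() reject REMOVED_SPTE, any walker that checks for
presence before touching an SPTE automatically skips a frozen entry, without
needing to know about the freeze protocol at all.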