author      Kai Huang <kai.huang@linux.intel.com>    2015-01-09 16:44:30 +0800
committer   Paolo Bonzini <pbonzini@redhat.com>      2015-01-09 10:23:55 +0100
commit      7e71a59b250330fd52ee7293eb9d31952f16682e (patch)
tree        65d42aee4577a6ff7ae14ecf02816b20c532fc3f /arch/x86/kvm
parent      defcf51fa93929bd5d3ce5b91f8e6a106dae5e46 (diff)
KVM: x86: flush TLB when D bit is manually changed.
When software changes the D bit (either from 1 to 0, or from 0 to 1), the corresponding TLB entry in hardware is not updated immediately. We should flush it to guarantee consistency of the D bit between the TLB and the MMU page table in memory. This is especially important when clearing the D bit, since a stale TLB entry may cause false negatives in reporting dirtiness.

A sanity test was done on my machine with an Intel processor.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
[Check A bit too. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
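For illustration only (not part of the patch), below is a minimal, self-contained userspace sketch of the check being introduced: it rebuilds the new spte_is_bit_changed() predicate and shows when a flush would be requested after software clears the D bit. The EPT_ACCESSED_BIT/EPT_DIRTY_BIT values are hypothetical stand-ins for the kernel's shadow_accessed_mask/shadow_dirty_mask.

/* Standalone sketch; the mask values are placeholders, not the kernel's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EPT_ACCESSED_BIT (1ull << 8)  /* stand-in for shadow_accessed_mask */
#define EPT_DIRTY_BIT    (1ull << 9)  /* stand-in for shadow_dirty_mask */

/* Same predicate the patch adds: did any bit covered by the mask change? */
static bool spte_is_bit_changed(uint64_t old_spte, uint64_t new_spte, uint64_t bit_mask)
{
	return (old_spte & bit_mask) != (new_spte & bit_mask);
}

int main(void)
{
	uint64_t old_spte = EPT_ACCESSED_BIT | EPT_DIRTY_BIT; /* A=1, D=1 */
	uint64_t new_spte = EPT_ACCESSED_BIT;                 /* software clears D */

	/*
	 * The hardware TLB may still hold the old translation with D=1,
	 * so later writes through it would not set the D bit in memory
	 * again; a changed A/D bit therefore requires a TLB flush.
	 */
	if (spte_is_bit_changed(old_spte, new_spte,
				EPT_ACCESSED_BIT | EPT_DIRTY_BIT))
		printf("A/D bits changed -> request TLB flush\n");
	else
		printf("A/D bits unchanged -> no flush needed for this reason\n");
	return 0;
}

In the patch itself the equivalent condition only sets mmu_spte_update()'s return value; as the surrounding code suggests, it is the caller that performs the actual flush when true is returned.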
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c | 13 +++++++++++++
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a0985ebb5512..0d0fdd6f002c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -532,6 +532,11 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
 	return (old_spte & bit_mask) && !(new_spte & bit_mask);
 }
 
+static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return (old_spte & bit_mask) != (new_spte & bit_mask);
+}
+
 /* Rules for using mmu_spte_set:
  * Set the sptep from nonpresent to present.
  * Note: the sptep being assigned *must* be either not present
@@ -582,6 +587,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	if (!shadow_accessed_mask)
 		return ret;
 
+	/*
+	 * Flush TLB when accessed/dirty bits are changed in the page tables,
+	 * to guarantee consistency between TLB and page tables.
+	 */
+	if (spte_is_bit_changed(old_spte, new_spte,
+				shadow_accessed_mask | shadow_dirty_mask))
+		ret = true;
+
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))