author     Andrea Arcangeli <aarcange@redhat.com>    2009-03-12 18:18:43 +0100
committer  Avi Kivity <avi@redhat.com>               2009-03-24 11:03:14 +0200
commit     4539b35881ae9664b0e2953438dd83f5ee02c0b4 (patch)
tree       2bade12f83d50ff15947176d64c868c34f8df70a /arch/x86
parent     36463146ffb7eee4582ed785a8c8be213b8ed110 (diff)
download   linux-4539b35881ae9664b0e2953438dd83f5ee02c0b4.tar.bz2
KVM: Fix missing smp tlb flush in invlpg
When kvm emulates an invlpg instruction, it can drop a shadow pte but leave the guest tlbs intact. This can cause memory corruption when swapping out: without the flush, another cpu can still write to a freed host physical page.

Whenever rmap_remove is called, the smp tlb flush must happen before mmu_lock is released, because the VM takes mmu_lock before it can finally add the page to the freelist after swapout. The mmu notifier makes it safe to flush the tlb after freeing the page (otherwise it would never be safe), so we can do a single flush for multiple invalidated sptes.

Cc: stable@kernel.org
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
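[Editor's note] A minimal user-space sketch of the ordering this patch enforces, not KVM code: pthread_mutex_t stands in for kvm->mmu_lock, and fake_rmap_remove()/fake_flush_remote_tlbs() are hypothetical stubs for rmap_remove() and kvm_flush_remote_tlbs(). The point is that the remote tlb flush is issued while the lock is still held, so the swap path cannot free the host page while a stale guest tlb entry still maps it.

    /* Simplified model of the locking/flush ordering; stand-ins only. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for rmap_remove(): unlink a shadow pte from the rmap. */
    static void fake_rmap_remove(unsigned long *sptep)
    {
            *sptep = 0;     /* drop the translation */
    }

    /* Stand-in for kvm_flush_remote_tlbs(): kick every vcpu to flush. */
    static void fake_flush_remote_tlbs(void)
    {
            printf("remote tlb flush issued\n");
    }

    static void invlpg_model(unsigned long *sptep, int spte_present)
    {
            int need_flush = 0;

            pthread_mutex_lock(&mmu_lock);

            if (spte_present) {
                    fake_rmap_remove(sptep);
                    need_flush = 1;         /* a translation was dropped */
            }

            /*
             * The flush happens before the unlock: once the lock is
             * released, the swap path may take it, see the spte gone,
             * and free the host page even though another cpu still has
             * a stale guest tlb entry pointing at it.
             */
            if (need_flush)
                    fake_flush_remote_tlbs();

            pthread_mutex_unlock(&mmu_lock);
    }

    int main(void)
    {
            unsigned long spte = 0x1234;

            invlpg_model(&spte, 1);
            return 0;
    }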
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h | 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0f11792fafa6..6bd70206c561 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -446,6 +446,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
+	int need_flush = 0;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 
@@ -465,6 +466,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			rmap_remove(vcpu->kvm, sptep);
 			if (is_large_pte(*sptep))
 				--vcpu->kvm->stat.lpages;
+			need_flush = 1;
 		}
 		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
 		break;
@@ -474,6 +476,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 	}
 
+	if (need_flush)
+		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	if (pte_gpa == -1)