| author | Ingo Molnar <mingo@elte.hu> | 2008-07-06 14:23:39 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-07-06 14:23:39 +0200 |
| commit | 68083e05d72d94f347293d8cc0067050ba904bfa (patch) | |
| tree | 842e71365bd90866be7add181661a4039d891564 /arch/x86/kvm/mmu.c | |
| parent | 7baac8b91f9871ba8cb09af84de4ae1d86d07812 (diff) | |
| parent | b7279469d66b55119784b8b9529c99c1955fe747 (diff) | |
| download | linux-68083e05d72d94f347293d8cc0067050ba904bfa.tar.bz2 | |
Merge commit 'v2.6.26-rc9' into cpus4096
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 24 |
1 file changed, 11 insertions, 13 deletions
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 36c5406b1813..7e7c3969f7a2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -640,6 +640,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
 			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			spte = NULL;
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
@@ -658,7 +659,7 @@ static int is_empty_shadow_page(u64 *spt)
 	u64 *end;
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-		if (*pos != shadow_trap_nonpresent_pte) {
+		if (is_shadow_present_pte(*pos)) {
 			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
@@ -1082,10 +1083,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		struct kvm_mmu_page *shadow;
 
 		spte |= PT_WRITABLE_MASK;
-		if (user_fault) {
-			mmu_unshadow(vcpu->kvm, gfn);
-			goto unshadowed;
-		}
 
 		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
 		if (shadow ||
@@ -1102,8 +1099,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 
-unshadowed:
-
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
@@ -1580,11 +1575,13 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  u64 *spte,
 				  const void *new)
 {
-	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
-	    && !vcpu->arch.update_pte.largepage) {
-		++vcpu->kvm->stat.mmu_pde_zapped;
-		return;
-	}
+	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+		if (!vcpu->arch.update_pte.largepage ||
+		    sp->role.glevels == PT32_ROOT_LEVEL) {
+			++vcpu->kvm->stat.mmu_pde_zapped;
+			return;
+		}
+	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (sp->role.glevels == PT32_ROOT_LEVEL)
@@ -1858,6 +1855,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_zap_page(vcpu->kvm, sp);
+		cond_resched();
 	}
 	free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
@@ -1996,7 +1994,7 @@ static struct shrinker mmu_shrinker = {
 	.seeks = DEFAULT_SEEKS * 10,
 };
 
-void mmu_destroy_caches(void)
+static void mmu_destroy_caches(void)
 {
 	if (pte_chain_cache)
 		kmem_cache_destroy(pte_chain_cache);
```
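A note on the first hunk: in `rmap_write_protect()`, `rmap_next()` treats its last argument as an iteration cursor, so once `rmap_remove()` has unlinked the current entry that cursor is stale; the added `spte = NULL;` makes the next `rmap_next()` call restart the walk from the head of the rmap chain. A minimal standalone sketch of that cursor convention (a hypothetical simplified list type; the kernel's real rmap uses a packed desc representation):

```c
#include <stddef.h>

/* Hypothetical singly linked chain standing in for an rmap. */
struct rmap_entry {
	unsigned long long *spte;
	struct rmap_entry *next;
};

/* rmap_next-style cursor walk: a NULL cursor means "start from the
 * head"; otherwise return the entry following the cursor. */
static unsigned long long *rmap_next_sketch(struct rmap_entry *head,
					    unsigned long long *prev_spte)
{
	struct rmap_entry *e;

	if (!prev_spte)
		return head ? head->spte : NULL;
	for (e = head; e; e = e->next)
		if (e->spte == prev_spte)
			return e->next ? e->next->spte : NULL;
	return NULL;	/* cursor no longer in the chain */
}
```

Handing a just-removed cursor back to the lookup loop above silently terminates the walk early (and the real desc-based code could follow freed memory), which is why the hunk clears the cursor before iterating again.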
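The `mmu_pte_write_new_pte()` hunk narrows when a non-leaf shadow entry may be updated in place: large-page updates are still patched, except for 32-bit non-PAE guests (`sp->role.glevels == PT32_ROOT_LEVEL`), whose 4 MB large pde would need two 2 MB shadow entries and is therefore zapped rather than patched. A standalone restatement of the new predicate (the constants are illustrative stand-ins for the kernel's level definitions):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's paging-level constants. */
#define PT_PAGE_TABLE_LEVEL	1	/* leaf (last-level) page table */
#define PT32_ROOT_LEVEL		2	/* root level of a 32-bit non-PAE guest */

/* Mirrors the hunk's condition: zap a non-leaf entry unless this is a
 * large-page update on a guest whose paging mode can express it with a
 * single shadow entry. Leaf entries are always updated in place. */
static bool zap_instead_of_update(int level, bool largepage, int glevels)
{
	if (level != PT_PAGE_TABLE_LEVEL)
		return !largepage || glevels == PT32_ROOT_LEVEL;
	return false;
}

int main(void)
{
	/* Non-PAE 32-bit guest, large-page pde: zapped (prints 1). */
	printf("%d\n", zap_instead_of_update(2, true, PT32_ROOT_LEVEL));
	/* 64-bit guest (glevels 4), large-page pde: updated (prints 0). */
	printf("%d\n", zap_instead_of_update(2, true, 4));
	return 0;
}
```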