author     Lai Jiangshan <laijs@cn.fujitsu.com>      2010-11-19 17:03:22 +0800
committer  Avi Kivity <avi@redhat.com>               2011-01-12 11:29:46 +0200
commit     9bdbba13b8868815198e4fdbd484769ef76392d9
tree       83dbbc37752a9a1e15b68d2b2c045125c52cd188
parent     b330aa0c7df1ece45f45566c45ea44f01e379ab0
KVM: MMU: rename 'reset_host_protection' to 'host_writable'
Rename it to better reflect what it means: the flag records whether the host mapping is writable, rather than suggesting an action.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
 arch/x86/kvm/mmu.c         |  8 ++++----
 arch/x86/kvm/paging_tmpl.h | 10 +++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 29b2ec46bf1e..59104927c582 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1958,7 +1958,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync, bool reset_host_protection)
+		    bool can_unsync, bool host_writable)
 {
 	u64 spte, entry = *sptep;
 	int ret = 0;
@@ -1985,7 +1985,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
-	if (reset_host_protection)
+	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
@@ -2048,7 +2048,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
 			 pfn_t pfn, bool speculative,
-			 bool reset_host_protection)
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -2083,7 +2083,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, level, gfn, pfn, speculative, true,
-		      reset_host_protection)) {
+		      host_writable)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_mmu_flush_tlb(vcpu);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ca0e5e834724..57619ed4beee 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -329,7 +329,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 	kvm_get_pfn(pfn);
 	/*
-	 * we call mmu_set_spte() with reset_host_protection = true beacuse that
+	 * we call mmu_set_spte() with host_writable = true beacuse that
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -744,7 +744,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			    bool clear_unsync)
 {
 	int i, offset, nr_present;
-	bool reset_host_protection;
+	bool host_writable;
 	gpa_t first_pte_gpa;
 
 	offset = nr_present = 0;
@@ -794,14 +794,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
 			pte_access &= ~ACC_WRITE_MASK;
-			reset_host_protection = 0;
+			host_writable = 0;
 		} else {
-			reset_host_protection = 1;
+			host_writable = 1;
 		}
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
 			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
-			 reset_host_protection);
+			 host_writable);
 	}
 
 	return !nr_present;
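For context, a minimal standalone sketch of the idea behind the rename (not kernel code: the bit position, the ACC_WRITE_MASK value, and the helper names below are assumptions chosen so the example compiles on its own). The flag states a fact, "the host mapping is writable", which set_spte() records in a software-available SPTE bit; sync_page() later reads that bit back to decide whether guest write access may be kept. The old name read like an action the code never performs.

/* Standalone sketch of the flag's round trip; values are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPTE_HOST_WRITEABLE_BIT	(1ULL << 9)	/* assumed bit position */
#define ACC_WRITE_MASK		0x2u		/* assumed access bit */

/* Producer side (cf. set_spte): host_writable is simply recorded. */
static uint64_t sketch_set_spte(uint64_t spte, bool host_writable)
{
	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE_BIT;
	return spte;
}

/* Consumer side (cf. sync_page): recover host_writable from the stored
 * bit and strip guest write permission when the host mapping is
 * read-only. */
static unsigned sketch_sync_access(uint64_t spte, unsigned pte_access,
				   bool *host_writable)
{
	if (!(spte & SPTE_HOST_WRITEABLE_BIT)) {
		pte_access &= ~ACC_WRITE_MASK;
		*host_writable = false;
	} else {
		*host_writable = true;
	}
	return pte_access;
}

int main(void)
{
	bool host_writable;
	unsigned access;

	/* Writable host mapping: the bit survives the round trip. */
	access = sketch_sync_access(sketch_set_spte(0, true),
				    ACC_WRITE_MASK, &host_writable);
	printf("host_writable=%d access=%#x\n", host_writable, access);

	/* Read-only host mapping: guest write permission is dropped too. */
	access = sketch_sync_access(sketch_set_spte(0, false),
				    ACC_WRITE_MASK, &host_writable);
	printf("host_writable=%d access=%#x\n", host_writable, access);
	return 0;
}

Either way the stored bit means the same thing before and after the patch; the rename only makes the boolean's name match the state it records.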