author	Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>	2012-06-20 15:56:53 +0800
committer	Avi Kivity <avi@redhat.com>	2012-07-11 16:51:13 +0300
commit	2f84569f978cd7d54970d893b4c4e68ef24dc1ec (patch)
tree	cd14dfd05c6196207024ae066ba4b6de86f6daab /arch/x86/kvm/mmu.c
parent	a27685c33acccce91268ddef88d7896e3205fda5 (diff)
download	linux-2f84569f978cd7d54970d893b4c4e68ef24dc1ec.tar.bz2
KVM: MMU: return bool in __rmap_write_protect
The return value of __rmap_write_protect is either 1 or 0; use true/false instead.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
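In practice this lets callers treat the result as a flag directly. A minimal sketch of the resulting calling pattern (illustrative only, not part of the patch; the kvm_flush_remote_tlbs() call is just a plausible way to consume the flag):

	/* Write-protect the sptes for this gfn; true means something changed. */
	bool flush = rmap_write_protect(kvm, gfn);

	/* Only pay for a remote TLB flush if a write-protect actually happened. */
	if (flush)
		kvm_flush_remote_tlbs(kvm);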
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	13
1 file changed, 7 insertions, 6 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 569cd66ba24c..5dd22427251d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1050,11 +1050,12 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
-static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
+static bool
+__rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
 	u64 *sptep;
 	struct rmap_iterator iter;
-	int write_protected = 0;
+	bool write_protected = false;
 
 	for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
 		BUG_ON(!(*sptep & PT_PRESENT_MASK));
@@ -1075,7 +1076,7 @@ static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level
 			sptep = rmap_get_first(*rmapp, &iter);
 		}
 
-		write_protected = 1;
+		write_protected = true;
 	}
 
 	return write_protected;
@@ -1106,12 +1107,12 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	}
 }
 
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	int i;
-	int write_protected = 0;
+	bool write_protected = false;
 
 	slot = gfn_to_memslot(kvm, gfn);
 
@@ -1700,7 +1701,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
 	kvm_mmu_pages_init(parent, &parents, &pages);
 	while (mmu_unsync_walk(parent, &pages)) {
-		int protected = 0;
+		bool protected = false;
 
 		for_each_sp(pages, sp, parents, i)
 			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);