| field | value | date |
|---|---|---|
| author | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2010-04-12 19:35:35 +0900 |
| committer | Avi Kivity <avi@redhat.com> | 2010-04-20 13:06:55 +0300 |
| commit | 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d (patch) | |
| tree | ae8ce63cecab98c036c0d76422de42cf78e042f4 /virt | |
| parent | 77662e0028c7c63e34257fda03ff9625c59d939d (diff) | |
| download | linux-87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d.tar.bz2 | |
KVM: fix the handling of dirty bitmaps to avoid overflows
Int is not long enough to store the size of a dirty bitmap.
This patch fixes this problem with the introduction of a wrapper
function to calculate the sizes of dirty bitmaps.
Note: in mark_page_dirty(), we have to consider the fact that
  __set_bit() takes the offset as int, not long.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
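
The kvm_dirty_bitmap_bytes() wrapper the message refers to is introduced outside virt/, so it does not appear in the diffstat below. Judging purely from the ALIGN() expression it replaces in the '-' lines, a sketch of such a wrapper would look roughly like this (a reconstruction, not the committed definition):

```c
/*
 * Sketch only: reconstructed from the expression the wrapper replaces in
 * the diff below; the real definition is added outside the virt/ portion
 * of this commit.
 */
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	/* round the page count up to a whole number of longs, then to bytes */
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
```

Returning the size as unsigned long is what removes the int overflow for very large memory slots.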
Diffstat (limited to 'virt')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | virt/kvm/kvm_main.c | 13 |
1 files changed, 8 insertions, 5 deletions
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5a0cd194dce0..364daacafb58 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -648,7 +648,7 @@ skip_lpage:
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
 
 		new.dirty_bitmap = vmalloc(dirty_bytes);
 		if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	int n;
+	unsigned long n;
 	unsigned long any = 0;
 
 	r = -EINVAL;
@@ -780,7 +780,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 
 	for (i = 0; !any && i < n/sizeof(long); ++i)
 		any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
+		unsigned long *p = memslot->dirty_bitmap +
+					rel_gfn / BITS_PER_LONG;
+		int offset = rel_gfn % BITS_PER_LONG;
 
 		/* avoid RMW */
-		if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-			generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+		if (!generic_test_le_bit(offset, p))
+			generic___set_le_bit(offset, p);
 	}
 }
```
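
The note about __set_bit() taking an int is what drives the last hunk: rel_gfn is an unsigned long, so it is split into a word pointer (rel_gfn / BITS_PER_LONG) and a small bit offset (rel_gfn % BITS_PER_LONG) that always fits in an int. A minimal standalone illustration of that split, using hypothetical names and a plain OR in place of the kernel's little-endian bit helpers:

```c
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Hypothetical stand-in for the split done in mark_page_dirty(): advance
 * the word pointer first so the remaining bit offset is always less than
 * BITS_PER_LONG and therefore safe to pass around as an int. */
static void set_dirty_bit(unsigned long *bitmap, unsigned long rel_gfn)
{
	unsigned long *p = bitmap + rel_gfn / BITS_PER_LONG;
	int offset = rel_gfn % BITS_PER_LONG;

	*p |= 1UL << offset;	/* plain OR instead of generic___set_le_bit(offset, p) */
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };

	set_dirty_bit(bitmap, 130);	/* word 2, bit 2 on a 64-bit build */
	printf("%lx %lx %lx %lx\n", bitmap[0], bitmap[1], bitmap[2], bitmap[3]);
	return 0;
}
```

This way the value handed to the bit helpers stays bounded by BITS_PER_LONG no matter how large the memory slot is.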