author    Marcelo Tosatti <mtosatti@redhat.com>    2009-12-23 14:35:16 -0200
committer Marcelo Tosatti <mtosatti@redhat.com>    2010-03-01 12:35:43 -0300
commit    46a26bf55714c1e2f17e34683292a389acb8e601
tree      3df70225ce6a076d7e4be604a5d72465383043ee /virt
parent    2044892d4a005a78796c92fd1aef4633be896698
KVM: modify memslots layout in struct kvm
Have a pointer to an allocated region inside struct kvm.

[alex: fix ppc book 3s]

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
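The header side of the change (struct kvm and the new struct kvm_memslots) is outside this virt/-limited diffstat. A rough sketch of the layout implied by the hunks below, with field names taken from the slots->nmemslots / slots->memslots[i] accesses and the array bound an assumption based on the existing KVM_MEMORY_SLOTS limit:

    struct kvm_memslots {
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];  /* exact bound assumed */
    };

    struct kvm {
        /* ... */
        struct kvm_memslots *memslots;  /* was: nmemslots + inline slot array */
        /* ... */
    };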
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/iommu.c    | 18
-rw-r--r--  virt/kvm/kvm_main.c | 36
2 files changed, 35 insertions(+), 19 deletions(-)
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 15147583abd1..bc697a66a883 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -76,10 +76,13 @@ unmap_pages:
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, r = 0;
+ struct kvm_memslots *slots;
- for (i = 0; i < kvm->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; i++) {
+ r = kvm_iommu_map_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
if (r)
break;
}
@@ -210,10 +213,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots;
+
+ slots = kvm->memslots;
- for (i = 0; i < kvm->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
}
return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bc23b8e0609b..86dd8f3d29c9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -375,12 +375,16 @@ static struct kvm *kvm_create_vm(void)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
+ r = -ENOMEM;
+ kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!kvm->memslots)
+ goto out_err;
+
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- r = -ENOMEM;
+ if (!page)
goto out_err;
- }
+
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
@@ -416,6 +420,7 @@ out:
out_err:
hardware_disable_all();
out_err_nodisable:
+ kfree(kvm->memslots);
kfree(kvm);
return ERR_PTR(r);
}
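A note on the error handling above: the new kfree(kvm->memslots) sits under out_err_nodisable, which out_err falls through to, so the allocation is released whichever later step fails; and because kfree() of a NULL pointer is a no-op, the call is also harmless when the kzalloc itself failed (assuming the freshly created struct kvm starts out zeroed). A minimal userspace sketch of the same goto-cleanup pattern, with illustrative names only:

    #include <stdlib.h>

    struct vm {
        void *memslots;
        void *mmio_ring;
    };

    struct vm *create_vm(void)
    {
        /* calloc() zeroes the struct, so unset pointers are NULL (like kzalloc) */
        struct vm *vm = calloc(1, sizeof(*vm));

        if (!vm)
            return NULL;

        vm->memslots = calloc(1, 4096);
        if (!vm->memslots)
            goto out_err;

        vm->mmio_ring = calloc(1, 4096);
        if (!vm->mmio_ring)
            goto out_err;       /* memslots is still released below */

        return vm;

    out_err:
        free(vm->memslots);     /* free(NULL) is a no-op */
        free(vm);
        return NULL;
    }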
@@ -450,9 +455,12 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; ++i)
+ kvm_free_physmem_slot(&slots->memslots[i], NULL);
- for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+ kfree(kvm->memslots);
}
static void kvm_destroy_vm(struct kvm *kvm)
@@ -533,7 +541,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- memslot = &kvm->memslots[mem->slot];
+ memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
@@ -554,7 +562,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots[i];
+ struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
@@ -656,8 +664,8 @@ skip_lpage:
kvm_arch_flush_shadow(kvm);
spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->nmemslots)
- kvm->nmemslots = mem->slot + 1;
+ if (mem->slot >= kvm->memslots->nmemslots)
+ kvm->memslots->nmemslots = mem->slot + 1;
*memslot = new;
spin_unlock(&kvm->mmu_lock);
@@ -727,7 +735,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -781,9 +789,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
- for (i = 0; i < kvm->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ for (i = 0; i < slots->nmemslots; ++i) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -802,10 +811,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
gfn = unalias_gfn(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)