author | Ingo Molnar <mingo@kernel.org> | 2014-12-12 09:09:03 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2014-12-12 09:09:03 +0100
commit | 3459f0d78ffe27a1b341c22eb158b622eaaea3fc (patch)
tree | 715b0575eec541d0181876ad367ca5480cdcecf3 /virt/kvm
parent | 9fc81d87420d0d3fd62d5e5529972c0ad9eab9cc (diff)
parent | bee2782f30f66898be3f74ad02e4d1f87a969694 (diff)
download | linux-3459f0d78ffe27a1b341c22eb158b622eaaea3fc.tar.bz2
Merge branch 'linus' into perf/urgent, to pick up the upstream merged bits
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r-- | virt/kvm/arm/vgic.c | 8
-rw-r--r-- | virt/kvm/kvm_main.c | 16
2 files changed, 12 insertions, 12 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3aaca49de325..aacdb59f30de 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-	int i, vcpu_lock_idx = -1, ret = 0;
+	int i, vcpu_lock_idx = -1, ret;
 	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
 	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
 	 * that no other VCPUs are run while we create the vgic.
 	 */
+	ret = -EBUSY;
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (!mutex_trylock(&vcpu->mutex))
 			goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
 	}
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once) {
-			ret = -EBUSY;
+		if (vcpu->arch.has_run_once)
 			goto out_unlock;
-		}
 	}
+	ret = 0;
 
 	spin_lock_init(&kvm->arch.vgic.lock);
 	kvm->arch.vgic.in_kernel = true;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 25ffac9e947d..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
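
Note on the vgic.c hunks: the restructuring changes what kvm_vgic_create() returns when it fails to take every vcpu->mutex. Previously ret stayed 0 on that early exit, so a failed creation could report success; now -EBUSY is set before the trylock loop and only cleared to 0 once all checks pass. The standalone userspace sketch below models that control flow; pthread mutexes stand in for vcpu->mutex, and all names are illustrative, not kernel API.

```c
/*
 * Sketch (not kernel code) of the error-path fix in kvm_vgic_create():
 * set ret to -EBUSY *before* the trylock loop, so bailing out of the
 * loop reports failure instead of the stale 0 the old code carried.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NR_VCPUS 4

static pthread_mutex_t vcpu_mutex[NR_VCPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static int mock_vgic_create(void)
{
	int i, vcpu_lock_idx = -1, ret;

	/* Fail with -EBUSY unless every vcpu mutex can be taken. */
	ret = -EBUSY;
	for (i = 0; i < NR_VCPUS; i++) {
		if (pthread_mutex_trylock(&vcpu_mutex[i]) != 0)
			goto out_unlock;
		vcpu_lock_idx = i;
	}

	/* ... the "has this vcpu run" checks would go here ... */
	ret = 0;	/* all checks passed: creation succeeds */

out_unlock:
	/* Release only the mutexes we actually acquired. */
	for (i = vcpu_lock_idx; i >= 0; i--)
		pthread_mutex_unlock(&vcpu_mutex[i]);
	return ret;
}

int main(void)
{
	printf("uncontended: %d\n", mock_vgic_create());	/* 0 */

	pthread_mutex_lock(&vcpu_mutex[2]);	/* simulate a busy vcpu */
	printf("contended:   %d\n", mock_vgic_create());	/* -EBUSY */
	pthread_mutex_unlock(&vcpu_mutex[2]);
	return 0;
}
```

Setting the error code once before the loop lets every bail-out path share the single out_unlock label without repeating the assignment at each failure site.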
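Note on the kvm_main.c hunks: the rename from kvm_is_mmio_pfn() to kvm_is_reserved_pfn() matches what the predicate actually tests, namely whether the backing page is reserved and therefore exempt from KVM's refcounting; dropping the is_zero_pfn() special case means the (reserved) zero page is now exempt as well. A minimal userspace model of that invariant follows; struct page, pfn_t, and pfn_to_page() are simplified mocks, not the kernel's definitions.

```c
/*
 * Userspace model (illustrative only) of why the kvm_main.c helpers
 * key off "reserved" rather than "MMIO": reserved pages (MMIO ranges,
 * the zero page, ...) are not refcounted by KVM, so every get/put
 * helper must take the same branch on the same predicate.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

struct page {
	bool reserved;
	int refcount;
};

/* Tiny fake memory map: pfn 0 = normal RAM, pfn 1 = a reserved page. */
static struct page mock_pages[2] = {
	{ .reserved = false, .refcount = 1 },
	{ .reserved = true,  .refcount = 1 },
};

static struct page *pfn_to_page(pfn_t pfn)
{
	return &mock_pages[pfn];
}

/* After the rename: one question, "does KVM refcount this pfn?" */
static bool kvm_is_reserved_pfn(pfn_t pfn)
{
	return pfn_to_page(pfn)->reserved;
}

static void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_reserved_pfn(pfn))
		pfn_to_page(pfn)->refcount--;	/* stands in for put_page() */
}

int main(void)
{
	kvm_release_pfn_clean(0);	/* normal page: refcount drops */
	kvm_release_pfn_clean(1);	/* reserved page: left untouched */
	printf("pfn 0 refcount: %d\n", mock_pages[0].refcount);	/* 0 */
	printf("pfn 1 refcount: %d\n", mock_pages[1].refcount);	/* 1 */
	return 0;
}
```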