From 5233009fab8e0a037c38a8c2b28ba4b3df203935 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:01 -0800 Subject: KVM: MIPS: Use kvm_vcpu_cache to allocate vCPUs For reasons unknown, MIPS configures the vCPU allocation cache but allocates vCPUs via kzalloc(). Allocate from the vCPU cache in preparation for moving vCPU allocation to common KVM code. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 1109924560d8..5f985773417c 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -286,7 +286,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) void *gebase, *p, *handler, *refill_start, *refill_end; int i; - struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); + struct kvm_vcpu *vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) { err = -ENOMEM; @@ -401,7 +401,7 @@ out_uninit_cpu: kvm_vcpu_uninit(vcpu); out_free_cpu: - kfree(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); out: return ERR_PTR(err); @@ -418,7 +418,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); - kfree(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 47d51e5eb5fe0bd7440cf7f1217936e2d85c63cf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:02 -0800 Subject: KVM: MIPS: Drop kvm_arch_vcpu_free() Remove the superfluous kvm_arch_vcpu_free() as it is no longer called from common KVM code. Note, kvm_arch_vcpu_destroy() *is* called from common code, i.e. choosing which function to whack is not completely arbitrary. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 5f985773417c..d72bceb10439 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -156,7 +156,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm) struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { - kvm_arch_vcpu_free(vcpu); + kvm_arch_vcpu_destroy(vcpu); } mutex_lock(&kvm->lock); @@ -407,7 +407,7 @@ out: return ERR_PTR(err); } -void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.comparecount_timer); @@ -421,11 +421,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_mips_dump_stats(vcpu); kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); kmem_cache_free(kvm_vcpu_cache, vcpu); } -void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_arch_vcpu_free(vcpu); -} - int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { -- cgit v1.2.3 From 897cc38eaab96d006ab17edd0f50a2f432f584cf Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:09 -0800 Subject: KVM: Add kvm_arch_vcpu_precreate() to handle pre-allocation issues Add a pre-allocation arch hook to handle checks that are currently done by arch specific code prior to allocating the vCPU object. This paves the way for moving the allocation to common KVM code. 
Acked-by: Christoffer Dall Signed-off-by: Sean Christopherson Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 5 +++++ arch/powerpc/kvm/powerpc.c | 5 +++++ arch/s390/kvm/kvm-s390.c | 12 ++++++++---- arch/x86/kvm/x86.c | 14 +++++++++----- include/linux/kvm_host.h | 1 + virt/kvm/arm/arm.c | 21 +++++++++++---------- virt/kvm/kvm_main.c | 4 ++++ 7 files changed, 43 insertions(+), 19 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index d72bceb10439..2e14455aec4e 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -280,6 +280,11 @@ static inline void dump_handler(const char *symbol, void *start, void *end) pr_debug("\tEND(%s)\n", symbol); } +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err, size; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index a2ba708f5cec..998ef60ac463 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -720,6 +720,11 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, kvmppc_core_flush_memslot(kvm, slot); } +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index d9e6bf3d54f0..57c6838dff37 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -3035,15 +3035,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) return rc; } +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) + return -EINVAL; + return 0; +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; struct sie_page *sie_page; - int rc = -EINVAL; - - if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) - goto out; + int rc; rc = -ENOMEM; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8188d6cac588..661e3c40529f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9170,17 +9170,21 @@ static void fx_init(struct kvm_vcpu *vcpu) vcpu->arch.cr0 |= X86_CR0_ET; } +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) + pr_warn_once("kvm: SMP vm created on host with unstable TSC; " + "guest TSC will not be reliable\n"); + + return 0; +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; int r; - if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) - printk_once(KERN_WARNING - "kvm: SMP vm created on host with unstable TSC; " - "guest TSC will not be reliable\n"); - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); if (!vcpu) return ERR_PTR(-ENOMEM); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 4f7c8e2f378d..59ac53423361 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -874,6 +874,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arm.c 
b/virt/kvm/arm/arm.c index adae134cec59..af3ce2bb370d 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -279,21 +279,22 @@ void kvm_arch_free_vm(struct kvm *kvm) vfree(kvm); } +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) + return -EBUSY; + + if (id >= kvm->arch.max_vcpus) + return -EINVAL; + + return 0; +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err; struct kvm_vcpu *vcpu; - if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { - err = -EBUSY; - goto out; - } - - if (id >= kvm->arch.max_vcpus) { - err = -EINVAL; - goto out; - } - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) { err = -ENOMEM; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 483c683408a2..7b52207f829c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2728,6 +2728,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) kvm->created_vcpus++; mutex_unlock(&kvm->lock); + r = kvm_arch_vcpu_precreate(kvm, id); + if (r) + goto vcpu_decrement; + vcpu = kvm_arch_vcpu_create(kvm, id); if (IS_ERR(vcpu)) { r = PTR_ERR(vcpu); -- cgit v1.2.3 From aaf532c57927982b2523994698f994255a722f5f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:12 -0800 Subject: KVM: MIPS: Invoke kvm_vcpu_uninit() immediately prior to freeing vcpu Move the call to kvm_vcpu_uninit() in kvm_arch_vcpu_destroy() down a few lines so that it is invoked immediately prior to freeing the vCPU. This paves the way for moving the uninit and free sequence to common KVM code without an associated functional change. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 2e14455aec4e..73360e021259 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -416,13 +416,13 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.comparecount_timer); - kvm_vcpu_uninit(vcpu); - kvm_mips_dump_stats(vcpu); kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); + + kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vcpu); } -- cgit v1.2.3 From 4543bdc08857e8026475a477e7ba88e461f38271 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:14 -0800 Subject: KVM: Introduce kvm_vcpu_destroy() Add kvm_vcpu_destroy() and wire up all architectures to call the common function instead of their arch specific implementation. The common destruction function will be used by future patches to move allocation and initialization of vCPUs to common KVM code, i.e. to free resources that are allocated by arch agnostic code. No functional change intended. 
Acked-by: Christoffer Dall Signed-off-by: Sean Christopherson Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/x86/kvm/x86.c | 2 +- include/linux/kvm_host.h | 1 + virt/kvm/arm/arm.c | 2 +- virt/kvm/kvm_main.c | 6 ++++++ 7 files changed, 12 insertions(+), 5 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 73360e021259..8546bc6e09e7 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -156,7 +156,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm) struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { - kvm_arch_vcpu_destroy(vcpu); + kvm_vcpu_destroy(vcpu); } mutex_lock(&kvm->lock); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 998ef60ac463..e3e2b88d3d8b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -475,7 +475,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) #endif kvm_for_each_vcpu(i, vcpu, kvm) - kvm_arch_vcpu_destroy(vcpu); + kvm_vcpu_destroy(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 1f8ba074cbd6..8543d338a06a 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2541,7 +2541,7 @@ static void kvm_free_vcpus(struct kvm *kvm) struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) - kvm_arch_vcpu_destroy(vcpu); + kvm_vcpu_destroy(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 335762a17180..42b9149f6b40 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9680,7 +9680,7 @@ static void kvm_free_vcpus(struct kvm *kvm) kvm_unload_vcpu_mmu(vcpu); } kvm_for_each_vcpu(i, vcpu, kvm) - kvm_arch_vcpu_destroy(vcpu); + kvm_vcpu_destroy(vcpu); mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 59ac53423361..432827ab7623 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -581,6 +581,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); +void kvm_vcpu_destroy(struct kvm_vcpu *vcpu); void vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index af3ce2bb370d..0d8fb6973414 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -194,7 +194,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (kvm->vcpus[i]) { - kvm_arch_vcpu_destroy(kvm->vcpus[i]); + kvm_vcpu_destroy(kvm->vcpus[i]); kvm->vcpus[i] = NULL; } } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7b52207f829c..62ba25e44189 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -375,6 +375,12 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); +void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_destroy(vcpu); +} +EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); + #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) { -- cgit v1.2.3 From e529ef66e6b53b34f9b8caac55950c8a55c79dac Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:15 -0800 Subject: KVM: Move vcpu alloc and init 
invocation to common code Now that all architectures tightly couple vcpu allocation/free with the mandatory calls to kvm_{un}init_vcpu(), move the sequences verbatim to common KVM code. Move both allocation and initialization in a single patch to eliminate thrash in arch specific code. The bisection benefits of moving the two pieces in separate patches is marginal at best, whereas the odds of introducing a transient arch specific bug are non-zero. Acked-by: Christoffer Dall Signed-off-by: Sean Christopherson Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 33 ++++++--------------------------- arch/powerpc/kvm/powerpc.c | 27 ++++----------------------- arch/s390/kvm/kvm-s390.c | 41 ++++++++++------------------------------- arch/x86/kvm/x86.c | 28 ++-------------------------- include/linux/kvm_host.h | 2 +- virt/kvm/arm/arm.c | 29 ++--------------------------- virt/kvm/kvm_main.c | 21 ++++++++++++++++++--- 7 files changed, 43 insertions(+), 138 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 8546bc6e09e7..92c9321b3f95 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -285,25 +285,14 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) return 0; } -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { int err, size; void *gebase, *p, *handler, *refill_start, *refill_end; int i; - struct kvm_vcpu *vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - - if (!vcpu) { - err = -ENOMEM; - goto out; - } - - err = kvm_vcpu_init(vcpu, kvm, id); - - if (err) - goto out_free_cpu; - - kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); + kvm_debug("kvm @ %p: create cpu %d at %p\n", + vcpu->kvm, vcpu->vcpu_id, vcpu); /* * Allocate space for host mode exception handlers that handle @@ -318,7 +307,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (!gebase) { err = -ENOMEM; - goto out_uninit_cpu; + goto out; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); @@ -397,19 +386,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_exec_cpu = -1; - return vcpu; + return 0; out_free_gebase: kfree(gebase); - -out_uninit_cpu: - kvm_vcpu_uninit(vcpu); - -out_free_cpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); - out: - return ERR_PTR(err); + return err; } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -421,9 +403,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); - - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index e3e2b88d3d8b..fce1b4776e55 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -725,32 +725,17 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) return 0; } -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu; int err; - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) - return ERR_PTR(-ENOMEM); - - err = kvm_vcpu_init(vcpu, kvm, id); - if (err) - goto free_vcpu; - err = kvmppc_core_vcpu_create(vcpu); if (err) - goto uninit_vcpu; + return err; vcpu->arch.wqp = 
&vcpu->wq; - kvmppc_create_vcpu_debugfs(vcpu, id); - return vcpu; - -uninit_vcpu: - kvm_vcpu_uninit(vcpu); -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); - return ERR_PTR(err); + kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id); + return 0; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -780,10 +765,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) } kvmppc_core_vcpu_free(vcpu); - - kvm_vcpu_uninit(vcpu); - - kmem_cache_free(kvm_vcpu_cache, vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8543d338a06a..9cba1e5d033b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2530,9 +2530,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.use_cmma) kvm_s390_vcpu_unsetup_cmma(vcpu); free_page((unsigned long)(vcpu->arch.sie_block)); - - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); } static void kvm_free_vcpus(struct kvm *kvm) @@ -3014,29 +3011,15 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) return 0; } -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, - unsigned int id) +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu; struct sie_page *sie_page; int rc; - rc = -ENOMEM; - - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) - goto out; - - rc = kvm_vcpu_init(vcpu, kvm, id); - if (rc) - goto out_free_cpu; - - rc = -ENOMEM; - BUILD_BUG_ON(sizeof(struct sie_page) != 4096); sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); if (!sie_page) - goto out_uninit_vcpu; + return -ENOMEM; vcpu->arch.sie_block = &sie_page->sie_block; vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; @@ -3045,9 +3028,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->mso = 0; vcpu->arch.sie_block->msl = sclp.hamax; - vcpu->arch.sie_block->icpua = id; + vcpu->arch.sie_block->icpua = vcpu->vcpu_id; spin_lock_init(&vcpu->arch.local_int.lock); - vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin; + vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; if (vcpu->arch.sie_block->gd && sclp.has_gisaf) vcpu->arch.sie_block->gd |= GISA_FORMAT1; seqcount_init(&vcpu->arch.cputm_seqcount); @@ -3083,19 +3066,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, goto out_free_sie_block; } - VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu, - vcpu->arch.sie_block); - trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); + VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", + vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); + trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); + + return 0; - return vcpu; out_free_sie_block: free_page((unsigned long)(vcpu->arch.sie_block)); -out_uninit_vcpu: - kvm_vcpu_uninit(vcpu); -out_free_cpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); -out: - return ERR_PTR(rc); + return rc; } int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 42b9149f6b40..7bbde6f658bf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9179,30 +9179,9 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) return 0; } -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, - unsigned int id) +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu; - int r; - - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); - if (!vcpu) - return ERR_PTR(-ENOMEM); - - r = 
kvm_vcpu_init(vcpu, kvm, id); - if (r) - goto free_vcpu; - - r = kvm_x86_ops->vcpu_create(vcpu); - if (r) - goto uninit_vcpu; - return vcpu; - -uninit_vcpu: - kvm_vcpu_uninit(vcpu); -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); - return ERR_PTR(r); + return kvm_x86_ops->vcpu_create(vcpu); } int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) @@ -9254,9 +9233,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); - - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); } void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 432827ab7623..405ea07068f1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -876,7 +876,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 0d8fb6973414..a7d661fc5683 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -290,32 +290,9 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) return 0; } -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { - int err; - struct kvm_vcpu *vcpu; - - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) { - err = -ENOMEM; - goto out; - } - - err = kvm_vcpu_init(vcpu, kvm, id); - if (err) - goto free_vcpu; - - err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); - if (err) - goto vcpu_uninit; - - return vcpu; -vcpu_uninit: - kvm_vcpu_uninit(vcpu); -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); -out: - return ERR_PTR(err); + return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -330,8 +307,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kvm_timer_vcpu_terminate(vcpu); kvm_pmu_vcpu_destroy(vcpu); - kvm_vcpu_uninit(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 62ba25e44189..c84df40518c4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -378,6 +378,9 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); void kvm_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_destroy(vcpu); + + kvm_vcpu_uninit(vcpu); + kmem_cache_free(kvm_vcpu_cache, vcpu); } EXPORT_SYMBOL_GPL(kvm_vcpu_destroy); @@ -2738,12 +2741,20 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) if (r) goto vcpu_decrement; - vcpu = kvm_arch_vcpu_create(kvm, id); - if (IS_ERR(vcpu)) { - r = PTR_ERR(vcpu); + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + if (!vcpu) { + r = -ENOMEM; goto vcpu_decrement; } + r = kvm_vcpu_init(vcpu, kvm, id); + if (r) + goto vcpu_free; + + r = kvm_arch_vcpu_create(vcpu); + if (r) + goto vcpu_uninit; + preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); r = kvm_arch_vcpu_setup(vcpu); @@ -2787,6 +2798,10 @@ unlock_vcpu_destroy: 
debugfs_remove_recursive(vcpu->debugfs_dentry); vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); +vcpu_uninit: + kvm_vcpu_uninit(vcpu); +vcpu_free: + kmem_cache_free(kvm_vcpu_cache, vcpu); vcpu_decrement: mutex_lock(&kvm->lock); kvm->created_vcpus--; -- cgit v1.2.3 From 5259878432098ffd26cef7294b0a85ab5cfaf556 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:19 -0800 Subject: KVM: MIPS: Move .vcpu_setup() call to kvm_arch_vcpu_create() Fold setup() into create() now that the two are called back-to-back by common KVM code. This paves the way for removing kvm_arch_vcpu_setup(). Note, there is no unwind function associated with kvm_arch_vcpu_setup(), i.e. no teardown path that also needs to be moved. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 92c9321b3f95..b3a4435af66b 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -386,8 +386,15 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu->arch.last_sched_cpu = -1; vcpu->arch.last_exec_cpu = -1; + /* Initial guest state */ + err = kvm_mips_callbacks->vcpu_setup(vcpu); + if (err) + goto out_free_commpage; + return 0; +out_free_commpage: + kfree(vcpu->arch.kseg0_commpage); out_free_gebase: kfree(gebase); out: @@ -1237,10 +1244,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, return 0; } -/* Initial guest state */ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - return kvm_mips_callbacks->vcpu_setup(vcpu); + return 0; } static void kvm_mips_set_c0_status(void) -- cgit v1.2.3 From afede96df55e9cba948c8cc8a682e962244285b4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:22 -0800 Subject: KVM: Drop kvm_arch_vcpu_setup() Remove kvm_arch_vcpu_setup() now that all arch specific implementations are nops. 
Acked-by: Christoffer Dall Signed-off-by: Sean Christopherson Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/arm/kvm/guest.c | 5 ----- arch/arm64/kvm/guest.c | 5 ----- arch/mips/kvm/mips.c | 5 ----- arch/powerpc/kvm/book3s.c | 5 ----- arch/powerpc/kvm/booke.c | 5 ----- arch/s390/kvm/kvm-s390.c | 5 ----- arch/x86/kvm/x86.c | 5 ----- include/linux/kvm_host.h | 1 - virt/kvm/kvm_main.c | 5 ----- 9 files changed, 41 deletions(-) (limited to 'arch/mips') diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 0e6f23504c26..9f7ae0d8690f 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -34,11 +34,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - static u64 core_reg_offset_from_id(u64 id) { return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 2fff06114a8f..2bd92301d32f 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -47,11 +47,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - static bool core_reg_offset_is_vreg(u64 off) { return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) && diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index b3a4435af66b..06366e2415a6 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1244,11 +1244,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, return 0; } -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - static void kvm_mips_set_c0_status(void) { u32 status = read_c0_status(); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 3f7adcb0ff63..d07a8e12fa15 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -471,11 +471,6 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(kvmppc_load_last_inst); -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) { return 0; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index e291c62187fe..9cb8257b4118 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -1377,11 +1377,6 @@ static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr) update_timer_ints(vcpu); } -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) { /* setup watchdog timer once */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index dca3d6aac2bb..a1bb47c7ba1e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2931,11 +2931,6 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; } -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu) { int rc = 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e4a446990306..827d5fcba7a5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9197,11 +9197,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) return 0; } -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { struct msr_data msr; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 521f17cd2b26..87ca40f62b06 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -873,7 +873,6 @@ void 
kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu); -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1ddb6d4cfbfd..8e9d24442d20 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2753,10 +2753,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) if (r) goto vcpu_uninit; - r = kvm_arch_vcpu_setup(vcpu); - if (r) - goto vcpu_destroy; - kvm_create_vcpu_debugfs(vcpu); mutex_lock(&kvm->lock); @@ -2792,7 +2788,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) unlock_vcpu_destroy: mutex_unlock(&kvm->lock); debugfs_remove_recursive(vcpu->debugfs_dentry); -vcpu_destroy: kvm_arch_vcpu_destroy(vcpu); vcpu_uninit: kvm_vcpu_uninit(vcpu); -- cgit v1.2.3 From d11dfed5d700b8973d5742300e04b2aaa9d11217 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:24 -0800 Subject: KVM: MIPS: Move all vcpu init code into kvm_arch_vcpu_create() Fold init() into create() now that the two are called back-to-back by common KVM code (kvm_vcpu_init() calls kvm_arch_vcpu_init() as its last action, and kvm_vm_ioctl_create_vcpu() calls kvm_arch_vcpu_create() immediately thereafter). Rinse and repeat for kvm_arch_vcpu_uninit() and kvm_arch_vcpu_destroy(). This paves the way for removing kvm_arch_vcpu_{un}init() entirely. No functional change intended. Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 06366e2415a6..879a7cbd5b54 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -294,6 +294,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) kvm_debug("kvm @ %p: create cpu %d at %p\n", vcpu->kvm, vcpu->vcpu_id, vcpu); + err = kvm_mips_callbacks->vcpu_init(vcpu); + if (err) + return err; + + hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; + /* * Allocate space for host mode exception handlers that handle * guest mode exits @@ -307,7 +315,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (!gebase) { err = -ENOMEM; - goto out; + goto out_uninit_vcpu; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); @@ -397,7 +405,8 @@ out_free_commpage: kfree(vcpu->arch.kseg0_commpage); out_free_gebase: kfree(gebase); -out: +out_uninit_vcpu: + kvm_mips_callbacks->vcpu_uninit(vcpu); return err; } @@ -410,6 +419,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_mmu_free_memory_caches(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); + + kvm_mips_callbacks->vcpu_uninit(vcpu); } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, @@ -1221,21 +1232,12 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { - int err; - - err = kvm_mips_callbacks->vcpu_init(vcpu); - if (err) - return err; - - hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL); - vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; return 0; } void 
kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { - kvm_mips_callbacks->vcpu_uninit(vcpu); + } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From ddd259c9aaba08244dba8877687ee856f79c4f45 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 18 Dec 2019 13:55:28 -0800 Subject: KVM: Drop kvm_arch_vcpu_init() and kvm_arch_vcpu_uninit() Remove kvm_arch_vcpu_init() and kvm_arch_vcpu_uninit() now that all arch specific implementations are nops. Acked-by: Christoffer Dall Signed-off-by: Sean Christopherson Reviewed-by: Cornelia Huck Signed-off-by: Paolo Bonzini --- arch/arm/include/asm/kvm_host.h | 1 - arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/reset.c | 5 ----- arch/mips/kvm/mips.c | 10 ---------- arch/powerpc/kvm/powerpc.c | 10 ---------- arch/s390/include/asm/kvm_host.h | 1 - arch/s390/kvm/kvm-s390.c | 5 ----- arch/x86/kvm/x86.c | 10 ---------- include/linux/kvm_host.h | 3 --- virt/kvm/arm/arm.c | 5 ----- virt/kvm/kvm_main.c | 16 ++-------------- 11 files changed, 2 insertions(+), 65 deletions(-) (limited to 'arch/mips') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index de81dd897a30..e26cad6d11b3 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -363,7 +363,6 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); static inline bool kvm_arch_requires_vhe(void) { return false; } static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {} diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6402b2de1844..8ab62944e514 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -54,7 +54,6 @@ int kvm_arm_init_sve(void); int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index ff3512a0ca97..30b7ea680f66 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -204,11 +204,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) return true; } -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - -} - void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) { kfree(vcpu->arch.sve_state); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 879a7cbd5b54..2606f3f02b54 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1230,16 +1230,6 @@ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) return kvm_mips_count_timeout(vcpu); } -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - -} - int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4fbf8690b8c5..1af96fb5dc6f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -801,16 +801,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 
return kvmppc_core_pending_dec(vcpu); } -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - -} - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_BOOKE diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 02f4c21c57f6..11ecc4071a29 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -914,7 +914,6 @@ extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index a1bb47c7ba1e..8646c99217f2 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2700,11 +2700,6 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS; } -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - /* needs disabled preemption to protect from TOD sync and vcpu_load/put */ static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4469617adfd0..985066e1bda5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9575,16 +9575,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) struct static_key kvm_no_apic_vcpu __read_mostly; EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu); -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ - -} - void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) { struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 87ca40f62b06..a654cf6df078 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -864,9 +864,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); int kvm_arch_init(void *opaque); void kvm_arch_exit(void); -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); - void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 937b4c7fb5be..1cfc108eca1e 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -360,11 +360,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) preempt_enable(); } -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - return 0; -} - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 8e9d24442d20..6b496038cd7f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -324,7 +324,6 @@ void kvm_reload_remote_mmus(struct kvm *kvm) static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) { struct page *page; - int r; mutex_init(&vcpu->mutex); vcpu->cpu = -1; @@ -338,10 +337,8 @@ static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) INIT_LIST_HEAD(&vcpu->blocked_vcpu_list); page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - r = -ENOMEM; - goto fail; - } + if (!page) + return -ENOMEM; vcpu->run = page_address(page); kvm_vcpu_set_in_spin_loop(vcpu, false); @@ 
-350,15 +347,7 @@ static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) vcpu->ready = false; preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops); - r = kvm_arch_vcpu_init(vcpu); - if (r < 0) - goto fail_free_run; return 0; - -fail_free_run: - free_page((unsigned long)vcpu->run); -fail: - return r; } static void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) @@ -369,7 +358,6 @@ static void kvm_vcpu_uninit(struct kvm_vcpu *vcpu) * descriptors are already gone. */ put_pid(rcu_dereference_protected(vcpu->pid, 1)); - kvm_arch_vcpu_uninit(vcpu); free_page((unsigned long)vcpu->run); } -- cgit v1.2.3
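
Taken together, the series leaves vCPU allocation, initialization, teardown, and freeing in common KVM code, with the per-arch work concentrated in kvm_arch_vcpu_precreate(), kvm_arch_vcpu_create(), and kvm_arch_vcpu_destroy(). The sketch below condenses the end state of the virt/kvm/kvm_main.c paths as reconstructed from the hunks above; it is an illustration only, with the debugfs setup, vCPU fd creation, kvm->vcpus[] publication, and the locking around kvm->created_vcpus trimmed, so it is not the verbatim upstream code.

	/*
	 * Condensed sketch of the common vCPU lifecycle after this series
	 * (reconstructed from the virt/kvm/kvm_main.c hunks above; error
	 * labels, locking and fd/debugfs handling are trimmed).
	 */
	static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
	{
		struct kvm_vcpu *vcpu;
		int r;

		r = kvm_arch_vcpu_precreate(kvm, id);	/* arch checks run before any allocation */
		if (r)
			return r;

		vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);	/* allocation is now common */
		if (!vcpu)
			return -ENOMEM;

		r = kvm_vcpu_init(vcpu, kvm, id);	/* common init, including the vcpu->run page */
		if (r)
			goto vcpu_free;

		r = kvm_arch_vcpu_create(vcpu);		/* arch hook now receives a pre-allocated vcpu */
		if (r)
			goto vcpu_uninit;

		/* ... debugfs creation, kvm->vcpus[] publication, vCPU fd creation ... */
		return 0;

	vcpu_uninit:
		kvm_vcpu_uninit(vcpu);
	vcpu_free:
		kmem_cache_free(kvm_vcpu_cache, vcpu);
		return r;
	}

	/* Teardown is the mirror image and is now shared by all architectures. */
	void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
	{
		kvm_arch_vcpu_destroy(vcpu);
		kvm_vcpu_uninit(vcpu);
		kmem_cache_free(kvm_vcpu_cache, vcpu);
	}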