Diffstat (limited to 'arch/arm/kvm')
 arch/arm/kvm/arm.c           | 69
 arch/arm/kvm/coproc.c        | 24
 arch/arm/kvm/coproc.h        | 18
 arch/arm/kvm/handle_exit.c   |  8
 arch/arm/kvm/hyp/hyp-entry.S | 28
 arch/arm/kvm/init.S          | 51
 arch/arm/kvm/interrupts.S    |  4
 arch/arm/kvm/mmu.c           | 36
 arch/arm/kvm/psci.c          |  8
 9 files changed, 151 insertions(+), 95 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 314eb6abe1ff..8a31906bdc9b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -53,7 +53,6 @@ __asm__(".arch_extension	virt");
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -209,9 +208,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_IMMEDIATE_EXIT:
 		r = 1;
 		break;
-	case KVM_CAP_COALESCED_MMIO:
-		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
 		break;
@@ -230,6 +226,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		else
 			r = kvm->arch.vgic.msis_require_devid;
 		break;
+	case KVM_CAP_ARM_USER_IRQ:
+		/*
+		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
+		 * (bump this number if adding more devices)
+		 */
+		r = 1;
+		break;
 	default:
 		r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
 		break;
@@ -351,15 +354,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
 	kvm_arm_set_running_vcpu(vcpu);
+
+	kvm_vgic_load(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
-	 * if the vcpu is no longer assigned to a cpu. This is used for the
-	 * optimized make_all_cpus_request path.
-	 */
+	kvm_vgic_put(vcpu);
+
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
@@ -517,13 +519,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 			return ret;
 	}
 
-	/*
-	 * Enable the arch timers only if we have an in-kernel VGIC
-	 * and it has been properly initialized, since we cannot handle
-	 * interrupts from the virtual timer with a userspace gic.
-	 */
-	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-		ret = kvm_timer_enable(vcpu);
+	ret = kvm_timer_enable(vcpu);
 
 	return ret;
 }
@@ -633,16 +629,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * non-preemptible context.
 		 */
 		preempt_disable();
 
+		kvm_pmu_flush_hwstate(vcpu);
+
 		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
 
 		/*
-		 * Re-check atomic conditions
+		 * If we have a signal pending, or need to notify a userspace
+		 * irqchip about timer or PMU level changes, then we exit (and
+		 * update the timer level state in kvm_timer_update_run
+		 * below).
 		 */
-		if (signal_pending(current)) {
+		if (signal_pending(current) ||
+		    kvm_timer_should_notify_user(vcpu) ||
+		    kvm_pmu_should_notify_user(vcpu)) {
 			ret = -EINTR;
 			run->exit_reason = KVM_EXIT_INTR;
 		}
@@ -714,6 +717,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			ret = handle_exit(vcpu, run, ret);
 	}
 
+	/* Tell userspace about in-kernel device output levels */
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+		kvm_timer_update_run(vcpu);
+		kvm_pmu_update_run(vcpu);
+	}
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 	return ret;
@@ -1112,8 +1121,16 @@ static void cpu_init_hyp_mode(void *dummy)
 	kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reset(void)
+{
+	if (!is_kernel_in_hyp_mode())
+		__hyp_reset_vectors();
+}
+
 static void cpu_hyp_reinit(void)
 {
+	cpu_hyp_reset();
+
 	if (is_kernel_in_hyp_mode()) {
 		/*
 		 * __cpu_init_stage2() is safe to call even if the PM
@@ -1121,21 +1138,13 @@ static void cpu_hyp_reinit(void)
 		 */
 		__cpu_init_stage2();
 	} else {
-		if (__hyp_get_vectors() == hyp_default_vectors)
-			cpu_init_hyp_mode(NULL);
+		cpu_init_hyp_mode(NULL);
 	}
 
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
 }
 
-static void cpu_hyp_reset(void)
-{
-	if (!is_kernel_in_hyp_mode())
-		__cpu_reset_hyp_mode(hyp_default_vectors,
-				     kvm_get_idmap_start());
-}
-
 static void _kvm_arch_hardware_enable(void *discard)
 {
 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1319,12 +1328,6 @@ static int init_hyp_mode(void)
 		goto out_err;
 
 	/*
-	 * It is probably enough to obtain the default on one
-	 * CPU. It's unlikely to be different on the others.
-	 */
-	hyp_default_vectors = __hyp_get_vectors();
-
-	/*
 	 * Allocate stack pages for Hypervisor-mode
 	 */
 	for_each_possible_cpu(cpu) {
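The arm.c hunks above advertise KVM_CAP_ARM_USER_IRQ and, for VMs without an in-kernel irqchip, publish the timer and PMU output levels through the shared kvm_run page on every exit. A minimal userspace sketch of the consuming side; gic_set_line() and the line ids are hypothetical VMM helpers, while the capability check and the device_irq_level bits are the ABI this patch exposes:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM helpers/ids, not part of the kernel ABI. */
enum { VTIMER_LINE, PTIMER_LINE, PMU_LINE };
extern void gic_set_line(int line, bool level);

/* Call once after creating the VM. */
static bool have_user_irq(int vm_fd)
{
	/* >= 1 means EL1_VTIMER, EL1_PTIMER and PMU levels are reported. */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_USER_IRQ) >= 1;
}

/* Call after every KVM_RUN; consumes what kvm_timer_update_run() and
 * kvm_pmu_update_run() wrote into the shared page. */
static void forward_device_irq_levels(struct kvm_run *run)
{
	__u64 level = run->s.regs.device_irq_level;

	gic_set_line(VTIMER_LINE, level & KVM_ARM_DEV_EL1_VTIMER);
	gic_set_line(PTIMER_LINE, level & KVM_ARM_DEV_EL1_PTIMER);
	gic_set_line(PMU_LINE, level & KVM_ARM_DEV_PMU);
}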
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3e5e4194ef86..2c14b69511e9 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -40,6 +40,24 @@
  * Co-processor emulation
  *****************************************************************************/
 
+static bool write_to_read_only(struct kvm_vcpu *vcpu,
+			       const struct coproc_params *params)
+{
+	WARN_ONCE(1, "CP15 write to read-only register\n");
+	print_cp_instr(params);
+	kvm_inject_undefined(vcpu);
+	return false;
+}
+
+static bool read_from_write_only(struct kvm_vcpu *vcpu,
+				 const struct coproc_params *params)
+{
+	WARN_ONCE(1, "CP15 read to write-only register\n");
+	print_cp_instr(params);
+	kvm_inject_undefined(vcpu);
+	return false;
+}
+
 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
 static u32 cache_levels;
 
@@ -502,15 +520,15 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-			return 1;
 		}
-		/* If access function fails, it should complain. */
 	} else {
+		/* If access function fails, it should complain. */
 		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 			*vcpu_pc(vcpu));
 		print_cp_instr(params);
+		kvm_inject_undefined(vcpu);
 	}
-	kvm_inject_undefined(vcpu);
+
 	return 1;
 }
 
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index eef1759c2b65..3a41b7d1eb86 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -81,24 +81,6 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
-				      const struct coproc_params *params)
-{
-	kvm_debug("CP15 write to read-only register at: %08lx\n",
-		  *vcpu_pc(vcpu));
-	print_cp_instr(params);
-	return false;
-}
-
-static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
-					const struct coproc_params *params)
-{
-	kvm_debug("CP15 read to write-only register at: %08lx\n",
-		  *vcpu_pc(vcpu));
-	print_cp_instr(params);
-	return false;
-}
-
 /* Reset functions */
 static inline void reset_unknown(struct kvm_vcpu *vcpu,
 				 const struct coproc_reg *r)
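With the two helpers moved out of coproc.h and into coproc.c, a failed access now injects the undefined exception itself, and emulate_cp15() no longer injects on every false return. This is the shape of a trap handler using them, for a hypothetical read-only register (illustrative only, not a register from this patch; vcpu_reg()/vcpu_cp15() are the existing arm KVM accessors):

/* Hypothetical accessor: guest reads are emulated from the shadow cp15
 * state; guest writes fail via write_to_read_only(), which WARNs once,
 * prints the instruction and injects an undefined exception. */
static bool access_example_ro(struct kvm_vcpu *vcpu,
			      const struct coproc_params *p,
			      const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, r->reg);
	return true;
}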
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 96af65a30d78..5fd7968cdae9 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -160,6 +160,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_DATA_ABORT:
 		kvm_inject_vabt(vcpu);
 		return 1;
+	case ARM_EXCEPTION_HYP_GONE:
+		/*
+		 * HYP has been reset to the hyp-stub. This happens
+		 * when a guest is pre-empted by kvm_reboot()'s
+		 * shutdown call.
+		 */
+		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		return 0;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 96beb53934c9..95a2faefc070 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -126,11 +126,29 @@ hyp_hvc:
 	 */
 	pop	{r0, r1, r2}
 
-	/* Check for __hyp_get_vectors */
-	cmp	r0, #-1
-	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
-	beq	1f
+	/*
+	 * Check if we have a kernel function, which is guaranteed to be
+	 * bigger than the maximum hyp stub hypercall
+	 */
+	cmp	r0, #HVC_STUB_HCALL_NR
+	bhs	1f
 
+	/*
+	 * Not a kernel function, treat it as a stub hypercall.
+	 * Compute the physical address for __kvm_handle_stub_hvc
+	 * (as the code lives in the idmapped page) and branch there.
+	 * We hijack ip (r12) as a tmp register.
+	 */
+	push	{r1}
+	ldr	r1, =kimage_voffset
+	ldr	r1, [r1]
+	ldr	ip, =__kvm_handle_stub_hvc
+	sub	ip, ip, r1
+	pop	{r1}
+
+	bx	ip
+
+1:
 	push	{lr}
 	mov	lr, r0
@@ -142,7 +160,7 @@ THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
 	pop	{lr}
-1:	eret
+	eret
 
guest_trap:
 	load_vcpu r0			@ Load VCPU pointer to r0
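In C terms, the new hyp_hvc dispatch above does the following. This is a sketch only; the authoritative logic is the assembly in the hunk, and the constant value and extern below stand in for what asm/virt.h and the idmap entry point provide:

/* Stub hypercall numbers are small integers; kernel HYP calls pass a
 * function pointer in r0. One unsigned compare separates the cases. */
#define HVC_STUB_HCALL_NR	3UL	/* assumed value, see asm/virt.h */

extern unsigned long __kvm_handle_stub_hvc(unsigned long hcall,
					   unsigned long arg);

typedef unsigned long (*hyp_fn_t)(unsigned long, unsigned long,
				  unsigned long);

static unsigned long hyp_hvc_dispatch(unsigned long r0, unsigned long r1,
				      unsigned long r2, unsigned long r3)
{
	if (r0 < HVC_STUB_HCALL_NR) {
		/*
		 * Stub hypercall: must run from the idmap page, since
		 * HVC_RESET_VECTORS turns the HYP MMU off; the assembly
		 * branches to PA(__kvm_handle_stub_hvc) by subtracting
		 * kimage_voffset.
		 */
		return __kvm_handle_stub_hvc(r0, r1);
	}

	/* Kernel function: r0 is the pointer, r1-r3 are its arguments. */
	return ((hyp_fn_t)r0)(r1, r2, r3);
}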
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index bf89c919efc1..570ed4a9c261 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -23,6 +23,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/virt.h>
 
 /********************************************************************
  * Hypervisor initialization
@@ -39,6 +40,10 @@
  *   - Setup the page tables
  *   - Enable the MMU
  *   - Profit! (or eret, if you only care about the code).
+ *
+ * Another possibility is to get a HYP stub hypercall.
+ * We discriminate between the two by checking if r0 contains a value
+ * that is less than HVC_STUB_HCALL_NR.
  */
 
 	.text
@@ -58,6 +63,10 @@ __kvm_hyp_init:
 	W(b)	.
 
__do_hyp_init:
+	@ Check for a stub hypercall
+	cmp	r0, #HVC_STUB_HCALL_NR
+	blo	__kvm_handle_stub_hvc
+
 	@ Set stack pointer
 	mov	sp, r0
@@ -112,20 +121,46 @@ __do_hyp_init:
 
 	eret
 
-	@ r0 : stub vectors address
-ENTRY(__kvm_hyp_reset)
+ENTRY(__kvm_handle_stub_hvc)
+	cmp	r0, #HVC_SOFT_RESTART
+	bne	1f
+
+	/* The target is expected in r1 */
+	msr	ELR_hyp, r1
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #HYP_MODE
+THUMB(	orr	r0, r0, #PSR_T_BIT	)
+	msr	spsr_cxsf, r0
+	b	reset
+
+1:	cmp	r0, #HVC_RESET_VECTORS
+	bne	1f
+
+reset:
 	/* We're now in idmap, disable MMU */
 	mrc	p15, 4, r1, c1, c0, 0	@ HSCTLR
-	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
-	bic	r1, r1, r2
+	ldr	r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
+	bic	r1, r1, r0
 	mcr	p15, 4, r1, c1, c0, 0	@ HSCTLR
 
-	/* Install stub vectors */
-	mcr	p15, 4, r0, c12, c0, 0	@ HVBAR
-	isb
+	/*
+	 * Install stub vectors, using ardb's VA->PA trick.
+	 */
+0:	adr	r0, 0b					@ PA(0)
+	movw	r1, #:lower16:__hyp_stub_vectors - 0b	@ VA(stub) - VA(0)
+	movt	r1, #:upper16:__hyp_stub_vectors - 0b
+	add	r1, r1, r0				@ PA(stub)
+	mcr	p15, 4, r1, c12, c0, 0			@ HVBAR
+	b	exit
+
+1:	ldr	r0, =HVC_STUB_ERR
+	eret
 
+exit:
+	mov	r0, #0
 	eret
-ENDPROC(__kvm_hyp_reset)
+ENDPROC(__kvm_handle_stub_hvc)
 
 	.ltorg
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index b1bd316f14c0..80a1d6cd261c 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -37,10 +37,6 @@
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
  * passed in r0 (strictly 32bit).
  *
- * A function pointer with a value of 0xffffffff has a special meaning,
- * and is used to implement __hyp_get_vectors in the same way as in
- * arch/arm/kernel/hyp_stub.S.
- *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
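The "VA->PA trick" used to install the stub vectors above reduces to one line of arithmetic: take the run-time physical address of a local anchor and add the link-time distance to the target symbol. A sketch of the computation in plain C, with hypothetical anchor/symbol addresses passed as parameters:

#include <stdint.h>

/*
 * PA(sym) = PA(anchor) + (VA(sym) - VA(anchor))
 *
 * In the assembly: adr yields PA(0b) because the code executes from the
 * idmap, and movw/movt materialize VA(__hyp_stub_vectors) - VA(0b) as a
 * link-time constant.
 */
static inline uint32_t sym_pa(uint32_t anchor_pa, uint32_t anchor_va,
			      uint32_t sym_va)
{
	return anchor_pa + (sym_va - anchor_va);
}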
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 582a972371cf..313ee646480f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1524,7 +1524,8 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 			     unsigned long start,
 			     unsigned long end,
 			     int (*handler)(struct kvm *kvm,
-					    gpa_t gpa, void *data),
+					    gpa_t gpa, u64 size,
+					    void *data),
 			     void *data)
 {
 	struct kvm_memslots *slots;
@@ -1536,7 +1537,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 	/* we only care about the pages that the guest sees */
 	kvm_for_each_memslot(memslot, slots) {
 		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
+		gpa_t gpa;
 
 		hva_start = max(start, memslot->userspace_addr);
 		hva_end = min(end, memslot->userspace_addr +
@@ -1544,25 +1545,16 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 		if (hva_start >= hva_end)
 			continue;
 
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		for (; gfn < gfn_end; ++gfn) {
-			gpa_t gpa = gfn << PAGE_SHIFT;
-			ret |= handler(kvm, gpa, data);
-		}
+		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
 	}
 
 	return ret;
 }
 
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	unmap_stage2_range(kvm, gpa, size);
 	return 0;
 }
 
@@ -1589,10 +1581,11 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }
 
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pte_t *pte = (pte_t *)data;
 
+	WARN_ON(size != PAGE_SIZE);
 	/*
 	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
 	 * flag clear because MMU notifiers will have unmapped a huge PMD before
@@ -1618,11 +1611,12 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
@@ -1637,11 +1631,12 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
 	return stage2_ptep_test_and_clear_young(pte);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
@@ -1686,11 +1681,6 @@ phys_addr_t kvm_get_idmap_vector(void)
 	return hyp_idmap_vector;
 }
 
-phys_addr_t kvm_get_idmap_start(void)
-{
-	return hyp_idmap_start;
-}
-
 static int kvm_map_idmap_text(pgd_t *pgd)
 {
 	int err;
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index c2b131527a64..a08d7a93aebb 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	int ret = 1;
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
+	int ret = 1;
 
 	switch (psci_fn) {
 	case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	case PSCI_0_2_FN_CPU_ON:
 	case PSCI_0_2_FN64_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -288,7 +292,9 @@
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;
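The psci.c hunks serialize CPU_ON on kvm->lock because kvm_psci_vcpu_on() is a check-then-act sequence: two vcpus turning on the same target could both observe it powered off and both reset it. A simplified sketch of the protected logic; the patch takes the lock in the callers, but it is folded into one function here, and the body is illustrative rather than the kernel's exact kvm_psci_vcpu_on():

/* With kvm->lock held, the second of two racing CPU_ON calls observes
 * power_off == false and backs off instead of re-resetting a vcpu that
 * is already running. */
static unsigned long example_cpu_on(struct kvm *kvm, struct kvm_vcpu *target)
{
	unsigned long ret;

	mutex_lock(&kvm->lock);
	if (!target->arch.power_off) {
		ret = PSCI_RET_ALREADY_ON;
	} else {
		/* reset state, set the entry point, then wake the target */
		target->arch.power_off = false;
		kvm_vcpu_wake_up(target);
		ret = PSCI_RET_SUCCESS;
	}
	mutex_unlock(&kvm->lock);

	return ret;
}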