Diffstat (limited to 'arch/arm64/kvm')
33 files changed, 2263 insertions, 263 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 2b233cc72c79..3f98dbe09500 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1354,16 +1354,9 @@ static unsigned long nvhe_percpu_order(void) /* A lookup table holding the hypervisor VA for each vector slot */ static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; -static int __kvm_vector_slot2idx(enum arm64_hyp_spectre_vector slot) -{ - return slot - (slot != HYP_VECTOR_DIRECT); -} - static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) { - int idx = __kvm_vector_slot2idx(slot); - - hyp_spectre_vector_selector[slot] = base + (idx * SZ_2K); + hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); } static int kvm_init_vector_slots(void) @@ -1392,22 +1385,18 @@ static int kvm_init_vector_slots(void) return 0; } -static void cpu_init_hyp_mode(void) +static void cpu_prepare_hyp_mode(int cpu) { - struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params); - struct arm_smccc_res res; + struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); unsigned long tcr; - /* Switch from the HYP stub to our own HYP init vector */ - __hyp_set_vectors(kvm_get_idmap_vector()); - /* * Calculate the raw per-cpu offset without a translation from the * kernel's mapping to the linear mapping, and store it in tpidr_el2 * so that we can use adr_l to access per-cpu variables in EL2. * Also drop the KASAN tag which gets in the way... */ - params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) - + params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); params->mair_el2 = read_sysreg(mair_el1); @@ -1431,14 +1420,28 @@ static void cpu_init_hyp_mode(void) tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET; params->tcr_el2 = tcr; - params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE); + params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE); params->pgd_pa = kvm_mmu_get_httbr(); + if (is_protected_kvm_enabled()) + params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; + else + params->hcr_el2 = HCR_HOST_NVHE_FLAGS; + params->vttbr = params->vtcr = 0; /* * Flush the init params from the data cache because the struct will * be read while the MMU is off. */ kvm_flush_dcache_to_poc(params, sizeof(*params)); +} + +static void hyp_install_host_vector(void) +{ + struct kvm_nvhe_init_params *params; + struct arm_smccc_res res; + + /* Switch from the HYP stub to our own HYP init vector */ + __hyp_set_vectors(kvm_get_idmap_vector()); /* * Call initialization code, and switch to the full blown HYP code. @@ -1447,8 +1450,14 @@ static void cpu_init_hyp_mode(void) * cpus_have_const_cap() wrapper. 
*/ BUG_ON(!system_capabilities_finalized()); + params = this_cpu_ptr_nvhe_sym(kvm_init_params); arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); WARN_ON(res.a0 != SMCCC_RET_SUCCESS); +} + +static void cpu_init_hyp_mode(void) +{ + hyp_install_host_vector(); /* * Disabling SSBD on a non-VHE system requires us to enable SSBS @@ -1491,7 +1500,10 @@ static void cpu_set_hyp_vector(void) struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); void *vector = hyp_spectre_vector_selector[data->slot]; - *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + if (!is_protected_kvm_enabled()) + *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + else + kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); } static void cpu_hyp_reinit(void) @@ -1499,13 +1511,14 @@ static void cpu_hyp_reinit(void) kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); cpu_hyp_reset(); - cpu_set_hyp_vector(); if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else cpu_init_hyp_mode(); + cpu_set_hyp_vector(); + kvm_arm_init_debug(); if (vgic_present) @@ -1701,18 +1714,62 @@ static void teardown_hyp_mode(void) } } +static int do_pkvm_init(u32 hyp_va_bits) +{ + void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); + int ret; + + preempt_disable(); + hyp_install_host_vector(); + ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, + num_possible_cpus(), kern_hyp_va(per_cpu_base), + hyp_va_bits); + preempt_enable(); + + return ret; +} + +static int kvm_hyp_init_protection(u32 hyp_va_bits) +{ + void *addr = phys_to_virt(hyp_mem_base); + int ret; + + kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); + if (ret) + return ret; + + ret = do_pkvm_init(hyp_va_bits); + if (ret) + return ret; + + free_hyp_pgds(); + + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ static int init_hyp_mode(void) { + u32 hyp_va_bits; int cpu; - int err = 0; + int err = -ENOMEM; + + /* + * The protected Hyp-mode cannot be initialized if the memory pool + * allocation has failed. + */ + if (is_protected_kvm_enabled() && !hyp_mem_base) + goto out_err; /* * Allocate Hyp PGD and setup Hyp identity mapping */ - err = kvm_mmu_init(); + err = kvm_mmu_init(&hyp_va_bits); if (err) goto out_err; @@ -1773,7 +1830,19 @@ static int init_hyp_mode(void) goto out_err; } - err = create_hyp_mappings(kvm_ksym_ref(__bss_start), + /* + * .hyp.bss is guaranteed to be placed at the beginning of the .bss + * section thanks to an assertion in the linker script. Map it RW and + * the rest of .bss RO. 
+ */ + err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), + kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); + if (err) { + kvm_err("Cannot map hyp bss section: %d\n", err); + goto out_err; + } + + err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); if (err) { kvm_err("Cannot map bss section\n"); @@ -1794,19 +1863,19 @@ static int init_hyp_mode(void) } } - /* - * Map Hyp percpu pages - */ for_each_possible_cpu(cpu) { char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; char *percpu_end = percpu_begin + nvhe_percpu_size(); + /* Map Hyp percpu pages */ err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); - if (err) { kvm_err("Cannot map hyp percpu region\n"); goto out_err; } + + /* Prepare the CPU initialization parameters */ + cpu_prepare_hyp_mode(cpu); } if (is_protected_kvm_enabled()) { @@ -1816,6 +1885,14 @@ static int init_hyp_mode(void) goto out_err; } + if (is_protected_kvm_enabled()) { + err = kvm_hyp_init_protection(hyp_va_bits); + if (err) { + kvm_err("Failed to init hyp memory protection\n"); + goto out_err; + } + } + return 0; out_err: @@ -1824,6 +1901,72 @@ out_err: return err; } +static void _kvm_host_prot_finalize(void *discard) +{ + WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)); +} + +static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end) +{ + return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end); +} + +#define pkvm_mark_hyp_section(__section) \ + pkvm_mark_hyp(__pa_symbol(__section##_start), \ + __pa_symbol(__section##_end)) + +static int finalize_hyp_mode(void) +{ + int cpu, ret; + + if (!is_protected_kvm_enabled()) + return 0; + + ret = pkvm_mark_hyp_section(__hyp_idmap_text); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_text); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_rodata); + if (ret) + return ret; + + ret = pkvm_mark_hyp_section(__hyp_bss); + if (ret) + return ret; + + ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size); + if (ret) + return ret; + + for_each_possible_cpu(cpu) { + phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]); + phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order()); + + ret = pkvm_mark_hyp(start, end); + if (ret) + return ret; + + start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu)); + end = start + PAGE_SIZE; + ret = pkvm_mark_hyp(start, end); + if (ret) + return ret; + } + + /* + * Flip the static key upfront as that may no longer be possible + * once the host stage 2 is installed. 
+ */ + static_branch_enable(&kvm_protected_mode_initialized); + on_each_cpu(_kvm_host_prot_finalize, NULL, 1); + + return 0; +} + static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); @@ -1898,11 +2041,6 @@ int kvm_arch_init(void *opaque) in_hyp_mode = is_kernel_in_hyp_mode(); - if (!in_hyp_mode && kvm_arch_requires_vhe()) { - kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n"); - return -ENODEV; - } - if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ @@ -1940,8 +2078,15 @@ int kvm_arch_init(void *opaque) if (err) goto out_hyp; + if (!in_hyp_mode) { + err = finalize_hyp_mode(); + if (err) { + kvm_err("Failed to finalize Hyp protection\n"); + goto out_hyp; + } + } + if (is_protected_kvm_enabled()) { - static_branch_enable(&kvm_protected_mode_initialized); kvm_info("Protected nVHE mode initialized successfully\n"); } else if (in_hyp_mode) { kvm_info("VHE mode initialized successfully\n"); diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 3e081d556e81..5621020b28de 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -11,6 +11,7 @@ #include <linux/kvm_host.h> #include <asm/fpsimd.h> #include <asm/kvm_asm.h> +#include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> #include <asm/sysreg.h> @@ -42,6 +43,17 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) if (ret) goto error; + if (vcpu->arch.sve_state) { + void *sve_end; + + sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu); + + ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end, + PAGE_HYP); + if (ret) + goto error; + } + vcpu->arch.host_thread_info = kern_hyp_va(ti); vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); error: @@ -109,11 +121,17 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { - fpsimd_save_and_flush_cpu_state(); + if (guest_has_sve) { + __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); + + /* Restore the VL that was saved when bound to the CPU */ + if (!has_vhe()) + sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, + SYS_ZCR_EL1); + } - if (guest_has_sve) - __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12); - } else if (host_has_sve) { + fpsimd_save_and_flush_cpu_state(); + } else if (has_vhe() && host_has_sve) { /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 9bbd30e62799..c763808cacdf 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -299,7 +299,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) memset(vqs, 0, sizeof(vqs)); - max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + max_vq = vcpu_sve_max_vq(vcpu); for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) if (sve_vq_available(vq)) vqs[vq_word(vq)] |= vq_mask(vq); @@ -427,7 +427,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) return -ENOENT; - vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + vq = vcpu_sve_max_vq(vcpu); reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - SVE_SIG_REGS_OFFSET; @@ -437,7 +437,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region, if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) return -ENOENT; - vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); + 
vq = vcpu_sve_max_vq(vcpu); reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - SVE_SIG_REGS_OFFSET; diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile index 687598e41b21..b726332eec49 100644 --- a/arch/arm64/kvm/hyp/Makefile +++ b/arch/arm64/kvm/hyp/Makefile @@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \ -DDISABLE_BRANCH_PROFILING \ $(DISABLE_STACKLEAK_PLUGIN) -obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o +obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S index 01f114aa47b0..3c635929771a 100644 --- a/arch/arm64/kvm/hyp/fpsimd.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state) fpsimd_restore x0, 1 ret SYM_FUNC_END(__fpsimd_restore_state) + +SYM_FUNC_START(__sve_restore_state) + __sve_load 0, x1, 2 + ret +SYM_FUNC_END(__sve_restore_state) + +SYM_FUNC_START(__sve_save_state) + sve_save 0, x1, 2 + ret +SYM_FUNC_END(__sve_save_state) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 6c1f51f25eb3..cdf42e347d3f 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -160,18 +160,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar) return true; } -static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) +static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault) { - u8 ec; - u64 esr; u64 hpfar, far; - esr = vcpu->arch.fault.esr_el2; - ec = ESR_ELx_EC(esr); - - if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) - return true; - far = read_sysreg_el2(SYS_FAR); /* @@ -194,33 +186,59 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) hpfar = read_sysreg(hpfar_el2); } - vcpu->arch.fault.far_el2 = far; - vcpu->arch.fault.hpfar_el2 = hpfar; + fault->far_el2 = far; + fault->hpfar_el2 = hpfar; return true; } +static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) +{ + u8 ec; + u64 esr; + + esr = vcpu->arch.fault.esr_el2; + ec = ESR_ELx_EC(esr); + + if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW) + return true; + + return __get_fault_info(esr, &vcpu->arch.fault); +} + +static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu) +{ + struct thread_struct *thread; + + thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct, + uw.fpsimd_state); + + __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr); +} + +static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) +{ + sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); + __sve_restore_state(vcpu_sve_pffr(vcpu), + &vcpu->arch.ctxt.fp_regs.fpsr); + write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); +} + /* Check for an FPSIMD/SVE trap and handle as appropriate */ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) { - bool vhe, sve_guest, sve_host; + bool sve_guest, sve_host; u8 esr_ec; + u64 reg; if (!system_supports_fpsimd()) return false; - /* - * Currently system_supports_sve() currently implies has_vhe(), - * so the check is redundant. However, has_vhe() can be determined - * statically and helps the compiler remove dead code. 
- */ - if (has_vhe() && system_supports_sve()) { + if (system_supports_sve()) { sve_guest = vcpu_has_sve(vcpu); sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; - vhe = true; } else { sve_guest = false; sve_host = false; - vhe = has_vhe(); } esr_ec = kvm_vcpu_trap_get_class(vcpu); @@ -229,53 +247,38 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) return false; /* Don't handle SVE traps for non-SVE vcpus here: */ - if (!sve_guest) - if (esr_ec != ESR_ELx_EC_FP_ASIMD) - return false; + if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD) + return false; /* Valid trap. Switch the context: */ - - if (vhe) { - u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN; - + if (has_vhe()) { + reg = CPACR_EL1_FPEN; if (sve_guest) reg |= CPACR_EL1_ZEN; - write_sysreg(reg, cpacr_el1); + sysreg_clear_set(cpacr_el1, 0, reg); } else { - write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, - cptr_el2); - } + reg = CPTR_EL2_TFP; + if (sve_guest) + reg |= CPTR_EL2_TZ; + sysreg_clear_set(cptr_el2, reg, 0); + } isb(); if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { - /* - * In the SVE case, VHE is assumed: it is enforced by - * Kconfig and kvm_arch_init(). - */ - if (sve_host) { - struct thread_struct *thread = container_of( - vcpu->arch.host_fpsimd_state, - struct thread_struct, uw.fpsimd_state); - - sve_save_state(sve_pffr(thread), - &vcpu->arch.host_fpsimd_state->fpsr); - } else { + if (sve_host) + __hyp_sve_save_host(vcpu); + else __fpsimd_save_state(vcpu->arch.host_fpsimd_state); - } vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; } - if (sve_guest) { - sve_load_state(vcpu_sve_pffr(vcpu), - &vcpu->arch.ctxt.fp_regs.fpsr, - sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); - write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12); - } else { + if (sve_guest) + __hyp_sve_restore_guest(vcpu); + else __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs); - } /* Skip restoring fpexc32 for AArch64 guests */ if (!(read_sysreg(hcr_el2) & HCR_RW)) diff --git a/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h new file mode 100644 index 000000000000..dc61aaa56f31 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_EARLY_ALLOC_H +#define __KVM_HYP_EARLY_ALLOC_H + +#include <asm/kvm_pgtable.h> + +void hyp_early_alloc_init(void *virt, unsigned long size); +unsigned long hyp_early_alloc_nr_used_pages(void); +void *hyp_early_alloc_page(void *arg); +void *hyp_early_alloc_contig(unsigned int nr_pages); + +extern struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops; + +#endif /* __KVM_HYP_EARLY_ALLOC_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h new file mode 100644 index 000000000000..55b3f0ce5bc8 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_GFP_H +#define __KVM_HYP_GFP_H + +#include <linux/list.h> + +#include <nvhe/memory.h> +#include <nvhe/spinlock.h> + +#define HYP_NO_ORDER UINT_MAX + +struct hyp_pool { + /* + * Spinlock protecting concurrent changes to the memory pool as well as + * the struct hyp_page of the pool's pages until we have a proper atomic + * API at EL2. 
+ */ + hyp_spinlock_t lock; + struct list_head free_area[MAX_ORDER]; + phys_addr_t range_start; + phys_addr_t range_end; + unsigned int max_order; +}; + +static inline void hyp_page_ref_inc(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + p->refcount++; + hyp_spin_unlock(&pool->lock); +} + +static inline int hyp_page_ref_dec_and_test(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + int ret; + + hyp_spin_lock(&pool->lock); + p->refcount--; + ret = (p->refcount == 0); + hyp_spin_unlock(&pool->lock); + + return ret; +} + +static inline void hyp_set_page_refcounted(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + if (p->refcount) { + hyp_spin_unlock(&pool->lock); + hyp_panic(); + } + p->refcount = 1; + hyp_spin_unlock(&pool->lock); +} + +/* Allocation */ +void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order); +void hyp_get_page(void *addr); +void hyp_put_page(void *addr); + +/* Used pages cannot be freed */ +int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, + unsigned int reserved_pages); +#endif /* __KVM_HYP_GFP_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h new file mode 100644 index 000000000000..42d81ec739fa --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#ifndef __KVM_NVHE_MEM_PROTECT__ +#define __KVM_NVHE_MEM_PROTECT__ +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_pgtable.h> +#include <asm/virt.h> +#include <nvhe/spinlock.h> + +struct host_kvm { + struct kvm_arch arch; + struct kvm_pgtable pgt; + struct kvm_pgtable_mm_ops mm_ops; + hyp_spinlock_t lock; +}; +extern struct host_kvm host_kvm; + +int __pkvm_prot_finalize(void); +int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end); + +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool); +void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt); + +static __always_inline void __load_host_stage2(void) +{ + if (static_branch_likely(&kvm_protected_mode_initialized)) + __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); + else + write_sysreg(0, vttbr_el2); +} +#endif /* __KVM_NVHE_MEM_PROTECT__ */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h new file mode 100644 index 000000000000..d2fb307c5952 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_MEMORY_H +#define __KVM_HYP_MEMORY_H + +#include <asm/page.h> + +#include <linux/types.h> + +struct hyp_pool; +struct hyp_page { + unsigned int refcount; + unsigned int order; + struct hyp_pool *pool; + struct list_head node; +}; + +extern s64 hyp_physvirt_offset; +extern u64 __hyp_vmemmap; +#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap) + +#define __hyp_pa(virt) ((phys_addr_t)(virt) + hyp_physvirt_offset) +#define __hyp_va(phys) ((void *)((phys_addr_t)(phys) - hyp_physvirt_offset)) + +static inline void *hyp_phys_to_virt(phys_addr_t phys) +{ + return __hyp_va(phys); +} + +static inline phys_addr_t hyp_virt_to_phys(void *addr) +{ + return __hyp_pa(addr); +} + +#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) +#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT)) +#define 
hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)]) +#define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt)) +#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt)) + +#define hyp_page_to_pfn(page) ((struct hyp_page *)(page) - hyp_vmemmap) +#define hyp_page_to_phys(page) hyp_pfn_to_phys((hyp_page_to_pfn(page))) +#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page)) +#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool) + +static inline int hyp_page_count(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + return p->refcount; +} + +#endif /* __KVM_HYP_MEMORY_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h new file mode 100644 index 000000000000..0095f6289742 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __KVM_HYP_MM_H +#define __KVM_HYP_MM_H + +#include <asm/kvm_pgtable.h> +#include <asm/spectre.h> +#include <linux/memblock.h> +#include <linux/types.h> + +#include <nvhe/memory.h> +#include <nvhe/spinlock.h> + +#define HYP_MEMBLOCK_REGIONS 128 +extern struct memblock_region kvm_nvhe_sym(hyp_memory)[]; +extern unsigned int kvm_nvhe_sym(hyp_memblock_nr); +extern struct kvm_pgtable pkvm_pgtable; +extern hyp_spinlock_t pkvm_pgd_lock; +extern struct hyp_pool hpool; +extern u64 __io_map_base; + +int hyp_create_idmap(u32 hyp_va_bits); +int hyp_map_vectors(void); +int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back); +int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot); +int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot); +int __pkvm_create_mappings(unsigned long start, unsigned long size, + unsigned long phys, enum kvm_pgtable_prot prot); +unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size, + enum kvm_pgtable_prot prot); + +static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size, + unsigned long *start, unsigned long *end) +{ + unsigned long nr_pages = size >> PAGE_SHIFT; + struct hyp_page *p = hyp_phys_to_page(phys); + + *start = (unsigned long)p; + *end = *start + nr_pages * sizeof(struct hyp_page); + *start = ALIGN_DOWN(*start, PAGE_SIZE); + *end = ALIGN(*end, PAGE_SIZE); +} + +static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages) +{ + unsigned long total = 0, i; + + /* Provision the worst case scenario */ + for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) { + nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE); + total += nr_pages; + } + + return total; +} + +static inline unsigned long __hyp_pgtable_total_pages(void) +{ + unsigned long res = 0, i; + + /* Cover all of memory with page-granularity */ + for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) { + struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i]; + res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT); + } + + return res; +} + +static inline unsigned long hyp_s1_pgtable_pages(void) +{ + unsigned long res; + + res = __hyp_pgtable_total_pages(); + + /* Allow 1 GiB for private mappings */ + res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); + + return res; +} + +static inline unsigned long host_s2_mem_pgtable_pages(void) +{ + /* + * Include an extra 16 pages to safely upper-bound the worst case of + * concatenated pgds. 
+ */ + return __hyp_pgtable_total_pages() + 16; +} + +static inline unsigned long host_s2_dev_pgtable_pages(void) +{ + /* Allow 1 GiB for MMIO mappings */ + return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT); +} + +#endif /* __KVM_HYP_MM_H */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h new file mode 100644 index 000000000000..76b537f8d1c6 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * A stand-alone ticket spinlock implementation for use by the non-VHE + * KVM hypervisor code running at EL2. + * + * Copyright (C) 2020 Google LLC + * Author: Will Deacon <will@kernel.org> + * + * Heavily based on the implementation removed by c11090474d70 which was: + * Copyright (C) 2012 ARM Ltd. + */ + +#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__ +#define __ARM64_KVM_NVHE_SPINLOCK_H__ + +#include <asm/alternative.h> +#include <asm/lse.h> + +typedef union hyp_spinlock { + u32 __val; + struct { +#ifdef __AARCH64EB__ + u16 next, owner; +#else + u16 owner, next; +#endif + }; +} hyp_spinlock_t; + +#define hyp_spin_lock_init(l) \ +do { \ + *(l) = (hyp_spinlock_t){ .__val = 0 }; \ +} while (0) + +static inline void hyp_spin_lock(hyp_spinlock_t *lock) +{ + u32 tmp; + hyp_spinlock_t lockval, newval; + + asm volatile( + /* Atomically increment the next ticket. */ + ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ +" prfm pstl1strm, %3\n" +"1: ldaxr %w0, %3\n" +" add %w1, %w0, #(1 << 16)\n" +" stxr %w2, %w1, %3\n" +" cbnz %w2, 1b\n", + /* LSE atomics */ +" mov %w2, #(1 << 16)\n" +" ldadda %w2, %w0, %3\n" + __nops(3)) + + /* Did we get the lock? */ +" eor %w1, %w0, %w0, ror #16\n" +" cbz %w1, 3f\n" + /* + * No: spin on the owner. Send a local event to avoid missing an + * unlock before the exclusive load. + */ +" sevl\n" +"2: wfe\n" +" ldaxrh %w2, %4\n" +" eor %w1, %w2, %w0, lsr #16\n" +" cbnz %w1, 2b\n" + /* We got the lock. Critical section starts here. 
*/ +"3:" + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock) + : "Q" (lock->owner) + : "memory"); +} + +static inline void hyp_spin_unlock(hyp_spinlock_t *lock) +{ + u64 tmp; + + asm volatile( + ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " ldrh %w1, %0\n" + " add %w1, %w1, #1\n" + " stlrh %w1, %0", + /* LSE atomics */ + " mov %w1, #1\n" + " staddlh %w1, %0\n" + __nops(1)) + : "=Q" (lock->owner), "=&r" (tmp) + : + : "memory"); +} + +#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index a6707df4f6c0..f55201a7ff33 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -9,10 +9,15 @@ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS hostprogs := gen-hyprel HOST_EXTRACFLAGS += -I$(objtree)/include +lib-objs := clear_page.o copy_page.o memcpy.o memset.o +lib-objs := $(addprefix ../../../lib/, $(lib-objs)) + obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ - hyp-main.o hyp-smp.o psci-relay.o + hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \ + cache.o setup.o mm.o mem_protect.o obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ - ../fpsimd.o ../hyp-entry.o ../exception.o + ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o +obj-y += $(lib-objs) ## ## Build rules for compiling nVHE hyp code diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S new file mode 100644 index 000000000000..36cef6915428 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Code copied from arch/arm64/mm/cache.S. + */ + +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/alternative.h> + +SYM_FUNC_START_PI(__flush_dcache_area) + dcache_by_line_op civac, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__flush_dcache_area) diff --git a/arch/arm64/kvm/hyp/nvhe/early_alloc.c b/arch/arm64/kvm/hyp/nvhe/early_alloc.c new file mode 100644 index 000000000000..1306c430ab87 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/early_alloc.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <asm/kvm_pgtable.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/memory.h> + +struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops; +s64 __ro_after_init hyp_physvirt_offset; + +static unsigned long base; +static unsigned long end; +static unsigned long cur; + +unsigned long hyp_early_alloc_nr_used_pages(void) +{ + return (cur - base) >> PAGE_SHIFT; +} + +void *hyp_early_alloc_contig(unsigned int nr_pages) +{ + unsigned long size = (nr_pages << PAGE_SHIFT); + void *ret = (void *)cur; + + if (!nr_pages) + return NULL; + + if (end - cur < size) + return NULL; + + cur += size; + memset(ret, 0, size); + + return ret; +} + +void *hyp_early_alloc_page(void *arg) +{ + return hyp_early_alloc_contig(1); +} + +void hyp_early_alloc_init(void *virt, unsigned long size) +{ + base = cur = (unsigned long)virt; + end = base + size; + + hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page; + hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt; + hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys; +} diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index c631e29fb001..c164045af238 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -83,11 +83,6 @@ SYM_CODE_END(__kvm_hyp_init) 
* x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START_LOCAL(___kvm_hyp_init) -alternative_if ARM64_KVM_PROTECTED_MODE - mov_q x1, HCR_HOST_NVHE_PROTECTED_FLAGS - msr hcr_el2, x1 -alternative_else_nop_endif - ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] msr tpidr_el2, x1 @@ -97,6 +92,15 @@ alternative_else_nop_endif ldr x1, [x0, #NVHE_INIT_MAIR_EL2] msr mair_el2, x1 + ldr x1, [x0, #NVHE_INIT_HCR_EL2] + msr hcr_el2, x1 + + ldr x1, [x0, #NVHE_INIT_VTTBR] + msr vttbr_el2, x1 + + ldr x1, [x0, #NVHE_INIT_VTCR] + msr vtcr_el2, x1 + ldr x1, [x0, #NVHE_INIT_PGD_PA] phys_to_ttbr x2, x1 alternative_if ARM64_HAS_CNP @@ -115,6 +119,7 @@ alternative_else_nop_endif /* Invalidate the stale TLBs from Bootloader */ tlbi alle2 + tlbi vmalls12e1 dsb sy /* @@ -244,4 +249,31 @@ alternative_else_nop_endif SYM_CODE_END(__kvm_handle_stub_hvc) +SYM_FUNC_START(__pkvm_init_switch_pgd) + /* Turn the MMU off */ + pre_disable_mmu_workaround + mrs x2, sctlr_el2 + bic x3, x2, #SCTLR_ELx_M + msr sctlr_el2, x3 + isb + + tlbi alle2 + + /* Install the new pgtables */ + ldr x3, [x0, #NVHE_INIT_PGD_PA] + phys_to_ttbr x4, x3 +alternative_if ARM64_HAS_CNP + orr x4, x4, #TTBR_CNP_BIT +alternative_else_nop_endif + msr ttbr0_el2, x4 + + /* Set the new stack pointer */ + ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA] + mov sp, x0 + + /* And turn the MMU back on! */ + set_sctlr_el2 x2 + ret x1 +SYM_FUNC_END(__pkvm_init_switch_pgd) + .popsection diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 936328207bde..b4eaa7ef13e0 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -6,12 +6,15 @@ #include <hyp/switch.h> +#include <asm/pgtable-types.h> #include <asm/kvm_asm.h> #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> #include <asm/kvm_hyp.h> #include <asm/kvm_mmu.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> #include <nvhe/trap_handler.h> DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); @@ -106,6 +109,61 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt) __vgic_v3_restore_aprs(kern_hyp_va(cpu_if)); } +static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); + DECLARE_REG(unsigned long, size, host_ctxt, 2); + DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3); + DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4); + DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5); + + /* + * __pkvm_init() will return only if an error occurred, otherwise it + * will tail-call in __pkvm_init_finalise() which will have to deal + * with the host context directly. 
+ */ + cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base, + hyp_va_bits); +} + +static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1); + + cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot); +} + +static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(unsigned long, start, host_ctxt, 1); + DECLARE_REG(unsigned long, size, host_ctxt, 2); + DECLARE_REG(unsigned long, phys, host_ctxt, 3); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4); + + cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot); +} + +static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); + DECLARE_REG(size_t, size, host_ctxt, 2); + DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3); + + cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot); +} + +static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt) +{ + cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize(); +} + +static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(phys_addr_t, start, host_ctxt, 1); + DECLARE_REG(phys_addr_t, end, host_ctxt, 2); + + cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end); +} typedef void (*hcall_t)(struct kvm_cpu_context *); #define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x @@ -125,6 +183,12 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_get_mdcr_el2), HANDLE_FUNC(__vgic_v3_save_aprs), HANDLE_FUNC(__vgic_v3_restore_aprs), + HANDLE_FUNC(__pkvm_init), + HANDLE_FUNC(__pkvm_cpu_set_vector), + HANDLE_FUNC(__pkvm_create_mappings), + HANDLE_FUNC(__pkvm_create_private_mapping), + HANDLE_FUNC(__pkvm_prot_finalize), + HANDLE_FUNC(__pkvm_mark_hyp), }; static void handle_host_hcall(struct kvm_cpu_context *host_ctxt) @@ -177,6 +241,15 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) case ESR_ELx_EC_SMC64: handle_host_smc(host_ctxt); break; + case ESR_ELx_EC_SVE: + sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); + isb(); + sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + break; + case ESR_ELx_EC_IABT_LOW: + case ESR_ELx_EC_DABT_LOW: + handle_host_mem_abort(host_ctxt); + break; default: hyp_panic(); } diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S index cd119d82d8e3..f4562f417d3f 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S @@ -25,4 +25,5 @@ SECTIONS { BEGIN_HYP_SECTION(.data..percpu) PERCPU_INPUT(L1_CACHE_BYTES) END_HYP_SECTION + HYP_SECTION(.bss) } diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c new file mode 100644 index 000000000000..f4f364aa3282 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> +#include <asm/stage2_pgtable.h> + +#include <hyp/switch.h> + +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> + +#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP) + +extern unsigned long hyp_nr_cpus; +struct host_kvm host_kvm; + +struct hyp_pool host_s2_mem; +struct hyp_pool host_s2_dev; + +/* + * Copies of the 
host's CPU features registers holding sanitized values. + */ +u64 id_aa64mmfr0_el1_sys_val; +u64 id_aa64mmfr1_el1_sys_val; + +static const u8 pkvm_hyp_id = 1; + +static void *host_s2_zalloc_pages_exact(size_t size) +{ + return hyp_alloc_pages(&host_s2_mem, get_order(size)); +} + +static void *host_s2_zalloc_page(void *pool) +{ + return hyp_alloc_pages(pool, 0); +} + +static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool) +{ + unsigned long nr_pages, pfn; + int ret; + + pfn = hyp_virt_to_pfn(mem_pgt_pool); + nr_pages = host_s2_mem_pgtable_pages(); + ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0); + if (ret) + return ret; + + pfn = hyp_virt_to_pfn(dev_pgt_pool); + nr_pages = host_s2_dev_pgtable_pages(); + ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0); + if (ret) + return ret; + + host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) { + .zalloc_pages_exact = host_s2_zalloc_pages_exact, + .zalloc_page = host_s2_zalloc_page, + .phys_to_virt = hyp_phys_to_virt, + .virt_to_phys = hyp_virt_to_phys, + .page_count = hyp_page_count, + .get_page = hyp_get_page, + .put_page = hyp_put_page, + }; + + return 0; +} + +static void prepare_host_vtcr(void) +{ + u32 parange, phys_shift; + + /* The host stage 2 is id-mapped, so use parange for T0SZ */ + parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val); + phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); + + host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, + id_aa64mmfr1_el1_sys_val, phys_shift); +} + +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool) +{ + struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + int ret; + + prepare_host_vtcr(); + hyp_spin_lock_init(&host_kvm.lock); + + ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool); + if (ret) + return ret; + + ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch, + &host_kvm.mm_ops, KVM_HOST_S2_FLAGS); + if (ret) + return ret; + + mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); + mmu->arch = &host_kvm.arch; + mmu->pgt = &host_kvm.pgt; + mmu->vmid.vmid_gen = 0; + mmu->vmid.vmid = 0; + + return 0; +} + +int __pkvm_prot_finalize(void) +{ + struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu; + struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); + + params->vttbr = kvm_get_vttbr(mmu); + params->vtcr = host_kvm.arch.vtcr; + params->hcr_el2 |= HCR_VM; + kvm_flush_dcache_to_poc(params, sizeof(*params)); + + write_sysreg(params->hcr_el2, hcr_el2); + __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr); + + /* + * Make sure to have an ISB before the TLB maintenance below but only + * when __load_stage2() doesn't include one already. 
+ */ + asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT)); + + /* Invalidate stale HCR bits that may be cached in TLBs */ + __tlbi(vmalls12e1); + dsb(nsh); + isb(); + + return 0; +} + +static int host_stage2_unmap_dev_all(void) +{ + struct kvm_pgtable *pgt = &host_kvm.pgt; + struct memblock_region *reg; + u64 addr = 0; + int i, ret; + + /* Unmap all non-memory regions to recycle the pages */ + for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) { + reg = &hyp_memory[i]; + ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr); + if (ret) + return ret; + } + return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr); +} + +static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range) +{ + int cur, left = 0, right = hyp_memblock_nr; + struct memblock_region *reg; + phys_addr_t end; + + range->start = 0; + range->end = ULONG_MAX; + + /* The list of memblock regions is sorted, binary search it */ + while (left < right) { + cur = (left + right) >> 1; + reg = &hyp_memory[cur]; + end = reg->base + reg->size; + if (addr < reg->base) { + right = cur; + range->end = reg->base; + } else if (addr >= end) { + left = cur + 1; + range->start = end; + } else { + range->start = reg->base; + range->end = end; + return true; + } + } + + return false; +} + +static bool range_is_memory(u64 start, u64 end) +{ + struct kvm_mem_range r1, r2; + + if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2)) + return false; + if (r1.start != r2.start) + return false; + + return true; +} + +static inline int __host_stage2_idmap(u64 start, u64 end, + enum kvm_pgtable_prot prot, + struct hyp_pool *pool) +{ + return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start, + prot, pool); +} + +static int host_stage2_idmap(u64 addr) +{ + enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; + struct kvm_mem_range range; + bool is_memory = find_mem_range(addr, &range); + struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev; + int ret; + + if (is_memory) + prot |= KVM_PGTABLE_PROT_X; + + hyp_spin_lock(&host_kvm.lock); + ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range); + if (ret) + goto unlock; + + ret = __host_stage2_idmap(range.start, range.end, prot, pool); + if (is_memory || ret != -ENOMEM) + goto unlock; + + /* + * host_s2_mem has been provided with enough pages to cover all of + * memory with page granularity, so we should never hit the ENOMEM case. + * However, it is difficult to know how much of the MMIO range we will + * need to cover upfront, so we may need to 'recycle' the pages if we + * run out. + */ + ret = host_stage2_unmap_dev_all(); + if (ret) + goto unlock; + + ret = __host_stage2_idmap(range.start, range.end, prot, pool); + +unlock: + hyp_spin_unlock(&host_kvm.lock); + + return ret; +} + +int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end) +{ + int ret; + + /* + * host_stage2_unmap_dev_all() currently relies on MMIO mappings being + * non-persistent, so don't allow changing page ownership in MMIO range. + */ + if (!range_is_memory(start, end)) + return -EINVAL; + + hyp_spin_lock(&host_kvm.lock); + ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start, + &host_s2_mem, pkvm_hyp_id); + hyp_spin_unlock(&host_kvm.lock); + + return ret != -EAGAIN ? 
ret : 0; +} + +void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) +{ + struct kvm_vcpu_fault_info fault; + u64 esr, addr; + int ret = 0; + + esr = read_sysreg_el2(SYS_ESR); + if (!__get_fault_info(esr, &fault)) + hyp_panic(); + + addr = (fault.hpfar_el2 & HPFAR_MASK) << 8; + ret = host_stage2_idmap(addr); + if (ret && ret != -EAGAIN) + hyp_panic(); +} diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c new file mode 100644 index 000000000000..a8efdf0f9003 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/mm.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> +#include <asm/spectre.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mm.h> +#include <nvhe/spinlock.h> + +struct kvm_pgtable pkvm_pgtable; +hyp_spinlock_t pkvm_pgd_lock; +u64 __io_map_base; + +struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS]; +unsigned int hyp_memblock_nr; + +int __pkvm_create_mappings(unsigned long start, unsigned long size, + unsigned long phys, enum kvm_pgtable_prot prot) +{ + int err; + + hyp_spin_lock(&pkvm_pgd_lock); + err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot); + hyp_spin_unlock(&pkvm_pgd_lock); + + return err; +} + +unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size, + enum kvm_pgtable_prot prot) +{ + unsigned long addr; + int err; + + hyp_spin_lock(&pkvm_pgd_lock); + + size = PAGE_ALIGN(size + offset_in_page(phys)); + addr = __io_map_base; + __io_map_base += size; + + /* Are we overflowing on the vmemmap ? */ + if (__io_map_base > __hyp_vmemmap) { + __io_map_base -= size; + addr = (unsigned long)ERR_PTR(-ENOMEM); + goto out; + } + + err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot); + if (err) { + addr = (unsigned long)ERR_PTR(err); + goto out; + } + + addr = addr + offset_in_page(phys); +out: + hyp_spin_unlock(&pkvm_pgd_lock); + + return addr; +} + +int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot) +{ + unsigned long start = (unsigned long)from; + unsigned long end = (unsigned long)to; + unsigned long virt_addr; + phys_addr_t phys; + + start = start & PAGE_MASK; + end = PAGE_ALIGN(end); + + for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { + int err; + + phys = hyp_virt_to_phys((void *)virt_addr); + err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot); + if (err) + return err; + } + + return 0; +} + +int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back) +{ + unsigned long start, end; + + hyp_vmemmap_range(phys, size, &start, &end); + + return __pkvm_create_mappings(start, end - start, back, PAGE_HYP); +} + +static void *__hyp_bp_vect_base; +int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot) +{ + void *vector; + + switch (slot) { + case HYP_VECTOR_DIRECT: { + vector = __kvm_hyp_vector; + break; + } + case HYP_VECTOR_SPECTRE_DIRECT: { + vector = __bp_harden_hyp_vecs; + break; + } + case HYP_VECTOR_INDIRECT: + case HYP_VECTOR_SPECTRE_INDIRECT: { + vector = (void *)__hyp_bp_vect_base; + break; + } + default: + return -EINVAL; + } + + vector = __kvm_vector_slot2addr(vector, slot); + *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector; + + return 0; +} + +int hyp_map_vectors(void) +{ + phys_addr_t phys; + void *bp_base; + + if (!cpus_have_const_cap(ARM64_SPECTRE_V3A)) + 
return 0; + + phys = __hyp_pa(__bp_harden_hyp_vecs); + bp_base = (void *)__pkvm_create_private_mapping(phys, + __BP_HARDEN_HYP_VECS_SZ, + PAGE_HYP_EXEC); + if (IS_ERR_OR_NULL(bp_base)) + return PTR_ERR(bp_base); + + __hyp_bp_vect_base = bp_base; + + return 0; +} + +int hyp_create_idmap(u32 hyp_va_bits) +{ + unsigned long start, end; + + start = hyp_virt_to_phys((void *)__hyp_idmap_text_start); + start = ALIGN_DOWN(start, PAGE_SIZE); + + end = hyp_virt_to_phys((void *)__hyp_idmap_text_end); + end = ALIGN(end, PAGE_SIZE); + + /* + * One half of the VA space is reserved to linearly map portions of + * memory -- see va_layout.c for more details. The other half of the VA + * space contains the trampoline page, and needs some care. Split that + * second half in two and find the quarter of VA space not conflicting + * with the idmap to place the IOs and the vmemmap. IOs use the lower + * half of the quarter and the vmemmap the upper half. + */ + __io_map_base = start & BIT(hyp_va_bits - 2); + __io_map_base ^= BIT(hyp_va_bits - 2); + __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3); + + return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC); +} diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c new file mode 100644 index 000000000000..237e03bf0cb1 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <asm/kvm_hyp.h> +#include <nvhe/gfp.h> + +u64 __hyp_vmemmap; + +/* + * Index the hyp_vmemmap to find a potential buddy page, but make no assumption + * about its current state. + * + * Example buddy-tree for a 4-pages physically contiguous pool: + * + * o : Page 3 + * / + * o-o : Page 2 + * / + * / o : Page 1 + * / / + * o---o-o : Page 0 + * Order 2 1 0 + * + * Example of requests on this pool: + * __find_buddy_nocheck(pool, page 0, order 0) => page 1 + * __find_buddy_nocheck(pool, page 0, order 1) => page 2 + * __find_buddy_nocheck(pool, page 1, order 0) => page 0 + * __find_buddy_nocheck(pool, page 2, order 0) => page 3 + */ +static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + phys_addr_t addr = hyp_page_to_phys(p); + + addr ^= (PAGE_SIZE << order); + + /* + * Don't return a page outside the pool range -- it belongs to + * something else and may not be mapped in hyp_vmemmap. + */ + if (addr < pool->range_start || addr >= pool->range_end) + return NULL; + + return hyp_phys_to_page(addr); +} + +/* Find a buddy page currently available for allocation */ +static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order); + + if (!buddy || buddy->order != order || list_empty(&buddy->node)) + return NULL; + + return buddy; + +} + +static void __hyp_attach_page(struct hyp_pool *pool, + struct hyp_page *p) +{ + unsigned int order = p->order; + struct hyp_page *buddy; + + memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order); + + /* + * Only the first struct hyp_page of a high-order page (otherwise known + * as the 'head') should have p->order set. The non-head pages should + * have p->order = HYP_NO_ORDER. Here @p may no longer be the head + * after coallescing, so make sure to mark it HYP_NO_ORDER proactively. 
+ */ + p->order = HYP_NO_ORDER; + for (; (order + 1) < pool->max_order; order++) { + buddy = __find_buddy_avail(pool, p, order); + if (!buddy) + break; + + /* Take the buddy out of its list, and coallesce with @p */ + list_del_init(&buddy->node); + buddy->order = HYP_NO_ORDER; + p = min(p, buddy); + } + + /* Mark the new head, and insert it */ + p->order = order; + list_add_tail(&p->node, &pool->free_area[order]); +} + +static void hyp_attach_page(struct hyp_page *p) +{ + struct hyp_pool *pool = hyp_page_to_pool(p); + + hyp_spin_lock(&pool->lock); + __hyp_attach_page(pool, p); + hyp_spin_unlock(&pool->lock); +} + +static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool, + struct hyp_page *p, + unsigned int order) +{ + struct hyp_page *buddy; + + list_del_init(&p->node); + while (p->order > order) { + /* + * The buddy of order n - 1 currently has HYP_NO_ORDER as it + * is covered by a higher-level page (whose head is @p). Use + * __find_buddy_nocheck() to find it and inject it in the + * free_list[n - 1], effectively splitting @p in half. + */ + p->order--; + buddy = __find_buddy_nocheck(pool, p, p->order); + buddy->order = p->order; + list_add_tail(&buddy->node, &pool->free_area[buddy->order]); + } + + return p; +} + +void hyp_put_page(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + if (hyp_page_ref_dec_and_test(p)) + hyp_attach_page(p); +} + +void hyp_get_page(void *addr) +{ + struct hyp_page *p = hyp_virt_to_page(addr); + + hyp_page_ref_inc(p); +} + +void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order) +{ + unsigned int i = order; + struct hyp_page *p; + + hyp_spin_lock(&pool->lock); + + /* Look for a high-enough-order page */ + while (i < pool->max_order && list_empty(&pool->free_area[i])) + i++; + if (i >= pool->max_order) { + hyp_spin_unlock(&pool->lock); + return NULL; + } + + /* Extract it from the tree at the right order */ + p = list_first_entry(&pool->free_area[i], struct hyp_page, node); + p = __hyp_extract_page(pool, p, order); + + hyp_spin_unlock(&pool->lock); + hyp_set_page_refcounted(p); + + return hyp_page_to_virt(p); +} + +int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, + unsigned int reserved_pages) +{ + phys_addr_t phys = hyp_pfn_to_phys(pfn); + struct hyp_page *p; + int i; + + hyp_spin_lock_init(&pool->lock); + pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT)); + for (i = 0; i < pool->max_order; i++) + INIT_LIST_HEAD(&pool->free_area[i]); + pool->range_start = phys; + pool->range_end = phys + (nr_pages << PAGE_SHIFT); + + /* Init the vmemmap portion */ + p = hyp_phys_to_page(phys); + memset(p, 0, sizeof(*p) * nr_pages); + for (i = 0; i < nr_pages; i++) { + p[i].pool = pool; + INIT_LIST_HEAD(&p[i].node); + } + + /* Attach the unused pages to the buddy tree */ + for (i = reserved_pages; i < nr_pages; i++) + __hyp_attach_page(pool, &p[i]); + + return 0; +} diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index 63de71c0481e..08508783ec3d 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -11,6 +11,7 @@ #include <linux/kvm_host.h> #include <uapi/linux/psci.h> +#include <nvhe/memory.h> #include <nvhe/trap_handler.h> void kvm_hyp_cpu_entry(unsigned long r0); @@ -20,9 +21,6 @@ void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt); /* Config options set by the host. 
*/ struct kvm_host_psci_config __ro_after_init kvm_host_psci_config; -s64 __ro_after_init hyp_physvirt_offset; - -#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset) #define INVALID_CPU_ID UINT_MAX diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c new file mode 100644 index 000000000000..7488f53b0aa2 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_hyp.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_pgtable.h> + +#include <nvhe/early_alloc.h> +#include <nvhe/gfp.h> +#include <nvhe/memory.h> +#include <nvhe/mem_protect.h> +#include <nvhe/mm.h> +#include <nvhe/trap_handler.h> + +struct hyp_pool hpool; +struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; +unsigned long hyp_nr_cpus; + +#define hyp_percpu_size ((unsigned long)__per_cpu_end - \ + (unsigned long)__per_cpu_start) + +static void *vmemmap_base; +static void *hyp_pgt_base; +static void *host_s2_mem_pgt_base; +static void *host_s2_dev_pgt_base; + +static int divide_memory_pool(void *virt, unsigned long size) +{ + unsigned long vstart, vend, nr_pages; + + hyp_early_alloc_init(virt, size); + + hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend); + nr_pages = (vend - vstart) >> PAGE_SHIFT; + vmemmap_base = hyp_early_alloc_contig(nr_pages); + if (!vmemmap_base) + return -ENOMEM; + + nr_pages = hyp_s1_pgtable_pages(); + hyp_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!hyp_pgt_base) + return -ENOMEM; + + nr_pages = host_s2_mem_pgtable_pages(); + host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!host_s2_mem_pgt_base) + return -ENOMEM; + + nr_pages = host_s2_dev_pgtable_pages(); + host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages); + if (!host_s2_dev_pgt_base) + return -ENOMEM; + + return 0; +} + +static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, + unsigned long *per_cpu_base, + u32 hyp_va_bits) +{ + void *start, *end, *virt = hyp_phys_to_virt(phys); + unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT; + int ret, i; + + /* Recreate the hyp page-table using the early page allocator */ + hyp_early_alloc_init(hyp_pgt_base, pgt_size); + ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits, + &hyp_early_alloc_mm_ops); + if (ret) + return ret; + + ret = hyp_create_idmap(hyp_va_bits); + if (ret) + return ret; + + ret = hyp_map_vectors(); + if (ret) + return ret; + + ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base)); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC); + if (ret) + return ret; + + ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP); + if (ret) + return ret; + + ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO); + if (ret) + return ret; + + ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP); + if (ret) + return ret; + + for (i = 0; i < hyp_nr_cpus; i++) { + start = (void *)kern_hyp_va(per_cpu_base[i]); + end = start + PAGE_ALIGN(hyp_percpu_size); + ret = pkvm_create_mappings(start, end, PAGE_HYP); + if (ret) + return ret; + + end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va; + start = end - PAGE_SIZE; + ret = 
pkvm_create_mappings(start, end, PAGE_HYP); + if (ret) + return ret; + } + + return 0; +} + +static void update_nvhe_init_params(void) +{ + struct kvm_nvhe_init_params *params; + unsigned long i; + + for (i = 0; i < hyp_nr_cpus; i++) { + params = per_cpu_ptr(&kvm_init_params, i); + params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd); + __flush_dcache_area(params, sizeof(*params)); + } +} + +static void *hyp_zalloc_hyp_page(void *arg) +{ + return hyp_alloc_pages(&hpool, 0); +} + +void __noreturn __pkvm_init_finalise(void) +{ + struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data); + struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt; + unsigned long nr_pages, reserved_pages, pfn; + int ret; + + /* Now that the vmemmap is backed, install the full-fledged allocator */ + pfn = hyp_virt_to_pfn(hyp_pgt_base); + nr_pages = hyp_s1_pgtable_pages(); + reserved_pages = hyp_early_alloc_nr_used_pages(); + ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages); + if (ret) + goto out; + + ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base); + if (ret) + goto out; + + pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) { + .zalloc_page = hyp_zalloc_hyp_page, + .phys_to_virt = hyp_phys_to_virt, + .virt_to_phys = hyp_virt_to_phys, + .get_page = hyp_get_page, + .put_page = hyp_put_page, + }; + pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; + +out: + /* + * We tail-called to here from handle___pkvm_init() and will not return, + * so make sure to propagate the return value to the host. + */ + cpu_reg(host_ctxt, 1) = ret; + + __host_enter(host_ctxt); +} + +int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, + unsigned long *per_cpu_base, u32 hyp_va_bits) +{ + struct kvm_nvhe_init_params *params; + void *virt = hyp_phys_to_virt(phys); + void (*fn)(phys_addr_t params_pa, void *finalize_fn_va); + int ret; + + if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size)) + return -EINVAL; + + hyp_spin_lock_init(&pkvm_pgd_lock); + hyp_nr_cpus = nr_cpus; + + ret = divide_memory_pool(virt, size); + if (ret) + return ret; + + ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits); + if (ret) + return ret; + + update_nvhe_init_params(); + + /* Jump in the idmap page to switch to the new page-tables */ + params = this_cpu_ptr(&kvm_init_params); + fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd); + fn(__hyp_pa(params), __pkvm_init_finalise); + + unreachable(); +} diff --git a/arch/arm64/kvm/hyp/nvhe/stub.c b/arch/arm64/kvm/hyp/nvhe/stub.c new file mode 100644 index 000000000000..c0aa6bbfd79d --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/stub.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Stubs for out-of-line function calls caused by re-using kernel + * infrastructure at EL2. 
+ * + * Copyright (C) 2020 - Google LLC + */ + +#include <linux/list.h> + +#ifdef CONFIG_DEBUG_LIST +bool __list_add_valid(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + return true; +} + +bool __list_del_entry_valid(struct list_head *entry) +{ + return true; +} +#endif diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 736805232521..e9f6ea704d07 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -28,6 +28,8 @@ #include <asm/processor.h> #include <asm/thread_info.h> +#include <nvhe/mem_protect.h> + /* Non-VHE specific context */ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); @@ -41,9 +43,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = CPTR_EL2_DEFAULT; - val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM; + val |= CPTR_EL2_TTA | CPTR_EL2_TAM; if (!update_fp_enabled(vcpu)) { - val |= CPTR_EL2_TFP; + val |= CPTR_EL2_TFP | CPTR_EL2_TZ; __activate_traps_fpsimd32(vcpu); } @@ -68,7 +70,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { extern char __kvm_hyp_host_vector[]; - u64 mdcr_el2; + u64 mdcr_el2, cptr; ___deactivate_traps(vcpu); @@ -98,17 +100,14 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT; write_sysreg(mdcr_el2, mdcr_el2); - if (is_protected_kvm_enabled()) - write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2); - else - write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); - write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); - write_sysreg(__kvm_hyp_host_vector, vbar_el2); -} + write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); -static void __load_host_stage2(void) -{ - write_sysreg(0, vttbr_el2); + cptr = CPTR_EL2_DEFAULT; + if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)) + cptr |= CPTR_EL2_TZ; + + write_sysreg(cptr, cptr_el2); + write_sysreg(__kvm_hyp_host_vector, vbar_el2); } /* Save VGICv3 state on non-VHE systems */ diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 229b06748c20..83dc3b271bc5 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -8,6 +8,8 @@ #include <asm/kvm_mmu.h> #include <asm/tlbflush.h> +#include <nvhe/mem_protect.h> + struct tlb_inv_context { u64 tcr; }; @@ -43,7 +45,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, static void __tlb_switch_to_host(struct tlb_inv_context *cxt) { - write_sysreg(0, vttbr_el2); + __load_host_stage2(); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { /* Ensure write of the host VMID */ diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 926fc07074f5..c37c1dc4feaf 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -9,8 +9,7 @@ #include <linux/bitfield.h> #include <asm/kvm_pgtable.h> - -#define KVM_PGTABLE_MAX_LEVELS 4U +#include <asm/stage2_pgtable.h> #define KVM_PTE_VALID BIT(0) @@ -49,6 +48,11 @@ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ KVM_PTE_LEAF_ATTR_HI_S2_XN) +#define KVM_PTE_LEAF_ATTR_S2_IGNORED GENMASK(58, 55) + +#define KVM_INVALID_PTE_OWNER_MASK GENMASK(63, 56) +#define KVM_MAX_OWNER_ID 1 + struct kvm_pgtable_walk_data { struct kvm_pgtable *pgt; struct kvm_pgtable_walker *walker; @@ -68,21 +72,36 @@ static u64 kvm_granule_size(u32 level) return BIT(kvm_granule_shift(level)); } -static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, 
u32 level) +#define KVM_PHYS_INVALID (-1ULL) + +static bool kvm_phys_is_valid(u64 phys) { - u64 granule = kvm_granule_size(level); + return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX)); +} +static bool kvm_level_supports_block_mapping(u32 level) +{ /* * Reject invalid block mappings and don't bother with 4TB mappings for * 52-bit PAs. */ - if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1)) + return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1)); +} + +static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level) +{ + u64 granule = kvm_granule_size(level); + + if (!kvm_level_supports_block_mapping(level)) return false; if (granule > (end - addr)) return false; - return IS_ALIGNED(addr, granule) && IS_ALIGNED(phys, granule); + if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) + return false; + + return IS_ALIGNED(addr, granule); } static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level) @@ -152,20 +171,20 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa) return pte; } -static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte) +static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops) { - return __va(kvm_pte_to_phys(pte)); + return mm_ops->phys_to_virt(kvm_pte_to_phys(pte)); } -static void kvm_set_invalid_pte(kvm_pte_t *ptep) +static void kvm_clear_pte(kvm_pte_t *ptep) { - kvm_pte_t pte = *ptep; - WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID); + WRITE_ONCE(*ptep, 0); } -static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp) +static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp, + struct kvm_pgtable_mm_ops *mm_ops) { - kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(__pa(childp)); + kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp)); pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE); pte |= KVM_PTE_VALID; @@ -187,6 +206,11 @@ static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level) return pte; } +static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id) +{ + return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id); +} + static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag) @@ -228,7 +252,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, goto out; } - childp = kvm_pte_follow(pte); + childp = kvm_pte_follow(pte, data->pgt->mm_ops); ret = __kvm_pgtable_walk(data, childp, level + 1); if (ret) goto out; @@ -303,12 +327,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, } struct hyp_map_data { - u64 phys; - kvm_pte_t attr; + u64 phys; + kvm_pte_t attr; + struct kvm_pgtable_mm_ops *mm_ops; }; -static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, - struct hyp_map_data *data) +static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep) { bool device = prot & KVM_PGTABLE_PROT_DEVICE; u32 mtype = device ? 
MT_DEVICE_nGnRE : MT_NORMAL; @@ -333,7 +357,8 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot, attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap); attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh); attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF; - data->attr = attr; + *ptep = attr; + return 0; } @@ -359,6 +384,8 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { kvm_pte_t *childp; + struct hyp_map_data *data = arg; + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg)) return 0; @@ -366,11 +393,11 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1)) return -EINVAL; - childp = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL); + childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL); if (!childp) return -ENOMEM; - kvm_set_table_pte(ptep, childp); + kvm_set_table_pte(ptep, childp, mm_ops); return 0; } @@ -380,6 +407,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, int ret; struct hyp_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), + .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = hyp_map_walker, @@ -387,7 +415,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, .arg = &map_data, }; - ret = hyp_map_set_prot_attr(prot, &map_data); + ret = hyp_set_prot_attr(prot, &map_data.attr); if (ret) return ret; @@ -397,16 +425,18 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, return ret; } -int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) +int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, + struct kvm_pgtable_mm_ops *mm_ops) { u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits); - pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL); + pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL); if (!pgt->pgd) return -ENOMEM; pgt->ia_bits = va_bits; pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels; + pgt->mm_ops = mm_ops; pgt->mmu = NULL; return 0; } @@ -414,7 +444,9 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { - free_page((unsigned long)kvm_pte_follow(*ptep)); + struct kvm_pgtable_mm_ops *mm_ops = arg; + + mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops)); return 0; } @@ -423,29 +455,75 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) struct kvm_pgtable_walker walker = { .cb = hyp_free_walker, .flags = KVM_PGTABLE_WALK_TABLE_POST, + .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); - free_page((unsigned long)pgt->pgd); + pgt->mm_ops->put_page(pgt->pgd); pgt->pgd = NULL; } struct stage2_map_data { u64 phys; kvm_pte_t attr; + u8 owner_id; kvm_pte_t *anchor; + kvm_pte_t *childp; struct kvm_s2_mmu *mmu; - struct kvm_mmu_memory_cache *memcache; + void *memcache; + + struct kvm_pgtable_mm_ops *mm_ops; }; -static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, - struct stage2_map_data *data) +u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) +{ + u64 vtcr = VTCR_EL2_FLAGS; + u8 lvls; + + vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT; + vtcr |= VTCR_EL2_T0SZ(phys_shift); + /* + * Use a minimum 2 level page table to prevent splitting + * host PMD huge pages at stage2. 
+ */ + lvls = stage2_pgtable_levels(phys_shift); + if (lvls < 2) + lvls = 2; + vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); + + /* + * Enable the Hardware Access Flag management, unconditionally + * on all CPUs. The features is RES0 on CPUs without the support + * and must be ignored by the CPUs. + */ + vtcr |= VTCR_EL2_HA; + + /* Set the vmid bits */ + vtcr |= (get_vmid_bits(mmfr1) == 16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; + + return vtcr; +} + +static bool stage2_has_fwb(struct kvm_pgtable *pgt) +{ + if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + return false; + + return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); +} + +#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt)) + +static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, + kvm_pte_t *ptep) { bool device = prot & KVM_PGTABLE_PROT_DEVICE; - kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) : - PAGE_S2_MEMATTR(NORMAL); + kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) : + KVM_S2_MEMATTR(pgt, NORMAL); u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS; if (!(prot & KVM_PGTABLE_PROT_X)) @@ -461,44 +539,78 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot, attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh); attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF; - data->attr = attr; + *ptep = attr; + return 0; } +static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new) +{ + if (!kvm_pte_valid(old) || !kvm_pte_valid(new)) + return true; + + return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)); +} + +static bool stage2_pte_is_counted(kvm_pte_t pte) +{ + /* + * The refcount tracks valid entries as well as invalid entries if they + * encode ownership of a page to another entity than the page-table + * owner, whose id is 0. + */ + return !!pte; +} + +static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr, + u32 level, struct kvm_pgtable_mm_ops *mm_ops) +{ + /* + * Clear the existing PTE, and perform break-before-make with + * TLB maintenance if it was valid. + */ + if (kvm_pte_valid(*ptep)) { + kvm_clear_pte(ptep); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level); + } + + mm_ops->put_page(ptep); +} + static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { kvm_pte_t new, old = *ptep; u64 granule = kvm_granule_size(level), phys = data->phys; - struct page *page = virt_to_page(ptep); + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; if (!kvm_block_mapping_supported(addr, end, phys, level)) return -E2BIG; - new = kvm_init_valid_leaf_pte(phys, data->attr, level); - if (kvm_pte_valid(old)) { + if (kvm_phys_is_valid(phys)) + new = kvm_init_valid_leaf_pte(phys, data->attr, level); + else + new = kvm_init_invalid_leaf_owner(data->owner_id); + + if (stage2_pte_is_counted(old)) { /* * Skip updating the PTE if we are trying to recreate the exact * same mapping or only change the access permissions. Instead, * the vCPU will exit one more time from guest if still needed * and then go through the path of relaxing permissions. */ - if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS))) + if (!stage2_pte_needs_update(old, new)) return -EAGAIN; - /* - * There's an existing different valid leaf entry, so perform - * break-before-make. 
- */ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - put_page(page); + stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); } smp_store_release(ptep, new); - get_page(page); - data->phys += granule; + if (stage2_pte_is_counted(new)) + mm_ops->get_page(ptep); + if (kvm_phys_is_valid(phys)) + data->phys += granule; return 0; } @@ -512,7 +624,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, if (!kvm_block_mapping_supported(addr, end, data->phys, level)) return 0; - kvm_set_invalid_pte(ptep); + data->childp = kvm_pte_follow(*ptep, data->mm_ops); + kvm_clear_pte(ptep); /* * Invalidate the whole stage-2, as we may have numerous leaf @@ -527,13 +640,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level, static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { - int ret; + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; kvm_pte_t *childp, pte = *ptep; - struct page *page = virt_to_page(ptep); + int ret; if (data->anchor) { - if (kvm_pte_valid(pte)) - put_page(page); + if (stage2_pte_is_counted(pte)) + mm_ops->put_page(ptep); return 0; } @@ -548,7 +661,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (!data->memcache) return -ENOMEM; - childp = kvm_mmu_memory_cache_alloc(data->memcache); + childp = mm_ops->zalloc_page(data->memcache); if (!childp) return -ENOMEM; @@ -557,14 +670,11 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * a table. Accesses beyond 'end' that fall within the new table * will be mapped lazily. */ - if (kvm_pte_valid(pte)) { - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level); - put_page(page); - } + if (stage2_pte_is_counted(pte)) + stage2_put_pte(ptep, data->mmu, addr, level, mm_ops); - kvm_set_table_pte(ptep, childp); - get_page(page); + kvm_set_table_pte(ptep, childp, mm_ops); + mm_ops->get_page(ptep); return 0; } @@ -573,19 +683,25 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct stage2_map_data *data) { + struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops; + kvm_pte_t *childp; int ret = 0; if (!data->anchor) return 0; - free_page((unsigned long)kvm_pte_follow(*ptep)); - put_page(virt_to_page(ptep)); - if (data->anchor == ptep) { + childp = data->childp; data->anchor = NULL; + data->childp = NULL; ret = stage2_map_walk_leaf(addr, end, level, ptep, data); + } else { + childp = kvm_pte_follow(*ptep, mm_ops); } + mm_ops->put_page(childp); + mm_ops->put_page(ptep); + return ret; } @@ -627,13 +743,14 @@ static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, - struct kvm_mmu_memory_cache *mc) + void *mc) { int ret; struct stage2_map_data map_data = { .phys = ALIGN_DOWN(phys, PAGE_SIZE), .mmu = pgt->mmu, .memcache = mc, + .mm_ops = pgt->mm_ops, }; struct kvm_pgtable_walker walker = { .cb = stage2_map_walker, @@ -643,7 +760,10 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, .arg = &map_data, }; - ret = stage2_map_set_prot_attr(prot, &map_data); + if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys))) + return -EINVAL; + + ret = stage2_set_prot_attr(pgt, prot, &map_data.attr); if (ret) return ret; @@ -652,38 +772,63 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, return ret; } -static 
void stage2_flush_dcache(void *addr, u64 size) +int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size, + void *mc, u8 owner_id) { - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) - return; + int ret; + struct stage2_map_data map_data = { + .phys = KVM_PHYS_INVALID, + .mmu = pgt->mmu, + .memcache = mc, + .mm_ops = pgt->mm_ops, + .owner_id = owner_id, + }; + struct kvm_pgtable_walker walker = { + .cb = stage2_map_walker, + .flags = KVM_PGTABLE_WALK_TABLE_PRE | + KVM_PGTABLE_WALK_LEAF | + KVM_PGTABLE_WALK_TABLE_POST, + .arg = &map_data, + }; + + if (owner_id > KVM_MAX_OWNER_ID) + return -EINVAL; - __flush_dcache_area(addr, size); + ret = kvm_pgtable_walk(pgt, addr, size, &walker); + return ret; } -static bool stage2_pte_cacheable(kvm_pte_t pte) +static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte) { u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR; - return memattr == PAGE_S2_MEMATTR(NORMAL); + return memattr == KVM_S2_MEMATTR(pgt, NORMAL); } static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { - struct kvm_s2_mmu *mmu = arg; + struct kvm_pgtable *pgt = arg; + struct kvm_s2_mmu *mmu = pgt->mmu; + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; kvm_pte_t pte = *ptep, *childp = NULL; bool need_flush = false; - if (!kvm_pte_valid(pte)) + if (!kvm_pte_valid(pte)) { + if (stage2_pte_is_counted(pte)) { + kvm_clear_pte(ptep); + mm_ops->put_page(ptep); + } return 0; + } if (kvm_pte_table(pte, level)) { - childp = kvm_pte_follow(pte); + childp = kvm_pte_follow(pte, mm_ops); - if (page_count(virt_to_page(childp)) != 1) + if (mm_ops->page_count(childp) != 1) return 0; - } else if (stage2_pte_cacheable(pte)) { - need_flush = true; + } else if (stage2_pte_cacheable(pgt, pte)) { + need_flush = !stage2_has_fwb(pgt); } /* @@ -691,17 +836,15 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, * block entry and rely on the remaining portions being faulted * back lazily. 
*/ - kvm_set_invalid_pte(ptep); - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level); - put_page(virt_to_page(ptep)); + stage2_put_pte(ptep, mmu, addr, level, mm_ops); if (need_flush) { - stage2_flush_dcache(kvm_pte_follow(pte), + __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level)); } if (childp) - free_page((unsigned long)childp); + mm_ops->put_page(childp); return 0; } @@ -710,7 +853,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) { struct kvm_pgtable_walker walker = { .cb = stage2_unmap_walker, - .arg = pgt->mmu, + .arg = pgt, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, }; @@ -842,12 +985,14 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { + struct kvm_pgtable *pgt = arg; + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; kvm_pte_t pte = *ptep; - if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte)) + if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) return 0; - stage2_flush_dcache(kvm_pte_follow(pte), kvm_granule_size(level)); + __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level)); return 0; } @@ -856,30 +1001,35 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) struct kvm_pgtable_walker walker = { .cb = stage2_flush_walker, .flags = KVM_PGTABLE_WALK_LEAF, + .arg = pgt, }; - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + if (stage2_has_fwb(pgt)) return 0; return kvm_pgtable_walk(pgt, addr, size, &walker); } -int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm) +int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch, + struct kvm_pgtable_mm_ops *mm_ops, + enum kvm_pgtable_stage2_flags flags) { size_t pgd_sz; - u64 vtcr = kvm->arch.vtcr; + u64 vtcr = arch->vtcr; u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; - pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO); + pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz); if (!pgt->pgd) return -ENOMEM; pgt->ia_bits = ia_bits; pgt->start_level = start_level; - pgt->mmu = &kvm->arch.mmu; + pgt->mm_ops = mm_ops; + pgt->mmu = &arch->mmu; + pgt->flags = flags; /* Ensure zeroed PGD pages are visible to the hardware walker */ dsb(ishst); @@ -890,15 +1040,16 @@ static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, enum kvm_pgtable_walk_flags flag, void * const arg) { + struct kvm_pgtable_mm_ops *mm_ops = arg; kvm_pte_t pte = *ptep; - if (!kvm_pte_valid(pte)) + if (!stage2_pte_is_counted(pte)) return 0; - put_page(virt_to_page(ptep)); + mm_ops->put_page(ptep); if (kvm_pte_table(pte, level)) - free_page((unsigned long)kvm_pte_follow(pte)); + mm_ops->put_page(kvm_pte_follow(pte, mm_ops)); return 0; } @@ -910,10 +1061,85 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) .cb = stage2_free_walker, .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST, + .arg = pgt->mm_ops, }; WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; - free_pages_exact(pgt->pgd, pgd_sz); + pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz); pgt->pgd = NULL; } + +#define KVM_PTE_LEAF_S2_COMPAT_MASK (KVM_PTE_LEAF_ATTR_S2_PERMS | \ + KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \ + KVM_PTE_LEAF_ATTR_S2_IGNORED) + +static int stage2_check_permission_walker(u64 addr, u64 
end, u32 level, + kvm_pte_t *ptep, + enum kvm_pgtable_walk_flags flag, + void * const arg) +{ + kvm_pte_t old_attr, pte = *ptep, *new_attr = arg; + + /* + * Compatible mappings are either invalid and owned by the page-table + * owner (whose id is 0), or valid with matching permission attributes. + */ + if (kvm_pte_valid(pte)) { + old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK; + if (old_attr != *new_attr) + return -EEXIST; + } else if (pte) { + return -EEXIST; + } + + return 0; +} + +int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr, + enum kvm_pgtable_prot prot, + struct kvm_mem_range *range) +{ + kvm_pte_t attr; + struct kvm_pgtable_walker check_perm_walker = { + .cb = stage2_check_permission_walker, + .flags = KVM_PGTABLE_WALK_LEAF, + .arg = &attr, + }; + u64 granule, start, end; + u32 level; + int ret; + + ret = stage2_set_prot_attr(pgt, prot, &attr); + if (ret) + return ret; + attr &= KVM_PTE_LEAF_S2_COMPAT_MASK; + + for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) { + granule = kvm_granule_size(level); + start = ALIGN_DOWN(addr, granule); + end = start + granule; + + if (!kvm_level_supports_block_mapping(level)) + continue; + + if (start < range->start || range->end < end) + continue; + + /* + * Check the presence of existing mappings with incompatible + * permissions within the current block range, and try one level + * deeper if one is found. + */ + ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker); + if (ret != -EEXIST) + break; + } + + if (!ret) { + range->start = start; + range->end = end; + } + + return ret; +} diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c new file mode 100644 index 000000000000..83ca23ac259b --- /dev/null +++ b/arch/arm64/kvm/hyp/reserved_mem.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - Google LLC + * Author: Quentin Perret <qperret@google.com> + */ + +#include <linux/kvm_host.h> +#include <linux/memblock.h> +#include <linux/sort.h> + +#include <asm/kvm_host.h> + +#include <nvhe/memory.h> +#include <nvhe/mm.h> + +static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); +static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); + +phys_addr_t hyp_mem_base; +phys_addr_t hyp_mem_size; + +static int cmp_hyp_memblock(const void *p1, const void *p2) +{ + const struct memblock_region *r1 = p1; + const struct memblock_region *r2 = p2; + + return r1->base < r2->base ? 
-1 : (r1->base > r2->base); +} + +static void __init sort_memblock_regions(void) +{ + sort(hyp_memory, + *hyp_memblock_nr_ptr, + sizeof(struct memblock_region), + cmp_hyp_memblock, + NULL); +} + +static int __init register_memblock_regions(void) +{ + struct memblock_region *reg; + + for_each_mem_region(reg) { + if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS) + return -ENOMEM; + + hyp_memory[*hyp_memblock_nr_ptr] = *reg; + (*hyp_memblock_nr_ptr)++; + } + sort_memblock_regions(); + + return 0; +} + +void __init kvm_hyp_reserve(void) +{ + u64 nr_pages, prev, hyp_mem_pages = 0; + int ret; + + if (!is_hyp_mode_available() || is_kernel_in_hyp_mode()) + return; + + if (kvm_get_mode() != KVM_MODE_PROTECTED) + return; + + ret = register_memblock_regions(); + if (ret) { + *hyp_memblock_nr_ptr = 0; + kvm_err("Failed to register hyp memblocks: %d\n", ret); + return; + } + + hyp_mem_pages += hyp_s1_pgtable_pages(); + hyp_mem_pages += host_s2_mem_pgtable_pages(); + hyp_mem_pages += host_s2_dev_pgtable_pages(); + + /* + * The hyp_vmemmap needs to be backed by pages, but these pages + * themselves need to be present in the vmemmap, so compute the number + * of pages needed by looking for a fixed point. + */ + nr_pages = 0; + do { + prev = nr_pages; + nr_pages = hyp_mem_pages + prev; + nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE); + nr_pages += __hyp_pgtable_max_pages(nr_pages); + } while (nr_pages != prev); + hyp_mem_pages += nr_pages; + + /* + * Try to allocate a PMD-aligned region to reduce TLB pressure once + * this is unmapped from the host stage-2, and fallback to PAGE_SIZE. + */ + hyp_mem_size = hyp_mem_pages << PAGE_SHIFT; + hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(), + ALIGN(hyp_mem_size, PMD_SIZE), + PMD_SIZE); + if (!hyp_mem_base) + hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(), + hyp_mem_size, PAGE_SIZE); + else + hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE); + + if (!hyp_mem_base) { + kvm_err("Failed to reserve hyp memory\n"); + return; + } + memblock_reserve(hyp_mem_base, hyp_mem_size); + + kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20, + hyp_mem_base); +} diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8711894db8c2..0f16b70befa8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -88,6 +88,44 @@ static bool kvm_is_device_pfn(unsigned long pfn) return !pfn_valid(pfn); } +static void *stage2_memcache_zalloc_page(void *arg) +{ + struct kvm_mmu_memory_cache *mc = arg; + + /* Allocated with __GFP_ZERO, so no need to zero */ + return kvm_mmu_memory_cache_alloc(mc); +} + +static void *kvm_host_zalloc_pages_exact(size_t size) +{ + return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); +} + +static void kvm_host_get_page(void *addr) +{ + get_page(virt_to_page(addr)); +} + +static void kvm_host_put_page(void *addr) +{ + put_page(virt_to_page(addr)); +} + +static int kvm_host_page_count(void *addr) +{ + return page_count(virt_to_page(addr)); +} + +static phys_addr_t kvm_host_pa(void *addr) +{ + return __pa(addr); +} + +static void *kvm_host_va(phys_addr_t phys) +{ + return __va(phys); +} + /* * Unmapping vs dcache management: * @@ -127,7 +165,7 @@ static bool kvm_is_device_pfn(unsigned long pfn) static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, bool may_block) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); phys_addr_t end = start + size; assert_spin_locked(&kvm->mmu_lock); @@ -183,15 +221,39 @@ void free_hyp_pgds(void) if 
(hyp_pgtable) { kvm_pgtable_hyp_destroy(hyp_pgtable); kfree(hyp_pgtable); + hyp_pgtable = NULL; } mutex_unlock(&kvm_hyp_pgd_mutex); } +static bool kvm_host_owns_hyp_mappings(void) +{ + if (static_branch_likely(&kvm_protected_mode_initialized)) + return false; + + /* + * This can happen at boot time when __create_hyp_mappings() is called + * after the hyp protection has been enabled, but the static key has + * not been flipped yet. + */ + if (!hyp_pgtable && is_protected_kvm_enabled()) + return false; + + WARN_ON(!hyp_pgtable); + + return true; +} + static int __create_hyp_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot) { int err; + if (!kvm_host_owns_hyp_mappings()) { + return kvm_call_hyp_nvhe(__pkvm_create_mappings, + start, size, phys, prot); + } + mutex_lock(&kvm_hyp_pgd_mutex); err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot); mutex_unlock(&kvm_hyp_pgd_mutex); @@ -253,6 +315,16 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, unsigned long base; int ret = 0; + if (!kvm_host_owns_hyp_mappings()) { + base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping, + phys_addr, size, prot); + if (IS_ERR_OR_NULL((void *)base)) + return PTR_ERR((void *)base); + *haddr = base; + + return 0; + } + mutex_lock(&kvm_hyp_pgd_mutex); /* @@ -351,6 +423,17 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, return 0; } +static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { + .zalloc_page = stage2_memcache_zalloc_page, + .zalloc_pages_exact = kvm_host_zalloc_pages_exact, + .free_pages_exact = free_pages_exact, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .page_count = kvm_host_page_count, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + /** * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure * @kvm: The pointer to the KVM structure @@ -374,7 +457,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) if (!pgt) return -ENOMEM; - err = kvm_pgtable_stage2_init(pgt, kvm); + err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; @@ -387,7 +470,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) for_each_possible_cpu(cpu) *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; - mmu->kvm = kvm; + mmu->arch = &kvm->arch; mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); mmu->vmid.vmid_gen = 0; @@ -469,7 +552,7 @@ void stage2_unmap_vm(struct kvm *kvm) void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); struct kvm_pgtable *pgt = NULL; spin_lock(&kvm->mmu_lock); @@ -538,7 +621,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, */ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); } @@ -1208,10 +1291,22 @@ static int kvm_map_idmap_text(void) return err; } -int kvm_mmu_init(void) +static void *kvm_hyp_zalloc_page(void *arg) +{ + return (void *)get_zeroed_page(GFP_KERNEL); +} + +static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { + .zalloc_page = kvm_hyp_zalloc_page, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + +int kvm_mmu_init(u32 *hyp_va_bits) { int err; - u32 hyp_va_bits; hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); hyp_idmap_start = 
ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); @@ -1225,8 +1320,8 @@ int kvm_mmu_init(void) */ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); - hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); - kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits); + *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); + kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); kvm_debug("HYP VA range: %lx:%lx\n", kern_hyp_va(PAGE_OFFSET), @@ -1251,7 +1346,7 @@ int kvm_mmu_init(void) goto out; } - err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits); + err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); if (err) goto out_free_pgtable; diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c index 739164324afe..8f860ae56bb7 100644 --- a/arch/arm64/kvm/perf.c +++ b/arch/arm64/kvm/perf.c @@ -55,7 +55,8 @@ int kvm_perf_init(void) * hardware performance counters. This could ensure the presence of * a physical PMU and CONFIG_PERF_EVENT is selected. */ - if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0) + if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0 + && !is_protected_kvm_enabled()) static_branch_enable(&kvm_arm_pmu_available); return perf_register_guest_info_callbacks(&kvm_guest_cbs); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index faf32a44ba04..03a6c1f4a09a 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -33,7 +33,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) { struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); - if (!ctx || !kvm_pmu_switch_needed(attr)) + if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr)) return; if (!attr->exclude_host) @@ -49,7 +49,7 @@ void kvm_clr_pmu_events(u32 clr) { struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); - if (!ctx) + if (!kvm_arm_support_pmu_v3() || !ctx) return; ctx->pmu_events.events_host &= ~clr; @@ -172,7 +172,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) struct kvm_host_data *host; u32 events_guest, events_host; - if (!has_vhe()) + if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; preempt_disable(); @@ -193,7 +193,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) struct kvm_host_data *host; u32 events_guest, events_host; - if (!has_vhe()) + if (!kvm_arm_support_pmu_v3() || !has_vhe()) return; host = this_cpu_ptr_hyp_sym(kvm_host_data); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bd354cd45d28..86d94f616a1e 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -74,10 +74,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) if (!system_supports_sve()) return -EINVAL; - /* Verify that KVM startup enforced this when SVE was detected: */ - if (WARN_ON(!has_vhe())) - return -EINVAL; - vcpu->arch.sve_max_vl = kvm_sve_max_vl; /* @@ -333,19 +329,10 @@ int kvm_set_ipa_limit(void) return 0; } -/* - * Configure the VTCR_EL2 for this VM. The VTCR value is common - * across all the physical CPUs on the system. We use system wide - * sanitised values to fill in different fields, except for Hardware - * Management of Access Flags. HA Flag is set unconditionally on - * all CPUs, as it is safe to run with or without the feature and - * the bit is RES0 on CPUs that don't support it. 
- */ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) { - u64 vtcr = VTCR_EL2_FLAGS, mmfr0; - u32 parange, phys_shift; - u8 lvls; + u64 mmfr0, mmfr1; + u32 phys_shift; if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) return -EINVAL; @@ -365,33 +352,8 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) } mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); - parange = cpuid_feature_extract_unsigned_field(mmfr0, - ID_AA64MMFR0_PARANGE_SHIFT); - if (parange > ID_AA64MMFR0_PARANGE_MAX) - parange = ID_AA64MMFR0_PARANGE_MAX; - vtcr |= parange << VTCR_EL2_PS_SHIFT; - - vtcr |= VTCR_EL2_T0SZ(phys_shift); - /* - * Use a minimum 2 level page table to prevent splitting - * host PMD huge pages at stage2. - */ - lvls = stage2_pgtable_levels(phys_shift); - if (lvls < 2) - lvls = 2; - vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls); - - /* - * Enable the Hardware Access Flag management, unconditionally - * on all CPUs. The features is RES0 on CPUs without the support - * and must be ignored by the CPUs. - */ - vtcr |= VTCR_EL2_HA; + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); - /* Set the vmid bits */ - vtcr |= (kvm_get_vmid_bits() == 16) ? - VTCR_EL2_VS_16BIT : - VTCR_EL2_VS_8BIT; - kvm->arch.vtcr = vtcr; return 0; } diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index 978301392d67..acdb7b3cc97d 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -288,3 +288,10 @@ void kvm_get_kimage_voffset(struct alt_instr *alt, { generate_mov_q(kimage_voffset, origptr, updptr, nr_inst); } + +void kvm_compute_final_ctr_el0(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0), + origptr, updptr, nr_inst); +} |
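The stage-2 page-table changes above begin refcounting invalid-but-annotated PTEs: kvm_pgtable_stage2_set_owner() installs invalid entries whose bits [63:56] carry an owner ID, and stage2_pte_is_counted() treats any non-zero entry as holding a reference on its page-table page. Below is a minimal stand-alone C sketch of that encoding; the user-space framing and the owner-ID value are illustrative assumptions, not kernel code.

/*
 * Sketch of the annotated-invalid-PTE encoding used by
 * kvm_pgtable_stage2_set_owner(): valid bit clear, owner ID in bits [63:56].
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID			(1ULL << 0)
#define INVALID_PTE_OWNER_SHIFT		56
#define INVALID_PTE_OWNER_MASK		(0xffULL << INVALID_PTE_OWNER_SHIFT)

static uint64_t init_invalid_leaf_owner(uint8_t owner_id)
{
	return ((uint64_t)owner_id << INVALID_PTE_OWNER_SHIFT) &
	       INVALID_PTE_OWNER_MASK;
}

/* Mirrors stage2_pte_is_counted(): any non-zero entry holds a reference. */
static int pte_is_counted(uint64_t pte)
{
	return pte != 0;
}

int main(void)
{
	uint64_t pte = init_invalid_leaf_owner(1);	/* example owner ID */

	printf("pte=%#llx valid=%d counted=%d\n",
	       (unsigned long long)pte, !!(pte & PTE_VALID), pte_is_counted(pte));
	return 0;
}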
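kvm_pgtable_stage2_find_range() above searches level by level for the largest block-sized window around an address whose existing mappings are compatible with the requested permissions, dropping to a smaller granule whenever the permission walk reports a conflict. A minimal sketch of that search follows, assuming a 4K base page (candidate granules of 1G, 2M and 4K) and a stubbed-out compatibility check; it is not the kernel implementation.

/* Sketch of the block-range search behind kvm_pgtable_stage2_find_range(). */
#include <stdio.h>

struct mem_range { unsigned long start, end; };

/* Stand-in for the permission walk: pretend every window is compatible. */
static int window_is_compatible(unsigned long start, unsigned long granule)
{
	return 1;
}

static int find_range(unsigned long addr, struct mem_range *range)
{
	static const unsigned long granules[] = {
		1UL << 30, 1UL << 21, 1UL << 12,	/* 1G, 2M, 4K */
	};
	unsigned int i;

	for (i = 0; i < sizeof(granules) / sizeof(granules[0]); i++) {
		unsigned long granule = granules[i];
		unsigned long start = addr & ~(granule - 1);
		unsigned long end = start + granule;

		/* The window must fit entirely inside the allowed range. */
		if (start < range->start || range->end < end)
			continue;

		/* Try one level deeper if existing mappings conflict. */
		if (!window_is_compatible(start, granule))
			continue;

		range->start = start;
		range->end = end;
		return 0;
	}

	return -1;
}

int main(void)
{
	struct mem_range r = { .start = 0x40000000UL, .end = 0x80000000UL };

	if (!find_range(0x40200000UL, &r))
		printf("range: %#lx-%#lx\n", r.start, r.end);
	return 0;
}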
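kvm_hyp_reserve() above sizes the EL2 vmemmap with a fixed-point iteration: the struct hyp_page array must itself be backed by pages, which in turn need vmemmap entries and page-table pages of their own. The stand-alone sketch below shows how that iteration converges; the 16-byte struct hyp_page size and the page-table overhead estimate are assumptions made purely for illustration.

/*
 * Sketch of the fixed-point computation in kvm_hyp_reserve(), assuming 4K
 * pages and a hypothetical 16-byte struct hyp_page.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define HYP_PAGE_STRUCT_SIZE	16UL	/* assumed sizeof(struct hyp_page) */

/* Crude stand-in for __hyp_pgtable_max_pages(): page-table overhead. */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	return nr_pages / 512 + 4;
}

int main(void)
{
	unsigned long hyp_mem_pages = 100000;	/* example base allocation */
	unsigned long nr_pages = 0, prev;

	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		/* Pages needed to hold one struct hyp_page per page. */
		nr_pages = (nr_pages * HYP_PAGE_STRUCT_SIZE + PAGE_SIZE - 1) /
			   PAGE_SIZE;
		nr_pages += pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);

	printf("extra vmemmap/page-table pages: %lu\n", nr_pages);
	return 0;
}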