Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/acpi.h          |  4
-rw-r--r--  arch/arm64/include/asm/cache.h         |  6
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h       |  4
-rw-r--r--  arch/arm64/include/asm/cpucaps.h       |  3
-rw-r--r--  arch/arm64/include/asm/cpufeature.h    | 22
-rw-r--r--  arch/arm64/include/asm/elf.h           | 13
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h  | 12
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h       | 30
-rw-r--r--  arch/arm64/include/asm/kvm_host.h      | 26
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h       | 25
-rw-r--r--  arch/arm64/include/asm/processor.h     |  5
-rw-r--r--  arch/arm64/include/asm/thread_info.h   |  1
-rw-r--r--  arch/arm64/include/asm/topology.h      |  6
-rw-r--r--  arch/arm64/include/uapi/asm/auxvec.h   |  3

14 files changed, 144 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 32f465a80e4e..0db62a4cbce2 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -86,6 +86,10 @@ static inline bool acpi_has_cpu_in_madt(void)
 }
 
 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
+static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
+{
+	return acpi_cpu_get_madt_gicc(cpu)->uid;
+}
 
 static inline void arch_fix_phys_package_id(int num, u32 slot) { }
 void __init acpi_init_cpus(void);
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 9bbffc7a301f..5df5cfe1c143 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -33,7 +33,7 @@
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3
 
-#define L1_CACHE_SHIFT		7
+#define L1_CACHE_SHIFT		(6)
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
@@ -43,7 +43,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	(128)
 
 #ifndef __ASSEMBLY__
 
@@ -77,7 +77,7 @@ static inline u32 cache_type_cwg(void)
 static inline int cache_line_size(void)
 {
 	u32 cwg = cache_type_cwg();
-	return cwg ? 4 << cwg : L1_CACHE_BYTES;
+	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
 }
 
 #endif	/* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 4f5fd2a36e6e..3b0938281541 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -204,7 +204,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr,	\
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
-	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
+	"	sevl\n"							\
+	"	wfe\n"							\
+	"	ldxr" #sz "\t%" #w "[tmp], %[v]\n"			\
 	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
 	"	cbnz	%" #w "[tmp], 1f\n"				\
 	"	wfe\n"							\
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bc51b72fafd4..8a699c708fc9 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -48,7 +48,8 @@
 #define ARM64_HAS_CACHE_IDC		27
 #define ARM64_HAS_CACHE_DIC		28
 #define ARM64_HW_DBM			29
+#define ARM64_SSBD			30
 
-#define ARM64_NCAPS			30
+#define ARM64_NCAPS			31
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 09b0f2a80c8f..55bc1f073bfb 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -537,6 +537,28 @@ static inline u64 read_zcr_features(void)
 	return zcr;
 }
 
+#define ARM64_SSBD_UNKNOWN		-1
+#define ARM64_SSBD_FORCE_DISABLE	0
+#define ARM64_SSBD_KERNEL		1
+#define ARM64_SSBD_FORCE_ENABLE		2
+#define ARM64_SSBD_MITIGATED		3
+
+static inline int arm64_get_ssbd_state(void)
+{
+#ifdef CONFIG_ARM64_SSBD
+	extern int ssbd_state;
+	return ssbd_state;
+#else
+	return ARM64_SSBD_UNKNOWN;
+#endif
+}
+
+#ifdef CONFIG_ARM64_SSBD
+void arm64_set_ssbd_mitigation(bool state);
+#else
+static inline void arm64_set_ssbd_mitigation(bool state) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fac1c4de7898..433b9554c6a1 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -121,6 +121,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bug.h>
+#include <asm/processor.h> /* for signal_minsigstksz, used by ARCH_DLINFO */
+
 typedef unsigned long elf_greg_t;
 
 #define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
@@ -148,6 +151,16 @@ typedef struct user_fpsimd_state elf_fpregset_t;
 do {									\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,					\
 		    (elf_addr_t)current->mm->context.vdso);		\
+									\
+	/*								\
+	 * Should always be nonzero unless there's a kernel bug.	\
+	 * If we haven't determined a sensible value to give to		\
+	 * userspace, omit the entry:					\
+	 */								\
+	if (likely(signal_minsigstksz))					\
+		NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz);	\
+	else								\
+		NEW_AUX_ENT(AT_IGNORE, 0);				\
 } while (0)
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index e050d765ca9e..46843515d77b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -207,12 +207,14 @@
 		str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
 		mrs_s		x\nxtmp, SYS_ZCR_EL1
-		bic		x\nxtmp, x\nxtmp, ZCR_ELx_LEN_MASK
-		orr		x\nxtmp, x\nxtmp, \xvqminus1
-		msr_s		SYS_ZCR_EL1, x\nxtmp	// self-synchronising
-
+		bic		\xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK
+		orr		\xtmp2, \xtmp2, \xvqminus1
+		cmp		\xtmp2, x\nxtmp
+		b.eq		921f
+		msr_s		SYS_ZCR_EL1, \xtmp2	// self-synchronising
+921:
 _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
 		_sve_ldr_p	0, \nxbase
 		_sve_wrffr	0
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f6648a3e4152..951b2076a5e2 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,6 +20,9 @@
 
 #include <asm/virt.h>
 
+#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
+#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -71,14 +74,37 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern u32 __init_stage2_translation(void);
 
+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym)						\
+	({								\
+		void *__ptr = hyp_symbol_addr(sym);			\
+		__ptr += read_sysreg(tpidr_el2);			\
+		(typeof(&sym))__ptr;					\
+	 })
+
+#define __hyp_this_cpu_read(sym)					\
+	({								\
+		*__hyp_this_cpu_ptr(sym);				\
+	 })
+
 #else /* __ASSEMBLY__ */
 
-.macro get_host_ctxt reg, tmp
-	adr_l	\reg, kvm_host_cpu_state
+.macro hyp_adr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
 	mrs	\tmp, tpidr_el2
 	add	\reg, \reg, \tmp
 .endm
 
+.macro hyp_ldr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	ldr	\reg, [\reg, \tmp]
+.endm
+
+.macro get_host_ctxt reg, tmp
+	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
+
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 469de8acd06f..95d8a0e15b5f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
+	/* State of various workarounds, see kvm_asm.h for bit assignment */
+	u64 workaround_flags;
+
 	/* Guest debug state */
 	u64 debug_flags;
 
@@ -452,6 +455,29 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 	return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
 }
 
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL			1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		return KVM_SSBD_FORCE_DISABLE;
+	case ARM64_SSBD_KERNEL:
+		return KVM_SSBD_KERNEL;
+	case ARM64_SSBD_FORCE_ENABLE:
+		return KVM_SSBD_FORCE_ENABLE;
+	case ARM64_SSBD_MITIGATED:
+		return KVM_SSBD_MITIGATED;
+	case ARM64_SSBD_UNKNOWN:
+	default:
+		return KVM_SSBD_UNKNOWN;
+	}
+}
+
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6128992c2ded..fb9a7127bb75 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -72,7 +72,6 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>
 
 /*
  * Convert a kernel VA into a HYP VA.
@@ -473,6 +472,30 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+	int cpu, err;
+
+	for_each_possible_cpu(cpu) {
+		u64 *ptr;
+
+		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+#endif
+
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 767598932549..65ab83e8926e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -35,6 +35,8 @@
 #ifdef __KERNEL__
 
 #include <linux/build_bug.h>
+#include <linux/cache.h>
+#include <linux/init.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
 
@@ -244,6 +246,9 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
 void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
 void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
 
+extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
+extern void __init minsigstksz_setup(void);
+
 /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
 #define SVE_SET_VL(arg)	sve_set_current_vl(arg)
 #define SVE_GET_VL()	sve_get_current_vl()
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 740aa03c5f0d..cbcf11b5e637 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -94,6 +94,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_32BIT		22	/* 32bit process */
 #define TIF_SVE			23	/* Scalable Vector Extension in use */
 #define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
+#define TIF_SSBD		25	/* Wants SSB mitigation */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index c4f2d50491eb..df48212f767b 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -7,14 +7,16 @@
 struct cpu_topology {
 	int thread_id;
 	int core_id;
-	int cluster_id;
+	int package_id;
+	int llc_id;
 	cpumask_t thread_sibling;
 	cpumask_t core_sibling;
+	cpumask_t llc_siblings;
 };
 
 extern struct cpu_topology cpu_topology[NR_CPUS];
 
-#define topology_physical_package_id(cpu)	(cpu_topology[cpu].cluster_id)
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h
index ec0a86d484e1..743c0b84fd30 100644
--- a/arch/arm64/include/uapi/asm/auxvec.h
+++ b/arch/arm64/include/uapi/asm/auxvec.h
@@ -19,7 +19,8 @@
 
 /* vDSO location */
 #define AT_SYSINFO_EHDR	33
+#define AT_MINSIGSTKSZ	51	/* stack needed for signal delivery */
 
-#define AT_VECTOR_SIZE_ARCH	1 /* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH	2 /* entries in ARCH_DLINFO */
 
 #endif
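The cache.h change above makes cache_line_size() fall back to ARCH_DMA_MINALIGN (128 bytes) when CTR_EL0.CWG reads as zero, rather than L1_CACHE_BYTES (now 64). Userspace can perform the same computation; the following is only a sketch, assuming an aarch64 build and a Linux kernel that permits (or traps and emulates) EL0 reads of CTR_EL0:

#include <stdio.h>

/* Read the Cache Type Register from EL0; Linux emulates this
 * access when the CPU does not allow it directly. */
static unsigned long read_ctr_el0(void)
{
	unsigned long ctr;

	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return ctr;
}

int main(void)
{
	/* CWG (Cache Writeback Granule) is bits [27:24], encoded as
	 * log2 of the granule in 4-byte words; 0 means "not provided" */
	unsigned int cwg = (read_ctr_el0() >> 24) & 0xf;
	/* Same fallback as the patched cache_line_size(): 128 bytes */
	unsigned int granule = cwg ? 4U << cwg : 128U;

	printf("cache writeback granule: %u bytes\n", granule);
	return 0;
}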
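The elf.h and auxvec.h hunks export the signal frame size to userspace as AT_MINSIGSTKSZ (51), falling back to AT_IGNORE when signal_minsigstksz was never computed. A consumer can read the entry with getauxval(3); a minimal sketch, where the fallback to the legacy MINSIGSTKSZ constant on kernels that do not export the entry is this author's suggestion, not part of the patch:

#include <signal.h>	/* MINSIGSTKSZ */
#include <stdio.h>
#include <sys/auxv.h>	/* getauxval() */

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ	51	/* matches the uapi value added above */
#endif

int main(void)
{
	/* getauxval() returns 0 if the entry is absent (or AT_IGNORE'd) */
	unsigned long minsigstksz = getauxval(AT_MINSIGSTKSZ);

	if (minsigstksz == 0)
		minsigstksz = MINSIGSTKSZ;	/* legacy compile-time floor */

	printf("stack needed for signal delivery: %lu bytes\n", minsigstksz);
	return 0;
}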
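The TIF_SSBD flag and the ARM64_SSBD_* states added above back the per-task Speculative Store Bypass mitigation. Nothing in this diff shows the userspace control path, which lives in the generic speculation-control prctl(); as a hedged illustration only, a task could request the mitigation like this (the PR_SPEC_* constants are the standard linux/prctl.h values, guarded for older headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL	52
#define PR_SET_SPECULATION_CTRL	53
#define PR_SPEC_STORE_BYPASS	0
#define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	/* Ask the kernel to disable speculative store bypass for this
	 * task; on arm64 this is what ends up setting TIF_SSBD */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Read back the current state as a PR_SPEC_* bitmask */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  0, 0, 0);
	if (state < 0)
		perror("PR_GET_SPECULATION_CTRL");
	else
		printf("store bypass state: 0x%x\n", state);
	return 0;
}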