author     Olof Johansson <olof@lixom.net>   2017-06-01 16:56:06 -0700
committer  Olof Johansson <olof@lixom.net>   2017-06-01 17:07:38 -0700
commit     1ba2eaaacd2cb3e3e0d753d62dae515c6dec8035 (patch)
tree       f75d444520e879f95e264d273ba8619021bd308f /arch/arm64
parent     73069883c017d454737d07a5aeab2b1639fcfeca (diff)
parent     44f73dc42c11398d7b84e94365a485ebd6420798 (diff)
download   linux-1ba2eaaacd2cb3e3e0d753d62dae515c6dec8035.tar.bz2
Merge tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu into fixes
mvebu fixes for 4.12
Fix the interrupt description of the crypto node in the device tree of
the Armada 7K/8K SoCs.
* tag 'mvebu-fixes-4.12-1' of git://git.infradead.org/linux-mvebu: (316 commits)
arm64: marvell: dts: fix interrupts in 7k/8k crypto nodes
+ Linux 4.12-rc2
Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi |  3
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi  |  3
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h                |  1
-rw-r--r--  arch/arm64/include/asm/cpufeature.h                  | 12
-rw-r--r--  arch/arm64/include/asm/kvm_host.h                    |  8
-rw-r--r--  arch/arm64/kernel/cpufeature.c                       | 23
-rw-r--r--  arch/arm64/kernel/perf_event.c                       | 23
-rw-r--r--  arch/arm64/kvm/hyp/Makefile                          |  2
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c                        |  5
9 files changed, 60 insertions, 20 deletions
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df5201cd6..b4bc42ece754 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
 			cpm_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75a8230..6e2058847ddc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
 			cps_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
 					     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1ac..f5a2d09afb38 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
-	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
 	"2:"								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
 	  [v] "+Q" (*(unsigned long *)ptr)				\
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e7f84a7b4465..428ee1f2468c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
 
 bool this_cpu_has_cap(unsigned int cap);
 
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }
 
 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
 	return test_bit(num, cpu_hwcaps);
 }
 
+static inline bool cpus_have_const_cap(int num)
+{
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
 			num, ARM64_NCAPS);
 	} else {
 		__set_bit(num, cpu_hwcaps);
-		static_branch_enable(&cpu_hwcap_keys[num]);
 	}
 }
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5e19165c5fa8..1f252a95bc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long vector_ptr)
 {
 	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code.
+	 * Call initialization code, and switch to the full blown HYP code.
+	 * If the cpucaps haven't been finalized yet, something has gone very
+	 * wrong, and hyp will crash and burn when it uses any
+	 * cpus_have_const_cap() wrapper.
 	 */
+	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc3310..817ce3365e20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-	for (; caps->matches; caps++)
-		if (caps->enable && cpus_have_cap(caps->capability))
+	for (; caps->matches; caps++) {
+		unsigned int num = caps->capability;
+
+		if (!cpus_have_cap(num))
+			continue;
+
+		/* Ensure cpus_have_const_cap(num) works */
+		static_branch_enable(&cpu_hwcap_keys[num]);
+
+		if (caps->enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * we return.
 			 */
 			stop_machine(caps->enable, NULL, cpu_online_mask);
+		}
+	}
 }
 
 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
 	enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+	static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
 	enable_errata_workarounds();
+	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0())
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e..83a1b1ad189f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 	if (attr->exclude_idle)
 		return -EPERM;
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
-	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_PMU_EXCLUDE_EL1;
-	if (!attr->exclude_hv)
-		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae8d8c3..14c4e3b14bcb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c6e53580aefe..71f930501ade 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -253,8 +253,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	off = offsetof(struct bpf_array, ptrs);
 	emit_a64_mov_i64(tmp, off, ctx);
-	emit(A64_LDR64(tmp, r2, tmp), ctx);
-	emit(A64_LDR64(prg, tmp, r3), ctx);
+	emit(A64_ADD(1, tmp, r2, tmp), ctx);
+	emit(A64_LSL(1, prg, r3, 3), ctx);
+	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 	/* goto *(prog->bpf_func + prologue_size); */
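The cpufeature.h, cpufeature.c and kvm_host.h hunks above all serve one change: cpus_have_const_cap() must not rely on the per-capability static keys before enable_cpu_capabilities() has flipped them and arm64_const_caps_ready has been set. As a rough illustration only, here is a small user-space C model of that gating; the static keys and the capability bitmap are stood in for by plain booleans, and nothing below beyond the three reused function names is kernel API.

#include <stdbool.h>
#include <stdio.h>

#define ARM64_NCAPS 64

static bool cpu_hwcaps[ARM64_NCAPS];      /* model of the cpu_hwcaps bitmap */
static bool cpu_hwcap_keys[ARM64_NCAPS];  /* model of the per-cap static keys */
static bool arm64_const_caps_ready;       /* model of the new readiness key */

/* Bitmap-based check: always valid, but not a patched branch. */
static bool cpus_have_cap(unsigned int num)
{
	return num < ARM64_NCAPS && cpu_hwcaps[num];
}

/* "Constant" check: only trustworthy once the keys have been enabled. */
static bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return cpu_hwcap_keys[num];
}

/* The fix: fall back to the bitmap until the keys are marked ready. */
static bool cpus_have_const_cap(int num)
{
	if (arm64_const_caps_ready)
		return __cpus_have_const_cap(num);
	return cpus_have_cap(num);
}

int main(void)
{
	cpu_hwcaps[3] = true;                                  /* cap detected early: bitmap only */
	printf("before ready: %d\n", cpus_have_const_cap(3));  /* 1, via the bitmap */

	cpu_hwcap_keys[3] = true;                              /* enable_cpu_capabilities() */
	arm64_const_caps_ready = true;                         /* mark_const_caps_ready() */
	printf("after ready:  %d\n", cpus_have_const_cap(3));  /* 1, via the "key" */
	return 0;
}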
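Similarly, the perf_event.c hunk replaces the old -EINVAL special case with mode-dependent filter bits. The sketch below is not the kernel's code: it uses placeholder bit values instead of the ARMV8_PMU_* definitions, a simplified attr struct instead of struct perf_event_attr, and a plain bool instead of is_kernel_in_hyp_mode(); it only mirrors the decision shown in the hunk.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bits; the kernel's ARMV8_PMU_* values differ. */
#define EXCLUDE_EL1 (1u << 0)
#define EXCLUDE_EL0 (1u << 1)
#define INCLUDE_EL2 (1u << 2)

struct attr { bool exclude_user, exclude_kernel, exclude_hv; };

/* Mirrors the new filter logic: under VHE the kernel runs at EL2, so
 * "kernel" and "hypervisor" are the same thing and exclude_hv is ignored. */
static unsigned int filter_bits(bool vhe, struct attr a)
{
	unsigned int config_base = 0;

	if (vhe) {
		if (!a.exclude_kernel)
			config_base |= INCLUDE_EL2;
	} else {
		if (a.exclude_kernel)
			config_base |= EXCLUDE_EL1;
		if (!a.exclude_hv)
			config_base |= INCLUDE_EL2;
	}
	if (a.exclude_user)
		config_base |= EXCLUDE_EL0;
	return config_base;
}

int main(void)
{
	/* This combination used to return -EINVAL under VHE; now it simply
	 * counts the kernel (at EL2) and ignores exclude_hv. */
	struct attr a = { .exclude_kernel = false, .exclude_hv = true };

	printf("VHE:     %#x\n", filter_bits(true, a));   /* INCLUDE_EL2 */
	printf("non-VHE: %#x\n", filter_bits(false, a));  /* nothing set */
	return 0;
}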