From 674e70127069f3fd3c58fb0f94c60eb0f6567d78 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 16 Aug 2016 15:03:01 +0100
Subject: arm64: Document workaround for Cortex-A72 erratum #853709

We already have a workaround for Cortex-A57 erratum #852523, but
Cortex-A72 r0p0 to r0p2 suffer from the same issue (known as erratum
#853709). Let's document the fact that we already handle this.

Acked-by: Will Deacon
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm64/kvm/hyp/switch.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ae7855f16ec2..5a84b4562603 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to Cortex-A57 erratum #852523.
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_guest_state(guest_ctxt);
-- cgit v1.2.3

From b63bebe2355cf2632a2979fd2982c88d080c44b6 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 10 Aug 2016 10:49:42 +0100
Subject: arm64: KVM: remove misleading comment on pmu status

The comment about how PMU access is handled has not been relevant since
v4.6, when proper PMU support was added.

Signed-off-by: Vladimir Murzin
Acked-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm64/kvm/sys_regs.c | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index b0b225ceca18..af5ea86d1c19 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters. Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
-- cgit v1.2.3

From f7f6f2d94f0027242ddfd665289b107a873fde43 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 10 Aug 2016 10:49:43 +0100
Subject: arm64: KVM: report configured SRE value to 32-bit world

After commit b34f2bc ("arm64: KVM: Make ICC_SRE_EL1 access return the
configured SRE value") we report the SRE value to a 64-bit guest, but
the 32-bit accessor is still handled as RAZ/WI, which leads to a funny
promise we do not keep:

"GICv3: GIC: unable to set SRE (disabled at EL2), panic ahead"

Instead, return the actual value of the ICC_SRE_EL1 register that the
guest should see.
[ Tweaked commit message - Christoffer ]

Signed-off-by: Vladimir Murzin
Acked-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm64/kvm/sys_regs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index af5ea86d1c19..e51367d159d0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1352,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
 	/* ICC_SRE */
-	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
-- cgit v1.2.3

From 78ec79bfd59e126e1cb394302bfa531a420b3ecd Mon Sep 17 00:00:00 2001
From: Caesar Wang
Date: Wed, 27 Jul 2016 22:24:06 +0800
Subject: arm64: dts: rockchip: add reset saradc node for rk3368 SoCs

The SARADC controller needs to be reset before programming it;
otherwise it will not function properly.

Signed-off-by: Caesar Wang
Acked-by: Heiko Stuebner
Cc:
Signed-off-by: Jonathan Cameron
---
 arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index d02a900378e1..4f44d1191bfd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -270,6 +270,8 @@
 		#io-channel-cells = <1>;
 		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 		clock-names = "saradc", "apb_pclk";
+		resets = <&cru SRST_SARADC>;
+		reset-names = "saradc-apb";
 		status = "disabled";
 	};
-- cgit v1.2.3

From fd363bd417ddb6103564c69cfcbd92d9a7877431 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 24 Aug 2016 18:02:08 +0100
Subject: arm64: avoid TLB conflict with CONFIG_RANDOMIZE_BASE

When CONFIG_RANDOMIZE_BASE is selected, we modify the page tables to
remap the kernel at a newly-chosen VA range. We do this with the MMU
disabled, but do not invalidate TLBs prior to re-enabling the MMU with
the new tables. Thus entries for the old mappings may still live in the
TLBs, and we risk violating Break-Before-Make requirements, leading to
TLB conflicts and/or other issues.

We invalidate TLBs when we uninstall the idmap in early setup code, but
prior to this we are subject to issues relating to the Break-Before-Make
violation.

Avoid these issues by invalidating the TLBs before the new mappings can
be used by the hardware.
Fixes: f80fb3a3d508 ("arm64: add support for kernel ASLR")
Cc: # 4.6+
Acked-by: Ard Biesheuvel
Acked-by: Will Deacon
Signed-off-by: Mark Rutland
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/head.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b77f58355da1..3e7b050e99dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
 
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
 	msr	sctlr_el1, x19			// re-enable the MMU
 	isb
 	ic	iallu				// flush instructions fetched
-- cgit v1.2.3

From 744c6c37cc18705d19e179622f927f5b781fe9cc Mon Sep 17 00:00:00 2001
From: James Morse
Date: Fri, 26 Aug 2016 16:03:42 +0100
Subject: arm64: kernel: Fix unmasked debug exceptions when restoring mdscr_el1

Changes to make the resume from cpu_suspend() code behave more like
secondary boot caused debug exceptions to be unmasked early by
__cpu_setup(). We then go on to restore mdscr_el1 in cpu_do_resume(),
potentially taking breakpoints or watchpoints based on uninitialised
registers.

Mask debug exceptions in cpu_do_resume(), which is specific to resume
from cpu_suspend(). Debug exceptions will be restored to their original
state by local_dbg_restore() in cpu_suspend(), which runs after
hw_breakpoint_restore() has re-initialised the other registers.

Reported-by: Lorenzo Pieralisi
Fixes: cabe1c81ea5b ("arm64: Change cpu_resume() to enable mmu early then access sleep_sp by va")
Cc: # 4.7+
Signed-off-by: James Morse
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/proc.S | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5bb61de23201..9d37e967fa19 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
+
+	/*
+	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+	 * exception. Mask them until local_dbg_restore() in cpu_suspend()
+	 * resets them.
+	 */
+	disable_dbg
 	msr	mdscr_el1, x10
+
 	msr	sctlr_el1, x12
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
-- cgit v1.2.3

From 872c63fbf9e153146b07f0cece4da0d70b283eeb Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 5 Sep 2016 11:56:05 +0100
Subject: arm64: spinlocks: implement smp_mb__before_spinlock() as smp_mb()

smp_mb__before_spinlock() is intended to upgrade a spin_lock() operation
to a full barrier, such that prior stores are ordered with respect to
loads and stores occurring inside the critical section.

Unfortunately, the core code defines the barrier as smp_wmb(), which is
insufficient to provide the required ordering guarantees when used in
conjunction with our load-acquire-based spinlock implementation.

This patch overrides the arm64 definition of smp_mb__before_spinlock()
to map to a full smp_mb().

Cc:
Cc: Peter Zijlstra
Reported-by: Alan Stern
Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/spinlock.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index e875a5a551d7..89206b568cd4 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock()	smp_mb()
+
 #endif /* __ASM_SPINLOCK_H */
-- cgit v1.2.3
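As background for the spinlock patch above, the ordering problem it fixes can be sketched in user space with C11 atomics. This is only an illustration of the usage pattern smp_mb__before_spinlock() is meant to support (for example in try_to_wake_up()); it is not kernel code, and every identifier below is invented for the example:

/*
 * Illustrative user-space sketch (C11), not kernel code. With an
 * acquire-only lock, the store to "flag" may be reordered past the load
 * of "cond_val" inside the critical section; a full fence before taking
 * the lock (the role smp_mb__before_spinlock() plays) forbids that.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool flag;
static atomic_int  cond_val;
static atomic_flag lock = ATOMIC_FLAG_INIT;

static void lock_acquire(void)
{
	/* acquire-only, like a load-acquire based spinlock */
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;
}

static void lock_release(void)
{
	atomic_flag_clear_explicit(&lock, memory_order_release);
}

static void publish_then_check(void)
{
	atomic_store_explicit(&flag, true, memory_order_relaxed);

	/* the full barrier, not just a write barrier */
	atomic_thread_fence(memory_order_seq_cst);

	lock_acquire();
	int c = atomic_load_explicit(&cond_val, memory_order_relaxed);
	/* ... decide based on c, knowing the store to flag is ordered first ... */
	lock_release();
	printf("cond_val seen as %d\n", c);
}

int main(void)
{
	publish_then_check();
	return 0;
}

A plain write barrier (smp_wmb()) only orders stores against stores; ordering a prior store against a later load inside the critical section needs the full barrier shown above, which is why the core code's definition is insufficient on arm64.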
From 2b9743441a312e0b0a2d87deae363eccbe9d0f00 Mon Sep 17 00:00:00 2001
From: Chunyan Zhang
Date: Thu, 8 Sep 2016 20:46:42 +0800
Subject: arm64: use preempt_disable_notrace in _percpu_read/write

When debug preempt or the preempt tracer is enabled,
preempt_count_add/sub() can be traced by function and function graph
tracing, and preempt_disable/enable() call preempt_count_add/sub(), so
in the ftrace subsystem we should use preempt_disable/enable_notrace
instead.

In commit 345ddcc882d8 ("ftrace: Have set_ftrace_pid use the bitmap like
events do"), this_cpu_read() was added to trace_graph_entry(), and if
this_cpu_read() calls preempt_disable(), the graph tracer will go into a
recursive loop, even if tracing_on is disabled.

So this patch changes this_cpu_read() to use
preempt_disable/enable_notrace instead.

Since Yonghui Yang helped a lot to find the root cause of this problem,
also add his SOB.

Signed-off-by: Yonghui Yang
Signed-off-by: Chunyan Zhang
Acked-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/percpu.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 0a456bef8c79..2fee2f59288c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)						\
 ({									\
 	typeof(pcp) __retval;						\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
 					      sizeof(pcp));		\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 	__retval;							\
 })
 
 #define _percpu_write(pcp, val)						\
 do {									\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
 				sizeof(pcp));				\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 } while(0)								\
 
 #define _pcp_protect(operation, pcp, val)				\
-- cgit v1.2.3
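To make the recursion described in the changelog above concrete, here is a small user-space sketch; it is not kernel code, every name is invented for the illustration, and a depth cap stands in for what would otherwise be unbounded recursion. A "traced" read, whose hook itself reads the per-CPU-style variable, re-enters the traced path; a notrace read does not:

/*
 * Illustrative user-space sketch, not kernel code: a traced read whose
 * hook samples the same state recurses; the notrace variant does not.
 */
#include <stdio.h>

static int fake_percpu_counter = 42;	/* stands in for a per-CPU variable */
static int depth;			/* recursion guard for the demo only */

static int percpu_read_traced(void);

/* stands in for a function-graph entry hook that samples per-CPU state */
static void trace_hook(void)
{
	if (depth > 3) {
		puts("recursion: the traced read re-entered the tracer");
		return;
	}
	(void)percpu_read_traced();	/* re-enters the traced path */
}

/* "traced" variant: the hook fires on every call */
static int percpu_read_traced(void)
{
	depth++;
	trace_hook();
	depth--;
	return fake_percpu_counter;
}

/* "notrace" variant: no hook, so no re-entry */
static int percpu_read_notrace(void)
{
	return fake_percpu_counter;
}

int main(void)
{
	(void)percpu_read_traced();
	printf("notrace read: %d\n", percpu_read_notrace());
	return 0;
}

The kernel has no such cap, which is why _percpu_read()/_percpu_write() switch to the notrace preempt helpers once they can be reached from the tracer itself.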