From b7f8b440f3001cc1775c028f0a783786113c2ae3 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Tue, 11 Jun 2019 15:47:20 +0000
Subject: powerpc/32s: fix initial setup of segment registers on secondary CPU

The patch referenced below moved the loading of segment registers out of
load_up_mmu() in order to do it earlier in the boot sequence. However, the
secondary CPU still needs it to be done when loading up the MMU.

Reported-by: Erhard F.
Fixes: 215b823707ce ("powerpc/32s: set up an early static hash table for KASAN")
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/head_32.S | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 755fab9641d6..c82947a3892a 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -757,6 +757,7 @@ __secondary_start:
 	stw	r0,0(r3)

 	/* load up the MMU */
+	bl	load_segment_registers
 	bl	load_up_mmu

 	/* ptr to phys current thread */
--
cgit v1.2.3

From e8732ffa2e096d433c3f2349b871d43ed0d39f5c Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Thu, 13 Jun 2019 13:52:30 +0000
Subject: powerpc/booke: fix fast syscall entry on SMP

Use r10 instead of r9 to calculate the CPU offset, as r9 contains the
value from SRR1 which is used later.

Fixes: 1a4b739bbb4f ("powerpc/32: implement fast entry for syscalls on BOOKE")
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/head_booke.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index bfeb469e8106..9f9e0d109d7d 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -145,9 +145,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	tophys(r11,r11)
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-	lwz	r9,TASK_CPU(r2)
-	slwi	r9,r9,3
-	add	r11,r11,r9
+	lwz	r10, TASK_CPU(r2)
+	slwi	r10, r10, 3
+	add	r11, r11, r10
 #endif
 	lwz	r12,0(r11)
 	mtspr	SPRN_DBCR0,r12
--
cgit v1.2.3

From 82f6e266f8123d7938713c0e10c03aa655b3e68a Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Thu, 23 May 2019 08:39:27 +0000
Subject: powerpc/32: fix build failure on book3e with KVM

A build failure was introduced by the commit identified below, due to a
missed macro expansion leading to a wrong called function name:

  arch/powerpc/kernel/head_fsl_booke.o: In function `SystemCall':
  arch/powerpc/kernel/head_fsl_booke.S:416: undefined reference to `kvmppc_handler_BOOKE_INTERRUPT_SYSCALL_SPRN_SRR1'
  Makefile:1052: recipe for target 'vmlinux' failed

The called function should be kvmppc_handler_8_0x01B(). This patch fixes it.

Reported-by: Paul Mackerras
Fixes: 1a4b739bbb4f ("powerpc/32: implement fast entry for syscalls on BOOKE")
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kernel/head_booke.h     | 4 ++--
 arch/powerpc/kernel/head_fsl_booke.S | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 9f9e0d109d7d..2ae635df9026 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -83,7 +83,7 @@ END_BTB_FLUSH_SECTION
 	SAVE_4GPRS(3, r11);	\
 	SAVE_2GPRS(7, r11)

-.macro SYSCALL_ENTRY trapno intno
+.macro SYSCALL_ENTRY trapno intno srr1
 	mfspr	r10, SPRN_SPRG_THREAD
 #ifdef CONFIG_KVM_BOOKE_HV
 BEGIN_FTR_SECTION
@@ -94,7 +94,7 @@ BEGIN_FTR_SECTION
 	mfspr	r11, SPRN_SRR1
 	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
 	bf	3, 1975f
-	b	kvmppc_handler_BOOKE_INTERRUPT_\intno\()_SPRN_SRR1
+	b	kvmppc_handler_\intno\()_\srr1
 1975:
 	mr	r12, r13
 	lwz	r13, THREAD_NORMSAVE(2)(r10)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 6621f230cc37..2b39f42c3676 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -413,7 +413,7 @@ interrupt_base:

 	/* System Call Interrupt */
 	START_EXCEPTION(SystemCall)
-	SYSCALL_ENTRY	0xc00 SYSCALL
+	SYSCALL_ENTRY	0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1

 	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
--
cgit v1.2.3

From fabb2efcf0846e28b4910fc20bdc203d3d0170af Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Mon, 17 Jun 2019 17:16:18 +1000
Subject: KVM: PPC: Book3S HV: Fix r3 corruption in h_set_dabr()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Commit c1fe190c0672 ("powerpc: Add force enable of DAWR on P9 option")
screwed up some assembler and corrupted a pointer in r3. This resulted
in crashes like the below:

  BUG: Kernel NULL pointer dereference at 0x000013bf
  Faulting instruction address: 0xc00000000010b044
  Oops: Kernel access of bad area, sig: 11 [#1]
  LE PAGE_SIZE=64K MMU=Radix MMU=Hash SMP NR_CPUS=2048 NUMA pSeries
  CPU: 8 PID: 1771 Comm: qemu-system-ppc Kdump: loaded Not tainted 5.2.0-rc4+ #3
  NIP: c00000000010b044 LR: c0080000089dacf4 CTR: c00000000010aff4
  REGS: c00000179b397710 TRAP: 0300 Not tainted (5.2.0-rc4+)
  MSR: 800000000280b033 CR: 42244842 XER: 00000000
  CFAR: c00000000010aff8 DAR: 00000000000013bf DSISR: 42000000 IRQMASK: 0
  GPR00: c0080000089dd6bc c00000179b3979a0 c008000008a04300 ffffffffffffffff
  GPR04: 0000000000000000 0000000000000003 000000002444b05d c0000017f11c45d0
  ...
  NIP kvmppc_h_set_dabr+0x50/0x68
  LR kvmppc_pseries_do_hcall+0xa3c/0xeb0 [kvm_hv]
  Call Trace:
    0xc0000017f11c0000 (unreliable)
    kvmppc_vcpu_run_hv+0x694/0xec0 [kvm_hv]
    kvmppc_vcpu_run+0x34/0x48 [kvm]
    kvm_arch_vcpu_ioctl_run+0x2f4/0x400 [kvm]
    kvm_vcpu_ioctl+0x460/0x850 [kvm]
    do_vfs_ioctl+0xe4/0xb40
    ksys_ioctl+0xc4/0x110
    sys_ioctl+0x28/0x80
    system_call+0x5c/0x70
  Instruction dump:
  4082fff4 4c00012c 38600000 4e800020 e96280c0 896b0000 2c2b0000 3860ffff
  4d820020 50852e74 508516f6 78840724 f8a313c8 7c942ba6 7cbc2ba6

Fix the bug by only changing r3 when we are returning immediately.

Fixes: c1fe190c0672 ("powerpc: Add force enable of DAWR on P9 option")
Signed-off-by: Michael Neuling
Signed-off-by: Suraj Jitindar Singh
Reported-by: Cédric Le Goater
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f9b2620fbecd..5cb8516b209c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2507,8 +2507,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	LOAD_REG_ADDR(r11, dawr_force_enable)
 	lbz	r11, 0(r11)
 	cmpdi	r11, 0
+	bne	3f
 	li	r3, H_HARDWARE
-	beqlr
+	blr
+3:
 	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
 	rlwimi	r5, r4, 2, DAWRX_WT
--
cgit v1.2.3

From 84b028243ef07a3f65c1857343ada2b1022f8bed Mon Sep 17 00:00:00 2001
From: Suraj Jitindar Singh
Date: Mon, 17 Jun 2019 17:16:19 +1000
Subject: KVM: PPC: Book3S HV: Only write DAWR[X] when handling h_set_dawr in
 real mode

The hcall H_SET_DAWR is used by a guest to set the data address
watchpoint register (DAWR). This hcall is handled in the host in
kvmppc_h_set_dawr() which can be called in either real mode on the
guest exit path from hcall_try_real_mode() in book3s_hv_rmhandlers.S,
or in virtual mode when called from kvmppc_pseries_do_hcall() in
book3s_hv.c.

The function kvmppc_h_set_dawr() updates the dawr and dawrx fields in
the vcpu struct accordingly and then also writes the respective values
into the DAWR and DAWRX registers directly. It is necessary to write
the registers directly here when calling the function in real mode
since the path to re-enter the guest won't do this. However when in
virtual mode the host DAWR and DAWRX values have already been restored,
and so writing the registers would overwrite these. Additionally there
is no reason to write the guest values here as these will be read from
the vcpu struct and written to the registers appropriately the next
time the vcpu is run.

This also avoids the case when handling h_set_dawr for a nested guest
where the guest hypervisor isn't able to write the DAWR and DAWRX
registers directly and must rely on the real hypervisor to do this for
it when it calls H_ENTER_NESTED.

Fixes: c1fe190c0672 ("powerpc: Add force enable of DAWR on P9 option")
Signed-off-by: Suraj Jitindar Singh
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5cb8516b209c..bc18366cd1ba 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2517,9 +2517,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	clrrdi	r4, r4, 3
 	std	r4, VCPU_DAWR(r3)
 	std	r5, VCPU_DAWRX(r3)
+	/*
+	 * If came in through the real mode hcall handler then it is necessary
+	 * to write the registers since the return path won't. Otherwise it is
+	 * sufficient to store them in the vcpu struct as they will be loaded
+	 * next time the vcpu is run.
+	 */
+	mfmsr	r6
+	andi.	r6, r6, MSR_DR		/* in real mode? */
+	bne	4f
 	mtspr	SPRN_DAWR, r4
 	mtspr	SPRN_DAWRX, r5
-	li	r3, 0
+4:	li	r3, 0
 	blr

 _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
--
cgit v1.2.3

From 9739ab7eda459f0669ec9807e0d9be5020bab88c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 13 Jun 2019 10:24:46 +0200
Subject: powerpc: enable a 30-bit ZONE_DMA for 32-bit pmac

With the strict dma mask checking introduced with the switch to the
generic DMA direct code, common wifi chips on 32-bit powerbooks stopped
working. Add a 30-bit ZONE_DMA to the 32-bit pmac builds to allow them
to reliably allocate dma coherent memory.

Fixes: 65a21b71f948 ("powerpc/dma: remove dma_nommu_dma_supported")
Reported-by: Aaro Koskinen
Signed-off-by: Christoph Hellwig
Tested-by: Larry Finger
Acked-by: Larry Finger
Tested-by: Aaro Koskinen
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/page.h         | 7 +++++++
 arch/powerpc/mm/mem.c                   | 3 ++-
 arch/powerpc/platforms/powermac/Kconfig | 1 +
 3 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index dbc8c0679480..3d013e4696e9 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -323,6 +323,13 @@ struct vm_area_struct;
 #endif /* __ASSEMBLY__ */
 #include <asm/slice.h>

+/*
+ * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
+ */
+#ifdef CONFIG_PPC32
+#define ARCH_ZONE_DMA_BITS 30
+#else
 #define ARCH_ZONE_DMA_BITS 31
+#endif

 #endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e885fe2aafcc..40bd4153ab09 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -253,7 +253,8 @@ void __init paging_init(void)
 	       (long int)((top_of_ram - total_ram) >> 20));

 #ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
+				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index f834a19ed772..c02d8c503b29 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -7,6 +7,7 @@ config PPC_PMAC
 	select PPC_INDIRECT_PCI if PPC32
 	select PPC_MPC106 if PPC32
 	select PPC_NATIVE
+	select ZONE_DMA if PPC32
 	default y

 config PPC_PMAC64
--
cgit v1.2.3

From 50087112592016a3fc10b394a55f1f1a1bde6908 Mon Sep 17 00:00:00 2001
From: Suraj Jitindar Singh
Date: Thu, 20 Jun 2019 11:46:49 +1000
Subject: KVM: PPC: Book3S HV: Invalidate ERAT when flushing guest TLB entries

When a guest vcpu moves from one physical thread to another it is
necessary for the host to perform a tlb flush on the previous core if
another vcpu from the same guest is going to run there. This is because
the guest may use the local form of the tlb invalidation instruction
meaning stale tlb entries would persist where it previously ran.

This is handled on guest entry in kvmppc_check_need_tlb_flush() which
calls flush_guest_tlb() to perform the tlb flush.

Previously the generic radix__local_flush_tlb_lpid_guest() function was
used, however the functionality was reimplemented in flush_guest_tlb()
to avoid the trace_tlbie() call as the flushing may be done in real
mode. The reimplementation in flush_guest_tlb() was missing an erat
invalidation after flushing the tlb. This led to observable memory
corruption in the guest due to the caching of stale translations.
Fix this by adding the erat invalidation.

Fixes: 70ea13f6e609 ("KVM: PPC: Book3S HV: Flush TLB on secondary radix threads")
Signed-off-by: Suraj Jitindar Singh
Signed-off-by: Michael Ellerman
---
 arch/powerpc/kvm/book3s_hv_builtin.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 6035d24f1d1d..a46286f73eec 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -833,6 +833,7 @@ static void flush_guest_tlb(struct kvm *kvm)
 		}
 	}
 	asm volatile("ptesync": : :"memory");
+	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }

 void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
--
cgit v1.2.3