author | Paolo Bonzini <pbonzini@redhat.com> | 2018-06-14 17:42:54 +0200 |
committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-06-14 17:42:54 +0200 |
commit | 09027ab73b82ec773f6ab8ae9468d7e15b748dfa (patch) | |
tree | dc418a7b8712b8591f0e2a5c0c81ee6e43c335b6 /arch/powerpc/kvm/book3s_hv_rmhandlers.S | |
parent | 273ba45796c14b4a2b669098f13d576b9e233dd8 (diff) | |
parent | f61e0d3cc4aee194014074471658a5a037e311ce (diff) | |
download | linux-09027ab73b82ec773f6ab8ae9468d7e15b748dfa.tar.bz2 |
Merge tag 'kvm-ppc-next-4.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rmhandlers.S | 318 |
1 file changed, 77 insertions(+), 241 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b97d261d3b89..153988d878e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -39,8 +39,6 @@ BEGIN_FTR_SECTION;				\
 	extsw	reg, reg;			\
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 
-#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
-
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE	1
 #define NAPPING_NOVCPU	2
@@ -639,6 +637,10 @@ kvmppc_hv_entry:
 	/* Primary thread switches to guest partition. */
 	cmpwi	r6,0
 	bne	10f
+
+	/* Radix has already switched LPID and flushed core TLB */
+	bne	cr7, 22f
+
 	lwz	r7,KVM_LPID(r9)
 BEGIN_FTR_SECTION
 	ld	r6,KVM_SDR1(r9)
@@ -650,7 +652,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPID,r7
 	isync
 
-	/* See if we need to flush the TLB */
+	/* See if we need to flush the TLB. Hash has to be done in RM */
 	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
 BEGIN_FTR_SECTION
 	/*
@@ -677,15 +679,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	li	r7,0x800		/* IS field = 0b10 */
 	ptesync
 	li	r0,0			/* RS for P9 version of tlbiel */
-	bne	cr7, 29f
 28:	tlbiel	r7		/* On P9, rs=0, RIC=0, PRS=0, R=0 */
 	addi	r7,r7,0x1000
 	bdnz	28b
-	b	30f
-29:	PPC_TLBIEL(7,0,2,1,1)	/* for radix, RIC=2, PRS=1, R=1 */
-	addi	r7,r7,0x1000
-	bdnz	29b
-30:	ptesync
+	ptesync
 23:	ldarx	r7,0,r6		/* clear the bit after TLB flushed */
 	andc	r7,r7,r8
 	stdcx.	r7,0,r6
@@ -799,7 +796,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
-	bl	kvmppc_restore_tm
+	mr	r3, r4
+	ld	r4, VCPU_MSR(r3)
+	bl	kvmppc_restore_tm_hv
+	ld	r4, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -1783,7 +1783,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
-	bl	kvmppc_save_tm
+	mr	r3, r9
+	ld	r4, VCPU_MSR(r3)
+	bl	kvmppc_save_tm_hv
+	ld	r9, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -2686,8 +2689,9 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
-	ld	r9, HSTATE_KVM_VCPU(r13)
-	bl	kvmppc_save_tm
+	ld	r3, HSTATE_KVM_VCPU(r13)
+	ld	r4, VCPU_MSR(r3)
+	bl	kvmppc_save_tm_hv
 91:
 #endif
 
@@ -2805,7 +2809,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 	/*
 	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
 	 */
-	bl	kvmppc_restore_tm
+	mr	r3, r4
+	ld	r4, VCPU_MSR(r3)
+	bl	kvmppc_restore_tm_hv
+	ld	r4, HSTATE_KVM_VCPU(r13)
 91:
 #endif
 
@@ -3126,11 +3133,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 /*
  * Save transactional state and TM-related registers.
- * Called with r9 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct and r4 containing
+ * the guest MSR value.
  * This can modify all checkpointed registers, but
- * restores r1, r2 and r9 (vcpu pointer) before exit.
+ * restores r1 and r2 before exit.
  */
-kvmppc_save_tm:
+kvmppc_save_tm_hv:
+	/* See if we need to handle fake suspend mode */
+BEGIN_FTR_SECTION
+	b	__kvmppc_save_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+
+	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
+	cmpwi	r0, 0
+	beq	__kvmppc_save_tm
+
+	/* The following code handles the fake_suspend = 1 case */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -PPC_MIN_STKFRM(r1)
@@ -3141,59 +3159,37 @@ kvmppc_save_tm:
 	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
 	mtmsrd	r8
 
-	ld	r5, VCPU_MSR(r9)
-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
-	beq	1f	/* TM not active in guest. */
-
-	std	r1, HSTATE_HOST_R1(r13)
-	li	r3, TM_CAUSE_KVM_RESCHED
-
-BEGIN_FTR_SECTION
-	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
-	cmpwi	r0, 0
-	beq	3f
 	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
 	beq	4f
-BEGIN_FTR_SECTION_NESTED(96)
+BEGIN_FTR_SECTION
 	bl	pnv_power9_force_smt4_catch
-END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	nop
-	b	6f
-3:
-	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
-	mfspr	r6, SPRN_TEXASR
-	std	r6, VCPU_ORIG_TEXASR(r9)
-6:
-END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 
-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	std	r1, HSTATE_HOST_R1(r13)
+
+	/* Clear the MSR RI since r1, r13 may be foobar. */
 	li	r5, 0
 	mtmsrd	r5, 1
 
-	/* All GPRs are volatile at this point. */
+	/* We have to treclaim here because that's the only way to do S->N */
+	li	r3, TM_CAUSE_KVM_RESCHED
 	TRECLAIM(R3)
 
-	/* Temporarily store r13 and r9 so we have some regs to play with */
-	SET_SCRATCH0(r13)
-	GET_PACA(r13)
-	std	r9, PACATMSCRATCH(r13)
-
-	/* If doing TM emulation on POWER9 DD2.2, check for fake suspend mode */
-BEGIN_FTR_SECTION
-	lbz	r9, HSTATE_FAKE_SUSPEND(r13)
-	cmpwi	r9, 0
-	beq	2f
 	/*
 	 * We were in fake suspend, so we are not going to save the
 	 * register state as the guest checkpointed state (since
 	 * we already have it), therefore we can now use any volatile GPR.
 	 */
-	/* Reload stack pointer and TOC. */
+	/* Reload PACA pointer, stack pointer and TOC. */
+	GET_PACA(r13)
 	ld	r1, HSTATE_HOST_R1(r13)
 	ld	r2, PACATOC(r13)
+
 	/* Set MSR RI now we have r1 and r13 back. */
 	li	r5, MSR_RI
 	mtmsrd	r5, 1
+
 	HMT_MEDIUM
 	ld	r6, HSTATE_DSCR(r13)
 	mtspr	SPRN_DSCR, r6
@@ -3208,85 +3204,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
 	li	r0, PSSCR_FAKE_SUSPEND
 	andc	r3, r3, r0
 	mtspr	SPRN_PSSCR, r3
-	ld	r9, HSTATE_KVM_VCPU(r13)
-	/* Don't save TEXASR, use value from last exit in real suspend state */
-	b	11f
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 
+	/* Don't save TEXASR, use value from last exit in real suspend state */
 	ld	r9, HSTATE_KVM_VCPU(r13)
-
-	/* Get a few more GPRs free. */
-	std	r29, VCPU_GPRS_TM(29)(r9)
-	std	r30, VCPU_GPRS_TM(30)(r9)
-	std	r31, VCPU_GPRS_TM(31)(r9)
-
-	/* Save away PPR and DSCR soon so don't run with user values. */
-	mfspr	r31, SPRN_PPR
-	HMT_MEDIUM
-	mfspr	r30, SPRN_DSCR
-	ld	r29, HSTATE_DSCR(r13)
-	mtspr	SPRN_DSCR, r29
-
-	/* Save all but r9, r13 & r29-r31 */
-	reg = 0
-	.rept	29
-	.if (reg != 9) && (reg != 13)
-	std	reg, VCPU_GPRS_TM(reg)(r9)
-	.endif
-	reg = reg + 1
-	.endr
-	/* ... now save r13 */
-	GET_SCRATCH0(r4)
-	std	r4, VCPU_GPRS_TM(13)(r9)
-	/* ... and save r9 */
-	ld	r4, PACATMSCRATCH(r13)
-	std	r4, VCPU_GPRS_TM(9)(r9)
-
-	/* Reload stack pointer and TOC. */
-	ld	r1, HSTATE_HOST_R1(r13)
-	ld	r2, PACATOC(r13)
-
-	/* Set MSR RI now we have r1 and r13 back. */
-	li	r5, MSR_RI
-	mtmsrd	r5, 1
-
-	/* Save away checkpinted SPRs. */
-	std	r31, VCPU_PPR_TM(r9)
-	std	r30, VCPU_DSCR_TM(r9)
-	mflr	r5
-	mfcr	r6
-	mfctr	r7
-	mfspr	r8, SPRN_AMR
-	mfspr	r10, SPRN_TAR
-	mfxer	r11
-	std	r5, VCPU_LR_TM(r9)
-	stw	r6, VCPU_CR_TM(r9)
-	std	r7, VCPU_CTR_TM(r9)
-	std	r8, VCPU_AMR_TM(r9)
-	std	r10, VCPU_TAR_TM(r9)
-	std	r11, VCPU_XER_TM(r9)
-
-	/* Restore r12 as trap number. */
-	lwz	r12, VCPU_TRAP(r9)
-
-	/* Save FP/VSX. */
-	addi	r3, r9, VCPU_FPRS_TM
-	bl	store_fp_state
-	addi	r3, r9, VCPU_VRS_TM
-	bl	store_vr_state
-	mfspr	r6, SPRN_VRSAVE
-	stw	r6, VCPU_VRSAVE_TM(r9)
-1:
-	/*
-	 * We need to save these SPRs after the treclaim so that the software
-	 * error code is recorded correctly in the TEXASR. Also the user may
-	 * change these outside of a transaction, so they must always be
-	 * context switched.
-	 */
-	mfspr	r7, SPRN_TEXASR
-	std	r7, VCPU_TEXASR(r9)
-11:
 	mfspr	r5, SPRN_TFHAR
 	mfspr	r6, SPRN_TFIAR
 	std	r5, VCPU_TFHAR(r9)
@@ -3299,149 +3219,63 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
 
 /*
  * Restore transactional state and TM-related registers.
- * Called with r4 pointing to the vcpu struct.
+ * Called with r3 pointing to the vcpu struct
+ * and r4 containing the guest MSR value.
  * This potentially modifies all checkpointed registers.
- * It restores r1, r2, r4 from the PACA.
+ * It restores r1 and r2 from the PACA.
  */
-kvmppc_restore_tm:
+kvmppc_restore_tm_hv:
+	/*
+	 * If we are doing TM emulation for the guest on a POWER9 DD2,
+	 * then we don't actually do a trechkpt -- we either set up
+	 * fake-suspend mode, or emulate a TM rollback.
+	 */
+BEGIN_FTR_SECTION
+	b	__kvmppc_restore_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 
-	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+	li	r0, 0
+	stb	r0, HSTATE_FAKE_SUSPEND(r13)
+
+	/* Turn on TM so we can restore TM SPRs */
 	mfmsr	r5
-	li	r6, MSR_TM >> 32
-	sldi	r6, r6, 32
-	or	r5, r5, r6
-	ori	r5, r5, MSR_FP
-	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+	li	r0, 1
+	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
 	mtmsrd	r5
 
 	/*
 	 * The user may change these outside of a transaction, so they must
 	 * always be context switched.
 	 */
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
+	ld	r5, VCPU_TFHAR(r3)
+	ld	r6, VCPU_TFIAR(r3)
+	ld	r7, VCPU_TEXASR(r3)
 	mtspr	SPRN_TFHAR, r5
 	mtspr	SPRN_TFIAR, r6
 	mtspr	SPRN_TEXASR, r7
 
-	li	r0, 0
-	stb	r0, HSTATE_FAKE_SUSPEND(r13)
-	ld	r5, VCPU_MSR(r4)
-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
 	beqlr		/* TM not active in guest */
-	std	r1, HSTATE_HOST_R1(r13)
 
-	/* Make sure the failure summary is set, otherwise we'll program check
-	 * when we trechkpt. It's possible that this might have been not set
-	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
-	 * host.
-	 */
+	/* Make sure the failure summary is set */
 	oris	r7, r7, (TEXASR_FS)@h
 	mtspr	SPRN_TEXASR, r7
 
-	/*
-	 * If we are doing TM emulation for the guest on a POWER9 DD2,
-	 * then we don't actually do a trechkpt -- we either set up
-	 * fake-suspend mode, or emulate a TM rollback.
-	 */
-BEGIN_FTR_SECTION
-	b	.Ldo_tm_fake_load
-END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
-
-	/*
-	 * We need to load up the checkpointed state for the guest.
-	 * We need to do this early as it will blow away any GPRs, VSRs and
-	 * some SPRs.
-	 */
-
-	mr	r31, r4
-	addi	r3, r31, VCPU_FPRS_TM
-	bl	load_fp_state
-	addi	r3, r31, VCPU_VRS_TM
-	bl	load_vr_state
-	mr	r4, r31
-	lwz	r7, VCPU_VRSAVE_TM(r4)
-	mtspr	SPRN_VRSAVE, r7
-
-	ld	r5, VCPU_LR_TM(r4)
-	lwz	r6, VCPU_CR_TM(r4)
-	ld	r7, VCPU_CTR_TM(r4)
-	ld	r8, VCPU_AMR_TM(r4)
-	ld	r9, VCPU_TAR_TM(r4)
-	ld	r10, VCPU_XER_TM(r4)
-	mtlr	r5
-	mtcr	r6
-	mtctr	r7
-	mtspr	SPRN_AMR, r8
-	mtspr	SPRN_TAR, r9
-	mtxer	r10
-
-	/*
-	 * Load up PPR and DSCR values but don't put them in the actual SPRs
-	 * till the last moment to avoid running with userspace PPR and DSCR for
-	 * too long.
-	 */
-	ld	r29, VCPU_DSCR_TM(r4)
-	ld	r30, VCPU_PPR_TM(r4)
-
-	std	r2, PACATMSCRATCH(r13) /* Save TOC */
-
-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
-	li	r5, 0
-	mtmsrd	r5, 1
-
-	/* Load GPRs r0-r28 */
-	reg = 0
-	.rept	29
-	ld	reg, VCPU_GPRS_TM(reg)(r31)
-	reg = reg + 1
-	.endr
-
-	mtspr	SPRN_DSCR, r29
-	mtspr	SPRN_PPR, r30
-
-	/* Load final GPRs */
-	ld	29, VCPU_GPRS_TM(29)(r31)
-	ld	30, VCPU_GPRS_TM(30)(r31)
-	ld	31, VCPU_GPRS_TM(31)(r31)
-
-	/* TM checkpointed state is now setup. All GPRs are now volatile. */
-	TRECHKPT
-
-	/* Now let's get back the state we need. */
-	HMT_MEDIUM
-	GET_PACA(r13)
-	ld	r29, HSTATE_DSCR(r13)
-	mtspr	SPRN_DSCR, r29
-	ld	r4, HSTATE_KVM_VCPU(r13)
-	ld	r1, HSTATE_HOST_R1(r13)
-	ld	r2, PACATMSCRATCH(r13)
-
-	/* Set the MSR RI since we have our registers back. */
-	li	r5, MSR_RI
-	mtmsrd	r5, 1
-9:
-	ld	r0, PPC_LR_STKOFF(r1)
-	mtlr	r0
-	blr
-
-.Ldo_tm_fake_load:
 	cmpwi	r5, 1		/* check for suspended state */
 	bgt	10f
 	stb	r5, HSTATE_FAKE_SUSPEND(r13)
-	b	9b		/* and return */
+	b	9f		/* and return */
 10:	stdu	r1, -PPC_MIN_STKFRM(r1)
 	/* guest is in transactional state, so simulate rollback */
-	mr	r3, r4
 	bl	kvmhv_emulate_tm_rollback
 	nop
-	ld	r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
 	addi	r1, r1, PPC_MIN_STKFRM
-	b	9b
-#endif
+9:	ld	r0, PPC_LR_STKOFF(r1)
+	mtlr	r0
+	blr
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 /*
  * We come here if we get any exception or interrupt while we are
  * executing host real mode code, while in host virtual mode, or while
@@ -3572,6 +3406,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	bcl	20, 31, .+4
 5:	mflr	r3
 	addi	r3, r3, 9f - 5b
+	li	r4, -1
+	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
 	ld	r4, PACAKMSR(r13)
 	mtspr	SPRN_SRR0, r3
 	mtspr	SPRN_SRR1, r4