author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 17:16:21 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 17:16:21 -0700
commit     98edb6ca4174f17a64890a02f44c211c8b44fb3c (patch)
tree       033bc5f7da410046d28dd1cefcd2d63cda33d25b /arch/powerpc
parent     a8251096b427283c47e7d8f9568be6b388dd68ec (diff)
parent     8fbf065d625617bbbf6b72d5f78f84ad13c8b547 (diff)
download   linux-98edb6ca4174f17a64890a02f44c211c8b44fb3c.tar.bz2
Merge branch 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.35' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (269 commits)
KVM: x86: Add missing locking to arch specific vcpu ioctls
KVM: PPC: Add missing vcpu_load()/vcpu_put() in vcpu ioctls
KVM: MMU: Segregate shadow pages with different cr0.wp
KVM: x86: Check LMA bit before set_efer
KVM: Don't allow lmsw to clear cr0.pe
KVM: Add cpuid.txt file
KVM: x86: Tell the guest we'll warn it about tsc stability
x86, paravirt: don't compute pvclock adjustments if we trust the tsc
x86: KVM guest: Try using new kvm clock msrs
KVM: x86: export paravirtual cpuid flags in KVM_GET_SUPPORTED_CPUID
KVM: x86: add new KVMCLOCK cpuid feature
KVM: x86: change msr numbers for kvmclock
x86, paravirt: Add a global synchronization point for pvclock
x86, paravirt: Enable pvclock flags in vcpu_time_info structure
KVM: x86: Inject #GP with the right rip on efer writes
KVM: SVM: Don't allow nested guest to VMMCALL into host
KVM: x86: Fix exception reinjection forced to true
KVM: Fix wallclock version writing race
KVM: MMU: Don't read pdptrs with mmu spinlock held in mmu_alloc_roots
KVM: VMX: enable VMXON check with SMX enabled (Intel TXT)
...
Diffstat (limited to 'arch/powerpc')
41 files changed, 4191 insertions, 712 deletions
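Among the arch/powerpc changes in the diff below, asm/kvm_ppc.h gains kvmppc_get_field() and kvmppc_set_field(), which extract and replace bit fields using PowerPC's big-endian bit numbering: bit 0 is the most significant bit of the 64-bit value, so msb <= lsb. A minimal userspace sketch of that convention (plain C with assert() standing in for BUG_ON() and 64-bit-safe masks; not the kernel helpers themselves):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Extract bits msb..lsb of a 64-bit value, IBM numbering (bit 0 = MSB). */
static uint32_t get_field(uint64_t inst, int msb, int lsb)
{
	uint64_t mask;

	assert(msb <= lsb && lsb <= 63);
	mask = (1ull << (lsb - msb + 1)) - 1;	/* fields used below are well under 64 bits wide */
	return (uint32_t)((inst >> (63 - lsb)) & mask);
}

/* Replace bits msb..lsb of a 64-bit value with 'value', same numbering. */
static uint64_t set_field(uint64_t inst, int msb, int lsb, uint64_t value)
{
	uint64_t mask;

	assert(msb <= lsb && lsb <= 63);
	mask = ((1ull << (lsb - msb + 1)) - 1) << (63 - lsb);
	return (inst & ~mask) | ((value << (63 - lsb)) & mask);
}

int main(void)
{
	/* The primary opcode sits in bits 0..5 of the 32-bit instruction word,
	 * i.e. bits 32..37 when the word occupies the low half of a u64.
	 * 0x7c0007ec is INS_DCBZ from the diff; its primary opcode is 31. */
	uint64_t inst = 0x7c0007ec;

	printf("primary opcode: %u\n", get_field(inst, 32, 37));
	printf("bit 33 set:     0x%llx\n",
	       (unsigned long long)set_field(0, 33, 33, 1));	/* 0x40000000 */
	return 0;
}
```

The in-kernel versions return u32 and use BUG_ON(msb > lsb); this numbering is also why kvmppc_read_inst() in the diff below manipulates MSR fields 33, 34..36 and 42..47 rather than conventional low-order bit positions.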
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h index a9b91ed3d4b9..2048a6aeea91 100644 --- a/arch/powerpc/include/asm/asm-compat.h +++ b/arch/powerpc/include/asm/asm-compat.h @@ -21,6 +21,7 @@ /* operations for longs and pointers */ #define PPC_LL stringify_in_c(ld) #define PPC_STL stringify_in_c(std) +#define PPC_STLU stringify_in_c(stdu) #define PPC_LCMPI stringify_in_c(cmpdi) #define PPC_LONG stringify_in_c(.llong) #define PPC_LONG_ALIGN stringify_in_c(.balign 8) @@ -44,6 +45,7 @@ /* operations for longs and pointers */ #define PPC_LL stringify_in_c(lwz) #define PPC_STL stringify_in_c(stw) +#define PPC_STLU stringify_in_c(stwu) #define PPC_LCMPI stringify_in_c(cmpwi) #define PPC_LONG stringify_in_c(.long) #define PPC_LONG_ALIGN stringify_in_c(.balign 4) diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index 81f3b0b5601e..6c5547d82bbe 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h @@ -77,4 +77,14 @@ struct kvm_debug_exit_arch { struct kvm_guest_debug_arch { }; +#define KVM_REG_MASK 0x001f +#define KVM_REG_EXT_MASK 0xffe0 +#define KVM_REG_GPR 0x0000 +#define KVM_REG_FPR 0x0020 +#define KVM_REG_QPR 0x0040 +#define KVM_REG_FQPR 0x0060 + +#define KVM_INTERRUPT_SET -1U +#define KVM_INTERRUPT_UNSET -2U + #endif /* __LINUX_KVM_POWERPC_H */ diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index aadf2dd6f84e..c5ea4cda34b3 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -88,6 +88,8 @@ #define BOOK3S_HFLAG_DCBZ32 0x1 #define BOOK3S_HFLAG_SLB 0x2 +#define BOOK3S_HFLAG_PAIRED_SINGLE 0x4 +#define BOOK3S_HFLAG_NATIVE_PS 0x8 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index db7db0a96967..6f74d93725a0 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -22,46 +22,47 @@ #include <linux/types.h> #include <linux/kvm_host.h> -#include <asm/kvm_book3s_64_asm.h> +#include <asm/kvm_book3s_asm.h> struct kvmppc_slb { u64 esid; u64 vsid; u64 orige; u64 origv; - bool valid; - bool Ks; - bool Kp; - bool nx; - bool large; /* PTEs are 16MB */ - bool tb; /* 1TB segment */ - bool class; + bool valid : 1; + bool Ks : 1; + bool Kp : 1; + bool nx : 1; + bool large : 1; /* PTEs are 16MB */ + bool tb : 1; /* 1TB segment */ + bool class : 1; }; struct kvmppc_sr { u32 raw; u32 vsid; - bool Ks; - bool Kp; - bool nx; + bool Ks : 1; + bool Kp : 1; + bool nx : 1; + bool valid : 1; }; struct kvmppc_bat { u64 raw; u32 bepi; u32 bepi_mask; - bool vs; - bool vp; u32 brpn; u8 wimg; u8 pp; + bool vs : 1; + bool vp : 1; }; struct kvmppc_sid_map { u64 guest_vsid; u64 guest_esid; u64 host_vsid; - bool valid; + bool valid : 1; }; #define SID_MAP_BITS 9 @@ -70,7 +71,7 @@ struct kvmppc_sid_map { struct kvmppc_vcpu_book3s { struct kvm_vcpu vcpu; - struct kvmppc_book3s_shadow_vcpu shadow_vcpu; + struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; struct kvmppc_sid_map sid_map[SID_MAP_NUM]; struct kvmppc_slb slb[64]; struct { @@ -82,9 +83,10 @@ struct kvmppc_vcpu_book3s { struct kvmppc_bat ibat[8]; struct kvmppc_bat dbat[8]; u64 hid[6]; + u64 gqr[8]; int slb_nr; + u32 dsisr; u64 sdr1; - u64 dsisr; u64 hior; u64 msr_mask; u64 vsid_first; @@ -98,15 +100,15 @@ struct kvmppc_vcpu_book3s { #define CONTEXT_GUEST 1 #define CONTEXT_GUEST_END 2 -#define VSID_REAL 0xfffffffffff00000 -#define VSID_REAL_DR 0xffffffffffe00000 -#define VSID_REAL_IR 0xffffffffffd00000 -#define VSID_BAT 0xffffffffffc00000 -#define VSID_PR 0x8000000000000000 +#define VSID_REAL 0x1fffffffffc00000ULL +#define VSID_BAT 0x1fffffffffb00000ULL +#define VSID_REAL_DR 0x2000000000000000ULL +#define VSID_REAL_IR 0x4000000000000000ULL +#define VSID_PR 0x8000000000000000ULL -extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask); +extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask); extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); -extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end); +extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); @@ -114,11 +116,13 @@ extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data); -extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, bool data); -extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr); +extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); +extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data); extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec); extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val); 
+extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); +extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); extern u32 kvmppc_trampoline_lowmem; extern u32 kvmppc_trampoline_enter; @@ -126,6 +130,8 @@ extern void kvmppc_rmcall(ulong srr0, ulong srr1); extern void kvmppc_load_up_fpu(void); extern void kvmppc_load_up_altivec(void); extern void kvmppc_load_up_vsx(void); +extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); +extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { @@ -140,7 +146,108 @@ static inline ulong dsisr(void) } extern void kvm_return_point(void); +static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu); + +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) +{ + if ( num < 14 ) { + to_svcpu(vcpu)->gpr[num] = val; + to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; + } else + vcpu->arch.gpr[num] = val; +} + +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) +{ + if ( num < 14 ) + return to_svcpu(vcpu)->gpr[num]; + else + return vcpu->arch.gpr[num]; +} + +static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) +{ + to_svcpu(vcpu)->cr = val; + to_book3s(vcpu)->shadow_vcpu->cr = val; +} + +static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->cr; +} + +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +{ + to_svcpu(vcpu)->xer = val; + to_book3s(vcpu)->shadow_vcpu->xer = val; +} + +static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->xer; +} + +static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) +{ + to_svcpu(vcpu)->ctr = val; +} + +static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->ctr; +} + +static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) +{ + to_svcpu(vcpu)->lr = val; +} + +static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->lr; +} + +static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) +{ + to_svcpu(vcpu)->pc = val; +} + +static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->pc; +} + +static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +{ + ulong pc = kvmppc_get_pc(vcpu); + struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + + /* Load the instruction manually if it failed to do so in the + * exit path */ + if (svcpu->last_inst == KVM_INST_FETCH_FAILED) + kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); + + return svcpu->last_inst; +} + +static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) +{ + return to_svcpu(vcpu)->fault_dar; +} + +/* Magic register values loaded into r3 and r4 before the 'sc' assembly + * instruction for the OSI hypercalls */ +#define OSI_SC_MAGIC_R3 0x113724FA +#define OSI_SC_MAGIC_R4 0x77810F9B #define INS_DCBZ 0x7c0007ec +/* Also add subarch specific defines */ + +#ifdef CONFIG_PPC_BOOK3S_32 +#include <asm/kvm_book3s_32.h> +#else +#include <asm/kvm_book3s_64.h> +#endif + #endif /* __ASM_KVM_BOOK3S_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h new file mode 100644 index 000000000000..de604db135f5 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_book3s_32.h @@ -0,0 +1,42 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright SUSE Linux Products GmbH 2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +#ifndef __ASM_KVM_BOOK3S_32_H__ +#define __ASM_KVM_BOOK3S_32_H__ + +static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) +{ + return to_book3s(vcpu)->shadow_vcpu; +} + +#define PTE_SIZE 12 +#define VSID_ALL 0 +#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */ +#define SR_KP 0x20000000 +#define PTE_V 0x80000000 +#define PTE_SEC 0x00000040 +#define PTE_M 0x00000010 +#define PTE_R 0x00000100 +#define PTE_C 0x00000080 + +#define SID_SHIFT 28 +#define ESID_MASK 0xf0000000 +#define VSID_MASK 0x00fffffff0000000ULL + +#endif /* __ASM_KVM_BOOK3S_32_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h new file mode 100644 index 000000000000..4cadd612d575 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -0,0 +1,28 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright SUSE Linux Products GmbH 2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +#ifndef __ASM_KVM_BOOK3S_64_H__ +#define __ASM_KVM_BOOK3S_64_H__ + +static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) +{ + return &get_paca()->shadow_vcpu; +} + +#endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 183461b48407..36fdb3aff30b 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -22,7 +22,7 @@ #ifdef __ASSEMBLY__ -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +#ifdef CONFIG_KVM_BOOK3S_HANDLER #include <asm/kvm_asm.h> @@ -55,7 +55,7 @@ kvmppc_resume_\intno: .macro DO_KVM intno .endm -#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */ +#endif /* CONFIG_KVM_BOOK3S_HANDLER */ #else /*__ASSEMBLY__ */ @@ -63,12 +63,33 @@ struct kvmppc_book3s_shadow_vcpu { ulong gpr[14]; u32 cr; u32 xer; + + u32 fault_dsisr; + u32 last_inst; + ulong ctr; + ulong lr; + ulong pc; + ulong shadow_srr1; + ulong fault_dar; + ulong host_r1; ulong host_r2; ulong handler; ulong scratch0; ulong scratch1; ulong vmhandler; + u8 in_guest; + +#ifdef CONFIG_PPC_BOOK3S_32 + u32 sr[16]; /* Guest SRs */ +#endif +#ifdef CONFIG_PPC_BOOK3S_64 + u8 slb_max; /* highest used guest slb entry */ + struct { + u64 esid; + u64 vsid; + } slb[64]; /* guest SLB */ +#endif }; #endif /*__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h new file mode 100644 index 000000000000..9c9ba3d59b1b --- /dev/null +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -0,0 +1,96 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright SUSE Linux Products GmbH 2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +#ifndef __ASM_KVM_BOOKE_H__ +#define __ASM_KVM_BOOKE_H__ + +#include <linux/types.h> +#include <linux/kvm_host.h> + +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) +{ + vcpu->arch.gpr[num] = val; +} + +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) +{ + return vcpu->arch.gpr[num]; +} + +static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.cr = val; +} + +static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.cr; +} + +static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) +{ + vcpu->arch.xer = val; +} + +static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.xer; +} + +static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.last_inst; +} + +static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) +{ + vcpu->arch.ctr = val; +} + +static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.ctr; +} + +static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) +{ + vcpu->arch.lr = val; +} + +static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.lr; +} + +static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) +{ + vcpu->arch.pc = val; +} + +static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.pc; +} + +static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.fault_dear; +} + +#endif /* __ASM_KVM_BOOKE_H__ */ diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h new file mode 100644 index 000000000000..94f05de9ad04 --- /dev/null +++ b/arch/powerpc/include/asm/kvm_fpu.h @@ -0,0 +1,85 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright Novell Inc. 
2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +#ifndef __ASM_KVM_FPU_H__ +#define __ASM_KVM_FPU_H__ + +#include <linux/types.h> + +extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1); +extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1); +extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1); + +extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2); +extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2); +extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2); +extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2); + +extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, + u32 *src3); +extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, + u32 *src3); +extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, + u32 *src3); +extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, + u32 *src3); +extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2, + u32 *src3); + +#define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \ + u64 *dst, u64 *src1); +#define FPD_TWO_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \ + u64 *dst, u64 *src1, u64 *src2); +#define FPD_THREE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \ + u64 *dst, u64 *src1, u64 *src2, u64 *src3); + +extern void fpd_fcmpu(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2); +extern void fpd_fcmpo(u64 *fpscr, u32 *cr, u64 *src1, u64 *src2); + +FPD_ONE_IN(fsqrts) +FPD_ONE_IN(frsqrtes) +FPD_ONE_IN(fres) +FPD_ONE_IN(frsp) +FPD_ONE_IN(fctiw) +FPD_ONE_IN(fctiwz) +FPD_ONE_IN(fsqrt) +FPD_ONE_IN(fre) +FPD_ONE_IN(frsqrte) +FPD_ONE_IN(fneg) +FPD_ONE_IN(fabs) +FPD_TWO_IN(fadds) +FPD_TWO_IN(fsubs) +FPD_TWO_IN(fdivs) +FPD_TWO_IN(fmuls) +FPD_TWO_IN(fcpsgn) +FPD_TWO_IN(fdiv) +FPD_TWO_IN(fadd) +FPD_TWO_IN(fmul) +FPD_TWO_IN(fsub) +FPD_THREE_IN(fmsubs) +FPD_THREE_IN(fmadds) +FPD_THREE_IN(fnmsubs) +FPD_THREE_IN(fnmadds) +FPD_THREE_IN(fsel) +FPD_THREE_IN(fmsub) +FPD_THREE_IN(fmadd) +FPD_THREE_IN(fnmsub) +FPD_THREE_IN(fnmadd) + +#endif diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 5e5bae7e152f..0c9ad869decd 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -66,7 +66,7 @@ struct kvm_vcpu_stat { u32 dec_exits; u32 ext_intr_exits; u32 halt_wakeup; -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S u32 pf_storage; u32 pf_instruc; u32 sp_storage; @@ -124,12 +124,12 @@ struct kvm_arch { }; struct kvmppc_pte { - u64 eaddr; + ulong eaddr; u64 vpage; - u64 raddr; - bool may_read; - bool may_write; - bool may_execute; + ulong raddr; + bool may_read : 1; + bool may_write : 1; + bool may_execute : 1; }; struct kvmppc_mmu { @@ -145,7 +145,7 @@ struct kvmppc_mmu { int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); void (*reset_msr)(struct kvm_vcpu *vcpu); void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); - int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid); + int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data); bool (*is_dcbz32)(struct kvm_vcpu *vcpu); }; @@ -160,7 +160,7 @@ struct hpte_cache { struct kvm_vcpu_arch { ulong host_stack; u32 host_pid; -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S ulong host_msr; ulong host_r2; void *host_retip; @@ 
-175,7 +175,7 @@ struct kvm_vcpu_arch { ulong gpr[32]; u64 fpr[32]; - u32 fpscr; + u64 fpscr; #ifdef CONFIG_ALTIVEC vector128 vr[32]; @@ -186,19 +186,23 @@ struct kvm_vcpu_arch { u64 vsr[32]; #endif +#ifdef CONFIG_PPC_BOOK3S + /* For Gekko paired singles */ + u32 qpr[32]; +#endif + +#ifdef CONFIG_BOOKE ulong pc; ulong ctr; ulong lr; -#ifdef CONFIG_BOOKE ulong xer; u32 cr; #endif ulong msr; -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S ulong shadow_msr; - ulong shadow_srr1; ulong hflags; ulong guest_owned_ext; #endif @@ -253,20 +257,22 @@ struct kvm_vcpu_arch { struct dentry *debugfs_exit_timing; #endif +#ifdef CONFIG_BOOKE u32 last_inst; -#ifdef CONFIG_PPC64 - ulong fault_dsisr; -#endif ulong fault_dear; ulong fault_esr; ulong queued_dear; ulong queued_esr; +#endif gpa_t paddr_accessed; u8 io_gpr; /* GPR used as IO source/target */ u8 mmio_is_bigendian; + u8 mmio_sign_extend; u8 dcr_needed; u8 dcr_is_write; + u8 osi_needed; + u8 osi_enabled; u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ @@ -275,7 +281,7 @@ struct kvm_vcpu_arch { u64 dec_jiffies; unsigned long pending_exceptions; -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S struct hpte_cache hpte_cache[HPTEG_CACHE_NUM]; int hpte_cache_offset; #endif diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e2642829e435..18d139ec2d22 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -30,6 +30,8 @@ #include <linux/kvm_host.h> #ifdef CONFIG_PPC_BOOK3S #include <asm/kvm_book3s.h> +#else +#include <asm/kvm_booke.h> #endif enum emulation_result { @@ -37,6 +39,7 @@ enum emulation_result { EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ EMULATE_DO_DCR, /* kvm_run filled with DCR request */ EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_AGAIN, /* something went wrong. 
go again */ }; extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); @@ -48,8 +51,11 @@ extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_bigendian); +extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, + int is_bigendian); extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, - u32 val, unsigned int bytes, int is_bigendian); + u64 val, unsigned int bytes, int is_bigendian); extern int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu); @@ -63,6 +69,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); +extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, @@ -88,6 +95,8 @@ extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq); extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int op, int *advance); @@ -99,81 +108,37 @@ extern void kvmppc_booke_exit(void); extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); -#ifdef CONFIG_PPC_BOOK3S - -/* We assume we're always acting on the current vcpu */ - -static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) -{ - if ( num < 14 ) { - get_paca()->shadow_vcpu.gpr[num] = val; - to_book3s(vcpu)->shadow_vcpu.gpr[num] = val; - } else - vcpu->arch.gpr[num] = val; -} - -static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) -{ - if ( num < 14 ) - return get_paca()->shadow_vcpu.gpr[num]; - else - return vcpu->arch.gpr[num]; -} - -static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) -{ - get_paca()->shadow_vcpu.cr = val; - to_book3s(vcpu)->shadow_vcpu.cr = val; -} - -static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) -{ - return get_paca()->shadow_vcpu.cr; -} - -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) -{ - get_paca()->shadow_vcpu.xer = val; - to_book3s(vcpu)->shadow_vcpu.xer = val; -} - -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) +/* + * Cuts out inst bits with ordering according to spec. + * That means the leftmost bit is zero. All given bits are included. + */ +static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb) { - return get_paca()->shadow_vcpu.xer; -} + u32 r; + u32 mask; -#else + BUG_ON(msb > lsb); -static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) -{ - vcpu->arch.gpr[num] = val; -} + mask = (1 << (lsb - msb + 1)) - 1; + r = (inst >> (63 - lsb)) & mask; -static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) -{ - return vcpu->arch.gpr[num]; + return r; } -static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) +/* + * Replaces inst bits with ordering according to spec. 
+ */ +static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) { - vcpu->arch.cr = val; -} + u32 r; + u32 mask; -static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.cr; -} + BUG_ON(msb > lsb); -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) -{ - vcpu->arch.xer = val; -} + mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb); + r = (inst & ~mask) | ((value << (63 - lsb)) & mask); -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.xer; + return r; } -#endif - #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 26383e0778aa..81fb41289d6c 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -27,6 +27,8 @@ extern int __init_new_context(void); extern void __destroy_context(int context_id); static inline void mmu_context_init(void) { } #else +extern unsigned long __init_new_context(void); +extern void __destroy_context(unsigned long context_id); extern void mmu_context_init(void); #endif diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 971dfa4815f0..8ce7963ad41d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -23,7 +23,7 @@ #include <asm/page.h> #include <asm/exception-64e.h> #ifdef CONFIG_KVM_BOOK3S_64_HANDLER -#include <asm/kvm_book3s_64_asm.h> +#include <asm/kvm_book3s_asm.h> #endif register struct paca_struct *local_paca asm("r13"); @@ -137,15 +137,9 @@ struct paca_struct { u64 startpurr; /* PURR/TB value snapshot */ u64 startspurr; /* SPURR value snapshot */ -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER - struct { - u64 esid; - u64 vsid; - } kvm_slb[64]; /* guest SLB */ +#ifdef CONFIG_KVM_BOOK3S_HANDLER /* We use this to store guest state in */ struct kvmppc_book3s_shadow_vcpu shadow_vcpu; - u8 kvm_slb_max; /* highest used guest slb entry */ - u8 kvm_in_guest; /* are we inside the guest? 
*/ #endif }; diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 221ba6240464..7492fe8ad6e4 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -229,6 +229,9 @@ struct thread_struct { unsigned long spefscr; /* SPE & eFP status */ int used_spe; /* set if process has used spe */ #endif /* CONFIG_SPE */ +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER + void* kvm_shadow_vcpu; /* KVM internal data */ +#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ }; #define ARCH_MIN_TASKALIGN 16 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index b68f025924a8..d62fdf4e504b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -293,10 +293,12 @@ #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ #define HID1_PS (1<<16) /* 750FX PLL selection */ #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ +#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */ #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ #define SPRN_IABR2 0x3FA /* 83xx */ #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ #define SPRN_HID4 0x3F4 /* 970 HID4 */ +#define SPRN_HID4_GEKKO 0x3F3 /* Gekko HID4 */ #define SPRN_HID5 0x3F6 /* 970 HID5 */ #define SPRN_HID6 0x3F9 /* BE HID 6 */ #define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */ @@ -465,6 +467,14 @@ #define SPRN_VRSAVE 0x100 /* Vector Register Save Register */ #define SPRN_XER 0x001 /* Fixed Point Exception Register */ +#define SPRN_MMCR0_GEKKO 0x3B8 /* Gekko Monitor Mode Control Register 0 */ +#define SPRN_MMCR1_GEKKO 0x3BC /* Gekko Monitor Mode Control Register 1 */ +#define SPRN_PMC1_GEKKO 0x3B9 /* Gekko Performance Monitor Control 1 */ +#define SPRN_PMC2_GEKKO 0x3BA /* Gekko Performance Monitor Control 2 */ +#define SPRN_PMC3_GEKKO 0x3BD /* Gekko Performance Monitor Control 3 */ +#define SPRN_PMC4_GEKKO 0x3BE /* Gekko Performance Monitor Control 4 */ +#define SPRN_WPAR_GEKKO 0x399 /* Gekko Write Pipe Address Register */ + #define SPRN_SCOMC 0x114 /* SCOM Access Control */ #define SPRN_SCOMD 0x115 /* SCOM Access DATA */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 28a686fb269c..496cc5b3984f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -50,6 +50,9 @@ #endif #ifdef CONFIG_KVM #include <linux/kvm_host.h> +#ifndef CONFIG_BOOKE +#include <asm/kvm_book3s.h> +#endif #endif #ifdef CONFIG_PPC32 @@ -105,6 +108,9 @@ int main(void) DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe)); #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER + DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); +#endif DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); @@ -191,33 +197,9 @@ int main(void) DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); #ifdef CONFIG_KVM_BOOK3S_64_HANDLER - DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest)); - DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb)); - DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max)); - DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr)); - DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer)); - DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0])); - DEFINE(PACA_KVM_R1, 
offsetof(struct paca_struct, shadow_vcpu.gpr[1])); - DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2])); - DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3])); - DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4])); - DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5])); - DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6])); - DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7])); - DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8])); - DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9])); - DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10])); - DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11])); - DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12])); - DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13])); - DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1)); - DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2)); - DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct, - shadow_vcpu.vmhandler)); - DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct, - shadow_vcpu.scratch0)); - DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct, - shadow_vcpu.scratch1)); + DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); + DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb)); + DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max)); #endif #endif /* CONFIG_PPC64 */ @@ -228,8 +210,8 @@ int main(void) /* Interrupt register frame */ DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); -#ifdef CONFIG_PPC64 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); +#ifdef CONFIG_PPC64 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. 
*/ DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); @@ -412,9 +394,6 @@ int main(void) DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); - DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); - DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); @@ -422,27 +401,68 @@ int main(void) DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); - DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); - DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); - DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); - - /* book3s_64 */ -#ifdef CONFIG_PPC64 - DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); + /* book3s */ +#ifdef CONFIG_PPC_BOOK3S DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); - DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2)); DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); - DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); + DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - + offsetof(struct kvmppc_vcpu_book3s, vcpu)); + DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr)); + DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer)); + DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr)); + DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr)); + DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc)); + DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0])); + DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1])); + DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2])); + DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3])); + DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4])); + DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5])); + DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6])); + DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7])); + DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8])); + DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9])); + DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10])); + DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11])); + DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12])); + DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13])); + DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1)); + DEFINE(SVCPU_HOST_R2, offsetof(struct 
kvmppc_book3s_shadow_vcpu, host_r2)); + DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu, + vmhandler)); + DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu, + scratch0)); + DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu, + scratch1)); + DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu, + in_guest)); + DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu, + fault_dsisr)); + DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu, + fault_dar)); + DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu, + last_inst)); + DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu, + shadow_srr1)); +#ifdef CONFIG_PPC_BOOK3S_32 + DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr)); +#endif #else DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); -#endif /* CONFIG_PPC64 */ + DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); + DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); + DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); + DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); + DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); +#endif /* CONFIG_PPC_BOOK3S */ #endif #ifdef CONFIG_44x DEFINE(PGD_T_LOG2, PGD_T_LOG2); diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index e025e89fe93e..98c4b29a56f4 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -33,6 +33,7 @@ #include <asm/asm-offsets.h> #include <asm/ptrace.h> #include <asm/bug.h> +#include <asm/kvm_book3s_asm.h> /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ #define LOAD_BAT(n, reg, RA, RB) \ @@ -303,6 +304,7 @@ __secondary_hold_acknowledge: */ #define EXCEPTION(n, label, hdlr, xfer) \ . = n; \ + DO_KVM n; \ label: \ EXCEPTION_PROLOG; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ @@ -358,6 +360,7 @@ i##n: \ * -- paulus. */ . = 0x200 + DO_KVM 0x200 mtspr SPRN_SPRG_SCRATCH0,r10 mtspr SPRN_SPRG_SCRATCH1,r11 mfcr r10 @@ -381,6 +384,7 @@ i##n: \ /* Data access exception. */ . = 0x300 + DO_KVM 0x300 DataAccess: EXCEPTION_PROLOG mfspr r10,SPRN_DSISR @@ -397,6 +401,7 @@ DataAccess: /* Instruction access exception. */ . = 0x400 + DO_KVM 0x400 InstructionAccess: EXCEPTION_PROLOG andis. r0,r9,0x4000 /* no pte found? */ @@ -413,6 +418,7 @@ InstructionAccess: /* Alignment exception */ . = 0x600 + DO_KVM 0x600 Alignment: EXCEPTION_PROLOG mfspr r4,SPRN_DAR @@ -427,6 +433,7 @@ Alignment: /* Floating-point unavailable */ . = 0x800 + DO_KVM 0x800 FPUnavailable: BEGIN_FTR_SECTION /* @@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE) /* System call */ . = 0xc00 + DO_KVM 0xc00 SystemCall: EXCEPTION_PROLOG EXC_XFER_EE_LITE(0xc00, DoSyscall) @@ -467,9 +475,11 @@ SystemCall: * by executing an altivec instruction. */ . = 0xf00 + DO_KVM 0xf00 b PerformanceMonitor . 
= 0xf20 + DO_KVM 0xf20 b AltiVecUnavailable /* @@ -882,6 +892,10 @@ __secondary_start: RFI #endif /* CONFIG_SMP */ +#ifdef CONFIG_KVM_BOOK3S_HANDLER +#include "../kvm/book3s_rmhandlers.S" +#endif + /* * Those generic dummy functions are kept for CPUs not * included in CONFIG_6xx diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index bed9a29ee383..844a44b64472 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -37,7 +37,7 @@ #include <asm/firmware.h> #include <asm/page_64.h> #include <asm/irqflags.h> -#include <asm/kvm_book3s_64_asm.h> +#include <asm/kvm_book3s_asm.h> /* The physical memory is layed out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow @@ -169,7 +169,7 @@ exception_marker: /* KVM trampoline code needs to be close to the interrupt handlers */ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER -#include "../kvm/book3s_64_rmhandlers.S" +#include "../kvm/book3s_rmhandlers.S" #endif _GLOBAL(generic_secondary_thread_init) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index ab3e392ac63c..bc9f39d2598b 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset); EXPORT_SYMBOL(start_thread); EXPORT_SYMBOL(kernel_thread); +#ifndef CONFIG_BOOKE +EXPORT_SYMBOL_GPL(cvt_df); +EXPORT_SYMBOL_GPL(cvt_fd); +#endif EXPORT_SYMBOL(giveup_fpu); #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL(giveup_altivec); diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 689a57c2ac80..73c0a3f64ed1 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -147,7 +147,7 @@ static int __init kvmppc_44x_init(void) if (r) return r; - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); + return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); } static void __exit kvmppc_44x_exit(void) diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 60624cc9f4d4..b7baff78f90c 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -22,12 +22,34 @@ config KVM select ANON_INODES select KVM_MMIO +config KVM_BOOK3S_HANDLER + bool + +config KVM_BOOK3S_32_HANDLER + bool + select KVM_BOOK3S_HANDLER + config KVM_BOOK3S_64_HANDLER bool + select KVM_BOOK3S_HANDLER + +config KVM_BOOK3S_32 + tristate "KVM support for PowerPC book3s_32 processors" + depends on EXPERIMENTAL && PPC_BOOK3S_32 && !SMP && !PTE_64BIT + select KVM + select KVM_BOOK3S_32_HANDLER + ---help--- + Support running unmodified book3s_32 guest kernels + in virtual machines on book3s_32 host processors. + + This module provides access to the hardware capabilities through + a character device node named /dev/kvm. + + If unsure, say N. config KVM_BOOK3S_64 tristate "KVM support for PowerPC book3s_64 processors" - depends on EXPERIMENTAL && PPC64 + depends on EXPERIMENTAL && PPC_BOOK3S_64 select KVM select KVM_BOOK3S_64_HANDLER ---help--- diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 56484d652377..ff436066bf77 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -14,7 +14,7 @@ CFLAGS_emulate.o := -I. 
common-objs-y += powerpc.o emulate.o obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o -obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o +obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o AFLAGS_booke_interrupts.o := -I$(obj) @@ -40,17 +40,31 @@ kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) kvm-book3s_64-objs := \ $(common-objs-y) \ + fpu.o \ + book3s_paired_singles.o \ book3s.o \ - book3s_64_emulate.o \ - book3s_64_interrupts.o \ + book3s_emulate.o \ + book3s_interrupts.o \ book3s_64_mmu_host.o \ book3s_64_mmu.o \ book3s_32_mmu.o kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs) +kvm-book3s_32-objs := \ + $(common-objs-y) \ + fpu.o \ + book3s_paired_singles.o \ + book3s.o \ + book3s_emulate.o \ + book3s_interrupts.o \ + book3s_32_mmu_host.o \ + book3s_32_mmu.o +kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) + kvm-objs := $(kvm-objs-m) $(kvm-objs-y) obj-$(CONFIG_KVM_440) += kvm.o obj-$(CONFIG_KVM_E500) += kvm.o obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o +obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 604af29b71ed..b998abf1a63d 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -16,6 +16,7 @@ #include <linux/kvm_host.h> #include <linux/err.h> +#include <linux/slab.h> #include <asm/reg.h> #include <asm/cputable.h> @@ -29,6 +30,7 @@ #include <linux/gfp.h> #include <linux/sched.h> #include <linux/vmalloc.h> +#include <linux/highmem.h> #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU @@ -36,7 +38,15 @@ /* #define EXIT_DEBUG_SIMPLE */ /* #define DEBUG_EXT */ -static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); +static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, + ulong msr); + +/* Some compatibility defines */ +#ifdef CONFIG_PPC_BOOK3S_32 +#define MSR_USER32 MSR_USER +#define MSR_USER64 MSR_USER +#define HW_PAGE_SIZE PAGE_SIZE +#endif struct kvm_stats_debugfs_item debugfs_entries[] = { { "exits", VCPU_STAT(sum_exits) }, @@ -69,18 +79,26 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); - memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu, +#ifdef CONFIG_PPC_BOOK3S_64 + memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb)); + memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); - get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; + to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max; +#endif + +#ifdef CONFIG_PPC_BOOK3S_32 + current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; +#endif } void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) { - memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); - memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, +#ifdef CONFIG_PPC_BOOK3S_64 + memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb)); + memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, sizeof(get_paca()->shadow_vcpu)); - to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; + to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max; +#endif kvmppc_giveup_ext(vcpu, MSR_FP); kvmppc_giveup_ext(vcpu, MSR_VEC); @@ -131,18 +149,22 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) } } - if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) || - (vcpu->arch.msr 
& MSR_PR) != (old_msr & MSR_PR)) { + if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) != + (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { kvmppc_mmu_flush_segments(vcpu); - kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); + kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); } + + /* Preload FPU if it's enabled */ + if (vcpu->arch.msr & MSR_FP) + kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); } void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { - vcpu->arch.srr0 = vcpu->arch.pc; + vcpu->arch.srr0 = kvmppc_get_pc(vcpu); vcpu->arch.srr1 = vcpu->arch.msr | flags; - vcpu->arch.pc = to_book3s(vcpu)->hior + vec; + kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); vcpu->arch.mmu.reset_msr(vcpu); } @@ -218,6 +240,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); } +void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq) +{ + kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); +} + int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) { int deliver = 1; @@ -302,7 +330,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); #endif priority = __ffs(*pending); - while (priority <= (sizeof(unsigned int) * 8)) { + while (priority < BOOK3S_IRQPRIO_MAX) { if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && (priority != BOOK3S_IRQPRIO_DECREMENTER)) { /* DEC interrupts get cleared by mtdec */ @@ -318,13 +346,18 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) { + u32 host_pvr; + vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; vcpu->arch.pvr = pvr; +#ifdef CONFIG_PPC_BOOK3S_64 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { kvmppc_mmu_book3s_64_init(vcpu); to_book3s(vcpu)->hior = 0xfff00000; to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; - } else { + } else +#endif + { kvmppc_mmu_book3s_32_init(vcpu); to_book3s(vcpu)->hior = 0; to_book3s(vcpu)->msr_mask = 0xffffffffULL; @@ -337,6 +370,32 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) !strcmp(cur_cpu_spec->platform, "ppc970")) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; + /* Cell performs badly if MSR_FEx are set. So let's hope nobody + really needs them in a VM on Cell and force disable them. */ + if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) + to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); + +#ifdef CONFIG_PPC_BOOK3S_32 + /* 32 bit Book3S always has 32 byte dcbz */ + vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; +#endif + + /* On some CPUs we can execute paired single operations natively */ + asm ( "mfpvr %0" : "=r"(host_pvr)); + switch (host_pvr) { + case 0x00080200: /* lonestar 2.0 */ + case 0x00088202: /* lonestar 2.2 */ + case 0x70000100: /* gekko 1.0 */ + case 0x00080100: /* gekko 2.0 */ + case 0x00083203: /* gekko 2.3a */ + case 0x00083213: /* gekko 2.3b */ + case 0x00083204: /* gekko 2.4 */ + case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ + case 0x00087200: /* broadway */ + vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; + /* Enable HID2.PSE - in case we need it later */ + mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); + } } /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. 
To @@ -350,34 +409,29 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) */ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) { - bool touched = false; - hva_t hpage; + struct page *hpage; + u64 hpage_offset; u32 *page; int i; - hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); - if (kvm_is_error_hva(hpage)) + hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); + if (is_error_page(hpage)) return; - hpage |= pte->raddr & ~PAGE_MASK; - hpage &= ~0xFFFULL; - - page = vmalloc(HW_PAGE_SIZE); - - if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE)) - goto out; + hpage_offset = pte->raddr & ~PAGE_MASK; + hpage_offset &= ~0xFFFULL; + hpage_offset /= 4; - for (i=0; i < HW_PAGE_SIZE / 4; i++) - if ((page[i] & 0xff0007ff) == INS_DCBZ) { - page[i] &= 0xfffffff7; // reserved instruction, so we trap - touched = true; - } + get_page(hpage); + page = kmap_atomic(hpage, KM_USER0); - if (touched) - copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE); + /* patch dcbz into reserved instruction, so we trap */ + for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) + if ((page[i] & 0xff0007ff) == INS_DCBZ) + page[i] &= 0xfffffff7; -out: - vfree(page); + kunmap_atomic(page, KM_USER0); + put_page(hpage); } static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, @@ -391,15 +445,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, } else { pte->eaddr = eaddr; pte->raddr = eaddr & 0xffffffff; - pte->vpage = eaddr >> 12; - switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { - case 0: - pte->vpage |= VSID_REAL; - case MSR_DR: - pte->vpage |= VSID_REAL_DR; - case MSR_IR: - pte->vpage |= VSID_REAL_IR; - } + pte->vpage = VSID_REAL | eaddr >> 12; pte->may_read = true; pte->may_write = true; pte->may_execute = true; @@ -434,55 +480,55 @@ err: return kvmppc_bad_hva(); } -int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr) +int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, + bool data) { struct kvmppc_pte pte; - hva_t hva = eaddr; vcpu->stat.st++; - if (kvmppc_xlate(vcpu, eaddr, false, &pte)) - goto err; + if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) + return -ENOENT; - hva = kvmppc_pte_to_hva(vcpu, &pte, false); - if (kvm_is_error_hva(hva)) - goto err; + *eaddr = pte.raddr; - if (copy_to_user((void __user *)hva, ptr, size)) { - printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva); - goto err; - } + if (!pte.may_write) + return -EPERM; - return 0; + if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) + return EMULATE_DO_MMIO; -err: - return -ENOENT; + return EMULATE_DONE; } -int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, +int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) { struct kvmppc_pte pte; - hva_t hva = eaddr; + hva_t hva = *eaddr; vcpu->stat.ld++; - if (kvmppc_xlate(vcpu, eaddr, data, &pte)) - goto err; + if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) + goto nopte; + + *eaddr = pte.raddr; hva = kvmppc_pte_to_hva(vcpu, &pte, true); if (kvm_is_error_hva(hva)) - goto err; + goto mmio; if (copy_from_user(ptr, (void __user *)hva, size)) { printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); - goto err; + goto mmio; } - return 0; + return EMULATE_DONE; -err: +nopte: return -ENOENT; +mmio: + return EMULATE_DO_MMIO; } static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) @@ -499,12 +545,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, int page_found = 0; struct kvmppc_pte pte; bool is_mmio = 
false; + bool dr = (vcpu->arch.msr & MSR_DR) ? true : false; + bool ir = (vcpu->arch.msr & MSR_IR) ? true : false; + u64 vsid; - if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) { - relocated = (vcpu->arch.msr & MSR_DR); - } else { - relocated = (vcpu->arch.msr & MSR_IR); - } + relocated = data ? dr : ir; /* Resolve real address if translation turned on */ if (relocated) { @@ -516,14 +561,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.raddr = eaddr & 0xffffffff; pte.eaddr = eaddr; pte.vpage = eaddr >> 12; - switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { - case 0: - pte.vpage |= VSID_REAL; - case MSR_DR: - pte.vpage |= VSID_REAL_DR; - case MSR_IR: - pte.vpage |= VSID_REAL_IR; - } + } + + switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { + case 0: + pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); + break; + case MSR_DR: + case MSR_IR: + vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); + + if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR) + pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); + else + pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); + pte.vpage |= vsid; + + if (vsid == -1) + page_found = -EINVAL; + break; } if (vcpu->arch.mmu.is_dcbz32(vcpu) && @@ -538,20 +594,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (page_found == -ENOENT) { /* Page not found in guest PTE entries */ - vcpu->arch.dear = vcpu->arch.fault_dear; - to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; - vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); + vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); + to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; + vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ - vcpu->arch.dear = vcpu->arch.fault_dear; - to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; + vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); + to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; - vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); + vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ - vcpu->arch.dear = vcpu->arch.fault_dear; + vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); } else if (!is_mmio && kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { @@ -583,11 +639,13 @@ static inline int get_fpr_index(int i) } /* Give up external provider (FPU, Altivec, VSX) */ -static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) +void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) { struct thread_struct *t = ¤t->thread; u64 *vcpu_fpr = vcpu->arch.fpr; +#ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; +#endif u64 *thread_fpr = (u64*)t->fpr; int i; @@ -629,21 +687,65 @@ static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) kvmppc_recalc_shadow_msr(vcpu); } +static int kvmppc_read_inst(struct kvm_vcpu *vcpu) +{ + ulong srr0 = kvmppc_get_pc(vcpu); + u32 last_inst = kvmppc_get_last_inst(vcpu); + int ret; + + ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); + if (ret == -ENOENT) { + vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1); + vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0); + vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); + 
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); + return EMULATE_AGAIN; + } + + return EMULATE_DONE; +} + +static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr) +{ + + /* Need to do paired single emulation? */ + if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) + return EMULATE_DONE; + + /* Read out the instruction */ + if (kvmppc_read_inst(vcpu) == EMULATE_DONE) + /* Need to emulate */ + return EMULATE_FAIL; + + return EMULATE_AGAIN; +} + /* Handle external providers (FPU, Altivec, VSX) */ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr) { struct thread_struct *t = &current->thread; u64 *vcpu_fpr = vcpu->arch.fpr; +#ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; +#endif u64 *thread_fpr = (u64*)t->fpr; int i; + /* When we have paired singles, we emulate in software */ + if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) + return RESUME_GUEST; + if (!(vcpu->arch.msr & msr)) { kvmppc_book3s_queue_irqprio(vcpu, exit_nr); return RESUME_GUEST; } + /* We already own the ext */ + if (vcpu->arch.guest_owned_ext & msr) { + return RESUME_GUEST; + } + #ifdef DEBUG_EXT printk(KERN_INFO "Loading up ext 0x%lx\n", msr); #endif @@ -696,21 +798,33 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, run->ready_for_interrupt_injection = 1; #ifdef EXIT_DEBUG printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", - exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, - kvmppc_get_dec(vcpu), vcpu->arch.msr); + exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), + kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1); #elif defined (EXIT_DEBUG_SIMPLE) if ((exit_nr != 0x900) && (exit_nr != 0x500)) printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", - exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, + exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), vcpu->arch.msr); #endif kvm_resched(vcpu); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: vcpu->stat.pf_instruc++; + +#ifdef CONFIG_PPC_BOOK3S_32 + /* We set segments as unused segments when invalidating them. So + * treat the respective fault as segment fault. */ + if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] + == SR_INVALID) { + kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); + r = RESUME_GUEST; + break; + } +#endif + /* only care about PTEG not found errors, but leave NX alone */ - if (vcpu->arch.shadow_srr1 & 0x40000000) { - r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); + if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) { + r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { @@ -719,37 +833,52 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * so we can't use the NX bit inside the guest. Let's cross our fingers, * that no guest that needs the dcbz hack does NX. 
*/ - kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); + kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); + r = RESUME_GUEST; } else { - vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000; + vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); - kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); + kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } break; case BOOK3S_INTERRUPT_DATA_STORAGE: + { + ulong dar = kvmppc_get_fault_dar(vcpu); vcpu->stat.pf_storage++; + +#ifdef CONFIG_PPC_BOOK3S_32 + /* We set segments as unused segments when invalidating them. So + * treat the respective fault as segment fault. */ + if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) { + kvmppc_mmu_map_segment(vcpu, dar); + r = RESUME_GUEST; + break; + } +#endif + /* The only case we need to handle is missing shadow PTEs */ - if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) { - r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr); + if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { + r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); } else { - vcpu->arch.dear = vcpu->arch.fault_dear; - to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; + vcpu->arch.dear = dar; + to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; kvmppc_book3s_queue_irqprio(vcpu, exit_nr); - kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL); + kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL); r = RESUME_GUEST; } break; + } case BOOK3S_INTERRUPT_DATA_SEGMENT: - if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) { - vcpu->arch.dear = vcpu->arch.fault_dear; + if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { + vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_SEGMENT); } r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_INST_SEGMENT: - if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) { + if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_SEGMENT); } @@ -764,18 +893,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; + case BOOK3S_INTERRUPT_PERFMON: + r = RESUME_GUEST; + break; case BOOK3S_INTERRUPT_PROGRAM: { enum emulation_result er; ulong flags; - flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; +program_interrupt: + flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; if (vcpu->arch.msr & MSR_PR) { #ifdef EXIT_DEBUG - printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst); + printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); #endif - if ((vcpu->arch.last_inst & 0xff0007ff) != + if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; @@ -789,33 +922,80 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case EMULATE_DONE: r = RESUME_GUEST_NV; break; + case EMULATE_AGAIN: + r = RESUME_GUEST; + break; case EMULATE_FAIL: printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", - __func__, vcpu->arch.pc, vcpu->arch.last_inst); + __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); kvmppc_core_queue_program(vcpu, flags); r = RESUME_GUEST; break; + case EMULATE_DO_MMIO: + run->exit_reason = KVM_EXIT_MMIO; + r = RESUME_HOST_NV; + break; default: BUG(); } break; } case 
BOOK3S_INTERRUPT_SYSCALL: -#ifdef EXIT_DEBUG - printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0)); -#endif - vcpu->stat.syscall_exits++; - kvmppc_book3s_queue_irqprio(vcpu, exit_nr); - r = RESUME_GUEST; + // XXX make user settable + if (vcpu->arch.osi_enabled && + (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && + (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { + u64 *gprs = run->osi.gprs; + int i; + + run->exit_reason = KVM_EXIT_OSI; + for (i = 0; i < 32; i++) + gprs[i] = kvmppc_get_gpr(vcpu, i); + vcpu->arch.osi_needed = 1; + r = RESUME_HOST_NV; + + } else { + vcpu->stat.syscall_exits++; + kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + r = RESUME_GUEST; + } break; case BOOK3S_INTERRUPT_FP_UNAVAIL: - r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP); - break; case BOOK3S_INTERRUPT_ALTIVEC: - r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC); - break; case BOOK3S_INTERRUPT_VSX: - r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX); + { + int ext_msr = 0; + + switch (exit_nr) { + case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break; + case BOOK3S_INTERRUPT_ALTIVEC: ext_msr = MSR_VEC; break; + case BOOK3S_INTERRUPT_VSX: ext_msr = MSR_VSX; break; + } + + switch (kvmppc_check_ext(vcpu, exit_nr)) { + case EMULATE_DONE: + /* everything ok - let's enable the ext */ + r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); + break; + case EMULATE_FAIL: + /* we need to emulate this instruction */ + goto program_interrupt; + break; + default: + /* nothing to worry about - go again */ + break; + } + break; + } + case BOOK3S_INTERRUPT_ALIGNMENT: + if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { + to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, + kvmppc_get_last_inst(vcpu)); + vcpu->arch.dear = kvmppc_alignment_dar(vcpu, + kvmppc_get_last_inst(vcpu)); + kvmppc_book3s_queue_irqprio(vcpu, exit_nr); + } + r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: case BOOK3S_INTERRUPT_TRACE: @@ -825,7 +1005,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, default: /* Ugh - bork here! What did we get? 
*/ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", - exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1); + exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1); r = RESUME_HOST; BUG(); break; @@ -852,7 +1032,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } #ifdef EXIT_DEBUG - printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r); + printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r); #endif return r; @@ -867,10 +1047,12 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - regs->pc = vcpu->arch.pc; + vcpu_load(vcpu); + + regs->pc = kvmppc_get_pc(vcpu); regs->cr = kvmppc_get_cr(vcpu); - regs->ctr = vcpu->arch.ctr; - regs->lr = vcpu->arch.lr; + regs->ctr = kvmppc_get_ctr(vcpu); + regs->lr = kvmppc_get_lr(vcpu); regs->xer = kvmppc_get_xer(vcpu); regs->msr = vcpu->arch.msr; regs->srr0 = vcpu->arch.srr0; @@ -887,6 +1069,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); + vcpu_put(vcpu); + return 0; } @@ -894,10 +1078,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; - vcpu->arch.pc = regs->pc; + vcpu_load(vcpu); + + kvmppc_set_pc(vcpu, regs->pc); kvmppc_set_cr(vcpu, regs->cr); - vcpu->arch.ctr = regs->ctr; - vcpu->arch.lr = regs->lr; + kvmppc_set_ctr(vcpu, regs->ctr); + kvmppc_set_lr(vcpu, regs->lr); kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); vcpu->arch.srr0 = regs->srr0; @@ -913,6 +1099,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); + vcpu_put(vcpu); + return 0; } @@ -922,6 +1110,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; + vcpu_load(vcpu); + sregs->pvr = vcpu->arch.pvr; sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; @@ -940,6 +1130,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; } } + + vcpu_put(vcpu); + return 0; } @@ -949,6 +1142,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; + vcpu_load(vcpu); + kvmppc_set_pvr(vcpu, sregs->pvr); vcpu3s->sdr1 = sregs->u.s.sdr1; @@ -975,6 +1170,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, /* Flush the MMU after messing with the segments */ kvmppc_mmu_pte_flush(vcpu, 0, 0); + + vcpu_put(vcpu); + return 0; } @@ -1042,24 +1240,33 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvmppc_vcpu_book3s *vcpu_book3s; struct kvm_vcpu *vcpu; - int err; + int err = -ENOMEM; - vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, - get_order(sizeof(struct kvmppc_vcpu_book3s))); - if (!vcpu_book3s) { - err = -ENOMEM; + vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); + if (!vcpu_book3s) goto out; - } + + memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s)); + + vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) + kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); + if (!vcpu_book3s->shadow_vcpu) + goto free_vcpu; vcpu = &vcpu_book3s->vcpu; err = kvm_vcpu_init(vcpu, kvm, id); if (err) - goto free_vcpu; + goto free_shadow_vcpu; vcpu->arch.host_retip = kvm_return_point; vcpu->arch.host_msr = mfmsr(); +#ifdef 
CONFIG_PPC_BOOK3S_64 /* default to book3s_64 (970fx) */ vcpu->arch.pvr = 0x3C0301; +#else + /* default to book3s_32 (750) */ + vcpu->arch.pvr = 0x84202; +#endif kvmppc_set_pvr(vcpu, vcpu->arch.pvr); vcpu_book3s->slb_nr = 64; @@ -1067,23 +1274,24 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem; vcpu->arch.trampoline_enter = kvmppc_trampoline_enter; vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; +#ifdef CONFIG_PPC_BOOK3S_64 vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall; +#else + vcpu->arch.rmcall = (ulong)kvmppc_rmcall; +#endif vcpu->arch.shadow_msr = MSR_USER64; - err = __init_new_context(); + err = kvmppc_mmu_init(vcpu); if (err < 0) - goto free_vcpu; - vcpu_book3s->context_id = err; - - vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1; - vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS; - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; + goto free_shadow_vcpu; return vcpu; +free_shadow_vcpu: + kfree(vcpu_book3s->shadow_vcpu); free_vcpu: - free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); + vfree(vcpu_book3s); out: return ERR_PTR(err); } @@ -1092,9 +1300,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); - __destroy_context(vcpu_book3s->context_id); kvm_vcpu_uninit(vcpu); - free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); + kfree(vcpu_book3s->shadow_vcpu); + vfree(vcpu_book3s); } extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); @@ -1102,8 +1310,12 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret; struct thread_struct ext_bkp; +#ifdef CONFIG_ALTIVEC bool save_vec = current->thread.used_vr; +#endif +#ifdef CONFIG_VSX bool save_vsx = current->thread.used_vsr; +#endif ulong ext_msr; /* No need to go into the guest when all we do is going out */ @@ -1144,6 +1356,10 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* XXX we get called with irq disabled - change that! */ local_irq_enable(); + /* Preload FPU if it's enabled */ + if (vcpu->arch.msr & MSR_FP) + kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); + ret = __kvmppc_vcpu_entry(kvm_run, vcpu); local_irq_disable(); @@ -1179,7 +1395,8 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) static int kvmppc_book3s_init(void) { - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE); + return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, + THIS_MODULE); } static void kvmppc_book3s_exit(void) diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index faf99f20d993..0b10503c8a4a 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -37,7 +37,7 @@ #define dprintk(X...) do { } while(0) #endif -#ifdef DEBUG_PTE +#ifdef DEBUG_MMU_PTE #define dprintk_pte(X...) printk(KERN_INFO X) #else #define dprintk_pte(X...) 
do { } while(0) @@ -45,6 +45,9 @@ #define PTEG_FLAG_ACCESSED 0x00000100 #define PTEG_FLAG_DIRTY 0x00000080 +#ifndef SID_SHIFT +#define SID_SHIFT 28 +#endif static inline bool check_debug_ip(struct kvm_vcpu *vcpu) { @@ -57,6 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu) static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); +static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, + u64 *vsid); static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) { @@ -66,13 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, bool data) { - struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr); + u64 vsid; struct kvmppc_pte pte; if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) return pte.vpage; - return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16); + kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); + return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16); } static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) @@ -142,8 +148,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, bat->bepi_mask); } if ((eaddr & bat->bepi_mask) == bat->bepi) { + u64 vsid; + kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, + eaddr >> SID_SHIFT, &vsid); + vsid <<= 16; + pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid; + pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); - pte->vpage = (eaddr >> 12) | VSID_BAT; pte->may_read = bat->pp; pte->may_write = bat->pp > 1; pte->may_execute = true; @@ -172,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_sr *sre; hva_t ptegp; u32 pteg[16]; - u64 ptem = 0; + u32 ptem = 0; int i; int found = 0; @@ -302,6 +313,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, /* And then put in the new SR */ sre->raw = value; sre->vsid = (value & 0x0fffffff); + sre->valid = (value & 0x80000000) ? false : true; sre->Ks = (value & 0x40000000) ? true : false; sre->Kp = (value & 0x20000000) ? true : false; sre->nx = (value & 0x10000000) ? 
true : false; @@ -312,36 +324,48 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { - kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL); + kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000); } -static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, +static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { + ulong ea = esid << SID_SHIFT; + struct kvmppc_sr *sr; + u64 gvsid = esid; + + if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { + sr = find_sr(to_book3s(vcpu), ea); + if (sr->valid) + gvsid = sr->vsid; + } + /* In case we only have one of MSR_IR or MSR_DR set, let's put that in the real-mode context (and hope RM doesn't access high memory) */ switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { case 0: - *vsid = (VSID_REAL >> 16) | esid; + *vsid = VSID_REAL | esid; break; case MSR_IR: - *vsid = (VSID_REAL_IR >> 16) | esid; + *vsid = VSID_REAL_IR | gvsid; break; case MSR_DR: - *vsid = (VSID_REAL_DR >> 16) | esid; + *vsid = VSID_REAL_DR | gvsid; break; case MSR_DR|MSR_IR: - { - ulong ea; - ea = esid << SID_SHIFT; - *vsid = find_sr(to_book3s(vcpu), ea)->vsid; + if (!sr->valid) + return -1; + + *vsid = sr->vsid; break; - } default: BUG(); } + if (vcpu->arch.msr & MSR_PR) + *vsid |= VSID_PR; + return 0; } diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c new file mode 100644 index 000000000000..0bb66005338f --- /dev/null +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -0,0 +1,483 @@ +/* + * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. + * + * Authors: + * Alexander Graf <agraf@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/kvm_host.h> + +#include <asm/kvm_ppc.h> +#include <asm/kvm_book3s.h> +#include <asm/mmu-hash32.h> +#include <asm/machdep.h> +#include <asm/mmu_context.h> +#include <asm/hw_irq.h> + +/* #define DEBUG_MMU */ +/* #define DEBUG_SR */ + +#ifdef DEBUG_MMU +#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) +#else +#define dprintk_mmu(a, ...) do { } while(0) +#endif + +#ifdef DEBUG_SR +#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__) +#else +#define dprintk_sr(a, ...) 
do { } while(0) +#endif + +#if PAGE_SHIFT != 12 +#error Unknown page size +#endif + +#ifdef CONFIG_SMP +#error XXX need to grab mmu_hash_lock +#endif + +#ifdef CONFIG_PTE_64BIT +#error Only 32 bit pages are supported for now +#endif + +static ulong htab; +static u32 htabmask; + +static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) +{ + volatile u32 *pteg; + + dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n", + pte->pte.eaddr, pte->pte.vpage, pte->host_va); + + pteg = (u32*)pte->slot; + + pteg[0] = 0; + asm volatile ("sync"); + asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory"); + asm volatile ("sync"); + asm volatile ("tlbsync"); + + pte->host_va = 0; + + if (pte->pte.may_write) + kvm_release_pfn_dirty(pte->pfn); + else + kvm_release_pfn_clean(pte->pfn); +} + +void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) +{ + int i; + + dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n", + vcpu->arch.hpte_cache_offset, guest_ea, ea_mask); + BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); + + guest_ea &= ea_mask; + for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { + struct hpte_cache *pte; + + pte = &vcpu->arch.hpte_cache[i]; + if (!pte->host_va) + continue; + + if ((pte->pte.eaddr & ea_mask) == guest_ea) { + invalidate_pte(vcpu, pte); + } + } + + /* Doing a complete flush -> start from scratch */ + if (!ea_mask) + vcpu->arch.hpte_cache_offset = 0; +} + +void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) +{ + int i; + + dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", + vcpu->arch.hpte_cache_offset, guest_vp, vp_mask); + BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); + + guest_vp &= vp_mask; + for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { + struct hpte_cache *pte; + + pte = &vcpu->arch.hpte_cache[i]; + if (!pte->host_va) + continue; + + if ((pte->pte.vpage & vp_mask) == guest_vp) { + invalidate_pte(vcpu, pte); + } + } +} + +void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) +{ + int i; + + dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n", + vcpu->arch.hpte_cache_offset, pa_start, pa_end); + BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); + + for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { + struct hpte_cache *pte; + + pte = &vcpu->arch.hpte_cache[i]; + if (!pte->host_va) + continue; + + if ((pte->pte.raddr >= pa_start) && + (pte->pte.raddr < pa_end)) { + invalidate_pte(vcpu, pte); + } + } +} + +struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data) +{ + int i; + u64 guest_vp; + + guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false); + for (i=0; i<vcpu->arch.hpte_cache_offset; i++) { + struct hpte_cache *pte; + + pte = &vcpu->arch.hpte_cache[i]; + if (!pte->host_va) + continue; + + if (pte->pte.vpage == guest_vp) + return &pte->pte; + } + + return NULL; +} + +static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM) + kvmppc_mmu_pte_flush(vcpu, 0, 0); + + return vcpu->arch.hpte_cache_offset++; +} + +/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using + * a hash, so we don't waste cycles on looping */ +static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) +{ + return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ + ((gvsid >> 
(SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ + ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); +} + + +static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) +{ + struct kvmppc_sid_map *map; + u16 sid_map_mask; + + if (vcpu->arch.msr & MSR_PR) + gvsid |= VSID_PR; + + sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); + map = &to_book3s(vcpu)->sid_map[sid_map_mask]; + if (map->guest_vsid == gvsid) { + dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", + gvsid, map->host_vsid); + return map; + } + + map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; + if (map->guest_vsid == gvsid) { + dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n", + gvsid, map->host_vsid); + return map; + } + + dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid); + return NULL; +} + +static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, + bool primary) +{ + u32 page, hash; + ulong pteg = htab; + + page = (eaddr & ~ESID_MASK) >> 12; + + hash = ((vsid ^ page) << 6); + if (!primary) + hash = ~hash; + + hash &= htabmask; + + pteg |= hash; + + dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n", + htab, hash, htabmask, pteg); + + return (u32*)pteg; +} + +extern char etext[]; + +int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) +{ + pfn_t hpaddr; + u64 va; + u64 vsid; + struct kvmppc_sid_map *map; + volatile u32 *pteg; + u32 eaddr = orig_pte->eaddr; + u32 pteg0, pteg1; + register int rr = 0; + bool primary = false; + bool evict = false; + int hpte_id; + struct hpte_cache *pte; + + /* Get host physical address for gpa */ + hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); + if (kvm_is_error_hva(hpaddr)) { + printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", + orig_pte->eaddr); + return -EINVAL; + } + hpaddr <<= PAGE_SHIFT; + + /* and write the mapping ea -> hpa into the pt */ + vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); + map = find_sid_vsid(vcpu, vsid); + if (!map) { + kvmppc_mmu_map_segment(vcpu, eaddr); + map = find_sid_vsid(vcpu, vsid); + } + BUG_ON(!map); + + vsid = map->host_vsid; + va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK); + +next_pteg: + if (rr == 16) { + primary = !primary; + evict = true; + rr = 0; + } + + pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary); + + /* not evicting yet */ + if (!evict && (pteg[rr] & PTE_V)) { + rr += 2; + goto next_pteg; + } + + dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr); + dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); + + pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V | + (primary ? 
0 : PTE_SEC); + pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; + + if (orig_pte->may_write) { + pteg1 |= PP_RWRW; + mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); + } else { + pteg1 |= PP_RWRX; + } + + local_irq_disable(); + + if (pteg[rr]) { + pteg[rr] = 0; + asm volatile ("sync"); + } + pteg[rr + 1] = pteg1; + pteg[rr] = pteg0; + asm volatile ("sync"); + + local_irq_enable(); + + dprintk_mmu("KVM: new PTEG: %p\n", pteg); + dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]); + dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]); + + + /* Now tell our Shadow PTE code about the new page */ + + hpte_id = kvmppc_mmu_hpte_cache_next(vcpu); + pte = &vcpu->arch.hpte_cache[hpte_id]; + + dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", + orig_pte->may_write ? 'w' : '-', + orig_pte->may_execute ? 'x' : '-', + orig_pte->eaddr, (ulong)pteg, va, + orig_pte->vpage, hpaddr); + + pte->slot = (ulong)&pteg[rr]; + pte->host_va = va; + pte->pte = *orig_pte; + pte->pfn = hpaddr >> PAGE_SHIFT; + + return 0; +} + +static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) +{ + struct kvmppc_sid_map *map; + struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); + u16 sid_map_mask; + static int backwards_map = 0; + + if (vcpu->arch.msr & MSR_PR) + gvsid |= VSID_PR; + + /* We might get collisions that trap in preceding order, so let's + map them differently */ + + sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); + if (backwards_map) + sid_map_mask = SID_MAP_MASK - sid_map_mask; + + map = &to_book3s(vcpu)->sid_map[sid_map_mask]; + + /* Make sure we're taking the other map next time */ + backwards_map = !backwards_map; + + /* Uh-oh ... out of mappings. Let's flush! */ + if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) { + vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; + memset(vcpu_book3s->sid_map, 0, + sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); + kvmppc_mmu_pte_flush(vcpu, 0, 0); + kvmppc_mmu_flush_segments(vcpu); + } + map->host_vsid = vcpu_book3s->vsid_next; + + /* Would have to be 111 to be completely aligned with the rest of + Linux, but that is just way too little space! 
*/ + vcpu_book3s->vsid_next+=1; + + map->guest_vsid = gvsid; + map->valid = true; + + return map; +} + +int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) +{ + u32 esid = eaddr >> SID_SHIFT; + u64 gvsid; + u32 sr; + struct kvmppc_sid_map *map; + struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + + if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { + /* Invalidate an entry */ + svcpu->sr[esid] = SR_INVALID; + return -ENOENT; + } + + map = find_sid_vsid(vcpu, gvsid); + if (!map) + map = create_sid_map(vcpu, gvsid); + + map->guest_esid = esid; + sr = map->host_vsid | SR_KP; + svcpu->sr[esid] = sr; + + dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); + + return 0; +} + +void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) +{ + int i; + struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); + + dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr)); + for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++) + svcpu->sr[i] = SR_INVALID; +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ + kvmppc_mmu_pte_flush(vcpu, 0, 0); + preempt_disable(); + __destroy_context(to_book3s(vcpu)->context_id); + preempt_enable(); +} + +/* From mm/mmu_context_hash32.c */ +#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff) + +int kvmppc_mmu_init(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int err; + ulong sdr1; + + err = __init_new_context(); + if (err < 0) + return -1; + vcpu3s->context_id = err; + + vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1; + vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id); + +#if 0 /* XXX still doesn't guarantee uniqueness */ + /* We could collide with the Linux vsid space because the vsid + * wraps around at 24 bits. We're safe if we do our own space + * though, so let's always set the highest bit. */ + + vcpu3s->vsid_max |= 0x00800000; + vcpu3s->vsid_first |= 0x00800000; +#endif + BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first); + + vcpu3s->vsid_next = vcpu3s->vsid_first; + + /* Remember where the HTAB is */ + asm ( "mfsdr1 %0" : "=r"(sdr1) ); + htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0; + htab = (ulong)__va(sdr1 & 0xffff0000); + + return 0; +} diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S new file mode 100644 index 000000000000..3608471ad2d8 --- /dev/null +++ b/arch/powerpc/kvm/book3s_32_sr.S @@ -0,0 +1,143 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Copyright SUSE Linux Products GmbH 2009 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +/****************************************************************************** + * * + * Entry code * + * * + *****************************************************************************/ + +.macro LOAD_GUEST_SEGMENTS + + /* Required state: + * + * MSR = ~IR|DR + * R1 = host R1 + * R2 = host R2 + * R3 = shadow vcpu + * all other volatile GPRS = free + * SVCPU[CR] = guest CR + * SVCPU[XER] = guest XER + * SVCPU[CTR] = guest CTR + * SVCPU[LR] = guest LR + */ + +#define XCHG_SR(n) lwz r9, (SVCPU_SR+(n*4))(r3); \ + mtsr n, r9 + + XCHG_SR(0) + XCHG_SR(1) + XCHG_SR(2) + XCHG_SR(3) + XCHG_SR(4) + XCHG_SR(5) + XCHG_SR(6) + XCHG_SR(7) + XCHG_SR(8) + XCHG_SR(9) + XCHG_SR(10) + XCHG_SR(11) + XCHG_SR(12) + XCHG_SR(13) + XCHG_SR(14) + XCHG_SR(15) + + /* Clear BATs. */ + +#define KVM_KILL_BAT(n, reg) \ + mtspr SPRN_IBAT##n##U,reg; \ + mtspr SPRN_IBAT##n##L,reg; \ + mtspr SPRN_DBAT##n##U,reg; \ + mtspr SPRN_DBAT##n##L,reg; \ + + li r9, 0 + KVM_KILL_BAT(0, r9) + KVM_KILL_BAT(1, r9) + KVM_KILL_BAT(2, r9) + KVM_KILL_BAT(3, r9) + +.endm + +/****************************************************************************** + * * + * Exit code * + * * + *****************************************************************************/ + +.macro LOAD_HOST_SEGMENTS + + /* Register usage at this point: + * + * R1 = host R1 + * R2 = host R2 + * R12 = exit handler id + * R13 = shadow vcpu - SHADOW_VCPU_OFF + * SVCPU.* = guest * + * SVCPU[CR] = guest CR + * SVCPU[XER] = guest XER + * SVCPU[CTR] = guest CTR + * SVCPU[LR] = guest LR + * + */ + + /* Restore BATs */ + + /* We only overwrite the upper part, so we only restoree + the upper part. */ +#define KVM_LOAD_BAT(n, reg, RA, RB) \ + lwz RA,(n*16)+0(reg); \ + lwz RB,(n*16)+4(reg); \ + mtspr SPRN_IBAT##n##U,RA; \ + mtspr SPRN_IBAT##n##L,RB; \ + lwz RA,(n*16)+8(reg); \ + lwz RB,(n*16)+12(reg); \ + mtspr SPRN_DBAT##n##U,RA; \ + mtspr SPRN_DBAT##n##L,RB; \ + + lis r9, BATS@ha + addi r9, r9, BATS@l + tophys(r9, r9) + KVM_LOAD_BAT(0, r9, r10, r11) + KVM_LOAD_BAT(1, r9, r10, r11) + KVM_LOAD_BAT(2, r9, r10, r11) + KVM_LOAD_BAT(3, r9, r10, r11) + + /* Restore Segment Registers */ + + /* 0xc - 0xf */ + + li r0, 4 + mtctr r0 + LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc)) + lis r4, 0xc000 +3: mtsrin r3, r4 + addi r3, r3, 0x111 /* increment VSID */ + addis r4, r4, 0x1000 /* address of next segment */ + bdnz 3b + + /* 0x0 - 0xb */ + + /* 'current->mm' needs to be in r4 */ + tophys(r4, r2) + lwz r4, MM(r4) + tophys(r4, r4) + /* This only clobbers r0, r3, r4 and r5 */ + bl switch_mmu_context + +.endm diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 512dcff77554..4025ea26b3c1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -232,7 +232,7 @@ do_second: } dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " - "-> 0x%llx\n", + "-> 0x%lx\n", eaddr, avpn, gpte->vpage, gpte->raddr); found = true; break; @@ -383,7 +383,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) if (vcpu->arch.msr & MSR_IR) { kvmppc_mmu_flush_segments(vcpu); - kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); + kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); } } @@ -439,37 +439,43 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); } -static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, +static int 
kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { + ulong ea = esid << SID_SHIFT; + struct kvmppc_slb *slb; + u64 gvsid = esid; + + if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { + slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); + if (slb) + gvsid = slb->vsid; + } + switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { case 0: - *vsid = (VSID_REAL >> 16) | esid; + *vsid = VSID_REAL | esid; break; case MSR_IR: - *vsid = (VSID_REAL_IR >> 16) | esid; + *vsid = VSID_REAL_IR | gvsid; break; case MSR_DR: - *vsid = (VSID_REAL_DR >> 16) | esid; + *vsid = VSID_REAL_DR | gvsid; break; case MSR_DR|MSR_IR: - { - ulong ea; - struct kvmppc_slb *slb; - ea = esid << SID_SHIFT; - slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); - if (slb) - *vsid = slb->vsid; - else + if (!slb) return -ENOENT; + *vsid = gvsid; break; - } default: BUG(); break; } + if (vcpu->arch.msr & MSR_PR) + *vsid |= VSID_PR; + return 0; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index f2899b297ffd..e4b5744977f6 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -48,21 +48,25 @@ static void invalidate_pte(struct hpte_cache *pte) { - dprintk_mmu("KVM: Flushing SPT %d: 0x%llx (0x%llx) -> 0x%llx\n", - i, pte->pte.eaddr, pte->pte.vpage, pte->host_va); + dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n", + pte->pte.eaddr, pte->pte.vpage, pte->host_va); ppc_md.hpte_invalidate(pte->slot, pte->host_va, MMU_PAGE_4K, MMU_SEGSIZE_256M, false); pte->host_va = 0; - kvm_release_pfn_dirty(pte->pfn); + + if (pte->pte.may_write) + kvm_release_pfn_dirty(pte->pfn); + else + kvm_release_pfn_clean(pte->pfn); } -void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask) +void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) { int i; - dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n", + dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n", vcpu->arch.hpte_cache_offset, guest_ea, ea_mask); BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); @@ -106,12 +110,12 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) } } -void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end) +void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) { int i; - dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n", - vcpu->arch.hpte_cache_offset, guest_pa, pa_mask); + dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n", + vcpu->arch.hpte_cache_offset, pa_start, pa_end); BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { @@ -182,7 +186,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); map = &to_book3s(vcpu)->sid_map[sid_map_mask]; if (map->guest_vsid == gvsid) { - dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", + dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n", gvsid, map->host_vsid); return map; } @@ -194,7 +198,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) return map; } - dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid); + dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n", + sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid); return NULL; } @@ -212,7 +217,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) /* Get host physical address for gpa */ hpaddr = 
gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); if (kvm_is_error_hva(hpaddr)) { - printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr); + printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); return -EINVAL; } hpaddr <<= PAGE_SHIFT; @@ -227,10 +232,16 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); map = find_sid_vsid(vcpu, vsid); if (!map) { - kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); + ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); + WARN_ON(ret < 0); map = find_sid_vsid(vcpu, vsid); } - BUG_ON(!map); + if (!map) { + printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n", + vsid, orig_pte->eaddr); + WARN_ON(true); + return -EINVAL; + } vsid = map->host_vsid; va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); @@ -257,26 +268,26 @@ map_again: if (ret < 0) { /* If we couldn't map a primary PTE, try a secondary */ -#ifdef USE_SECONDARY hash = ~hash; + vflags ^= HPTE_V_SECONDARY; attempt++; - if (attempt % 2) - vflags = HPTE_V_SECONDARY; - else - vflags = 0; -#else - attempt = 2; -#endif goto map_again; } else { int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu); struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id]; - dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n", + dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n", ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', (rflags & HPTE_R_N) ? '-' : 'x', orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); + /* The ppc_md code may give us a secondary entry even though we + asked for a primary. Fix up. */ + if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) { + hash = ~hash; + hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); + } + pte->slot = hpteg + (ret & 7); pte->host_va = va; pte->pte = *orig_pte; @@ -321,6 +332,9 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) map->guest_vsid = gvsid; map->valid = true; + dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n", + sid_map_mask, gvsid, map->host_vsid); + return map; } @@ -331,14 +345,14 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) int found_inval = -1; int r; - if (!get_paca()->kvm_slb_max) - get_paca()->kvm_slb_max = 1; + if (!to_svcpu(vcpu)->slb_max) + to_svcpu(vcpu)->slb_max = 1; /* Are we overwriting? 
*/ - for (i = 1; i < get_paca()->kvm_slb_max; i++) { - if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V)) + for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) { + if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V)) found_inval = i; - else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid) + else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid) return i; } @@ -352,11 +366,11 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) max_slb_size = mmu_slb_size; /* Overflowing -> purge */ - if ((get_paca()->kvm_slb_max) == max_slb_size) + if ((to_svcpu(vcpu)->slb_max) == max_slb_size) kvmppc_mmu_flush_segments(vcpu); - r = get_paca()->kvm_slb_max; - get_paca()->kvm_slb_max++; + r = to_svcpu(vcpu)->slb_max; + to_svcpu(vcpu)->slb_max++; return r; } @@ -374,7 +388,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { /* Invalidate an entry */ - get_paca()->kvm_slb[slb_index].esid = 0; + to_svcpu(vcpu)->slb[slb_index].esid = 0; return -ENOENT; } @@ -388,8 +402,8 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) slb_vsid &= ~SLB_VSID_KP; slb_esid |= slb_index; - get_paca()->kvm_slb[slb_index].esid = slb_esid; - get_paca()->kvm_slb[slb_index].vsid = slb_vsid; + to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; + to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); @@ -398,11 +412,29 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { - get_paca()->kvm_slb_max = 1; - get_paca()->kvm_slb[0].esid = 0; + to_svcpu(vcpu)->slb_max = 1; + to_svcpu(vcpu)->slb[0].esid = 0; } void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) { kvmppc_mmu_pte_flush(vcpu, 0, 0); + __destroy_context(to_book3s(vcpu)->context_id); +} + +int kvmppc_mmu_init(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + int err; + + err = __init_new_context(); + if (err < 0) + return -1; + vcpu3s->context_id = err; + + vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1; + vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS; + vcpu3s->vsid_next = vcpu3s->vsid_first; + + return 0; } diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 35b762722187..04e7d3bbfe8b 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -44,8 +44,7 @@ slb_exit_skip_ ## num: * * *****************************************************************************/ -.global kvmppc_handler_trampoline_enter -kvmppc_handler_trampoline_enter: +.macro LOAD_GUEST_SEGMENTS /* Required state: * @@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter: * R13 = PACA * R1 = host R1 * R2 = host R2 - * R9 = guest IP - * R10 = guest MSR - * all other GPRS = free - * PACA[KVM_CR] = guest CR - * PACA[KVM_XER] = guest XER + * R3 = shadow vcpu + * all other volatile GPRS = free + * SVCPU[CR] = guest CR + * SVCPU[XER] = guest XER + * SVCPU[CTR] = guest CTR + * SVCPU[LR] = guest LR */ - mtsrr0 r9 - mtsrr1 r10 - - /* Activate guest mode, so faults get handled by KVM */ - li r11, KVM_GUEST_MODE_GUEST - stb r11, PACA_KVM_IN_GUEST(r13) - /* Remove LPAR shadow entries */ #if SLB_NUM_BOLTED == 3 @@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter: /* Fill SLB with our shadow */ - lbz r12, PACA_KVM_SLB_MAX(r13) + lbz r12, SVCPU_SLB_MAX(r3) mulli r12, r12, 16 - addi r12, r12, PACA_KVM_SLB - add r12, r12, r13 + addi r12, r12, SVCPU_SLB + add r12, r12, r3 /* for (r11 = kvm_slb; 
r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */ - li r11, PACA_KVM_SLB - add r11, r11, r13 + li r11, SVCPU_SLB + add r11, r11, r3 slb_loop_enter: @@ -127,34 +120,7 @@ slb_loop_enter_skip: slb_do_enter: - /* Enter guest */ - - ld r0, (PACA_KVM_R0)(r13) - ld r1, (PACA_KVM_R1)(r13) - ld r2, (PACA_KVM_R2)(r13) - ld r3, (PACA_KVM_R3)(r13) - ld r4, (PACA_KVM_R4)(r13) - ld r5, (PACA_KVM_R5)(r13) - ld r6, (PACA_KVM_R6)(r13) - ld r7, (PACA_KVM_R7)(r13) - ld r8, (PACA_KVM_R8)(r13) - ld r9, (PACA_KVM_R9)(r13) - ld r10, (PACA_KVM_R10)(r13) - ld r12, (PACA_KVM_R12)(r13) - - lwz r11, (PACA_KVM_CR)(r13) - mtcr r11 - - ld r11, (PACA_KVM_XER)(r13) - mtxer r11 - - ld r11, (PACA_KVM_R11)(r13) - ld r13, (PACA_KVM_R13)(r13) - - RFI -kvmppc_handler_trampoline_enter_end: - - +.endm /****************************************************************************** * * @@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end: * * *****************************************************************************/ -.global kvmppc_handler_trampoline_exit -kvmppc_handler_trampoline_exit: +.macro LOAD_HOST_SEGMENTS /* Register usage at this point: * - * SPRG_SCRATCH0 = guest R13 - * R12 = exit handler id - * R13 = PACA - * PACA.KVM.SCRATCH0 = guest R12 - * PACA.KVM.SCRATCH1 = guest CR + * R1 = host R1 + * R2 = host R2 + * R12 = exit handler id + * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] + * SVCPU.* = guest * + * SVCPU[CR] = guest CR + * SVCPU[XER] = guest XER + * SVCPU[CTR] = guest CTR + * SVCPU[LR] = guest LR * */ - /* Save registers */ - - std r0, PACA_KVM_R0(r13) - std r1, PACA_KVM_R1(r13) - std r2, PACA_KVM_R2(r13) - std r3, PACA_KVM_R3(r13) - std r4, PACA_KVM_R4(r13) - std r5, PACA_KVM_R5(r13) - std r6, PACA_KVM_R6(r13) - std r7, PACA_KVM_R7(r13) - std r8, PACA_KVM_R8(r13) - std r9, PACA_KVM_R9(r13) - std r10, PACA_KVM_R10(r13) - std r11, PACA_KVM_R11(r13) - - /* Restore R1/R2 so we can handle faults */ - ld r1, PACA_KVM_HOST_R1(r13) - ld r2, PACA_KVM_HOST_R2(r13) - - /* Save guest PC and MSR in GPRs */ - mfsrr0 r3 - mfsrr1 r4 - - /* Get scratch'ed off registers */ - mfspr r9, SPRN_SPRG_SCRATCH0 - std r9, PACA_KVM_R13(r13) - - ld r8, PACA_KVM_SCRATCH0(r13) - std r8, PACA_KVM_R12(r13) - - lwz r7, PACA_KVM_SCRATCH1(r13) - stw r7, PACA_KVM_CR(r13) - - /* Save more register state */ - - mfxer r6 - stw r6, PACA_KVM_XER(r13) - - mfdar r5 - mfdsisr r6 - - /* - * In order for us to easily get the last instruction, - * we got the #vmexit at, we exploit the fact that the - * virtual layout is still the same here, so we can just - * ld from the guest's PC address - */ - - /* We only load the last instruction when it's safe */ - cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE - beq ld_last_inst - cmpwi r12, BOOK3S_INTERRUPT_PROGRAM - beq ld_last_inst - - b no_ld_last_inst - -ld_last_inst: - /* Save off the guest instruction we're at */ - - /* Set guest mode to 'jump over instruction' so if lwz faults - * we'll just continue at the next IP. 
*/ - li r9, KVM_GUEST_MODE_SKIP - stb r9, PACA_KVM_IN_GUEST(r13) - - /* 1) enable paging for data */ - mfmsr r9 - ori r11, r9, MSR_DR /* Enable paging for data */ - mtmsr r11 - /* 2) fetch the instruction */ - li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */ - lwz r0, 0(r3) - /* 3) disable paging again */ - mtmsr r9 - -no_ld_last_inst: - - /* Unset guest mode */ - li r9, KVM_GUEST_MODE_NONE - stb r9, PACA_KVM_IN_GUEST(r13) - /* Restore bolted entries from the shadow and fix it along the way */ /* We don't store anything in entry 0, so we don't need to take care of it */ @@ -275,28 +164,4 @@ no_ld_last_inst: slb_do_exit: - /* Register usage at this point: - * - * R0 = guest last inst - * R1 = host R1 - * R2 = host R2 - * R3 = guest PC - * R4 = guest MSR - * R5 = guest DAR - * R6 = guest DSISR - * R12 = exit handler id - * R13 = PACA - * PACA.KVM.* = guest * - * - */ - - /* RFI into the highmem handler */ - mfmsr r7 - ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ - mtsrr1 r7 - ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */ - mtsrr0 r8 - - RFI -kvmppc_handler_trampoline_exit_end: - +.endm diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 2b0ee7e040c9..c85f906038ce 100644 --- a/arch/powerpc/kvm/book3s_64_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -28,13 +28,16 @@ #define OP_31_XOP_MFMSR 83 #define OP_31_XOP_MTMSR 146 #define OP_31_XOP_MTMSRD 178 +#define OP_31_XOP_MTSR 210 #define OP_31_XOP_MTSRIN 242 #define OP_31_XOP_TLBIEL 274 #define OP_31_XOP_TLBIE 306 #define OP_31_XOP_SLBMTE 402 #define OP_31_XOP_SLBIE 434 #define OP_31_XOP_SLBIA 498 +#define OP_31_XOP_MFSR 595 #define OP_31_XOP_MFSRIN 659 +#define OP_31_XOP_DCBA 758 #define OP_31_XOP_SLBMFEV 851 #define OP_31_XOP_EIOIO 854 #define OP_31_XOP_SLBMFEE 915 @@ -42,6 +45,24 @@ /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ #define OP_31_XOP_DCBZ 1010 +#define OP_LFS 48 +#define OP_LFD 50 +#define OP_STFS 52 +#define OP_STFD 54 + +#define SPRN_GQR0 912 +#define SPRN_GQR1 913 +#define SPRN_GQR2 914 +#define SPRN_GQR3 915 +#define SPRN_GQR4 916 +#define SPRN_GQR5 917 +#define SPRN_GQR6 918 +#define SPRN_GQR7 919 + +/* Book3S_32 defines mfsrin(v) - but that messes up our abstract + * function pointers, so let's just disable the define. 
*/ +#undef mfsrin + int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -52,7 +73,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (get_xop(inst)) { case OP_19_XOP_RFID: case OP_19_XOP_RFI: - vcpu->arch.pc = vcpu->arch.srr0; + kvmppc_set_pc(vcpu, vcpu->arch.srr0); kvmppc_set_msr(vcpu, vcpu->arch.srr1); *advance = 0; break; @@ -80,6 +101,18 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, case OP_31_XOP_MTMSR: kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); break; + case OP_31_XOP_MFSR: + { + int srnum; + + srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32); + if (vcpu->arch.mmu.mfsrin) { + u32 sr; + sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); + kvmppc_set_gpr(vcpu, get_rt(inst), sr); + } + break; + } case OP_31_XOP_MFSRIN: { int srnum; @@ -92,6 +125,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, } break; } + case OP_31_XOP_MTSR: + vcpu->arch.mmu.mtsrin(vcpu, + (inst >> 16) & 0xf, + kvmppc_get_gpr(vcpu, get_rs(inst))); + break; case OP_31_XOP_MTSRIN: vcpu->arch.mmu.mtsrin(vcpu, (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, @@ -150,12 +188,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_set_gpr(vcpu, get_rt(inst), t); } break; + case OP_31_XOP_DCBA: + /* Gets treated as NOP */ + break; case OP_31_XOP_DCBZ: { ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); ulong ra = 0; - ulong addr; + ulong addr, vaddr; u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + u32 dsisr; + int r; if (get_ra(inst)) ra = kvmppc_get_gpr(vcpu, get_ra(inst)); @@ -163,15 +206,25 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, addr = (ra + rb) & ~31ULL; if (!(vcpu->arch.msr & MSR_SF)) addr &= 0xffffffff; + vaddr = addr; + + r = kvmppc_st(vcpu, &addr, 32, zeros, true); + if ((r == -ENOENT) || (r == -EPERM)) { + *advance = 0; + vcpu->arch.dear = vaddr; + to_svcpu(vcpu)->fault_dar = vaddr; + + dsisr = DSISR_ISSTORE; + if (r == -ENOENT) + dsisr |= DSISR_NOHPTE; + else if (r == -EPERM) + dsisr |= DSISR_PROTFAULT; + + to_book3s(vcpu)->dsisr = dsisr; + to_svcpu(vcpu)->fault_dsisr = dsisr; - if (kvmppc_st(vcpu, addr, 32, zeros)) { - vcpu->arch.dear = addr; - vcpu->arch.fault_dear = addr; - to_book3s(vcpu)->dsisr = DSISR_PROTFAULT | - DSISR_ISSTORE; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); - kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL); } break; @@ -184,6 +237,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, emulated = EMULATE_FAIL; } + if (emulated == EMULATE_FAIL) + emulated = kvmppc_emulate_paired_single(run, vcpu); + return emulated; } @@ -207,6 +263,34 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, } } +static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) +{ + struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); + struct kvmppc_bat *bat; + + switch (sprn) { + case SPRN_IBAT0U ... SPRN_IBAT3L: + bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; + break; + case SPRN_IBAT4U ... SPRN_IBAT7L: + bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; + break; + case SPRN_DBAT0U ... SPRN_DBAT3L: + bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; + break; + case SPRN_DBAT4U ... 
SPRN_DBAT7L: + bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; + break; + default: + BUG(); + } + + if (sprn % 2) + return bat->raw >> 32; + else + return bat->raw; +} + static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); @@ -217,13 +301,13 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; break; case SPRN_IBAT4U ... SPRN_IBAT7L: - bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2]; + bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; break; case SPRN_DBAT0U ... SPRN_DBAT3L: bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; break; case SPRN_DBAT4U ... SPRN_DBAT7L: - bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2]; + bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; break; default: BUG(); @@ -258,6 +342,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) /* BAT writes happen so rarely that we're ok to flush * everything here */ kvmppc_mmu_pte_flush(vcpu, 0, 0); + kvmppc_mmu_flush_segments(vcpu); break; case SPRN_HID0: to_book3s(vcpu)->hid[0] = spr_val; @@ -268,7 +353,32 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_HID2: to_book3s(vcpu)->hid[2] = spr_val; break; + case SPRN_HID2_GEKKO: + to_book3s(vcpu)->hid[2] = spr_val; + /* HID2.PSE controls paired single on gekko */ + switch (vcpu->arch.pvr) { + case 0x00080200: /* lonestar 2.0 */ + case 0x00088202: /* lonestar 2.2 */ + case 0x70000100: /* gekko 1.0 */ + case 0x00080100: /* gekko 2.0 */ + case 0x00083203: /* gekko 2.3a */ + case 0x00083213: /* gekko 2.3b */ + case 0x00083204: /* gekko 2.4 */ + case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ + case 0x00087200: /* broadway */ + if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) { + /* Native paired singles */ + } else if (spr_val & (1 << 29)) { /* HID2.PSE */ + vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE; + kvmppc_giveup_ext(vcpu, MSR_FP); + } else { + vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE; + } + break; + } + break; case SPRN_HID4: + case SPRN_HID4_GEKKO: to_book3s(vcpu)->hid[4] = spr_val; break; case SPRN_HID5: @@ -278,12 +388,30 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) (mfmsr() & MSR_HV)) vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; break; + case SPRN_GQR0: + case SPRN_GQR1: + case SPRN_GQR2: + case SPRN_GQR3: + case SPRN_GQR4: + case SPRN_GQR5: + case SPRN_GQR6: + case SPRN_GQR7: + to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; + break; case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: + case SPRN_L2CR: + case SPRN_MMCR0_GEKKO: + case SPRN_MMCR1_GEKKO: + case SPRN_PMC1_GEKKO: + case SPRN_PMC2_GEKKO: + case SPRN_PMC3_GEKKO: + case SPRN_PMC4_GEKKO: + case SPRN_WPAR_GEKKO: break; default: printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); @@ -301,6 +429,12 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) int emulated = EMULATE_DONE; switch (sprn) { + case SPRN_IBAT0U ... SPRN_IBAT3L: + case SPRN_IBAT4U ... SPRN_IBAT7L: + case SPRN_DBAT0U ... SPRN_DBAT3L: + case SPRN_DBAT4U ... 
SPRN_DBAT7L: + kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); + break; case SPRN_SDR1: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); break; @@ -320,19 +454,40 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); break; case SPRN_HID2: + case SPRN_HID2_GEKKO: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); break; case SPRN_HID4: + case SPRN_HID4_GEKKO: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); break; case SPRN_HID5: kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); break; + case SPRN_GQR0: + case SPRN_GQR1: + case SPRN_GQR2: + case SPRN_GQR3: + case SPRN_GQR4: + case SPRN_GQR5: + case SPRN_GQR6: + case SPRN_GQR7: + kvmppc_set_gpr(vcpu, rt, + to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]); + break; case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: case SPRN_CTRLF: case SPRN_CTRLT: + case SPRN_L2CR: + case SPRN_MMCR0_GEKKO: + case SPRN_MMCR1_GEKKO: + case SPRN_PMC1_GEKKO: + case SPRN_PMC2_GEKKO: + case SPRN_PMC3_GEKKO: + case SPRN_PMC4_GEKKO: + case SPRN_WPAR_GEKKO: kvmppc_set_gpr(vcpu, rt, 0); break; default: @@ -346,3 +501,73 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) return emulated; } +u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) +{ + u32 dsisr = 0; + + /* + * This is what the spec says about DSISR bits (not mentioned = 0): + * + * 12:13 [DS] Set to bits 30:31 + * 15:16 [X] Set to bits 29:30 + * 17 [X] Set to bit 25 + * [D/DS] Set to bit 5 + * 18:21 [X] Set to bits 21:24 + * [D/DS] Set to bits 1:4 + * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS) + * 27:31 Set to bits 11:15 (RA) + */ + + switch (get_op(inst)) { + /* D-form */ + case OP_LFS: + case OP_LFD: + case OP_STFD: + case OP_STFS: + dsisr |= (inst >> 12) & 0x4000; /* bit 17 */ + dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */ + break; + /* X-form */ + case 31: + dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */ + dsisr |= (inst << 8) & 0x04000; /* bit 17 */ + dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */ + break; + default: + printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); + break; + } + + dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */ + + return dsisr; +} + +ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) +{ + ulong dar = 0; + ulong ra; + + switch (get_op(inst)) { + case OP_LFS: + case OP_LFD: + case OP_STFD: + case OP_STFS: + ra = get_ra(inst); + if (ra) + dar = kvmppc_get_gpr(vcpu, ra); + dar += (s32)((s16)inst); + break; + case 31: + ra = get_ra(inst); + if (ra) + dar = kvmppc_get_gpr(vcpu, ra); + dar += kvmppc_get_gpr(vcpu, get_rb(inst)); + break; + default: + printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); + break; + } + + return dar; +} diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_exports.c index 1dd5a1ddfd0d..1dd5a1ddfd0d 100644 --- a/arch/powerpc/kvm/book3s_64_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index c1584d0cbce8..2f0bc928b08a 100644 --- a/arch/powerpc/kvm/book3s_64_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -24,36 +24,56 @@ #include <asm/asm-offsets.h> #include <asm/exception-64s.h> -#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit -#define ULONG_SIZE 8 -#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) +#if defined(CONFIG_PPC_BOOK3S_64) -.macro DISABLE_INTERRUPTS - mfmsr r0 - rldicl r0,r0,48,1 - rotldi r0,r0,16 - mtmsrd r0,1 -.endm +#define ULONG_SIZE 8 +#define FUNC(name) 
GLUE(.,name) +#define GET_SHADOW_VCPU(reg) \ + addi reg, r13, PACA_KVM_SVCPU + +#define DISABLE_INTERRUPTS \ + mfmsr r0; \ + rldicl r0,r0,48,1; \ + rotldi r0,r0,16; \ + mtmsrd r0,1; \ + +#elif defined(CONFIG_PPC_BOOK3S_32) + +#define ULONG_SIZE 4 +#define FUNC(name) name + +#define GET_SHADOW_VCPU(reg) \ + lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) + +#define DISABLE_INTERRUPTS \ + mfmsr r0; \ + rlwinm r0,r0,0,17,15; \ + mtmsr r0; \ + +#endif /* CONFIG_PPC_BOOK3S_XX */ + + +#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) #define VCPU_LOAD_NVGPRS(vcpu) \ - ld r14, VCPU_GPR(r14)(vcpu); \ - ld r15, VCPU_GPR(r15)(vcpu); \ - ld r16, VCPU_GPR(r16)(vcpu); \ - ld r17, VCPU_GPR(r17)(vcpu); \ - ld r18, VCPU_GPR(r18)(vcpu); \ - ld r19, VCPU_GPR(r19)(vcpu); \ - ld r20, VCPU_GPR(r20)(vcpu); \ - ld r21, VCPU_GPR(r21)(vcpu); \ - ld r22, VCPU_GPR(r22)(vcpu); \ - ld r23, VCPU_GPR(r23)(vcpu); \ - ld r24, VCPU_GPR(r24)(vcpu); \ - ld r25, VCPU_GPR(r25)(vcpu); \ - ld r26, VCPU_GPR(r26)(vcpu); \ - ld r27, VCPU_GPR(r27)(vcpu); \ - ld r28, VCPU_GPR(r28)(vcpu); \ - ld r29, VCPU_GPR(r29)(vcpu); \ - ld r30, VCPU_GPR(r30)(vcpu); \ - ld r31, VCPU_GPR(r31)(vcpu); \ + PPC_LL r14, VCPU_GPR(r14)(vcpu); \ + PPC_LL r15, VCPU_GPR(r15)(vcpu); \ + PPC_LL r16, VCPU_GPR(r16)(vcpu); \ + PPC_LL r17, VCPU_GPR(r17)(vcpu); \ + PPC_LL r18, VCPU_GPR(r18)(vcpu); \ + PPC_LL r19, VCPU_GPR(r19)(vcpu); \ + PPC_LL r20, VCPU_GPR(r20)(vcpu); \ + PPC_LL r21, VCPU_GPR(r21)(vcpu); \ + PPC_LL r22, VCPU_GPR(r22)(vcpu); \ + PPC_LL r23, VCPU_GPR(r23)(vcpu); \ + PPC_LL r24, VCPU_GPR(r24)(vcpu); \ + PPC_LL r25, VCPU_GPR(r25)(vcpu); \ + PPC_LL r26, VCPU_GPR(r26)(vcpu); \ + PPC_LL r27, VCPU_GPR(r27)(vcpu); \ + PPC_LL r28, VCPU_GPR(r28)(vcpu); \ + PPC_LL r29, VCPU_GPR(r29)(vcpu); \ + PPC_LL r30, VCPU_GPR(r30)(vcpu); \ + PPC_LL r31, VCPU_GPR(r31)(vcpu); \ /***************************************************************************** * * @@ -69,11 +89,11 @@ _GLOBAL(__kvmppc_vcpu_entry) kvm_start_entry: /* Write correct stack frame */ - mflr r0 - std r0,16(r1) + mflr r0 + PPC_STL r0,PPC_LR_STKOFF(r1) /* Save host state to the stack */ - stdu r1, -SWITCH_FRAME_SIZE(r1) + PPC_STLU r1, -SWITCH_FRAME_SIZE(r1) /* Save r3 (kvm_run) and r4 (vcpu) */ SAVE_2GPRS(3, r1) @@ -82,33 +102,28 @@ kvm_start_entry: SAVE_NVGPRS(r1) /* Save LR */ - std r0, _LINK(r1) + PPC_STL r0, _LINK(r1) /* Load non-volatile guest state from the vcpu */ VCPU_LOAD_NVGPRS(r4) + GET_SHADOW_VCPU(r5) + /* Save R1/R2 in the PACA */ - std r1, PACA_KVM_HOST_R1(r13) - std r2, PACA_KVM_HOST_R2(r13) + PPC_STL r1, SVCPU_HOST_R1(r5) + PPC_STL r2, SVCPU_HOST_R2(r5) /* XXX swap in/out on load? */ - ld r3, VCPU_HIGHMEM_HANDLER(r4) - std r3, PACA_KVM_VMHANDLER(r13) + PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4) + PPC_STL r3, SVCPU_VMHANDLER(r5) kvm_start_lightweight: - ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ - ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ - - /* Load some guest state in the respective registers */ - ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */ - /* will be swapped in by rmcall */ - - ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ - mtlr r3 /* LR = r3 */ + PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ DISABLE_INTERRUPTS +#ifdef CONFIG_PPC_BOOK3S_64 /* Some guests may need to have dcbz set to 32 byte length. * * Usually we ensure that by patching the guest's instructions @@ -118,7 +133,7 @@ kvm_start_lightweight: * because that's a lot faster. */ - ld r3, VCPU_HFLAGS(r4) + PPC_LL r3, VCPU_HFLAGS(r4) rldicl. 
r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ beq no_dcbz32_on @@ -128,13 +143,15 @@ kvm_start_lightweight: no_dcbz32_on: - ld r6, VCPU_RMCALL(r4) +#endif /* CONFIG_PPC_BOOK3S_64 */ + + PPC_LL r6, VCPU_RMCALL(r4) mtctr r6 - ld r3, VCPU_TRAMPOLINE_ENTER(r4) + PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) - /* Jump to SLB patching handlder and into our guest */ + /* Jump to segment patching handler and into our guest */ bctr /* @@ -149,31 +166,20 @@ kvmppc_handler_highmem: /* * Register usage at this point: * - * R0 = guest last inst - * R1 = host R1 - * R2 = host R2 - * R3 = guest PC - * R4 = guest MSR - * R5 = guest DAR - * R6 = guest DSISR - * R13 = PACA - * PACA.KVM.* = guest * + * R1 = host R1 + * R2 = host R2 + * R12 = exit handler id + * R13 = PACA + * SVCPU.* = guest * * */ /* R7 = vcpu */ - ld r7, GPR4(r1) + PPC_LL r7, GPR4(r1) - /* Now save the guest state */ +#ifdef CONFIG_PPC_BOOK3S_64 - stw r0, VCPU_LAST_INST(r7) - - std r3, VCPU_PC(r7) - std r4, VCPU_SHADOW_SRR1(r7) - std r5, VCPU_FAULT_DEAR(r7) - std r6, VCPU_FAULT_DSISR(r7) - - ld r5, VCPU_HFLAGS(r7) + PPC_LL r5, VCPU_HFLAGS(r7) rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ beq no_dcbz32_off @@ -184,35 +190,29 @@ kvmppc_handler_highmem: no_dcbz32_off: - std r14, VCPU_GPR(r14)(r7) - std r15, VCPU_GPR(r15)(r7) - std r16, VCPU_GPR(r16)(r7) - std r17, VCPU_GPR(r17)(r7) - std r18, VCPU_GPR(r18)(r7) - std r19, VCPU_GPR(r19)(r7) - std r20, VCPU_GPR(r20)(r7) - std r21, VCPU_GPR(r21)(r7) - std r22, VCPU_GPR(r22)(r7) - std r23, VCPU_GPR(r23)(r7) - std r24, VCPU_GPR(r24)(r7) - std r25, VCPU_GPR(r25)(r7) - std r26, VCPU_GPR(r26)(r7) - std r27, VCPU_GPR(r27)(r7) - std r28, VCPU_GPR(r28)(r7) - std r29, VCPU_GPR(r29)(r7) - std r30, VCPU_GPR(r30)(r7) - std r31, VCPU_GPR(r31)(r7) - - /* Save guest CTR */ - mfctr r5 - std r5, VCPU_CTR(r7) - - /* Save guest LR */ - mflr r5 - std r5, VCPU_LR(r7) +#endif /* CONFIG_PPC_BOOK3S_64 */ + + PPC_STL r14, VCPU_GPR(r14)(r7) + PPC_STL r15, VCPU_GPR(r15)(r7) + PPC_STL r16, VCPU_GPR(r16)(r7) + PPC_STL r17, VCPU_GPR(r17)(r7) + PPC_STL r18, VCPU_GPR(r18)(r7) + PPC_STL r19, VCPU_GPR(r19)(r7) + PPC_STL r20, VCPU_GPR(r20)(r7) + PPC_STL r21, VCPU_GPR(r21)(r7) + PPC_STL r22, VCPU_GPR(r22)(r7) + PPC_STL r23, VCPU_GPR(r23)(r7) + PPC_STL r24, VCPU_GPR(r24)(r7) + PPC_STL r25, VCPU_GPR(r25)(r7) + PPC_STL r26, VCPU_GPR(r26)(r7) + PPC_STL r27, VCPU_GPR(r27)(r7) + PPC_STL r28, VCPU_GPR(r28)(r7) + PPC_STL r29, VCPU_GPR(r29)(r7) + PPC_STL r30, VCPU_GPR(r30)(r7) + PPC_STL r31, VCPU_GPR(r31)(r7) /* Restore host msr -> SRR1 */ - ld r6, VCPU_HOST_MSR(r7) + PPC_LL r6, VCPU_HOST_MSR(r7) /* * For some interrupts, we need to call the real Linux @@ -228,9 +228,12 @@ no_dcbz32_off: beq call_linux_handler cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER beq call_linux_handler + cmpwi r12, BOOK3S_INTERRUPT_PERFMON + beq call_linux_handler /* Back to EE=1 */ mtmsr r6 + sync b kvm_return_point call_linux_handler: @@ -249,14 +252,14 @@ call_linux_handler: */ /* Restore host IP -> SRR0 */ - ld r5, VCPU_HOST_RETIP(r7) + PPC_LL r5, VCPU_HOST_RETIP(r7) /* XXX Better move to a safe function? * What if we get an HTAB flush in between mtsrr0 and mtsrr1? 
*/ mtlr r12 - ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) + PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7) mtsrr0 r4 LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) mtsrr1 r3 @@ -274,7 +277,7 @@ kvm_return_point: /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) - bl KVMPPC_HANDLE_EXIT + bl FUNC(kvmppc_handle_exit) /* If RESUME_GUEST, get back in the loop */ cmpwi r3, RESUME_GUEST @@ -285,7 +288,7 @@ kvm_return_point: kvm_exit_loop: - ld r4, _LINK(r1) + PPC_LL r4, _LINK(r1) mtlr r4 /* Restore non-volatile host registers (r14 - r31) */ @@ -296,8 +299,8 @@ kvm_exit_loop: kvm_loop_heavyweight: - ld r4, _LINK(r1) - std r4, (16 + SWITCH_FRAME_SIZE)(r1) + PPC_LL r4, _LINK(r1) + PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1) /* Load vcpu and cpu_run */ REST_2GPRS(3, r1) @@ -315,4 +318,3 @@ kvm_loop_lightweight: /* Jump back into the beginning of this function */ b kvm_start_lightweight - diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c new file mode 100644 index 000000000000..a9f66abafcb3 --- /dev/null +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -0,0 +1,1289 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright Novell Inc 2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +#include <asm/kvm.h> +#include <asm/kvm_ppc.h> +#include <asm/disassemble.h> +#include <asm/kvm_book3s.h> +#include <asm/kvm_fpu.h> +#include <asm/reg.h> +#include <asm/cacheflush.h> +#include <linux/vmalloc.h> + +/* #define DEBUG */ + +#ifdef DEBUG +#define dprintk printk +#else +#define dprintk(...) 
do { } while(0); +#endif + +#define OP_LFS 48 +#define OP_LFSU 49 +#define OP_LFD 50 +#define OP_LFDU 51 +#define OP_STFS 52 +#define OP_STFSU 53 +#define OP_STFD 54 +#define OP_STFDU 55 +#define OP_PSQ_L 56 +#define OP_PSQ_LU 57 +#define OP_PSQ_ST 60 +#define OP_PSQ_STU 61 + +#define OP_31_LFSX 535 +#define OP_31_LFSUX 567 +#define OP_31_LFDX 599 +#define OP_31_LFDUX 631 +#define OP_31_STFSX 663 +#define OP_31_STFSUX 695 +#define OP_31_STFX 727 +#define OP_31_STFUX 759 +#define OP_31_LWIZX 887 +#define OP_31_STFIWX 983 + +#define OP_59_FADDS 21 +#define OP_59_FSUBS 20 +#define OP_59_FSQRTS 22 +#define OP_59_FDIVS 18 +#define OP_59_FRES 24 +#define OP_59_FMULS 25 +#define OP_59_FRSQRTES 26 +#define OP_59_FMSUBS 28 +#define OP_59_FMADDS 29 +#define OP_59_FNMSUBS 30 +#define OP_59_FNMADDS 31 + +#define OP_63_FCMPU 0 +#define OP_63_FCPSGN 8 +#define OP_63_FRSP 12 +#define OP_63_FCTIW 14 +#define OP_63_FCTIWZ 15 +#define OP_63_FDIV 18 +#define OP_63_FADD 21 +#define OP_63_FSQRT 22 +#define OP_63_FSEL 23 +#define OP_63_FRE 24 +#define OP_63_FMUL 25 +#define OP_63_FRSQRTE 26 +#define OP_63_FMSUB 28 +#define OP_63_FMADD 29 +#define OP_63_FNMSUB 30 +#define OP_63_FNMADD 31 +#define OP_63_FCMPO 32 +#define OP_63_MTFSB1 38 // XXX +#define OP_63_FSUB 20 +#define OP_63_FNEG 40 +#define OP_63_MCRFS 64 +#define OP_63_MTFSB0 70 +#define OP_63_FMR 72 +#define OP_63_MTFSFI 134 +#define OP_63_FABS 264 +#define OP_63_MFFS 583 +#define OP_63_MTFSF 711 + +#define OP_4X_PS_CMPU0 0 +#define OP_4X_PSQ_LX 6 +#define OP_4XW_PSQ_STX 7 +#define OP_4A_PS_SUM0 10 +#define OP_4A_PS_SUM1 11 +#define OP_4A_PS_MULS0 12 +#define OP_4A_PS_MULS1 13 +#define OP_4A_PS_MADDS0 14 +#define OP_4A_PS_MADDS1 15 +#define OP_4A_PS_DIV 18 +#define OP_4A_PS_SUB 20 +#define OP_4A_PS_ADD 21 +#define OP_4A_PS_SEL 23 +#define OP_4A_PS_RES 24 +#define OP_4A_PS_MUL 25 +#define OP_4A_PS_RSQRTE 26 +#define OP_4A_PS_MSUB 28 +#define OP_4A_PS_MADD 29 +#define OP_4A_PS_NMSUB 30 +#define OP_4A_PS_NMADD 31 +#define OP_4X_PS_CMPO0 32 +#define OP_4X_PSQ_LUX 38 +#define OP_4XW_PSQ_STUX 39 +#define OP_4X_PS_NEG 40 +#define OP_4X_PS_CMPU1 64 +#define OP_4X_PS_MR 72 +#define OP_4X_PS_CMPO1 96 +#define OP_4X_PS_NABS 136 +#define OP_4X_PS_ABS 264 +#define OP_4X_PS_MERGE00 528 +#define OP_4X_PS_MERGE01 560 +#define OP_4X_PS_MERGE10 592 +#define OP_4X_PS_MERGE11 624 + +#define SCALAR_NONE 0 +#define SCALAR_HIGH (1 << 0) +#define SCALAR_LOW (1 << 1) +#define SCALAR_NO_PS0 (1 << 2) +#define SCALAR_NO_PS1 (1 << 3) + +#define GQR_ST_TYPE_MASK 0x00000007 +#define GQR_ST_TYPE_SHIFT 0 +#define GQR_ST_SCALE_MASK 0x00003f00 +#define GQR_ST_SCALE_SHIFT 8 +#define GQR_LD_TYPE_MASK 0x00070000 +#define GQR_LD_TYPE_SHIFT 16 +#define GQR_LD_SCALE_MASK 0x3f000000 +#define GQR_LD_SCALE_SHIFT 24 + +#define GQR_QUANTIZE_FLOAT 0 +#define GQR_QUANTIZE_U8 4 +#define GQR_QUANTIZE_U16 5 +#define GQR_QUANTIZE_S8 6 +#define GQR_QUANTIZE_S16 7 + +#define FPU_LS_SINGLE 0 +#define FPU_LS_DOUBLE 1 +#define FPU_LS_SINGLE_LOW 2 + +static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) +{ + struct thread_struct t; + + t.fpscr.val = vcpu->arch.fpscr; + cvt_df((double*)&vcpu->arch.fpr[rt], (float*)&vcpu->arch.qpr[rt], &t); +} + +static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) +{ + u64 dsisr; + + vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0); + vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); + vcpu->arch.dear = eaddr; + /* Page Fault */ + dsisr = kvmppc_set_field(0, 33, 33, 1); + if (is_store) + to_book3s(vcpu)->dsisr = 
kvmppc_set_field(dsisr, 38, 38, 1); + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); +} + +static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, ulong addr, int ls_type) +{ + int emulated = EMULATE_FAIL; + struct thread_struct t; + int r; + char tmp[8]; + int len = sizeof(u32); + + if (ls_type == FPU_LS_DOUBLE) + len = sizeof(u64); + + t.fpscr.val = vcpu->arch.fpscr; + + /* read from memory */ + r = kvmppc_ld(vcpu, &addr, len, tmp, true); + vcpu->arch.paddr_accessed = addr; + + if (r < 0) { + kvmppc_inject_pf(vcpu, addr, false); + goto done_load; + } else if (r == EMULATE_DO_MMIO) { + emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1); + goto done_load; + } + + emulated = EMULATE_DONE; + + /* put in registers */ + switch (ls_type) { + case FPU_LS_SINGLE: + cvt_fd((float*)tmp, (double*)&vcpu->arch.fpr[rs], &t); + vcpu->arch.qpr[rs] = *((u32*)tmp); + break; + case FPU_LS_DOUBLE: + vcpu->arch.fpr[rs] = *((u64*)tmp); + break; + } + + dprintk(KERN_INFO "KVM: FPR_LD [0x%llx] at 0x%lx (%d)\n", *(u64*)tmp, + addr, len); + +done_load: + return emulated; +} + +static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, ulong addr, int ls_type) +{ + int emulated = EMULATE_FAIL; + struct thread_struct t; + int r; + char tmp[8]; + u64 val; + int len; + + t.fpscr.val = vcpu->arch.fpscr; + + switch (ls_type) { + case FPU_LS_SINGLE: + cvt_df((double*)&vcpu->arch.fpr[rs], (float*)tmp, &t); + val = *((u32*)tmp); + len = sizeof(u32); + break; + case FPU_LS_SINGLE_LOW: + *((u32*)tmp) = vcpu->arch.fpr[rs]; + val = vcpu->arch.fpr[rs] & 0xffffffff; + len = sizeof(u32); + break; + case FPU_LS_DOUBLE: + *((u64*)tmp) = vcpu->arch.fpr[rs]; + val = vcpu->arch.fpr[rs]; + len = sizeof(u64); + break; + default: + val = 0; + len = 0; + } + + r = kvmppc_st(vcpu, &addr, len, tmp, true); + vcpu->arch.paddr_accessed = addr; + if (r < 0) { + kvmppc_inject_pf(vcpu, addr, true); + } else if (r == EMULATE_DO_MMIO) { + emulated = kvmppc_handle_store(run, vcpu, val, len, 1); + } else { + emulated = EMULATE_DONE; + } + + dprintk(KERN_INFO "KVM: FPR_ST [0x%llx] at 0x%lx (%d)\n", + val, addr, len); + + return emulated; +} + +static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, ulong addr, bool w, int i) +{ + int emulated = EMULATE_FAIL; + struct thread_struct t; + int r; + float one = 1.0; + u32 tmp[2]; + + t.fpscr.val = vcpu->arch.fpscr; + + /* read from memory */ + if (w) { + r = kvmppc_ld(vcpu, &addr, sizeof(u32), tmp, true); + memcpy(&tmp[1], &one, sizeof(u32)); + } else { + r = kvmppc_ld(vcpu, &addr, sizeof(u32) * 2, tmp, true); + } + vcpu->arch.paddr_accessed = addr; + if (r < 0) { + kvmppc_inject_pf(vcpu, addr, false); + goto done_load; + } else if ((r == EMULATE_DO_MMIO) && w) { + emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1); + vcpu->arch.qpr[rs] = tmp[1]; + goto done_load; + } else if (r == EMULATE_DO_MMIO) { + emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1); + goto done_load; + } + + emulated = EMULATE_DONE; + + /* put in registers */ + cvt_fd((float*)&tmp[0], (double*)&vcpu->arch.fpr[rs], &t); + vcpu->arch.qpr[rs] = tmp[1]; + + dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], + tmp[1], addr, w ? 
4 : 8); + +done_load: + return emulated; +} + +static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, ulong addr, bool w, int i) +{ + int emulated = EMULATE_FAIL; + struct thread_struct t; + int r; + u32 tmp[2]; + int len = w ? sizeof(u32) : sizeof(u64); + + t.fpscr.val = vcpu->arch.fpscr; + + cvt_df((double*)&vcpu->arch.fpr[rs], (float*)&tmp[0], &t); + tmp[1] = vcpu->arch.qpr[rs]; + + r = kvmppc_st(vcpu, &addr, len, tmp, true); + vcpu->arch.paddr_accessed = addr; + if (r < 0) { + kvmppc_inject_pf(vcpu, addr, true); + } else if ((r == EMULATE_DO_MMIO) && w) { + emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1); + } else if (r == EMULATE_DO_MMIO) { + u64 val = ((u64)tmp[0] << 32) | tmp[1]; + emulated = kvmppc_handle_store(run, vcpu, val, 8, 1); + } else { + emulated = EMULATE_DONE; + } + + dprintk(KERN_INFO "KVM: PSQ_ST [0x%x, 0x%x] at 0x%lx (%d)\n", + tmp[0], tmp[1], addr, len); + + return emulated; +} + +/* + * Cuts out inst bits with ordering according to spec. + * That means the leftmost bit is zero. All given bits are included. + */ +static inline u32 inst_get_field(u32 inst, int msb, int lsb) +{ + return kvmppc_get_field(inst, msb + 32, lsb + 32); +} + +/* + * Replaces inst bits with ordering according to spec. + */ +static inline u32 inst_set_field(u32 inst, int msb, int lsb, int value) +{ + return kvmppc_set_field(inst, msb + 32, lsb + 32, value); +} + +bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst) +{ + if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)) + return false; + + switch (get_op(inst)) { + case OP_PSQ_L: + case OP_PSQ_LU: + case OP_PSQ_ST: + case OP_PSQ_STU: + case OP_LFS: + case OP_LFSU: + case OP_LFD: + case OP_LFDU: + case OP_STFS: + case OP_STFSU: + case OP_STFD: + case OP_STFDU: + return true; + case 4: + /* X form */ + switch (inst_get_field(inst, 21, 30)) { + case OP_4X_PS_CMPU0: + case OP_4X_PSQ_LX: + case OP_4X_PS_CMPO0: + case OP_4X_PSQ_LUX: + case OP_4X_PS_NEG: + case OP_4X_PS_CMPU1: + case OP_4X_PS_MR: + case OP_4X_PS_CMPO1: + case OP_4X_PS_NABS: + case OP_4X_PS_ABS: + case OP_4X_PS_MERGE00: + case OP_4X_PS_MERGE01: + case OP_4X_PS_MERGE10: + case OP_4X_PS_MERGE11: + return true; + } + /* XW form */ + switch (inst_get_field(inst, 25, 30)) { + case OP_4XW_PSQ_STX: + case OP_4XW_PSQ_STUX: + return true; + } + /* A form */ + switch (inst_get_field(inst, 26, 30)) { + case OP_4A_PS_SUM1: + case OP_4A_PS_SUM0: + case OP_4A_PS_MULS0: + case OP_4A_PS_MULS1: + case OP_4A_PS_MADDS0: + case OP_4A_PS_MADDS1: + case OP_4A_PS_DIV: + case OP_4A_PS_SUB: + case OP_4A_PS_ADD: + case OP_4A_PS_SEL: + case OP_4A_PS_RES: + case OP_4A_PS_MUL: + case OP_4A_PS_RSQRTE: + case OP_4A_PS_MSUB: + case OP_4A_PS_MADD: + case OP_4A_PS_NMSUB: + case OP_4A_PS_NMADD: + return true; + } + break; + case 59: + switch (inst_get_field(inst, 21, 30)) { + case OP_59_FADDS: + case OP_59_FSUBS: + case OP_59_FDIVS: + case OP_59_FRES: + case OP_59_FRSQRTES: + return true; + } + switch (inst_get_field(inst, 26, 30)) { + case OP_59_FMULS: + case OP_59_FMSUBS: + case OP_59_FMADDS: + case OP_59_FNMSUBS: + case OP_59_FNMADDS: + return true; + } + break; + case 63: + switch (inst_get_field(inst, 21, 30)) { + case OP_63_MTFSB0: + case OP_63_MTFSB1: + case OP_63_MTFSF: + case OP_63_MTFSFI: + case OP_63_MCRFS: + case OP_63_MFFS: + case OP_63_FCMPU: + case OP_63_FCMPO: + case OP_63_FNEG: + case OP_63_FMR: + case OP_63_FABS: + case OP_63_FRSP: + case OP_63_FDIV: + case OP_63_FADD: + case OP_63_FSUB: + case OP_63_FCTIW: + case OP_63_FCTIWZ: + case 
OP_63_FRSQRTE: + case OP_63_FCPSGN: + return true; + } + switch (inst_get_field(inst, 26, 30)) { + case OP_63_FMUL: + case OP_63_FSEL: + case OP_63_FMSUB: + case OP_63_FMADD: + case OP_63_FNMSUB: + case OP_63_FNMADD: + return true; + } + break; + case 31: + switch (inst_get_field(inst, 21, 30)) { + case OP_31_LFSX: + case OP_31_LFSUX: + case OP_31_LFDX: + case OP_31_LFDUX: + case OP_31_STFSX: + case OP_31_STFSUX: + case OP_31_STFX: + case OP_31_STFUX: + case OP_31_STFIWX: + return true; + } + break; + } + + return false; +} + +static int get_d_signext(u32 inst) +{ + int d = inst & 0x8ff; + + if (d & 0x800) + return -(d & 0x7ff); + + return (d & 0x7ff); +} + +static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, + int reg_out, int reg_in1, int reg_in2, + int reg_in3, int scalar, + void (*func)(struct thread_struct *t, + u32 *dst, u32 *src1, + u32 *src2, u32 *src3)) +{ + u32 *qpr = vcpu->arch.qpr; + u64 *fpr = vcpu->arch.fpr; + u32 ps0_out; + u32 ps0_in1, ps0_in2, ps0_in3; + u32 ps1_in1, ps1_in2, ps1_in3; + struct thread_struct t; + t.fpscr.val = vcpu->arch.fpscr; + + /* RC */ + WARN_ON(rc); + + /* PS0 */ + cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t); + cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t); + cvt_df((double*)&fpr[reg_in3], (float*)&ps0_in3, &t); + + if (scalar & SCALAR_LOW) + ps0_in2 = qpr[reg_in2]; + + func(&t, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); + + dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", + ps0_in1, ps0_in2, ps0_in3, ps0_out); + + if (!(scalar & SCALAR_NO_PS0)) + cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t); + + /* PS1 */ + ps1_in1 = qpr[reg_in1]; + ps1_in2 = qpr[reg_in2]; + ps1_in3 = qpr[reg_in3]; + + if (scalar & SCALAR_HIGH) + ps1_in2 = ps0_in2; + + if (!(scalar & SCALAR_NO_PS1)) + func(&t, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); + + dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", + ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); + + return EMULATE_DONE; +} + +static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, + int reg_out, int reg_in1, int reg_in2, + int scalar, + void (*func)(struct thread_struct *t, + u32 *dst, u32 *src1, + u32 *src2)) +{ + u32 *qpr = vcpu->arch.qpr; + u64 *fpr = vcpu->arch.fpr; + u32 ps0_out; + u32 ps0_in1, ps0_in2; + u32 ps1_out; + u32 ps1_in1, ps1_in2; + struct thread_struct t; + t.fpscr.val = vcpu->arch.fpscr; + + /* RC */ + WARN_ON(rc); + + /* PS0 */ + cvt_df((double*)&fpr[reg_in1], (float*)&ps0_in1, &t); + + if (scalar & SCALAR_LOW) + ps0_in2 = qpr[reg_in2]; + else + cvt_df((double*)&fpr[reg_in2], (float*)&ps0_in2, &t); + + func(&t, &ps0_out, &ps0_in1, &ps0_in2); + + if (!(scalar & SCALAR_NO_PS0)) { + dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", + ps0_in1, ps0_in2, ps0_out); + + cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t); + } + + /* PS1 */ + ps1_in1 = qpr[reg_in1]; + ps1_in2 = qpr[reg_in2]; + + if (scalar & SCALAR_HIGH) + ps1_in2 = ps0_in2; + + func(&t, &ps1_out, &ps1_in1, &ps1_in2); + + if (!(scalar & SCALAR_NO_PS1)) { + qpr[reg_out] = ps1_out; + + dprintk(KERN_INFO "PS2 ps1 -> f(0x%x, 0x%x) = 0x%x\n", + ps1_in1, ps1_in2, qpr[reg_out]); + } + + return EMULATE_DONE; +} + +static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, + int reg_out, int reg_in, + void (*func)(struct thread_struct *t, + u32 *dst, u32 *src1)) +{ + u32 *qpr = vcpu->arch.qpr; + u64 *fpr = vcpu->arch.fpr; + u32 ps0_out, ps0_in; + u32 ps1_in; + struct thread_struct t; + t.fpscr.val = vcpu->arch.fpscr; + + /* RC */ + WARN_ON(rc); + + /* PS0 */ + cvt_df((double*)&fpr[reg_in], 
(float*)&ps0_in, &t); + func(&t, &ps0_out, &ps0_in); + + dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", + ps0_in, ps0_out); + + cvt_fd((float*)&ps0_out, (double*)&fpr[reg_out], &t); + + /* PS1 */ + ps1_in = qpr[reg_in]; + func(&t, &qpr[reg_out], &ps1_in); + + dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", + ps1_in, qpr[reg_out]); + + return EMULATE_DONE; +} + +int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + u32 inst = kvmppc_get_last_inst(vcpu); + enum emulation_result emulated = EMULATE_DONE; + + int ax_rd = inst_get_field(inst, 6, 10); + int ax_ra = inst_get_field(inst, 11, 15); + int ax_rb = inst_get_field(inst, 16, 20); + int ax_rc = inst_get_field(inst, 21, 25); + short full_d = inst_get_field(inst, 16, 31); + + u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; + u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; + u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; + u64 *fpr_c = &vcpu->arch.fpr[ax_rc]; + + bool rcomp = (inst & 1) ? true : false; + u32 cr = kvmppc_get_cr(vcpu); + struct thread_struct t; +#ifdef DEBUG + int i; +#endif + + t.fpscr.val = vcpu->arch.fpscr; + + if (!kvmppc_inst_is_paired_single(vcpu, inst)) + return EMULATE_FAIL; + + if (!(vcpu->arch.msr & MSR_FP)) { + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); + return EMULATE_AGAIN; + } + + kvmppc_giveup_ext(vcpu, MSR_FP); + preempt_disable(); + enable_kernel_fp(); + /* Do we need to clear FE0 / FE1 here? Don't think so. */ + +#ifdef DEBUG + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { + u32 f; + cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t); + dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", + i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); + } +#endif + + switch (get_op(inst)) { + case OP_PSQ_L: + { + ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; + bool w = inst_get_field(inst, 16, 16) ? true : false; + int i = inst_get_field(inst, 17, 19); + + addr += get_d_signext(inst); + emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + break; + } + case OP_PSQ_LU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra); + bool w = inst_get_field(inst, 16, 16) ? true : false; + int i = inst_get_field(inst, 17, 19); + + addr += get_d_signext(inst); + emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_PSQ_ST: + { + ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; + bool w = inst_get_field(inst, 16, 16) ? true : false; + int i = inst_get_field(inst, 17, 19); + + addr += get_d_signext(inst); + emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + break; + } + case OP_PSQ_STU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra); + bool w = inst_get_field(inst, 16, 16) ? true : false; + int i = inst_get_field(inst, 17, 19); + + addr += get_d_signext(inst); + emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case 4: + /* X form */ + switch (inst_get_field(inst, 21, 30)) { + case OP_4X_PS_CMPU0: + /* XXX */ + emulated = EMULATE_FAIL; + break; + case OP_4X_PSQ_LX: + { + ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; + bool w = inst_get_field(inst, 21, 21) ? 
true : false; + int i = inst_get_field(inst, 22, 24); + + addr += kvmppc_get_gpr(vcpu, ax_rb); + emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + break; + } + case OP_4X_PS_CMPO0: + /* XXX */ + emulated = EMULATE_FAIL; + break; + case OP_4X_PSQ_LUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra); + bool w = inst_get_field(inst, 21, 21) ? true : false; + int i = inst_get_field(inst, 22, 24); + + addr += kvmppc_get_gpr(vcpu, ax_rb); + emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_4X_PS_NEG: + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; + vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + vcpu->arch.qpr[ax_rd] ^= 0x80000000; + break; + case OP_4X_PS_CMPU1: + /* XXX */ + emulated = EMULATE_FAIL; + break; + case OP_4X_PS_MR: + WARN_ON(rcomp); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + break; + case OP_4X_PS_CMPO1: + /* XXX */ + emulated = EMULATE_FAIL; + break; + case OP_4X_PS_NABS: + WARN_ON(rcomp); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; + vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + vcpu->arch.qpr[ax_rd] |= 0x80000000; + break; + case OP_4X_PS_ABS: + WARN_ON(rcomp); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; + vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + vcpu->arch.qpr[ax_rd] &= ~0x80000000; + break; + case OP_4X_PS_MERGE00: + WARN_ON(rcomp); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; + /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ + cvt_df((double*)&vcpu->arch.fpr[ax_rb], + (float*)&vcpu->arch.qpr[ax_rd], &t); + break; + case OP_4X_PS_MERGE01: + WARN_ON(rcomp); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + break; + case OP_4X_PS_MERGE10: + WARN_ON(rcomp); + /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ + cvt_fd((float*)&vcpu->arch.qpr[ax_ra], + (double*)&vcpu->arch.fpr[ax_rd], &t); + /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ + cvt_df((double*)&vcpu->arch.fpr[ax_rb], + (float*)&vcpu->arch.qpr[ax_rd], &t); + break; + case OP_4X_PS_MERGE11: + WARN_ON(rcomp); + /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ + cvt_fd((float*)&vcpu->arch.qpr[ax_ra], + (double*)&vcpu->arch.fpr[ax_rd], &t); + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; + break; + } + /* XW form */ + switch (inst_get_field(inst, 25, 30)) { + case OP_4XW_PSQ_STX: + { + ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; + bool w = inst_get_field(inst, 21, 21) ? true : false; + int i = inst_get_field(inst, 22, 24); + + addr += kvmppc_get_gpr(vcpu, ax_rb); + emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + break; + } + case OP_4XW_PSQ_STUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra); + bool w = inst_get_field(inst, 21, 21) ? 
true : false; + int i = inst_get_field(inst, 22, 24); + + addr += kvmppc_get_gpr(vcpu, ax_rb); + emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + } + /* A form */ + switch (inst_get_field(inst, 26, 30)) { + case OP_4A_PS_SUM1: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); + vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; + break; + case OP_4A_PS_SUM0: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds); + vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; + break; + case OP_4A_PS_MULS0: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls); + break; + case OP_4A_PS_MULS1: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, SCALAR_LOW, fps_fmuls); + break; + case OP_4A_PS_MADDS0: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds); + break; + case OP_4A_PS_MADDS1: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds); + break; + case OP_4A_PS_DIV: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rb, SCALAR_NONE, fps_fdivs); + break; + case OP_4A_PS_SUB: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rb, SCALAR_NONE, fps_fsubs); + break; + case OP_4A_PS_ADD: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rb, SCALAR_NONE, fps_fadds); + break; + case OP_4A_PS_SEL: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel); + break; + case OP_4A_PS_RES: + emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, + ax_rb, fps_fres); + break; + case OP_4A_PS_MUL: + emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, SCALAR_NONE, fps_fmuls); + break; + case OP_4A_PS_RSQRTE: + emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, + ax_rb, fps_frsqrte); + break; + case OP_4A_PS_MSUB: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs); + break; + case OP_4A_PS_MADD: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds); + break; + case OP_4A_PS_NMSUB: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs); + break; + case OP_4A_PS_NMADD: + emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, + ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds); + break; + } + break; + + /* Real FPU operations */ + + case OP_LFS: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + FPU_LS_SINGLE); + break; + } + case OP_LFSU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + FPU_LS_SINGLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_LFD: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + FPU_LS_DOUBLE); + break; + } + case OP_LFDU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, + FPU_LS_DOUBLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_STFS: + { + ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + FPU_LS_SINGLE); + break; + } + case OP_STFSU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + FPU_LS_SINGLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_STFD: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + FPU_LS_DOUBLE); + break; + } + case OP_STFDU: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, + FPU_LS_DOUBLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case 31: + switch (inst_get_field(inst, 21, 30)) { + case OP_31_LFSX: + { + ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; + + addr += kvmppc_get_gpr(vcpu, ax_rb); + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + addr, FPU_LS_SINGLE); + break; + } + case OP_31_LFSUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + addr, FPU_LS_SINGLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_31_LFDX: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + addr, FPU_LS_DOUBLE); + break; + } + case OP_31_LFDUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, + addr, FPU_LS_DOUBLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_31_STFSX: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + addr, FPU_LS_SINGLE); + break; + } + case OP_31_STFSUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + addr, FPU_LS_SINGLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_31_STFX: + { + ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + addr, FPU_LS_DOUBLE); + break; + } + case OP_31_STFUX: + { + ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + addr, FPU_LS_DOUBLE); + + if (emulated == EMULATE_DONE) + kvmppc_set_gpr(vcpu, ax_ra, addr); + break; + } + case OP_31_STFIWX: + { + ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + + kvmppc_get_gpr(vcpu, ax_rb); + + emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, + addr, + FPU_LS_SINGLE_LOW); + break; + } + break; + } + break; + case 59: + switch (inst_get_field(inst, 21, 30)) { + case OP_59_FADDS: + fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FSUBS: + fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FDIVS: + fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FRES: + fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FRSQRTES: + fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + } + switch (inst_get_field(inst, 26, 30)) { + case OP_59_FMULS: + fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FMSUBS: + fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FMADDS: + fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FNMSUBS: + fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_59_FNMADDS: + fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + } + break; + case 63: + switch (inst_get_field(inst, 21, 30)) { + case OP_63_MTFSB0: + case OP_63_MTFSB1: + case OP_63_MCRFS: + case OP_63_MTFSFI: + /* XXX need to implement */ + break; + case OP_63_MFFS: + /* XXX missing CR */ + *fpr_d = vcpu->arch.fpscr; + break; + case OP_63_MTFSF: + /* XXX missing fm bits */ + /* XXX missing CR */ + vcpu->arch.fpscr = *fpr_b; + break; + case OP_63_FCMPU: + { + u32 tmp_cr; + u32 cr0_mask = 0xf0000000; + u32 cr_shift = inst_get_field(inst, 6, 8) * 4; + + fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); + cr &= ~(cr0_mask >> cr_shift); + cr |= (cr & cr0_mask) >> cr_shift; + break; + } + case OP_63_FCMPO: + { + u32 tmp_cr; + u32 cr0_mask = 0xf0000000; + u32 cr_shift = inst_get_field(inst, 6, 8) * 4; + + fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); + cr &= ~(cr0_mask >> cr_shift); + cr |= (cr & cr0_mask) >> cr_shift; + break; + } + case OP_63_FNEG: + fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + break; + case OP_63_FMR: + *fpr_d = *fpr_b; + break; + case OP_63_FABS: + fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + break; + case OP_63_FCPSGN: + fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + break; + case OP_63_FDIV: + fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + break; + case OP_63_FADD: + fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + break; + case OP_63_FSUB: + fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + break; + case OP_63_FCTIW: + fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + break; + case OP_63_FCTIWZ: + fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + break; + case OP_63_FRSP: + fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + kvmppc_sync_qpr(vcpu, ax_rd); + break; + case OP_63_FRSQRTE: + { + double one = 1.0f; + + /* fD = sqrt(fB) */ + fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + /* fD = 1.0f / fD */ + fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); + break; + } + } + switch (inst_get_field(inst, 26, 30)) { + case OP_63_FMUL: + fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, 
fpr_c); + break; + case OP_63_FSEL: + fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + break; + case OP_63_FMSUB: + fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + break; + case OP_63_FMADD: + fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + break; + case OP_63_FNMSUB: + fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + break; + case OP_63_FNMADD: + fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + break; + } + break; + } + +#ifdef DEBUG + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { + u32 f; + cvt_df((double*)&vcpu->arch.fpr[i], (float*)&f, &t); + dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); + } +#endif + + if (rcomp) + kvmppc_set_cr(vcpu, cr); + + preempt_enable(); + + return emulated; +} diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index c83c60ad96c5..506d5c316c96 100644 --- a/arch/powerpc/kvm/book3s_64_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -22,7 +22,10 @@ #include <asm/reg.h> #include <asm/page.h> #include <asm/asm-offsets.h> + +#ifdef CONFIG_PPC_BOOK3S_64 #include <asm/exception-64s.h> +#endif /***************************************************************************** * * @@ -30,6 +33,39 @@ * * ****************************************************************************/ +#if defined(CONFIG_PPC_BOOK3S_64) + +#define LOAD_SHADOW_VCPU(reg) \ + mfspr reg, SPRN_SPRG_PACA + +#define SHADOW_VCPU_OFF PACA_KVM_SVCPU +#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) +#define FUNC(name) GLUE(.,name) + +#elif defined(CONFIG_PPC_BOOK3S_32) + +#define LOAD_SHADOW_VCPU(reg) \ + mfspr reg, SPRN_SPRG_THREAD; \ + lwz reg, THREAD_KVM_SVCPU(reg); \ + /* PPC32 can have a NULL pointer - let's check for that */ \ + mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \ + mfcr r12; \ + cmpwi reg, 0; \ + bne 1f; \ + mfspr reg, SPRN_SPRG_SCRATCH0; \ + mtcr r12; \ + mfspr r12, SPRN_SPRG_SCRATCH1; \ + b kvmppc_resume_\intno; \ +1:; \ + mtcr r12; \ + mfspr r12, SPRN_SPRG_SCRATCH1; \ + tophys(reg, reg) + +#define SHADOW_VCPU_OFF 0 +#define MSR_NOIRQ MSR_KERNEL +#define FUNC(name) name + +#endif .macro INTERRUPT_TRAMPOLINE intno @@ -42,19 +78,19 @@ kvmppc_trampoline_\intno: * First thing to do is to find out if we're coming * from a KVM guest or a Linux process. * - * To distinguish, we check a magic byte in the PACA + * To distinguish, we check a magic byte in the PACA/current */ - mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ - std r12, PACA_KVM_SCRATCH0(r13) + LOAD_SHADOW_VCPU(r13) + PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) mfcr r12 - stw r12, PACA_KVM_SCRATCH1(r13) - lbz r12, PACA_KVM_IN_GUEST(r13) + stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) + lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) cmpwi r12, KVM_GUEST_MODE_NONE bne ..kvmppc_handler_hasmagic_\intno /* No KVM guest? Then jump back to the Linux handler! 
*/ - lwz r12, PACA_KVM_SCRATCH1(r13) + lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) mtcr r12 - ld r12, PACA_KVM_SCRATCH0(r13) + PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ b kvmppc_resume_\intno /* Get back original handler */ @@ -76,9 +112,7 @@ kvmppc_trampoline_\intno: INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE -INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE -INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM @@ -88,7 +122,14 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC + +/* Those are only available on 64 bit machines */ + +#ifdef CONFIG_PPC_BOOK3S_64 +INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT +INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX +#endif /* * Bring us back to the faulting code, but skip the @@ -99,11 +140,11 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX * * Input Registers: * - * R12 = free - * R13 = PACA - * PACA.KVM.SCRATCH0 = guest R12 - * PACA.KVM.SCRATCH1 = guest CR - * SPRG_SCRATCH0 = guest R13 + * R12 = free + * R13 = Shadow VCPU (PACA) + * SVCPU.SCRATCH0 = guest R12 + * SVCPU.SCRATCH1 = guest CR + * SPRG_SCRATCH0 = guest R13 * */ kvmppc_handler_skip_ins: @@ -114,9 +155,9 @@ kvmppc_handler_skip_ins: mtsrr0 r12 /* Clean up all state */ - lwz r12, PACA_KVM_SCRATCH1(r13) + lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) mtcr r12 - ld r12, PACA_KVM_SCRATCH0(r13) + PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) mfspr r13, SPRN_SPRG_SCRATCH0 /* And get back into the code */ @@ -147,41 +188,48 @@ kvmppc_handler_lowmem_trampoline_end: * * R3 = function * R4 = MSR - * R5 = CTR + * R5 = scratch register * */ _GLOBAL(kvmppc_rmcall) - mtmsr r4 /* Disable relocation, so mtsrr + LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ) + mtmsr r5 /* Disable relocation and interrupts, so mtsrr doesn't get interrupted */ - mtctr r5 + sync mtsrr0 r3 mtsrr1 r4 RFI +#if defined(CONFIG_PPC_BOOK3S_32) +#define STACK_LR INT_FRAME_SIZE+4 +#elif defined(CONFIG_PPC_BOOK3S_64) +#define STACK_LR _LINK +#endif + /* * Activate current's external feature (FPU/Altivec/VSX) */ -#define define_load_up(what) \ - \ -_GLOBAL(kvmppc_load_up_ ## what); \ - subi r1, r1, INT_FRAME_SIZE; \ - mflr r3; \ - std r3, _LINK(r1); \ - mfmsr r4; \ - std r31, GPR3(r1); \ - mr r31, r4; \ - li r5, MSR_DR; \ - oris r5, r5, MSR_EE@h; \ - andc r4, r4, r5; \ - mtmsr r4; \ - \ - bl .load_up_ ## what; \ - \ - mtmsr r31; \ - ld r3, _LINK(r1); \ - ld r31, GPR3(r1); \ - addi r1, r1, INT_FRAME_SIZE; \ - mtlr r3; \ +#define define_load_up(what) \ + \ +_GLOBAL(kvmppc_load_up_ ## what); \ + PPC_STLU r1, -INT_FRAME_SIZE(r1); \ + mflr r3; \ + PPC_STL r3, STACK_LR(r1); \ + PPC_STL r20, _NIP(r1); \ + mfmsr r20; \ + LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ + andc r3,r20,r3; /* Disable DR,EE */ \ + mtmsr r3; \ + sync; \ + \ + bl FUNC(load_up_ ## what); \ + \ + mtmsr r20; /* Enable DR,EE */ \ + sync; \ + PPC_LL r3, STACK_LR(r1); \ + PPC_LL r20, _NIP(r1); \ + mtlr r3; \ + addi r1, r1, INT_FRAME_SIZE; \ blr define_load_up(fpu) @@ -194,11 +242,10 @@ define_load_up(vsx) .global kvmppc_trampoline_lowmem 
kvmppc_trampoline_lowmem: - .long kvmppc_handler_lowmem_trampoline - _stext + .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START .global kvmppc_trampoline_enter kvmppc_trampoline_enter: - .long kvmppc_handler_trampoline_enter - _stext - -#include "book3s_64_slb.S" + .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START +#include "book3s_segment.S" diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S new file mode 100644 index 000000000000..7c52ed0b7051 --- /dev/null +++ b/arch/powerpc/kvm/book3s_segment.S @@ -0,0 +1,259 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Copyright SUSE Linux Products GmbH 2010 + * + * Authors: Alexander Graf <agraf@suse.de> + */ + +/* Real mode helpers */ + +#if defined(CONFIG_PPC_BOOK3S_64) + +#define GET_SHADOW_VCPU(reg) \ + addi reg, r13, PACA_KVM_SVCPU + +#elif defined(CONFIG_PPC_BOOK3S_32) + +#define GET_SHADOW_VCPU(reg) \ + tophys(reg, r2); \ + lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \ + tophys(reg, reg) + +#endif + +/* Disable for nested KVM */ +#define USE_QUICK_LAST_INST + + +/* Get helper functions for subarch specific functionality */ + +#if defined(CONFIG_PPC_BOOK3S_64) +#include "book3s_64_slb.S" +#elif defined(CONFIG_PPC_BOOK3S_32) +#include "book3s_32_sr.S" +#endif + +/****************************************************************************** + * * + * Entry code * + * * + *****************************************************************************/ + +.global kvmppc_handler_trampoline_enter +kvmppc_handler_trampoline_enter: + + /* Required state: + * + * MSR = ~IR|DR + * R13 = PACA + * R1 = host R1 + * R2 = host R2 + * R10 = guest MSR + * all other volatile GPRS = free + * SVCPU[CR] = guest CR + * SVCPU[XER] = guest XER + * SVCPU[CTR] = guest CTR + * SVCPU[LR] = guest LR + */ + + /* r3 = shadow vcpu */ + GET_SHADOW_VCPU(r3) + + /* Move SRR0 and SRR1 into the respective regs */ + PPC_LL r9, SVCPU_PC(r3) + mtsrr0 r9 + mtsrr1 r10 + + /* Activate guest mode, so faults get handled by KVM */ + li r11, KVM_GUEST_MODE_GUEST + stb r11, SVCPU_IN_GUEST(r3) + + /* Switch to guest segment. This is subarch specific. 
*/ + LOAD_GUEST_SEGMENTS + + /* Enter guest */ + + PPC_LL r4, (SVCPU_CTR)(r3) + PPC_LL r5, (SVCPU_LR)(r3) + lwz r6, (SVCPU_CR)(r3) + lwz r7, (SVCPU_XER)(r3) + + mtctr r4 + mtlr r5 + mtcr r6 + mtxer r7 + + PPC_LL r0, (SVCPU_R0)(r3) + PPC_LL r1, (SVCPU_R1)(r3) + PPC_LL r2, (SVCPU_R2)(r3) + PPC_LL r4, (SVCPU_R4)(r3) + PPC_LL r5, (SVCPU_R5)(r3) + PPC_LL r6, (SVCPU_R6)(r3) + PPC_LL r7, (SVCPU_R7)(r3) + PPC_LL r8, (SVCPU_R8)(r3) + PPC_LL r9, (SVCPU_R9)(r3) + PPC_LL r10, (SVCPU_R10)(r3) + PPC_LL r11, (SVCPU_R11)(r3) + PPC_LL r12, (SVCPU_R12)(r3) + PPC_LL r13, (SVCPU_R13)(r3) + + PPC_LL r3, (SVCPU_R3)(r3) + + RFI +kvmppc_handler_trampoline_enter_end: + + + +/****************************************************************************** + * * + * Exit code * + * * + *****************************************************************************/ + +.global kvmppc_handler_trampoline_exit +kvmppc_handler_trampoline_exit: + + /* Register usage at this point: + * + * SPRG_SCRATCH0 = guest R13 + * R12 = exit handler id + * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] + * SVCPU.SCRATCH0 = guest R12 + * SVCPU.SCRATCH1 = guest CR + * + */ + + /* Save registers */ + + PPC_STL r0, (SHADOW_VCPU_OFF + SVCPU_R0)(r13) + PPC_STL r1, (SHADOW_VCPU_OFF + SVCPU_R1)(r13) + PPC_STL r2, (SHADOW_VCPU_OFF + SVCPU_R2)(r13) + PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_R3)(r13) + PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_R4)(r13) + PPC_STL r5, (SHADOW_VCPU_OFF + SVCPU_R5)(r13) + PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_R6)(r13) + PPC_STL r7, (SHADOW_VCPU_OFF + SVCPU_R7)(r13) + PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R8)(r13) + PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R9)(r13) + PPC_STL r10, (SHADOW_VCPU_OFF + SVCPU_R10)(r13) + PPC_STL r11, (SHADOW_VCPU_OFF + SVCPU_R11)(r13) + + /* Restore R1/R2 so we can handle faults */ + PPC_LL r1, (SHADOW_VCPU_OFF + SVCPU_HOST_R1)(r13) + PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) + + /* Save guest PC and MSR */ + mfsrr0 r3 + mfsrr1 r4 + + PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) + PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) + + /* Get scratch'ed off registers */ + mfspr r9, SPRN_SPRG_SCRATCH0 + PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) + lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) + + PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_R13)(r13) + PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_R12)(r13) + stw r7, (SHADOW_VCPU_OFF + SVCPU_CR)(r13) + + /* Save more register state */ + + mfxer r5 + mfdar r6 + mfdsisr r7 + mfctr r8 + mflr r9 + + stw r5, (SHADOW_VCPU_OFF + SVCPU_XER)(r13) + PPC_STL r6, (SHADOW_VCPU_OFF + SVCPU_FAULT_DAR)(r13) + stw r7, (SHADOW_VCPU_OFF + SVCPU_FAULT_DSISR)(r13) + PPC_STL r8, (SHADOW_VCPU_OFF + SVCPU_CTR)(r13) + PPC_STL r9, (SHADOW_VCPU_OFF + SVCPU_LR)(r13) + + /* + * In order for us to easily get the last instruction, + * we got the #vmexit at, we exploit the fact that the + * virtual layout is still the same here, so we can just + * ld from the guest's PC address + */ + + /* We only load the last instruction when it's safe */ + cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE + beq ld_last_inst + cmpwi r12, BOOK3S_INTERRUPT_PROGRAM + beq ld_last_inst + cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT + beq- ld_last_inst + + b no_ld_last_inst + +ld_last_inst: + /* Save off the guest instruction we're at */ + + /* In case lwz faults */ + li r0, KVM_INST_FETCH_FAILED + +#ifdef USE_QUICK_LAST_INST + + /* Set guest mode to 'jump over instruction' so if lwz faults + * we'll just continue at the next IP. 
*/ + li r9, KVM_GUEST_MODE_SKIP + stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) + + /* 1) enable paging for data */ + mfmsr r9 + ori r11, r9, MSR_DR /* Enable paging for data */ + mtmsr r11 + sync + /* 2) fetch the instruction */ + lwz r0, 0(r3) + /* 3) disable paging again */ + mtmsr r9 + sync + +#endif + stw r0, (SHADOW_VCPU_OFF + SVCPU_LAST_INST)(r13) + +no_ld_last_inst: + + /* Unset guest mode */ + li r9, KVM_GUEST_MODE_NONE + stb r9, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13) + + /* Switch back to host MMU */ + LOAD_HOST_SEGMENTS + + /* Register usage at this point: + * + * R1 = host R1 + * R2 = host R2 + * R12 = exit handler id + * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64] + * SVCPU.* = guest * + * + */ + + /* RFI into the highmem handler */ + mfmsr r7 + ori r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME /* Enable paging */ + mtsrr1 r7 + /* Load highmem handler address */ + PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_VMHANDLER)(r13) + mtsrr0 r8 + + RFI +kvmppc_handler_trampoline_exit_end: diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 2a3a1953d4bd..a33ab8cc2ccc 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -133,6 +133,12 @@ void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); } +void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq) +{ + clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); +} + /* Deliver the interrupt of the corresponding priority, if possible. */ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) @@ -479,6 +485,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + regs->pc = vcpu->arch.pc; regs->cr = kvmppc_get_cr(vcpu); regs->ctr = vcpu->arch.ctr; @@ -499,6 +507,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); + vcpu_put(vcpu); + return 0; } @@ -506,6 +516,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; + vcpu_load(vcpu); + vcpu->arch.pc = regs->pc; kvmppc_set_cr(vcpu, regs->cr); vcpu->arch.ctr = regs->ctr; @@ -525,6 +537,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); + vcpu_put(vcpu); + return 0; } @@ -553,7 +567,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { - return kvmppc_core_vcpu_translate(vcpu, tr); + int r; + + vcpu_load(vcpu); + r = kvmppc_core_vcpu_translate(vcpu, tr); + vcpu_put(vcpu); + return r; } int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 669a5c5fc7d7..bc2b4004eb26 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -161,7 +161,7 @@ static int __init kvmppc_e500_init(void) flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), THIS_MODULE); + return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); } static void __init kvmppc_e500_exit(void) diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index cb72a65f4ecc..4568ec386c2a 100644 --- a/arch/powerpc/kvm/emulate.c +++ 
b/arch/powerpc/kvm/emulate.c @@ -38,10 +38,12 @@ #define OP_31_XOP_LBZX 87 #define OP_31_XOP_STWX 151 #define OP_31_XOP_STBX 215 +#define OP_31_XOP_LBZUX 119 #define OP_31_XOP_STBUX 247 #define OP_31_XOP_LHZX 279 #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MFSPR 339 +#define OP_31_XOP_LHAX 343 #define OP_31_XOP_STHX 407 #define OP_31_XOP_STHUX 439 #define OP_31_XOP_MTSPR 467 @@ -62,10 +64,12 @@ #define OP_STBU 39 #define OP_LHZ 40 #define OP_LHZU 41 +#define OP_LHA 42 +#define OP_LHAU 43 #define OP_STH 44 #define OP_STHU 45 -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) { return 1; @@ -82,7 +86,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) unsigned long dec_nsec; pr_debug("mtDEC: %x\n", vcpu->arch.dec); -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S /* mtdec lowers the interrupt line when positive. */ kvmppc_core_dequeue_dec(vcpu); @@ -128,7 +132,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { - u32 inst = vcpu->arch.last_inst; + u32 inst = kvmppc_get_last_inst(vcpu); u32 ea; int ra; int rb; @@ -143,13 +147,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); - /* Try again next time */ - if (inst == KVM_INST_FETCH_FAILED) - return EMULATE_DONE; - switch (get_op(inst)) { case OP_TRAP: -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else @@ -171,6 +171,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; + case OP_31_XOP_LBZUX: + rt = get_rt(inst); + ra = get_ra(inst); + rb = get_rb(inst); + + ea = kvmppc_get_gpr(vcpu, rb); + if (ra) + ea += kvmppc_get_gpr(vcpu, ra); + + emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); + kvmppc_set_gpr(vcpu, ra, ea); + break; + case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, @@ -200,6 +213,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_set_gpr(vcpu, rs, ea); break; + case OP_31_XOP_LHAX: + rt = get_rt(inst); + emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); + break; + case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); @@ -450,6 +468,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; + case OP_LHA: + rt = get_rt(inst); + emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); + break; + + case OP_LHAU: + ra = get_ra(inst); + rt = get_rt(inst); + emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); + break; + case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, @@ -472,7 +502,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); - if (emulated == EMULATE_FAIL) { + if (emulated == EMULATE_AGAIN) { + advance = 0; + } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); @@ -480,10 +512,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } } - trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated); + 
trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); + /* Advance past emulated instruction. */ if (advance) - vcpu->arch.pc += 4; /* Advance past emulated instruction. */ + kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; } diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S new file mode 100644 index 000000000000..2b340a3eee90 --- /dev/null +++ b/arch/powerpc/kvm/fpu.S @@ -0,0 +1,273 @@ +/* + * FPU helper code to use FPU operations from inside the kernel + * + * Copyright (C) 2010 Alexander Graf (agraf@suse.de) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include <asm/reg.h> +#include <asm/page.h> +#include <asm/mmu.h> +#include <asm/pgtable.h> +#include <asm/cputable.h> +#include <asm/cache.h> +#include <asm/thread_info.h> +#include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> + +/* Instructions operating on single parameters */ + +/* + * Single operation with one input operand + * + * R3 = (double*)&fpscr + * R4 = (short*)&result + * R5 = (short*)¶m1 + */ +#define FPS_ONE_IN(name) \ +_GLOBAL(fps_ ## name); \ + lfd 0,0(r3); /* load up fpscr value */ \ + MTFSF_L(0); \ + lfs 0,0(r5); \ + \ + name 0,0; \ + \ + stfs 0,0(r4); \ + mffs 0; \ + stfd 0,0(r3); /* save new fpscr value */ \ + blr + +/* + * Single operation with two input operands + * + * R3 = (double*)&fpscr + * R4 = (short*)&result + * R5 = (short*)¶m1 + * R6 = (short*)¶m2 + */ +#define FPS_TWO_IN(name) \ +_GLOBAL(fps_ ## name); \ + lfd 0,0(r3); /* load up fpscr value */ \ + MTFSF_L(0); \ + lfs 0,0(r5); \ + lfs 1,0(r6); \ + \ + name 0,0,1; \ + \ + stfs 0,0(r4); \ + mffs 0; \ + stfd 0,0(r3); /* save new fpscr value */ \ + blr + +/* + * Single operation with three input operands + * + * R3 = (double*)&fpscr + * R4 = (short*)&result + * R5 = (short*)¶m1 + * R6 = (short*)¶m2 + * R7 = (short*)¶m3 + */ +#define FPS_THREE_IN(name) \ +_GLOBAL(fps_ ## name); \ + lfd 0,0(r3); /* load up fpscr value */ \ + MTFSF_L(0); \ + lfs 0,0(r5); \ + lfs 1,0(r6); \ + lfs 2,0(r7); \ + \ + name 0,0,1,2; \ + \ + stfs 0,0(r4); \ + mffs 0; \ + stfd 0,0(r3); /* save new fpscr value */ \ + blr + +FPS_ONE_IN(fres) +FPS_ONE_IN(frsqrte) +FPS_ONE_IN(fsqrts) +FPS_TWO_IN(fadds) +FPS_TWO_IN(fdivs) +FPS_TWO_IN(fmuls) +FPS_TWO_IN(fsubs) +FPS_THREE_IN(fmadds) +FPS_THREE_IN(fmsubs) +FPS_THREE_IN(fnmadds) +FPS_THREE_IN(fnmsubs) +FPS_THREE_IN(fsel) + + +/* Instructions operating on double parameters */ + +/* + * Beginning of double instruction processing + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)&result + * R6 = (double*)¶m1 + * R7 = (double*)¶m2 [load_two] + * R8 = (double*)¶m3 [load_three] + * LR = instruction call function + */ +fpd_load_three: + lfd 2,0(r8) /* load param3 */ +fpd_load_two: + lfd 1,0(r7) /* load param2 */ +fpd_load_one: + lfd 0,0(r6) /* load param1 */ +fpd_load_none: + lfd 3,0(r3) /* load up fpscr value */ + MTFSF_L(3) + lwz r6, 0(r4) /* load cr */ + mtcr r6 + blr + +/* + * End of double instruction processing + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)&result + * LR = caller of instruction call function + */ +fpd_return: + mfcr r6 + stfd 0,0(r5) /* save result */ + mffs 0 + stfd 0,0(r3) /* save new fpscr value */ + stw r6,0(r4) /* save new cr value */ + blr + +/* + * Double operation with no input operand + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + 
* R5 = (double*)&result + */ +#define FPD_NONE_IN(name) \ +_GLOBAL(fpd_ ## name); \ + mflr r12; \ + bl fpd_load_none; \ + mtlr r12; \ + \ + name. 0; /* call instruction */ \ + b fpd_return + +/* + * Double operation with one input operand + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)&result + * R6 = (double*)¶m1 + */ +#define FPD_ONE_IN(name) \ +_GLOBAL(fpd_ ## name); \ + mflr r12; \ + bl fpd_load_one; \ + mtlr r12; \ + \ + name. 0,0; /* call instruction */ \ + b fpd_return + +/* + * Double operation with two input operands + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)&result + * R6 = (double*)¶m1 + * R7 = (double*)¶m2 + * R8 = (double*)¶m3 + */ +#define FPD_TWO_IN(name) \ +_GLOBAL(fpd_ ## name); \ + mflr r12; \ + bl fpd_load_two; \ + mtlr r12; \ + \ + name. 0,0,1; /* call instruction */ \ + b fpd_return + +/* + * CR Double operation with two input operands + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)¶m1 + * R6 = (double*)¶m2 + * R7 = (double*)¶m3 + */ +#define FPD_TWO_IN_CR(name) \ +_GLOBAL(fpd_ ## name); \ + lfd 1,0(r6); /* load param2 */ \ + lfd 0,0(r5); /* load param1 */ \ + lfd 3,0(r3); /* load up fpscr value */ \ + MTFSF_L(3); \ + lwz r6, 0(r4); /* load cr */ \ + mtcr r6; \ + \ + name 0,0,1; /* call instruction */ \ + mfcr r6; \ + mffs 0; \ + stfd 0,0(r3); /* save new fpscr value */ \ + stw r6,0(r4); /* save new cr value */ \ + blr + +/* + * Double operation with three input operands + * + * R3 = (double*)&fpscr + * R4 = (u32*)&cr + * R5 = (double*)&result + * R6 = (double*)¶m1 + * R7 = (double*)¶m2 + * R8 = (double*)¶m3 + */ +#define FPD_THREE_IN(name) \ +_GLOBAL(fpd_ ## name); \ + mflr r12; \ + bl fpd_load_three; \ + mtlr r12; \ + \ + name. 0,0,1,2; /* call instruction */ \ + b fpd_return + +FPD_ONE_IN(fsqrts) +FPD_ONE_IN(frsqrtes) +FPD_ONE_IN(fres) +FPD_ONE_IN(frsp) +FPD_ONE_IN(fctiw) +FPD_ONE_IN(fctiwz) +FPD_ONE_IN(fsqrt) +FPD_ONE_IN(fre) +FPD_ONE_IN(frsqrte) +FPD_ONE_IN(fneg) +FPD_ONE_IN(fabs) +FPD_TWO_IN(fadds) +FPD_TWO_IN(fsubs) +FPD_TWO_IN(fdivs) +FPD_TWO_IN(fmuls) +FPD_TWO_IN_CR(fcmpu) +FPD_TWO_IN(fcpsgn) +FPD_TWO_IN(fdiv) +FPD_TWO_IN(fadd) +FPD_TWO_IN(fmul) +FPD_TWO_IN_CR(fcmpo) +FPD_TWO_IN(fsub) +FPD_THREE_IN(fmsubs) +FPD_THREE_IN(fmadds) +FPD_THREE_IN(fnmsubs) +FPD_THREE_IN(fnmadds) +FPD_THREE_IN(fsel) +FPD_THREE_IN(fmsub) +FPD_THREE_IN(fmadd) +FPD_THREE_IN(fnmsub) +FPD_THREE_IN(fnmadd) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 297fcd2ff7d0..9b8683f39e05 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -70,7 +70,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) case EMULATE_FAIL: /* XXX Deliver Program interrupt to guest. 
*/ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, - vcpu->arch.last_inst); + kvmppc_get_last_inst(vcpu)); r = RESUME_HOST; break; default: @@ -148,6 +148,10 @@ int kvm_dev_ioctl_check_extension(long ext) switch (ext) { case KVM_CAP_PPC_SEGSTATE: + case KVM_CAP_PPC_PAIRED_SINGLES: + case KVM_CAP_PPC_UNSET_IRQ: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_PPC_OSI: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -193,12 +197,17 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; vcpu = kvmppc_core_vcpu_create(kvm, id); - kvmppc_create_vcpu_debugfs(vcpu, id); + if (!IS_ERR(vcpu)) + kvmppc_create_vcpu_debugfs(vcpu, id); return vcpu; } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { + /* Make sure we're not using the vcpu anymore */ + hrtimer_cancel(&vcpu->arch.dec_timer); + tasklet_kill(&vcpu->arch.tasklet); + kvmppc_remove_vcpu_debugfs(vcpu); kvmppc_core_vcpu_free(vcpu); } @@ -278,7 +287,7 @@ static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { - ulong gpr; + u64 gpr; if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); @@ -287,6 +296,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, if (vcpu->arch.mmio_is_bigendian) { switch (run->mmio.len) { + case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; @@ -300,7 +310,43 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, } } + if (vcpu->arch.mmio_sign_extend) { + switch (run->mmio.len) { +#ifdef CONFIG_PPC64 + case 4: + gpr = (s64)(s32)gpr; + break; +#endif + case 2: + gpr = (s64)(s16)gpr; + break; + case 1: + gpr = (s64)(s8)gpr; + break; + } + } + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); + + switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) { + case KVM_REG_GPR: + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); + break; + case KVM_REG_FPR: + vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + break; +#ifdef CONFIG_PPC_BOOK3S + case KVM_REG_QPR: + vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + break; + case KVM_REG_FQPR: + vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; + break; +#endif + default: + BUG(); + } } int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, @@ -319,12 +365,25 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.mmio_is_bigendian = is_bigendian; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 0; + vcpu->arch.mmio_sign_extend = 0; return EMULATE_DO_MMIO; } +/* Same as above, but sign extends */ +int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, int is_bigendian) +{ + int r; + + r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); + vcpu->arch.mmio_sign_extend = 1; + + return r; +} + int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, - u32 val, unsigned int bytes, int is_bigendian) + u64 val, unsigned int bytes, int is_bigendian) { void *data = run->mmio.data; @@ -342,6 +401,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Store the value at the lowest bytes in 'data'. 
*/ if (is_bigendian) { switch (bytes) { + case 8: *(u64 *)data = val; break; case 4: *(u32 *)data = val; break; case 2: *(u16 *)data = val; break; case 1: *(u8 *)data = val; break; @@ -376,6 +436,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (!vcpu->arch.dcr_is_write) kvmppc_complete_dcr_load(vcpu, run); vcpu->arch.dcr_needed = 0; + } else if (vcpu->arch.osi_needed) { + u64 *gprs = run->osi.gprs; + int i; + + for (i = 0; i < 32; i++) + kvmppc_set_gpr(vcpu, i, gprs[i]); + vcpu->arch.osi_needed = 0; } kvmppc_core_deliver_interrupts(vcpu); @@ -396,7 +463,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - kvmppc_core_queue_external(vcpu, irq); + if (irq->irq == KVM_INTERRUPT_UNSET) + kvmppc_core_dequeue_external(vcpu, irq); + else + kvmppc_core_queue_external(vcpu, irq); if (waitqueue_active(&vcpu->wq)) { wake_up_interruptible(&vcpu->wq); @@ -406,6 +476,27 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) return 0; } +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + int r; + + if (cap->flags) + return -EINVAL; + + switch (cap->cap) { + case KVM_CAP_PPC_OSI: + r = 0; + vcpu->arch.osi_enabled = true; + break; + default: + r = -EINVAL; + break; + } + + return r; +} + int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { @@ -434,6 +525,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } + case KVM_ENABLE_CAP: + { + struct kvm_enable_cap cap; + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + goto out; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } default: r = -EINVAL; } diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c index 0dfba2bf7f31..d0ee554e86e4 100644 --- a/arch/powerpc/mm/mmu_context_hash32.c +++ b/arch/powerpc/mm/mmu_context_hash32.c @@ -60,11 +60,7 @@ static unsigned long next_mmu_context; static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1]; - -/* - * Set up the context for a new address space. - */ -int init_new_context(struct task_struct *t, struct mm_struct *mm) +unsigned long __init_new_context(void) { unsigned long ctx = next_mmu_context; @@ -74,19 +70,38 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm) ctx = 0; } next_mmu_context = (ctx + 1) & LAST_CONTEXT; - mm->context.id = ctx; + + return ctx; +} +EXPORT_SYMBOL_GPL(__init_new_context); + +/* + * Set up the context for a new address space. + */ +int init_new_context(struct task_struct *t, struct mm_struct *mm) +{ + mm->context.id = __init_new_context(); return 0; } /* + * Free a context ID. Make sure to call this with preempt disabled! + */ +void __destroy_context(unsigned long ctx) +{ + clear_bit(ctx, context_map); +} +EXPORT_SYMBOL_GPL(__destroy_context); + +/* * We're finished using the context for an address space. */ void destroy_context(struct mm_struct *mm) { preempt_disable(); if (mm->context.id != NO_CONTEXT) { - clear_bit(mm->context.id, context_map); + __destroy_context(mm->context.id); mm->context.id = NO_CONTEXT; } preempt_enable(); |
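For readers tracking the MMIO changes in arch/powerpc/kvm/powerpc.c above: kvmppc_handle_loads() marks a load as sign-extending, and kvmppc_complete_mmio_load() widens the raw value before writing it back to the target register. The following user-space model of that widening step is a sketch only; the mmio_sign_extend() name and the demo main() are illustrative, not part of the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Model of the sign-extension step kvmppc_complete_mmio_load() performs
 * when vcpu->arch.mmio_sign_extend is set.  The raw value has already been
 * assembled from run->mmio.data according to guest endianness; this only
 * widens it.  (In the kernel, the 4-byte case is compiled in on PPC64 only.) */
static uint64_t mmio_sign_extend(uint64_t gpr, unsigned int len)
{
        switch (len) {
        case 4:
                gpr = (int64_t)(int32_t)gpr;
                break;
        case 2:
                gpr = (int64_t)(int16_t)gpr;    /* lha/lhax/lhau emulation */
                break;
        case 1:
                gpr = (int64_t)(int8_t)gpr;
                break;
        }
        return gpr;
}

int main(void)
{
        /* a 2-byte MMIO read of 0xff80 lands in the GPR as 0xffffffffffffff80 */
        printf("0x%llx\n", (unsigned long long)mmio_sign_extend(0xff80, 2));
        return 0;
}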
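The KVM_CAP_PPC_UNSET_IRQ change above lets user space lower the external interrupt line with the same KVM_INTERRUPT ioctl that raises it: kvm_vcpu_ioctl_interrupt() dequeues when irq equals KVM_INTERRUPT_UNSET and queues for any other value. A minimal sketch of the caller side follows, assuming an already-open vcpu file descriptor; set_external_irq() and vcpu_fd are illustrative names.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Raise or lower the guest's external interrupt line on one vcpu.
 * Anything other than KVM_INTERRUPT_UNSET queues the interrupt;
 * KVM_INTERRUPT_SET is the conventional "raise" value. */
static int set_external_irq(int vcpu_fd, int raise)
{
        struct kvm_interrupt irq = {
                .irq = raise ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
        };

        return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}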
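Similarly, the new KVM_ENABLE_CAP vcpu ioctl wired up in kvm_arch_vcpu_ioctl() above is how user space opts into KVM_CAP_PPC_OSI; the handler rejects non-zero flags with -EINVAL and otherwise sets vcpu->arch.osi_enabled. A hedged sketch of the caller side, with enable_osi() and vcpu_fd as illustrative names:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Opt this vcpu into OSI hypercall handling (KVM_CAP_PPC_OSI).
 * flags (and args) must be zero for this capability. */
static int enable_osi(int vcpu_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_PPC_OSI;

        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}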
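Finally, the mmu_context_hash32.c change above splits context allocation out as __init_new_context()/__destroy_context() so KVM can grab and release MMU context IDs directly. Below is a small user-space model of that allocator; LAST_CTX, next_ctx and ctx_map are stand-in names, and the real code uses the kernel's atomic test_and_set_bit()/clear_bit() with LAST_CONTEXT as the wrap point.

#include <stdint.h>

#define LAST_CTX        32767   /* stand-in for LAST_CONTEXT; power-of-two minus one */

static unsigned long next_ctx;
static unsigned long ctx_map[LAST_CTX / (8 * sizeof(unsigned long)) + 1];

/* Non-atomic stand-in for test_and_set_bit(). */
static int test_and_set(unsigned long nr)
{
        unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));
        unsigned long *word = &ctx_map[nr / (8 * sizeof(unsigned long))];
        int was_set = (*word & mask) != 0;

        *word |= mask;
        return was_set;
}

/* Mirrors __init_new_context(): scan forward from next_ctx for a free ID,
 * wrapping at LAST_CTX, then advance next_ctx past the ID just taken. */
static unsigned long alloc_context(void)
{
        unsigned long ctx = next_ctx;

        while (test_and_set(ctx)) {
                ctx++;
                if (ctx > LAST_CTX)
                        ctx = 0;
        }
        next_ctx = (ctx + 1) & LAST_CTX;
        return ctx;
}

/* Mirrors __destroy_context(): just clear the bit for that ID. */
static void free_context(unsigned long ctx)
{
        ctx_map[ctx / (8 * sizeof(unsigned long))] &=
                ~(1UL << (ctx % (8 * sizeof(unsigned long))));
}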