author     Christian Borntraeger <borntraeger@de.ibm.com>    2011-09-20 17:07:28 +0200
committer  Heiko Carstens <heiko.carstens@de.ibm.com>        2011-09-20 17:07:34 +0200
commit     480e5926ce3bb61ec229be2dab08bdce8abb8d2e (patch)
tree       b252230da3bba55a8e4ebd747767257c781670d7 /arch/s390
parent     9d037a777695993ec7437e5f451647dea7919d4c (diff)
[S390] kvm: fix address mode switching
598841ca9919d008b520114d8a4378c4ce4e40a1 ("[S390] use gmap address
spaces for kvm guest images") changed kvm to use a separate address
space for kvm guests. This address space was switched in __vcpu_run.
In some cases (preemption, page fault) this address space switch could
be lost.

The typical symptom was a huge number of validity intercepts or
random guest addressing exceptions.

Fix this by doing the switch in sie_loop and sie_exit and by saving the
address space in the gmap structure itself. Also use the preempt
notifier.
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Acked-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
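To illustrate the race, here is a toy userspace C model (standalone and
compilable, but emphatically not kernel code: the names mirror the
kernel's, everything else is a deliberate simplification). Before the
fix, gmap_enable() loaded control register 1 with the gmap asce well
ahead of the sie instruction, and any intervening kernel entry/exit
could reload cr1 from lowcore.user_asce; after the fix, cr1 is loaded
from gmap->asce immediately before sie and restored from the user asce
in sie_exit.

#include <stdio.h>

struct lowcore { unsigned long user_asce; unsigned long gmap; };
struct gmap { unsigned long asce; };

static struct lowcore lowcore = { .user_asce = 0x1000, .gmap = 0 };
static struct gmap guest_gmap = { .asce = 0x2000 };
static unsigned long cr1;	/* stand-in for control register 1 */

/* What a preemption or page fault did between gmap_enable() and sie:
 * the scheduler/return paths reload cr1 from the lowcore user asce. */
static void kernel_entry_exit(void)
{
	cr1 = lowcore.user_asce;
}

/* Pre-patch flow: gmap_enable() loaded cr1 early in __vcpu_run. */
static void enter_sie_old(void)
{
	cr1 = guest_gmap.asce;		/* old gmap_enable() */
	kernel_entry_exit();		/* window where the switch is lost */
	printf("old: sie runs with asce %#lx, wanted %#lx\n",
	       cr1, guest_gmap.asce);
}

/* Post-patch flow: gmap_enable() only publishes the gmap pointer;
 * sie_loop loads cr1 from gmap->asce immediately before sie. */
static void enter_sie_new(void)
{
	lowcore.gmap = (unsigned long) &guest_gmap;	/* new gmap_enable() */
	kernel_entry_exit();				/* now harmless */
	if (lowcore.gmap)				/* sie_loop */
		cr1 = ((struct gmap *) lowcore.gmap)->asce;
	printf("new: sie runs with asce %#lx, wanted %#lx\n",
	       cr1, guest_gmap.asce);
	cr1 = lowcore.user_asce;			/* sie_exit */
}

int main(void)
{
	enter_sie_old();
	enter_sie_new();
	return 0;
}

In this model the old path prints the user asce (0x1000) where the gmap
asce (0x2000) was wanted, while the new path always enters "sie" with
the correct value: keeping the cr1 load adjacent to the sie instruction
makes the switch effectively atomic with guest entry, so the preempt
notifier only has to keep the lowcore gmap pointer current.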
Diffstat (limited to 'arch/s390')
 arch/s390/include/asm/pgtable.h |  2 ++
 arch/s390/kernel/asm-offsets.c  |  3 +++
 arch/s390/kernel/entry64.S      |  6 ++++++
 arch/s390/kvm/kvm-s390.c        |  4 ++--
 arch/s390/mm/pgtable.c          | 10 ++--------
 5 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 519eb5f187ef..c0cb794bb365 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
 /**
  * struct gmap_struct - guest address space
  * @mm: pointer to the parent mm_struct
  * @table: pointer to the page directory
+ * @asce: address space control element for gmap page table
  * @crst_list: list of all crst tables used in the guest address space
  */
 struct gmap {
 	struct list_head list;
 	struct mm_struct *mm;
 	unsigned long *table;
+	unsigned long asce;
 	struct list_head crst_list;
 };
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 532fd4322156..2b45591e1582 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
 #include <linux/sched.h>
 #include <asm/vdso.h>
 #include <asm/sigp.h>
+#include <asm/pgtable.h>
 
 /*
  * Make sure that the compiler is new enough. We want a compiler that
@@ -126,6 +127,7 @@ int main(void)
 	DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
 	DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
 	DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
+	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
@@ -151,6 +153,7 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
+	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
 }
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 5f729d627cef..713da0760538 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1076,6 +1076,11 @@ sie_loop:
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 	tm	__TI_flags+7(%r14),_TIF_EXIT_SIE
 	jnz	sie_exit
+	lg	%r14,__LC_GMAP			# get gmap pointer
+	ltgr	%r14,%r14
+	jz	sie_gmap
+	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
+sie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
 	SPP	__SF_EMPTY(%r15)		# set guest id
 	sie	0(%r14)
@@ -1083,6 +1088,7 @@ sie_done:
 	SPP	__LC_CMF_HPP			# set host id
 	lg	%r14,__LC_THREAD_INFO		# pointer thread_info struct
 sie_exit:
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
 	ni	__TI_flags+6(%r14),255-(_TIF_SIE>>8)
 	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
 	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f17296e4fc89..b4eced131e5c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -263,10 +263,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
+	gmap_enable(vcpu->arch.gmap);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
 	restore_fp_regs(&vcpu->arch.host_fpregs);
@@ -461,7 +463,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	local_irq_disable();
 	kvm_guest_enter();
 	local_irq_enable();
-	gmap_enable(vcpu->arch.gmap);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
 		   atomic_read(&vcpu->arch.sie_block->cpuflags));
 	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -470,7 +471,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
 	}
 	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
 		   vcpu->arch.sie_block->icptcode);
-	gmap_disable(vcpu->arch.gmap);
 	local_irq_disable();
 	kvm_guest_exit();
 	local_irq_enable();
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4d1f2bce87b3..f69ff3c13496 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_phys(page);
 	crst_table_init(table, _REGION1_ENTRY_EMPTY);
 	gmap->table = table;
+	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
+		     _ASCE_USER_BITS | __pa(table);
 	list_add(&gmap->list, &mm->context.gmap_list);
 	return gmap;
 
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free);
  */
 void gmap_enable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
-				 _ASCE_USER_BITS | __pa(gmap->table);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = (unsigned long) gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_enable);
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable);
  */
 void gmap_disable(struct gmap *gmap)
 {
-	/* Load primary space page table origin. */
-	S390_lowcore.user_asce =
-		gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
-	asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
 	S390_lowcore.gmap = 0UL;
 }
 EXPORT_SYMBOL_GPL(gmap_disable);
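For reference, the asce value that gmap_alloc() now caches is just the
physical origin of the region-first table combined with type, length
and user bits. A standalone sketch of that composition follows; the bit
values and macro names here are illustrative assumptions modelled on
arch/s390/include/asm/pgtable.h of that era, not authoritative copies.

#include <stdio.h>
#include <stdint.h>

/* Assumed ASCE format bits (illustrative; see the s390 pgtable.h
 * header for the authoritative _ASCE_* definitions). */
#define ASCE_TYPE_REGION1  0x0cUL	/* designation type: region-first table */
#define ASCE_TABLE_LENGTH  0x03UL	/* full 4-page table length */
#define ASCE_PRIVATE_SPACE 0x100UL
#define ASCE_ALT_EVENT     0x80UL
#define ASCE_SPACE_SWITCH  0x40UL
#define ASCE_USER_BITS     (ASCE_SPACE_SWITCH | ASCE_PRIVATE_SPACE | \
			    ASCE_ALT_EVENT)

int main(void)
{
	/* hypothetical 4KB-aligned physical origin of the gmap's
	 * region-first table (stands in for __pa(table)) */
	uint64_t table = 0x12345000UL;

	/* same composition as the new code in gmap_alloc(): the low
	 * bits carry control flags, the high bits the table origin */
	uint64_t asce = ASCE_TYPE_REGION1 | ASCE_TABLE_LENGTH |
			ASCE_USER_BITS | table;

	printf("gmap asce = 0x%016llx\n", (unsigned long long) asce);
	return 0;
}

Precomputing this value once at allocation time is what lets the
entry64.S path load it into control register 1 with a single lctlg from
__GMAP_ASCE, instead of rebuilding it in C on every guest entry.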