From 30b072ce0356e8b141f4ca6da7220486fa3641d9 Mon Sep 17 00:00:00 2001
From: Alexis Dambricourt
Date: Mon, 4 Jul 2016 21:05:15 +0200
Subject: KVM: MTRR: fix kvm_mtrr_check_gfn_range_consistency page fault

The following #PF may occur:

[ 1403.317041] BUG: unable to handle kernel paging request at 0000000200000068
[ 1403.317045] IP: [] __mtrr_lookup_var_next+0x10/0xa0 [kvm]
[ 1403.317123] Call Trace:
[ 1403.317134] [] ? kvm_mtrr_check_gfn_range_consistency+0xc5/0x120 [kvm]
[ 1403.317143] [] ? tdp_page_fault+0x9f/0x2c0 [kvm]
[ 1403.317152] [] ? kvm_set_msr_common+0x858/0xc00 [kvm]
[ 1403.317161] [] ? x86_emulate_insn+0x273/0xd30 [kvm]
[ 1403.317171] [] ? kvm_cpuid+0x34/0x190 [kvm]
[ 1403.317180] [] ? kvm_mmu_page_fault+0x59/0xe0 [kvm]
[ 1403.317183] [] ? vmx_handle_exit+0x1d1/0x14a0 [kvm_intel]
[ 1403.317185] [] ? atomic_switch_perf_msrs+0x6f/0xa0 [kvm_intel]
[ 1403.317187] [] ? vmx_vcpu_run+0x2ad/0x420 [kvm_intel]
[ 1403.317196] [] ? kvm_arch_vcpu_ioctl_run+0x622/0x1550 [kvm]
[ 1403.317204] [] ? kvm_arch_vcpu_load+0x59/0x210 [kvm]
[ 1403.317206] [] ? __kernel_fpu_end+0x35/0x100
[ 1403.317213] [] ? kvm_vcpu_ioctl+0x316/0x5d0 [kvm]
[ 1403.317215] [] ? do_sigtimedwait+0xd5/0x220
[ 1403.317217] [] ? do_vfs_ioctl+0x9d/0x5c0
[ 1403.317224] [] ? kvm_on_user_return+0x3e/0x70 [kvm]
[ 1403.317225] [] ? SyS_ioctl+0x74/0x80
[ 1403.317227] [] ? entry_SYSCALL_64_fastpath+0x1e/0xa8
[ 1403.317242] RIP [] __mtrr_lookup_var_next+0x10/0xa0 [kvm]

At mtrr_lookup_fixed_next(), when the condition
'if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))' becomes
true, mtrr_lookup_var_start() is called with iter->range holding garbage
values left over from the fixed MTRR union field.  Because that pointer
is non-NULL, list_prepare_entry() does not perform the list_entry()
initialization, keeping a garbage pointer in iter->range which is then
dereferenced in the following __mtrr_lookup_var_next() call.

Fixes: f571c0973e4b8c888e049b6842e4b4f93b5c609c
Signed-off-by: Alexis Dambricourt
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mtrr.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index c146f3c262c3..0149ac59c273 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter)
 	iter->fixed = false;
 	iter->start_max = iter->start;
 
+	iter->range = NULL;
 	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
 
 	__mtrr_lookup_var_next(iter);
--
cgit v1.2.3
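To see why the one-line fix is sufficient, here is a minimal userspace sketch
of the semantics involved (illustrative only; the macro and struct are
simplified stand-ins for the kernel's list.h). list_prepare_entry() returns a
non-NULL cursor untouched, so only a NULL iter->range gets safely re-pointed
at the list head:

    #include <stdio.h>

    struct node { struct node *next; };

    /* simplified stand-in for the kernel macro:
     * a non-NULL cursor -- even garbage -- is returned untouched */
    #define list_prepare_entry(pos, head) ((pos) ? (pos) : (head))

    int main(void)
    {
        struct node head = { &head };           /* empty circular list */
        struct node *range;

        range = (struct node *)0x200000000;     /* garbage from the union field */
        printf("unfixed: %p\n", (void *)list_prepare_entry(range, &head));
        /* the iterator would then dereference this pointer and fault,
         * much like the 0000000200000068 oops above */

        range = NULL;                           /* the fix: iter->range = NULL */
        printf("fixed:   %p\n", (void *)list_prepare_entry(range, &head));
        return 0;
    }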
From 9770404a0061ec46dec6e15c4b07731ce2e2d7bb Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 6 Jul 2016 14:57:24 +0200
Subject: KVM: SVM: fix trashing of MSR_TSC_AUX

I don't know what I was thinking when I wrote commit 46896c73c1a4 ("KVM:
svm: add support for RDTSCP", 2015-11-12); I missed write_rdtscp_aux,
which obviously uses MSR_TSC_AUX.  Therefore we do need to save/restore
MSR_TSC_AUX in svm_vcpu_run.

Cc: stable@vger.kernel.org
Cc: Borislav Petkov
Fixes: 46896c73c1a4 ("KVM: svm: add support for RDTSCP")
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 16ef31b87452..f931d3e33c7f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -41,6 +41,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "trace.h"
@@ -1530,9 +1531,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
 		}
 	}
-	/* This assumes that the kernel never uses MSR_TSC_AUX */
-	if (static_cpu_has(X86_FEATURE_RDTSCP))
-		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
 	avic_vcpu_load(vcpu, cpu);
 }
@@ -4474,6 +4472,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
+	if (static_cpu_has(X86_FEATURE_RDTSCP))
+		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
 	local_irq_enable();
 
@@ -4550,6 +4550,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	if (static_cpu_has(X86_FEATURE_RDTSCP))
+		wrmsrl(MSR_TSC_AUX, __getcpu());
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
--
cgit v1.2.3
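For context on why the host value matters: RDTSCP hands back IA32_TSC_AUX in
ECX, and Linux stores (node << 12) | cpu there so getcpu() can run without a
syscall -- that is what a leftover guest value corrupts. A small userspace
probe (assumes x86-64 Linux and a compiler providing __rdtscp):

    #include <stdio.h>
    #include <x86intrin.h>

    int main(void)
    {
        unsigned int aux;
        unsigned long long tsc = __rdtscp(&aux);    /* aux <- IA32_TSC_AUX */

        printf("tsc=%llu cpu=%u node=%u\n",
               tsc, aux & 0xfff, aux >> 12);        /* Linux TSC_AUX encoding */
        return 0;
    }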
From 4e59516a12a6ef6dcb660cb3a3f70c64bd60cfec Mon Sep 17 00:00:00 2001
From: Peter Feiner
Date: Thu, 7 Jul 2016 14:49:58 -0700
Subject: kvm: vmx: ensure VMCS is current while enabling PML

Between loading the new VMCS and enabling PML, the CPU was unpinned.
If the vCPU thread were migrated to another CPU in the interim (e.g.,
due to preemption or a sleeping alloc_page()), then the VMWRITEs to
enable PML would target the wrong VMCS -- or no VMCS at all:

[ 2087.266950] vmwrite error: reg 200e value 3fe1d52000 (err -506126336)
[ 2087.267062] vmwrite error: reg 812 value 1ff (err 511)
[ 2087.267125] vmwrite error: reg 401e value 12229c00 (err 304258048)

This patch ensures that the VMCS remains current while enabling PML by
doing the VMWRITEs while the CPU is pinned.  Allocation of the PML buffer
is hoisted out of the critical section.

Signed-off-by: Peter Feiner
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c | 56 +++++++++++++++++++++++-------------------------------
 1 file changed, 24 insertions(+), 32 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f271276..e34965b37a88 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4979,6 +4979,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	if (vmx_xsaves_supported())
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+	if (enable_pml) {
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	return 0;
 }
 
@@ -7937,22 +7943,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
-	struct page *pml_pg;
-
-	pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!pml_pg)
-		return -ENOMEM;
-
-	vmx->pml_pg = pml_pg;
-
-	vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
-	return 0;
-}
-
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
 	if (vmx->pml_pg) {
@@ -8885,14 +8875,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
+	err = -ENOMEM;
+
+	/*
+	 * If PML is turned on, failure on enabling PML just results in failure
+	 * of creating the vcpu, therefore we can simplify PML logic (by
+	 * avoiding dealing with cases, such as enabling PML partially on vcpus
+	 * for the guest, etc.
+	 */
+	if (enable_pml) {
+		vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vmx->pml_pg)
+			goto uninit_vcpu;
+	}
+
 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
 		     > PAGE_SIZE);
 
-	err = -ENOMEM;
-	if (!vmx->guest_msrs) {
-		goto uninit_vcpu;
-	}
+	if (!vmx->guest_msrs)
+		goto free_pml;
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8936,18 +8938,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
 
-	/*
-	 * If PML is turned on, failure on enabling PML just results in failure
-	 * of creating the vcpu, therefore we can simplify PML logic (by
-	 * avoiding dealing with cases, such as enabling PML partially on vcpus
-	 * for the guest, etc.
-	 */
-	if (enable_pml) {
-		err = vmx_create_pml_buffer(vmx);
-		if (err)
-			goto free_vmcs;
-	}
-
 	return &vmx->vcpu;
 
 free_vmcs:
@@ -8955,6 +8945,8 @@ free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
 	kfree(vmx->guest_msrs);
+free_pml:
+	vmx_destroy_pml_buffer(vmx);
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
--
cgit v1.2.3
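A userspace analogue of the race this patch closes (illustrative only, not
kvm code): the CPU a thread runs on can change across a sleeping allocation,
which is exactly why VMWRITEs issued after alloc_page() could land on the
wrong -- or no -- current VMCS:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        int before = sched_getcpu();        /* think: VMCS loaded on this CPU */
        void *pml_buf = malloc(1 << 20);    /* may sleep; thread may migrate */
        usleep(10000);                      /* widen the window to observe it */
        int after = sched_getcpu();         /* think: VMWRITEs happen here */

        printf("cpu before=%d after=%d%s\n", before, after,
               before != after ? " (migrated)" : "");
        free(pml_buf);
        return 0;
    }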
From 2f1fe81123f59271bddda673b60116bde9660385 Mon Sep 17 00:00:00 2001
From: Jim Mattson
Date: Fri, 8 Jul 2016 15:36:06 -0700
Subject: KVM: nVMX: Fix memory corruption when using VMCS shadowing

When freeing the nested resources of a vcpu, there is an assumption that
the vcpu's vmcs01 is the current VMCS on the CPU that executes
nested_release_vmcs12().  If this assumption is violated, the vcpu's
vmcs01 may be made active on multiple CPUs at the same time, in violation
of Intel's specification.  Moreover, since the vcpu's vmcs01 is not
VMCLEARed on every CPU on which it is active, it can linger in a CPU's
VMCS cache after it has been freed and potentially repurposed.  Subsequent
eviction from the CPU's VMCS cache on a capacity miss can result in memory
corruption.

It is not sufficient for vmx_free_vcpu() to call vmx_load_vmcs01().  If
the vcpu in question was last loaded on a different CPU, it must be
migrated to the current CPU before calling vmx_load_vmcs01().

Signed-off-by: Jim Mattson
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/vmx.c  | 19 +++++++++++++++++--
 virt/kvm/kvm_main.c |  2 ++
 2 files changed, 19 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e34965b37a88..5bf203b58f58 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8844,6 +8844,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
 	put_cpu();
 }
 
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int r;
+
+	r = vcpu_load(vcpu);
+	BUG_ON(r);
+	vmx_load_vmcs01(vcpu);
+	free_nested(vmx);
+	vcpu_put(vcpu);
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -8852,8 +8868,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	vmx_destroy_pml_buffer(vmx);
 	free_vpid(vmx->vpid);
 	leave_guest_mode(vcpu);
-	vmx_load_vmcs01(vcpu);
-	free_nested(vmx);
+	vmx_free_vcpu_nested(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 48bd520fc702..dd25346ec356 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -148,6 +148,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
 	put_cpu();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vcpu_load);
 
 void vcpu_put(struct kvm_vcpu *vcpu)
 {
@@ -157,6 +158,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 	preempt_enable();
 	mutex_unlock(&vcpu->mutex);
 }
+EXPORT_SYMBOL_GPL(vcpu_put);
 
 static void ack_flush(void *_completed)
 {
--
cgit v1.2.3
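The corruption mode described above can be modelled as a write-back cache
flushing into a freed and reused buffer (purely illustrative; the "page" and
"cache" below are stand-ins, not kvm structures):

    #include <stdio.h>
    #include <string.h>

    static char page[64];                   /* one reusable "page" of memory */
    static char *alloc_page_(void) { return page; }

    static char *cached_vmcs;               /* a CPU's internal VMCS-cache entry */

    int main(void)
    {
        char *vmcs01 = alloc_page_();
        cached_vmcs = vmcs01;               /* CPU holds a dirty cached copy */

        /* vmcs01 freed without a VMCLEAR on the caching CPU, page reused: */
        char *reused = alloc_page_();
        strcpy(reused, "new owner's data");

        memcpy(cached_vmcs, "stale VMCS", 10);  /* capacity-miss eviction */
        printf("reused page now holds: \"%s\"\n", reused);
        return 0;
    }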
From 2b23c3a6e3eb2fba77eb35fdfa3d71a9aa3f33b7 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Thu, 14 Jul 2016 19:08:18 +0200
Subject: KVM: SVM: do not set MSR_TSC_AUX on 32-bit builds

This is unnecessary---and besides, __getcpu() is not even available on
32-bit builds.

Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/svm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f931d3e33c7f..05c08b69cee7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4550,9 +4550,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+#ifdef CONFIG_X86_64
 	if (static_cpu_has(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, __getcpu());
-#ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
 	loadsegment(fs, svm->host.fs);
--
cgit v1.2.3

From 6a907cd0ad31fce30b7a7416576410a7de7c8933 Mon Sep 17 00:00:00 2001
From: Radim Krčmář
Date: Fri, 15 Jul 2016 14:38:36 +0200
Subject: Revert "KVM: SVM: fix trashing of MSR_TSC_AUX"

This reverts commit 9770404a0061ec46dec6e15c4b07731ce2e2d7bb.

The reverted patch is not needed, as only userspace uses RDTSCP and
MSR_TSC_AUX is in host_save_user_msrs[] and therefore properly saved in
svm_vcpu_load() and restored in svm_vcpu_put() before every switch to
userspace.

The reverted patch also did not allow the kernel to use RDTSCP in the
future, because it missed the trashing in svm_set_msr() and the restore
sat under a 64-bit ifdef.

This reverts commit 2b23c3a6e3eb2fba77eb35fdfa3d71a9aa3f33b7.

2b23c3a6e3eb ("KVM: SVM: do not set MSR_TSC_AUX on 32-bit builds") is a
build fix for 9770404a0061 and reverting them separately would only break
more bisections.

Cc: stable@vger.kernel.org
---
 arch/x86/kvm/svm.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 05c08b69cee7..16ef31b87452 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -41,7 +41,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 #include "trace.h"
@@ -1531,6 +1530,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
 		}
 	}
+	/* This assumes that the kernel never uses MSR_TSC_AUX */
+	if (static_cpu_has(X86_FEATURE_RDTSCP))
+		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
 	avic_vcpu_load(vcpu, cpu);
 }
@@ -4472,8 +4474,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
-	if (static_cpu_has(X86_FEATURE_RDTSCP))
-		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 
 	local_irq_enable();
 
@@ -4551,8 +4551,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		);
 
 #ifdef CONFIG_X86_64
-	if (static_cpu_has(X86_FEATURE_RDTSCP))
-		wrmsrl(MSR_TSC_AUX, __getcpu());
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
 	loadsegment(fs, svm->host.fs);
--
cgit v1.2.3
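The contract the revert relies on fits in a few lines (names are illustrative,
not the real kvm symbols): the host value is captured at vcpu_load() and
written back at vcpu_put(), and vcpu_put() always runs before control returns
to userspace, so userspace never sees the guest's value even though
svm_vcpu_run() leaves it in the MSR:

    #include <stdio.h>

    static unsigned long msr_tsc_aux;   /* stand-in for the hardware MSR */

    struct vcpu { unsigned long host_tsc_aux, guest_tsc_aux; };

    static void vcpu_load(struct vcpu *v) { v->host_tsc_aux = msr_tsc_aux; }
    static void vcpu_run(struct vcpu *v)  { msr_tsc_aux = v->guest_tsc_aux; }
    static void vcpu_put(struct vcpu *v)  { msr_tsc_aux = v->host_tsc_aux; }

    int main(void)
    {
        struct vcpu v = { .guest_tsc_aux = 0xdead };

        msr_tsc_aux = (1UL << 12) | 3;  /* host: node 1, cpu 3 */
        vcpu_load(&v);
        vcpu_run(&v);                   /* guest value left in the MSR */
        vcpu_put(&v);                   /* host value restored first */
        printf("userspace sees tsc_aux = %#lx\n", msr_tsc_aux);
        return 0;
    }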
From b244c9fc251e14a083a1cbf04bef10bd99303a76 Mon Sep 17 00:00:00 2001
From: "Cao, Lei"
Date: Fri, 15 Jul 2016 13:54:04 +0000
Subject: KVM: VMX: handle PML full VMEXIT that occurs during event delivery
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

With PML enabled, the guest will shut down if a PML full VMEXIT occurs
during event delivery.  According to Intel SDM 27.2.3, a PML full VMEXIT
can occur when an event is being delivered through the IDT, so KVM should
not exit to user space with an error.  Instead, it should let
EXIT_REASON_PML_FULL go through and the event will be re-injected on the
next VMENTRY.

Signed-off-by: Lei Cao
Cc: stable@vger.kernel.org
Fixes: 843e4330573c ("KVM: VMX: Add PML support in VMX")
[Shortened the summary and Cc'd stable.]
Signed-off-by: Radim Krčmář
---
 arch/x86/kvm/vmx.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5bf203b58f58..7758680db20b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8214,6 +8214,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
 	    (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
 	    exit_reason != EXIT_REASON_EPT_VIOLATION &&
+	    exit_reason != EXIT_REASON_PML_FULL &&
 	    exit_reason != EXIT_REASON_TASK_SWITCH)) {
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
--
cgit v1.2.3

From 4c47eb1c18c38b755eb4894a6ca38f834de3ec23 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 19 Jul 2016 13:56:54 +0100
Subject: arm64: KVM: VHE: Context switch MDSCR_EL1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The kprobe enablement work has uncovered that changes made by a guest
to MDSCR_EL1 were propagated to the host when VHE was enabled, leading
to unexpected exceptions being delivered.

Moving this register to the list of registers that are always
context-switched fixes the issue.

Fixes: 9c6c35683286 ("arm64: KVM: VHE: Split save/restore of registers shared between guest and host")
Cc: stable@vger.kernel.org #4.6
Reported-by: Tirumalesh Chalamarla
Tested-by: Tirumalesh Chalamarla
Signed-off-by: Marc Zyngier
Signed-off-by: Radim Krčmář
---
 arch/arm64/kvm/hyp/sysreg-sr.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 0f7c40eb3f53..934137647837 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host must save tpidr*_el[01], actlr_el1, sp0, pc, pstate, and
- * guest must save everything.
+ * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
+ * pstate, and guest must save everything.
  */
 
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -37,6 +37,7 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
 	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
 	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
+	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
 	ctxt->gp_regs.regs.sp		= read_sysreg(sp_el0);
 	ctxt->gp_regs.regs.pc		= read_sysreg_el2(elr);
 	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(spsr);
@@ -61,7 +62,6 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
 	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(amair);
 	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(cntkctl);
 	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
-	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
 
 	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
 	ctxt->gp_regs.elr_el1		= read_sysreg_el1(elr);
@@ -90,6 +90,7 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
 	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	tpidr_el0);
 	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	tpidr_el1);
+	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	mdscr_el1);
 	write_sysreg(ctxt->gp_regs.regs.sp,	sp_el0);
 	write_sysreg_el2(ctxt->gp_regs.regs.pc,	elr);
 	write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
@@ -114,7 +115,6 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
 	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
 	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
-	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
 
 	write_sysreg(ctxt->gp_regs.sp_el1,	sp_el1);
 	write_sysreg_el1(ctxt->gp_regs.elr_el1,	elr);
--
cgit v1.2.3
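A compact model of the VHE leak fixed above (illustrative C, not the actual
hyp code): under VHE the host runs in the same EL1 register context as the
guest, so a register kept only in the lazy, non-VHE save/restore path retains
whatever the guest last wrote; placing it in the always-switched common set
restores the host value on every world switch:

    #include <stdio.h>

    static unsigned long hw_mdscr_el1;  /* the one shared hardware register */

    struct ctx { unsigned long mdscr_el1; };

    /* after the fix, MDSCR_EL1 is in the always-switched common set */
    static void save_common(struct ctx *c)    { c->mdscr_el1 = hw_mdscr_el1; }
    static void restore_common(struct ctx *c) { hw_mdscr_el1 = c->mdscr_el1; }

    int main(void)
    {
        struct ctx host = { 0 }, guest = { 0 };

        save_common(&host);             /* world switch: host -> guest */
        restore_common(&guest);
        hw_mdscr_el1 |= 1UL << 15;      /* guest sets MDSCR_EL1.MDE */
        save_common(&guest);            /* world switch: guest -> host */
        restore_common(&host);

        printf("host MDSCR_EL1 = %#lx\n", hw_mdscr_el1);  /* 0: no leak */
        return 0;
    }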