Diffstat (limited to 'arch/x86/boot/compressed/head_64.S')
-rw-r--r--  arch/x86/boot/compressed/head_64.S  125
1 file changed, 96 insertions(+), 29 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1f1f6c8139b3..f7bacc4c1494 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -53,19 +53,7 @@ SYM_FUNC_START(startup_32)
* all need to be under the 4G limit.
*/
cld
- /*
- * Test KEEP_SEGMENTS flag to see if the bootloader is asking
- * us to not reload segments
- */
- testb $KEEP_SEGMENTS, BP_loadflags(%esi)
- jnz 1f
-
cli
- movl $(__BOOT_DS), %eax
- movl %eax, %ds
- movl %eax, %es
- movl %eax, %ss
-1:
/*
* Calculate the delta between where we were compiled to run
@@ -80,10 +68,21 @@ SYM_FUNC_START(startup_32)
1: popl %ebp
subl $1b, %ebp
+ /* Load new GDT with the 64bit segments using 32bit descriptor */
+ leal gdt(%ebp), %eax
+ movl %eax, 2(%eax)
+ lgdt (%eax)
+
+ /* Load segment registers with our descriptors */
+ movl $__BOOT_DS, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movl %eax, %ss
+
/* setup a stack and make sure cpu supports long mode. */
- movl $boot_stack_end, %eax
- addl %ebp, %eax
- movl %eax, %esp
+ leal boot_stack_end(%ebp), %esp
call verify_cpu
testl %eax, %eax
@@ -120,10 +119,6 @@ SYM_FUNC_START(startup_32)
* Prepare for entering 64 bit mode
*/
- /* Load new GDT with the 64bit segments using 32bit descriptor */
- addl %ebp, gdt+2(%ebp)
- lgdt gdt(%ebp)
-
/* Enable PAE mode */
movl %cr4, %eax
orl $X86_CR4_PAE, %eax
@@ -212,8 +207,13 @@ SYM_FUNC_START(startup_32)
cmp $0, %edi
jz 1f
leal efi64_stub_entry(%ebp), %eax
- movl %esi, %edx
movl efi32_boot_args+4(%ebp), %esi
+ movl efi32_boot_args+8(%ebp), %edx // saved bootparams pointer
+ cmpl $0, %edx
+ jnz 1f
+ leal efi_pe_entry(%ebp), %eax
+ movl %edi, %ecx // MS calling convention
+ movl %esi, %edx
1:
#endif
pushl %eax
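
The new branch above selects the 64-bit entry point by checking whether the 32-bit entry path saved a boot_params pointer: the EFI handover protocol provides one, while the new efi32_pe_entry path does not, so that case falls through to efi_pe_entry with its arguments in the Microsoft x64 registers, as the "MS calling convention" comment notes. A rough C rendering of that dispatch, with simplified stand-in prototypes for the real stub entry points:

	/* Simplified prototypes for the sketch; the real entry points are
	 * efi64_stub_entry in head_64.S (arguments in RDI/RSI/RDX) and
	 * efi_pe_entry in the EFI stub (MS x64 ABI: RCX, RDX). */
	void efi64_stub_entry(void *handle, void *sys_table, void *boot_params);
	void efi_pe_entry(void *handle, void *sys_table);

	static void dispatch_sketch(void *handle, void *sys_table, void *boot_params)
	{
		if (boot_params)	/* entered via the EFI handover protocol */
			efi64_stub_entry(handle, sys_table, boot_params);
		else			/* entered via efi32_pe_entry: no boot_params yet */
			efi_pe_entry(handle, sys_table);
	}
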
@@ -238,11 +238,17 @@ SYM_FUNC_START(efi32_stub_entry)
1: pop %ebp
subl $1b, %ebp
+ movl %esi, efi32_boot_args+8(%ebp)
+SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
movl %ecx, efi32_boot_args(%ebp)
movl %edx, efi32_boot_args+4(%ebp)
- sgdtl efi32_boot_gdt(%ebp)
movb $0, efi_is64(%ebp)
+ /* Save firmware GDTR and code/data selectors */
+ sgdtl efi32_boot_gdt(%ebp)
+ movw %cs, efi32_boot_cs(%ebp)
+ movw %ds, efi32_boot_ds(%ebp)
+
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
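
The two 32-bit entry paths now share a three-slot save area: efi32_stub_entry (the handover protocol entry) records all three firmware arguments, while efi32_pe_entry later enters at efi32_pe_stub_entry and leaves the boot_params slot at zero, which is how startup_32 tells the two cases apart. A hedged C picture of that layout, mirroring the three .long slots declared for efi32_boot_args further down (the struct name is invented for the sketch):

	#include <stdint.h>

	struct efi32_boot_args_sketch {
		uint32_t image_handle;	/* %ecx on entry */
		uint32_t sys_table;	/* %edx on entry */
		uint32_t boot_params;	/* %esi on entry; left 0 when entering
					 * through efi32_pe_stub_entry */
	};
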
@@ -266,6 +272,9 @@ SYM_CODE_START(startup_64)
* and command line.
*/
+ cld
+ cli
+
/* Setup data segments. */
xorl %eax, %eax
movl %eax, %ds
@@ -354,9 +363,9 @@ SYM_CODE_START(startup_64)
*/
/* Make sure we have GDT with 32-bit code segment */
- leaq gdt(%rip), %rax
- movq %rax, gdt64+2(%rip)
- lgdt gdt64(%rip)
+ leaq gdt64(%rip), %rax
+ addq %rax, 2(%rax)
+ lgdt (%rax)
/*
* paging_prepare() sets up the trampoline and checks if we need to
@@ -441,6 +450,16 @@ trampoline_return:
cld
popq %rsi
+ /*
+ * The GDT may get overwritten either during the copy we just did or
+ * during extract_kernel below. To avoid any issues, repoint the GDTR
+ * to the new copy of the GDT.
+ */
+ leaq gdt64(%rbx), %rax
+ subq %rbp, 2(%rax)
+ addq %rbx, 2(%rax)
+ lgdt (%rax)
+
/*
* Jump to the relocated address.
*/
@@ -613,13 +632,13 @@ SYM_FUNC_END(.Lno_longmode)
.data
SYM_DATA_START_LOCAL(gdt64)
- .word gdt_end - gdt
- .quad 0
+ .word gdt_end - gdt - 1
+ .quad gdt - gdt64
SYM_DATA_END(gdt64)
.balign 8
SYM_DATA_START_LOCAL(gdt)
- .word gdt_end - gdt
- .long gdt
+ .word gdt_end - gdt - 1
+ .long 0
.word 0
.quad 0x00cf9a000000ffff /* __KERNEL32_CS */
.quad 0x00af9a000000ffff /* __KERNEL_CS */
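
The gdt64 change above is the core of the relocation fix: the operand of lgdt is a pseudo-descriptor holding a 16-bit limit (the table size minus one, hence the added "- 1") followed by the base address, and the base is now stored as a link-time offset (gdt - gdt64 here, 0 in the 32-bit copy) that the startup code converts to a runtime address before loading it, as in the "addq %rax, 2(%rax); lgdt (%rax)" sequence earlier in the diff. A minimal C sketch of that layout and fix-up, using an invented struct name:

	#include <stdint.h>

	/* 64-bit lgdt operand: 2-byte limit + 8-byte base, no padding. */
	struct gdt_desc64_sketch {
		uint16_t limit;		/* gdt_end - gdt - 1 */
		uint64_t base;		/* at link time: gdt - gdt64; fixed up below */
	} __attribute__((packed));

	/* Mirrors "addq %rax, 2(%rax)": turn the self-relative base into the
	 * runtime address of the GDT before issuing lgdt. */
	static inline void fixup_gdt_desc(struct gdt_desc64_sketch *desc)
	{
		desc->base += (uintptr_t)desc;
	}
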
@@ -629,8 +648,56 @@ SYM_DATA_START_LOCAL(gdt)
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
#ifdef CONFIG_EFI_MIXED
-SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0)
+SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
+
+#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
+#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
+#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
+
+ .text
+ .code32
+SYM_FUNC_START(efi32_pe_entry)
+ pushl %ebp
+
+ call verify_cpu // check for long mode support
+ testl %eax, %eax
+ movl $0x80000003, %eax // EFI_UNSUPPORTED
+ jnz 3f
+
+ call 1f
+1: pop %ebp
+ subl $1b, %ebp
+
+ /* Get the loaded image protocol pointer from the image handle */
+ subl $12, %esp // space for the loaded image pointer
+ pushl %esp // pass its address
+ leal 4f(%ebp), %eax
+ pushl %eax // pass the GUID address
+ pushl 28(%esp) // pass the image handle
+
+ movl 36(%esp), %eax // sys_table
+ movl ST32_boottime(%eax), %eax // sys_table->boottime
+ call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
+ cmp $0, %eax
+ jnz 2f
+
+ movl 32(%esp), %ecx // image_handle
+ movl 36(%esp), %edx // sys_table
+ movl 12(%esp), %esi // loaded_image
+ movl LI32_image_base(%esi), %esi // loaded_image->image_base
+ jmp efi32_pe_stub_entry
+
+2: addl $24, %esp
+3: popl %ebp
+ ret
+SYM_FUNC_END(efi32_pe_entry)
+
+ .section ".rodata"
+ /* EFI loaded image protocol GUID */
+4: .long 0x5B1B31A1
+ .word 0x9562, 0x11d2
+ .byte 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B
#endif
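
efi32_pe_entry above hand-rolls a single boot services call: it looks up the loaded image protocol for its own image handle and reads image_base from it, using the hardcoded 32-bit struct offsets defined as ST32_boottime, BS32_handle_protocol and LI32_image_base, plus the loaded image protocol GUID emitted in .rodata. A hedged C sketch of the same sequence (32-bit build assumed; the type and function names are invented for the sketch, not the kernel's efi_*_32_t definitions, and the verify_cpu check is omitted):

	#include <stdint.h>

	typedef uint32_t efi_status32_t;
	typedef efi_status32_t (*handle_protocol32_t)(uint32_t handle,
						      const void *guid,
						      void **interface);

	static efi_status32_t efi32_pe_entry_sketch(uint32_t image_handle,
						    uint32_t sys_table,
						    uint32_t *image_base)
	{
		/* Same bytes as the .long/.word/.byte GUID above, i.e. the
		 * little-endian encoding of 5b1b31a1-9562-11d2-8e3f-00a0c969723b. */
		static const uint8_t loaded_image_guid[16] = {
			0xA1, 0x31, 0x1B, 0x5B, 0x62, 0x95, 0xD2, 0x11,
			0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B
		};
		void *loaded_image = 0;
		efi_status32_t status;

		/* sys_table->boottime (ST32_boottime == 60) */
		uint32_t boottime = *(const uint32_t *)(uintptr_t)(sys_table + 60);
		/* boottime->handle_protocol (BS32_handle_protocol == 88) */
		handle_protocol32_t handle_protocol =
			*(handle_protocol32_t *)(uintptr_t)(boottime + 88);

		status = handle_protocol(image_handle, loaded_image_guid, &loaded_image);
		if (status)
			return status;		/* the assembly returns this in %eax */

		/* loaded_image->image_base (LI32_image_base == 32); the assembly
		 * instead jumps to efi32_pe_stub_entry with this value in %esi. */
		*image_base = *(const uint32_t *)((uintptr_t)loaded_image + 32);
		return 0;
	}
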
/*