From f3670394c29ff3730638762c1760fd2f624e6d7b Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 22 Sep 2014 23:05:49 -0700
Subject: Revert "x86/efi: Fixup GOT in all boot code paths"

This reverts commit 9cb0e394234d244fe5a97e743ec9dd7ddff7e64b.

It causes my Sony Vaio Pro 11 to immediately reboot at startup.

Acked-by: Ingo Molnar
Cc: Peter Anvin
Cc: Maarten Lankhorst
Cc: Ard Biesheuvel
Cc: Matt Fleming
Signed-off-by: Linus Torvalds
---
 arch/x86/boot/compressed/head_32.S | 54 ++++++++++--------------------
 arch/x86/boot/compressed/head_64.S | 56 ++++++++++----------------------
 2 files changed, 29 insertions(+), 81 deletions(-)

diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index d6b8aa4c986c..cbed1407a5cd 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -30,33 +30,6 @@
 #include
 #include
 
-/*
- * Adjust our own GOT
- *
- * The relocation base must be in %ebx
- *
- * It is safe to call this macro more than once, because in some of the
- * code paths multiple invocations are inevitable, e.g. via the efi*
- * entry points.
- *
- * Relocation is only performed the first time.
- */
-.macro FIXUP_GOT
-	cmpb	$1, got_fixed(%ebx)
-	je	2f
-
-	leal	_got(%ebx), %edx
-	leal	_egot(%ebx), %ecx
-1:
-	cmpl	%ecx, %edx
-	jae	2f
-	addl	%ebx, (%edx)
-	addl	$4, %edx
-	jmp	1b
-2:
-	movb	$1, got_fixed(%ebx)
-.endm
-
 	__HEAD
 ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
@@ -83,9 +56,6 @@ ENTRY(efi_pe_entry)
 	add	%esi, 88(%eax)
 	pushl	%eax
 
-	movl	%esi, %ebx
-	FIXUP_GOT
-
 	call	make_boot_params
 	cmpl	$0, %eax
 	je	fail
@@ -111,10 +81,6 @@ ENTRY(efi32_stub_entry)
 	leal	efi32_config(%esi), %eax
 	add	%esi, 88(%eax)
 	pushl	%eax
-
-	movl	%esi, %ebx
-	FIXUP_GOT
-
 2:
 	call	efi_main
 	cmpl	$0, %eax
@@ -224,7 +190,19 @@ relocated:
 	shrl	$2, %ecx
 	rep	stosl
 
-	FIXUP_GOT
+/*
+ * Adjust our own GOT
+ */
+	leal	_got(%ebx), %edx
+	leal	_egot(%ebx), %ecx
+1:
+	cmpl	%ecx, %edx
+	jae	2f
+	addl	%ebx, (%edx)
+	addl	$4, %edx
+	jmp	1b
+2:
+
 /*
  * Do the decompression, and jump to the new kernel..
  */
@@ -247,12 +225,8 @@ relocated:
 	xorl	%ebx, %ebx
 	jmp	*%eax
 
-	.data
-/* Have we relocated the GOT? */
-got_fixed:
-	.byte	0
-
 #ifdef CONFIG_EFI_STUB
+	.data
 efi32_config:
 	.fill 11,8,0
 	.long efi_call_phys
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 50f69c7eaaf4..2884e0c3e8a5 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -32,33 +32,6 @@
 #include
 #include
 
-/*
- * Adjust our own GOT
- *
- * The relocation base must be in %rbx
- *
- * It is safe to call this macro more than once, because in some of the
- * code paths multiple invocations are inevitable, e.g. via the efi*
- * entry points.
- *
- * Relocation is only performed the first time.
- */
-.macro FIXUP_GOT
-	cmpb	$1, got_fixed(%rip)
-	je	2f
-
-	leaq	_got(%rip), %rdx
-	leaq	_egot(%rip), %rcx
-1:
-	cmpq	%rcx, %rdx
-	jae	2f
-	addq	%rbx, (%rdx)
-	addq	$8, %rdx
-	jmp	1b
-2:
-	movb	$1, got_fixed(%rip)
-.endm
-
 	__HEAD
 	.code32
 ENTRY(startup_32)
@@ -279,13 +252,10 @@ ENTRY(efi_pe_entry)
 	subq	$1b, %rbp
 
 	/*
-	 * Relocate efi_config->call() and the GOT entries.
+	 * Relocate efi_config->call().
 	 */
 	addq	%rbp, efi64_config+88(%rip)
 
-	movq	%rbp, %rbx
-	FIXUP_GOT
-
 	movq	%rax, %rdi
 	call	make_boot_params
 	cmpq	$0,%rax
@@ -301,13 +271,10 @@ handover_entry:
 	subq	$1b, %rbp
 
 	/*
-	 * Relocate efi_config->call() and the GOT entries.
+	 * Relocate efi_config->call().
 	 */
 	movq	efi_config(%rip), %rax
 	addq	%rbp, 88(%rax)
-
-	movq	%rbp, %rbx
-	FIXUP_GOT
 2:
 	movq	efi_config(%rip), %rdi
 	call	efi_main
@@ -418,8 +385,19 @@ relocated:
 	shrq	$3, %rcx
 	rep	stosq
 
-	FIXUP_GOT
-
+/*
+ * Adjust our own GOT
+ */
+	leaq	_got(%rip), %rdx
+	leaq	_egot(%rip), %rcx
+1:
+	cmpq	%rcx, %rdx
+	jae	2f
+	addq	%rbx, (%rdx)
+	addq	$8, %rdx
+	jmp	1b
+2:
+
 /*
  * Do the decompression, and jump to the new kernel..
  */
@@ -459,10 +437,6 @@ gdt:
 	.quad	0x0000000000000000	/* TS continued */
 gdt_end:
 
-/* Have we relocated the GOT? */
-got_fixed:
-	.byte	0
-
 #ifdef CONFIG_EFI_STUB
 efi_config:
 	.quad	0
-- 
cgit v1.2.3

From 7da4b29d496b1389d3a29b55d3668efecaa08ebd Mon Sep 17 00:00:00 2001
From: Mathias Krause
Date: Tue, 23 Sep 2014 22:31:07 +0200
Subject: crypto: aesni - disable "by8" AVX CTR optimization

The "by8" implementation introduced in commit 22cddcc7df8f ("crypto: aes
- AES CTR x86_64 "by8" AVX optimization") is failing crypto tests as it
handles counter block overflows differently. It only counts the rightmost
32 bits as a counter -- not the whole block, as all other implementations
do. This makes it fail the cryptomgr test #4 that specifically tests this
corner case.

As we're quite late in the release cycle, just disable the "by8" variant
for now.

Reported-by: Romain Francoise
Signed-off-by: Mathias Krause
Cc: Chandramouli Narayanan
Signed-off-by: Herbert Xu
---
 arch/x86/crypto/aesni-intel_glue.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 888950f29fd9..a7ccd57f19e4 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -481,7 +481,7 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 	crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
-#ifdef CONFIG_AS_AVX
+#if 0 /* temporary disabled due to failing crypto tests */
 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv)
 {
@@ -1522,7 +1522,7 @@ static int __init aesni_init(void)
 		aesni_gcm_dec_tfm = aesni_gcm_dec;
 	}
 	aesni_ctr_enc_tfm = aesni_ctr_enc;
-#ifdef CONFIG_AS_AVX
+#if 0 /* temporary disabled due to failing crypto tests */
 	if (cpu_has_avx) {
 		/* optimize performance of ctr mode encryption transform */
 		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
-- 
cgit v1.2.3
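
The cryptomgr failure described in the second commit message comes down to how the
counter block is advanced when its low 32 bits overflow: crypto_inc(ctrblk,
AES_BLOCK_SIZE) carries across the whole 16-byte block, while the "by8" code, per
the message, only advances the rightmost 32 bits. The following is a minimal
userspace C sketch of the two behaviours, not kernel code; the helper names are
made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* Full-block increment: treat all 16 bytes as one big-endian counter and
 * propagate the carry -- the behaviour of crypto_inc(ctrblk, AES_BLOCK_SIZE). */
static void ctr_inc_full_block(uint8_t ctr[AES_BLOCK_SIZE])
{
	for (int i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
		if (++ctr[i] != 0)
			break;	/* no carry into the next byte, done */
	}
}

/* 32-bit-only increment: only the rightmost four bytes count, so a carry out
 * of byte 12 is dropped.  This models the behaviour the commit message
 * attributes to the "by8" implementation. */
static void ctr_inc_low32_only(uint8_t ctr[AES_BLOCK_SIZE])
{
	uint32_t low = (uint32_t)ctr[12] << 24 | (uint32_t)ctr[13] << 16 |
		       (uint32_t)ctr[14] << 8 | ctr[15];

	low++;	/* wraps to 0 without touching bytes 0..11 */
	ctr[12] = low >> 24;
	ctr[13] = low >> 16;
	ctr[14] = low >> 8;
	ctr[15] = low;
}

static void dump(const char *tag, const uint8_t ctr[AES_BLOCK_SIZE])
{
	printf("%-8s", tag);
	for (int i = 0; i < AES_BLOCK_SIZE; i++)
		printf("%02x", ctr[i]);
	printf("\n");
}

int main(void)
{
	/* counter block whose low 32 bits are about to overflow */
	uint8_t iv[AES_BLOCK_SIZE] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff
	};
	uint8_t a[AES_BLOCK_SIZE], b[AES_BLOCK_SIZE];

	memcpy(a, iv, sizeof(a));
	memcpy(b, iv, sizeof(b));

	ctr_inc_full_block(a);	/* ...000000[01] 00000000 */
	ctr_inc_low32_only(b);	/* all zeroes */

	dump("full:", a);
	dump("low32:", b);
	return 0;
}

Starting from a counter block whose low word is 0xffffffff, the two variants
produce different next blocks, so the generated keystreams diverge exactly at
that boundary -- the corner case the failing test vector exercises.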