From ffedeeb780dc554eff3d3b16e6a462a26a41d7ec Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Fri, 11 Oct 2019 13:50:41 +0200
Subject: linkage: Introduce new macros for assembler symbols

Introduce new C macros for annotations of functions and data in
assembly. There is a long-standing mess in macros like ENTRY, END,
ENDPROC and similar. They are used in different manners and sometimes
incorrectly. So introduce macros with clear use to annotate assembly as
follows:

a) Support macros for the ones below
   SYM_T_FUNC -- type used by assembler to mark functions
   SYM_T_OBJECT -- type used by assembler to mark data
   SYM_T_NONE -- type used by assembler to mark entries of unknown type

   They are defined as STT_FUNC, STT_OBJECT, and STT_NOTYPE
   respectively. According to the gas manual, this is the most portable
   way. I am not sure about other assemblers, so this can be switched
   back to %function and %object if this turns into a problem.
   Architectures can also override them by something like ", @function"
   if they need to.

   SYM_A_ALIGN, SYM_A_NONE -- align the symbol?
   SYM_L_GLOBAL, SYM_L_WEAK, SYM_L_LOCAL -- linkage of symbols

b) Mostly internal annotations, used by the ones below
   SYM_ENTRY -- use only if you have to (for non-paired symbols)
   SYM_START -- use only if you have to (for paired symbols)
   SYM_END -- use only if you have to (for paired symbols)

c) Annotations for code
   SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code
   SYM_INNER_LABEL -- only for labels in the middle of code

   SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names
     for one function
   SYM_FUNC_START_ALIAS -- use where there are two global names for
     one function
   SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function

   SYM_FUNC_START -- use for global functions
   SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment
   SYM_FUNC_START_LOCAL -- use for local functions
   SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o
     alignment
   SYM_FUNC_START_WEAK -- use for weak functions
   SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment
   SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
     SYM_FUNC_START_WEAK, ...

   For functions with special (non-C) calling conventions:
   SYM_CODE_START -- use for non-C (special) functions
   SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o
     alignment
   SYM_CODE_START_LOCAL -- use for local non-C (special) functions
   SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special)
     functions, w/o alignment
   SYM_CODE_END -- the end of SYM_CODE_START_LOCAL or SYM_CODE_START

d) For data
   SYM_DATA_START -- global data symbol
   SYM_DATA_START_LOCAL -- local data symbol
   SYM_DATA_END -- the end of the SYM_DATA_START symbol
   SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol
   SYM_DATA -- start+end wrapper around simple global data
   SYM_DATA_LOCAL -- start+end wrapper around simple local data

The macros allow pairing starts and ends of functions and marking
functions correctly in the output ELF objects.

All users of the old macros in x86 are converted to use these in
further patches.
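For illustration only (a sketch with made-up names, not a conversion of
any particular file), a global function, a local helper and a simple
piece of data would now be annotated as:

	SYM_FUNC_START(sketch_func)		/* was: ENTRY(sketch_func) */
		/* C-callable body with a standard stack frame */
		ret
	SYM_FUNC_END(sketch_func)		/* was: ENDPROC(sketch_func) */

	SYM_FUNC_START_LOCAL(sketch_helper)	/* local: no .globl emitted */
		ret
	SYM_FUNC_END(sketch_helper)

	SYM_DATA(sketch_mask, .quad 0x00ff00ff00ff00ff)

SYM_FUNC_START(name) expands via SYM_START and SYM_ENTRY to ".globl
name; ALIGN; name:", and SYM_FUNC_END(name) to ".type name STT_FUNC;
.size name, .-name", so both the type and the size of the annotated
symbols end up correct in the output ELF object.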
Peter Anvin" Cc: Ingo Molnar Cc: Jonathan Corbet Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Len Brown Cc: Linus Torvalds Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland Cc: Pavel Machek Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: x86-ml Cc: xen-devel@lists.xenproject.org Link: https://lkml.kernel.org/r/20191011115108.12392-2-jslaby@suse.cz --- include/linux/linkage.h | 245 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 237 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 7e020782ade2..f3ae8f3dea2c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -75,32 +75,58 @@ #ifdef __ASSEMBLY__ +/* SYM_T_FUNC -- type used by assembler to mark functions */ +#ifndef SYM_T_FUNC +#define SYM_T_FUNC STT_FUNC +#endif + +/* SYM_T_OBJECT -- type used by assembler to mark data */ +#ifndef SYM_T_OBJECT +#define SYM_T_OBJECT STT_OBJECT +#endif + +/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */ +#ifndef SYM_T_NONE +#define SYM_T_NONE STT_NOTYPE +#endif + +/* SYM_A_* -- align the symbol? */ +#define SYM_A_ALIGN ALIGN +#define SYM_A_NONE /* nothing */ + +/* SYM_L_* -- linkage of symbols */ +#define SYM_L_GLOBAL(name) .globl name +#define SYM_L_WEAK(name) .weak name +#define SYM_L_LOCAL(name) /* nothing */ + #ifndef LINKER_SCRIPT #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR +/* === DEPRECATED annotations === */ + #ifndef GLOBAL +/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ #define GLOBAL(name) \ .globl name ASM_NL \ name: #endif #ifndef ENTRY +/* deprecated, use SYM_FUNC_START */ #define ENTRY(name) \ - .globl name ASM_NL \ - ALIGN ASM_NL \ - name: + SYM_FUNC_START(name) #endif #endif /* LINKER_SCRIPT */ #ifndef WEAK +/* deprecated, use SYM_FUNC_START_WEAK* */ #define WEAK(name) \ - .weak name ASM_NL \ - ALIGN ASM_NL \ - name: + SYM_FUNC_START_WEAK(name) #endif #ifndef END +/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */ #define END(name) \ .size name, .-name #endif @@ -110,11 +136,214 @@ * static analysis tools such as stack depth analyzer. */ #ifndef ENDPROC +/* deprecated, use SYM_FUNC_END */ #define ENDPROC(name) \ - .type name, @function ASM_NL \ - END(name) + SYM_FUNC_END(name) +#endif + +/* === generic annotations === */ + +/* SYM_ENTRY -- use only if you have to for non-paired symbols */ +#ifndef SYM_ENTRY +#define SYM_ENTRY(name, linkage, align...) \ + linkage(name) ASM_NL \ + align ASM_NL \ + name: +#endif + +/* SYM_START -- use only if you have to */ +#ifndef SYM_START +#define SYM_START(name, linkage, align...) \ + SYM_ENTRY(name, linkage, align) +#endif + +/* SYM_END -- use only if you have to */ +#ifndef SYM_END +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name +#endif + +/* === code annotations === */ + +/* + * FUNC -- C-like functions (proper stack frame etc.) + * CODE -- non-C code (e.g. irq handlers with different, special stack etc.) + * + * Objtool validates stack for FUNC, but not for CODE. + * Objtool generates debug info for both FUNC & CODE, but needs special + * annotations for each CODE's start (to describe the actual stack frame). 
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */

+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+	.type name SYM_T_NONE ASM_NL \
+	SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+	.type name SYM_T_NONE ASM_NL \
+	SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_LOCAL_ALIAS
+#define SYM_FUNC_START_LOCAL_ALIAS(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
 #endif

+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
 #endif

+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+	SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
 #endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name) \
+	SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name) \
+	SYM_END(name, SYM_T_FUNC)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+	SYM_END(name, SYM_T_NONE)
+#endif
+
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+	SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+	linkage(label) ASM_NL \
+	.type label SYM_T_OBJECT ASM_NL \
+	label: \
+	SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+	SYM_DATA_START(name) ASM_NL \
+	data ASM_NL \
+	SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+	SYM_DATA_START_LOCAL(name) ASM_NL \
+	data ASM_NL \
+	SYM_DATA_END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_LINKAGE_H */
--
cgit v1.2.3

From b4edca150106a68d05eaf823d665a355ff19e28b Mon Sep 17 00:00:00 2001
From: Jiri Slaby
Date: Fri, 11 Oct 2019 13:50:59 +0200
Subject: x86/asm: Remove the last GLOBAL user and remove the macro

Convert the remaining 32-bit users and finally remove the GLOBAL macro.
In particular, this means using SYM_ENTRY for the singlestepping hack
region. Exclude the global definition of GLOBAL from x86 too.
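The replacement is 1:1 here: both the old and the new annotation emit a
bare, unaligned global label. A sketch of the expansion (with a made-up
label name):

	/* old: GLOBAL(sketch_region_start) expanded to */
	.globl sketch_region_start
	sketch_region_start:

	/* new: SYM_ENTRY(sketch_region_start, SYM_L_GLOBAL, SYM_A_NONE)
	   expands to the same */
	.globl sketch_region_start
	sketch_region_start:

The singlestep region markers are labels, not functions, so plain
SYM_ENTRY with SYM_A_NONE is used rather than SYM_FUNC_START, which
would align the symbol and expect a paired SYM_FUNC_END setting a
function type and size.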
Wysocki" Cc: Thomas Gleixner Cc: Will Deacon Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-20-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 4 ++-- arch/x86/include/asm/linkage.h | 8 -------- include/linux/linkage.h | 2 ++ 3 files changed, 4 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index f37ff583cecb..4900a6a5e125 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -832,7 +832,7 @@ SYM_INNER_LABEL_ALIGN(resume_userspace, SYM_L_LOCAL) jmp restore_all SYM_CODE_END(ret_from_exception) -GLOBAL(__begin_SYSENTER_singlestep_region) +SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. @@ -1011,7 +1011,7 @@ ENTRY(entry_SYSENTER_32) pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed -GLOBAL(__end_SYSENTER_singlestep_region) +SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) ENDPROC(entry_SYSENTER_32) /* diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index e07188e8d763..365111789cc6 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -13,14 +13,6 @@ #ifdef __ASSEMBLY__ -/* - * GLOBAL is DEPRECATED - * - * use SYM_DATA_START, SYM_FUNC_START, SYM_INNER_LABEL, SYM_CODE_START, or - * similar - */ -#define GLOBAL(name) SYM_ENTRY(name, SYM_L_GLOBAL, SYM_A_NONE) - #if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) #define __ALIGN .p2align 4, 0x90 #define __ALIGN_STR __stringify(__ALIGN) diff --git a/include/linux/linkage.h b/include/linux/linkage.h index f3ae8f3dea2c..cb1108dde385 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -105,12 +105,14 @@ /* === DEPRECATED annotations === */ +#ifndef CONFIG_X86 #ifndef GLOBAL /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ #define GLOBAL(name) \ .globl name ASM_NL \ name: #endif +#endif #ifndef ENTRY /* deprecated, use SYM_FUNC_START */ -- cgit v1.2.3 From 6dcc5627f6aec4cb1d1494d06a48d8061db06a04 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:04 +0200 Subject: x86/asm: Change all ENTRY+ENDPROC to SYM_FUNC_* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are all functions which are invoked from elsewhere, so annotate them as global using the new SYM_FUNC_START and their ENDPROC's by SYM_FUNC_END. Make sure ENTRY/ENDPROC is not defined on X86_64, given these were the last users. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Reviewed-by: Rafael J. Wysocki [hibernate] Reviewed-by: Boris Ostrovsky [xen bits] Acked-by: Herbert Xu [crypto] Cc: Allison Randal Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Armijn Hemel Cc: Cao jin Cc: Darren Hart Cc: Dave Hansen Cc: "David S. Miller" Cc: Enrico Weigelt Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Jim Mattson Cc: Joerg Roedel Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Kate Stewart Cc: "Kirill A. 
Shutemov" Cc: kvm ML Cc: Len Brown Cc: linux-arch@vger.kernel.org Cc: linux-crypto@vger.kernel.org Cc: linux-efi Cc: linux-efi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland Cc: Matt Fleming Cc: Paolo Bonzini Cc: Pavel Machek Cc: Peter Zijlstra Cc: platform-driver-x86@vger.kernel.org Cc: "Radim Krčmář" Cc: Sean Christopherson Cc: Stefano Stabellini Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: Vitaly Kuznetsov Cc: Wanpeng Li Cc: Wei Huang Cc: x86-ml Cc: xen-devel@lists.xenproject.org Cc: Xiaoyao Li Link: https://lkml.kernel.org/r/20191011115108.12392-25-jslaby@suse.cz --- arch/x86/boot/compressed/efi_thunk_64.S | 4 +- arch/x86/boot/compressed/head_64.S | 16 ++++---- arch/x86/boot/compressed/mem_encrypt.S | 8 ++-- arch/x86/crypto/aegis128-aesni-asm.S | 28 ++++++------- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 12 +++--- arch/x86/crypto/aesni-intel_asm.S | 60 ++++++++++++++-------------- arch/x86/crypto/aesni-intel_avx-x86_64.S | 32 +++++++-------- arch/x86/crypto/blowfish-x86_64-asm_64.S | 16 ++++---- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 24 +++++------ arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 24 +++++------ arch/x86/crypto/camellia-x86_64-asm_64.S | 16 ++++---- arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 16 ++++---- arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 24 +++++------ arch/x86/crypto/chacha-avx2-x86_64.S | 12 +++--- arch/x86/crypto/chacha-avx512vl-x86_64.S | 12 +++--- arch/x86/crypto/chacha-ssse3-x86_64.S | 12 +++--- arch/x86/crypto/crc32-pclmul_asm.S | 4 +- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 4 +- arch/x86/crypto/crct10dif-pcl-asm_64.S | 4 +- arch/x86/crypto/des3_ede-asm_64.S | 8 ++-- arch/x86/crypto/ghash-clmulni-intel_asm.S | 8 ++-- arch/x86/crypto/nh-avx2-x86_64.S | 4 +- arch/x86/crypto/nh-sse2-x86_64.S | 4 +- arch/x86/crypto/poly1305-avx2-x86_64.S | 4 +- arch/x86/crypto/poly1305-sse2-x86_64.S | 8 ++-- arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 24 +++++------ arch/x86/crypto/serpent-avx2-asm_64.S | 24 +++++------ arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 8 ++-- arch/x86/crypto/sha1_avx2_x86_64_asm.S | 4 +- arch/x86/crypto/sha1_ni_asm.S | 4 +- arch/x86/crypto/sha1_ssse3_asm.S | 4 +- arch/x86/crypto/sha256-avx-asm.S | 4 +- arch/x86/crypto/sha256-avx2-asm.S | 4 +- arch/x86/crypto/sha256-ssse3-asm.S | 4 +- arch/x86/crypto/sha256_ni_asm.S | 4 +- arch/x86/crypto/sha512-avx-asm.S | 4 +- arch/x86/crypto/sha512-avx2-asm.S | 4 +- arch/x86/crypto/sha512-ssse3-asm.S | 4 +- arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 24 +++++------ arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 8 ++-- arch/x86/crypto/twofish-x86_64-asm_64.S | 8 ++-- arch/x86/entry/entry_64.S | 10 ++--- arch/x86/entry/entry_64_compat.S | 4 +- arch/x86/kernel/acpi/wakeup_64.S | 8 ++-- arch/x86/kernel/ftrace_64.S | 20 +++++----- arch/x86/kernel/irqflags.S | 8 ++-- arch/x86/kvm/vmx/vmenter.S | 12 +++--- arch/x86/lib/checksum_32.S | 8 ++-- arch/x86/lib/clear_page_64.S | 12 +++--- arch/x86/lib/cmpxchg16b_emu.S | 4 +- arch/x86/lib/cmpxchg8b_emu.S | 4 +- arch/x86/lib/copy_page_64.S | 4 +- arch/x86/lib/copy_user_64.S | 16 ++++---- arch/x86/lib/csum-copy_64.S | 4 +- arch/x86/lib/getuser.S | 16 ++++---- arch/x86/lib/hweight.S | 8 ++-- arch/x86/lib/iomap_copy_64.S | 4 +- arch/x86/lib/memcpy_64.S | 4 +- arch/x86/lib/memmove_64.S | 4 +- arch/x86/lib/memset_64.S | 4 +- arch/x86/lib/msr-reg.S | 8 ++-- arch/x86/lib/putuser.S | 16 ++++---- arch/x86/lib/retpoline.S | 4 +- arch/x86/mm/mem_encrypt_boot.S | 8 ++-- arch/x86/platform/efi/efi_stub_64.S | 4 +- arch/x86/platform/efi/efi_thunk_64.S 
| 4 +- arch/x86/power/hibernate_asm_64.S | 8 ++-- arch/x86/xen/xen-asm.S | 28 ++++++------- arch/x86/xen/xen-asm_64.S | 16 ++++---- include/linux/linkage.h | 4 ++ 70 files changed, 379 insertions(+), 375 deletions(-) (limited to 'include') diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index 31312070db22..593913692d16 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -23,7 +23,7 @@ .code64 .text -ENTRY(efi64_thunk) +SYM_FUNC_START(efi64_thunk) push %rbp push %rbx @@ -97,7 +97,7 @@ ENTRY(efi64_thunk) pop %rbx pop %rbp ret -ENDPROC(efi64_thunk) +SYM_FUNC_END(efi64_thunk) SYM_FUNC_START_LOCAL(efi_exit32) movq func_rt_ptr(%rip), %rax diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 55800467ce5c..58a512e33d8d 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -45,7 +45,7 @@ __HEAD .code32 -ENTRY(startup_32) +SYM_FUNC_START(startup_32) /* * 32bit entry is 0 and it is ABI so immutable! * If we come here directly from a bootloader, @@ -222,11 +222,11 @@ ENTRY(startup_32) /* Jump from 32bit compatibility mode into 64bit mode. */ lret -ENDPROC(startup_32) +SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_MIXED .org 0x190 -ENTRY(efi32_stub_entry) +SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp /* Discard return address */ popl %ecx popl %edx @@ -245,7 +245,7 @@ ENTRY(efi32_stub_entry) movl %eax, efi_config(%ebp) jmp startup_32 -ENDPROC(efi32_stub_entry) +SYM_FUNC_END(efi32_stub_entry) #endif .code64 @@ -447,7 +447,7 @@ SYM_CODE_END(startup_64) #ifdef CONFIG_EFI_STUB /* The entry point for the PE/COFF executable is efi_pe_entry. */ -ENTRY(efi_pe_entry) +SYM_FUNC_START(efi_pe_entry) movq %rcx, efi64_config(%rip) /* Handle */ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */ @@ -496,10 +496,10 @@ fail: movl BP_code32_start(%esi), %eax leaq startup_64(%rax), %rax jmp *%rax -ENDPROC(efi_pe_entry) +SYM_FUNC_END(efi_pe_entry) .org 0x390 -ENTRY(efi64_stub_entry) +SYM_FUNC_START(efi64_stub_entry) movq %rdi, efi64_config(%rip) /* Handle */ movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */ @@ -508,7 +508,7 @@ ENTRY(efi64_stub_entry) movq %rdx, %rsi jmp handover_entry -ENDPROC(efi64_stub_entry) +SYM_FUNC_END(efi64_stub_entry) #endif .text diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index 28d703cad310..dd07e7b41b11 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -15,7 +15,7 @@ .text .code32 -ENTRY(get_sev_encryption_bit) +SYM_FUNC_START(get_sev_encryption_bit) xor %eax, %eax #ifdef CONFIG_AMD_MEM_ENCRYPT @@ -65,10 +65,10 @@ ENTRY(get_sev_encryption_bit) #endif /* CONFIG_AMD_MEM_ENCRYPT */ ret -ENDPROC(get_sev_encryption_bit) +SYM_FUNC_END(get_sev_encryption_bit) .code64 -ENTRY(set_sev_encryption_mask) +SYM_FUNC_START(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp push %rdx @@ -90,7 +90,7 @@ ENTRY(set_sev_encryption_mask) xor %rax, %rax ret -ENDPROC(set_sev_encryption_mask) +SYM_FUNC_END(set_sev_encryption_mask) .data diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index b7026fdef4ff..51d46d93efbc 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -186,7 +186,7 @@ SYM_FUNC_END(__store_partial) /* * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv); */ -ENTRY(crypto_aegis128_aesni_init) 
+SYM_FUNC_START(crypto_aegis128_aesni_init) FRAME_BEGIN /* load IV: */ @@ -226,13 +226,13 @@ ENTRY(crypto_aegis128_aesni_init) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_init) +SYM_FUNC_END(crypto_aegis128_aesni_init) /* * void crypto_aegis128_aesni_ad(void *state, unsigned int length, * const void *data); */ -ENTRY(crypto_aegis128_aesni_ad) +SYM_FUNC_START(crypto_aegis128_aesni_ad) FRAME_BEGIN cmp $0x10, LEN @@ -378,7 +378,7 @@ ENTRY(crypto_aegis128_aesni_ad) .Lad_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_ad) +SYM_FUNC_END(crypto_aegis128_aesni_ad) .macro encrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG @@ -402,7 +402,7 @@ ENDPROC(crypto_aegis128_aesni_ad) * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_enc) +SYM_FUNC_START(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN @@ -493,13 +493,13 @@ ENTRY(crypto_aegis128_aesni_enc) .Lenc_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_enc) +SYM_FUNC_END(crypto_aegis128_aesni_enc) /* * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_enc_tail) +SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ @@ -533,7 +533,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_enc_tail) +SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG @@ -556,7 +556,7 @@ ENDPROC(crypto_aegis128_aesni_enc_tail) * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_dec) +SYM_FUNC_START(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN @@ -647,13 +647,13 @@ ENTRY(crypto_aegis128_aesni_dec) .Ldec_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_dec) +SYM_FUNC_END(crypto_aegis128_aesni_dec) /* * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_dec_tail) +SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ @@ -697,13 +697,13 @@ ENTRY(crypto_aegis128_aesni_dec_tail) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_dec_tail) +SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) /* * void crypto_aegis128_aesni_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ -ENTRY(crypto_aegis128_aesni_final) +SYM_FUNC_START(crypto_aegis128_aesni_final) FRAME_BEGIN /* load the state: */ @@ -744,4 +744,4 @@ ENTRY(crypto_aegis128_aesni_final) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_final) +SYM_FUNC_END(crypto_aegis128_aesni_final) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489..ec437db1fa54 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -544,11 +544,11 @@ ddq_add_8: * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ -ENTRY(aes_ctr_enc_128_avx_by8) +SYM_FUNC_START(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_128 -ENDPROC(aes_ctr_enc_128_avx_by8) +SYM_FUNC_END(aes_ctr_enc_128_avx_by8) /* * routine to do AES192 CTR enc/decrypt "by8" @@ -557,11 +557,11 @@ ENDPROC(aes_ctr_enc_128_avx_by8) * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ -ENTRY(aes_ctr_enc_192_avx_by8) +SYM_FUNC_START(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ 
do_aes_ctrmain KEY_192 -ENDPROC(aes_ctr_enc_192_avx_by8) +SYM_FUNC_END(aes_ctr_enc_192_avx_by8) /* * routine to do AES256 CTR enc/decrypt "by8" @@ -570,8 +570,8 @@ ENDPROC(aes_ctr_enc_192_avx_by8) * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ -ENTRY(aes_ctr_enc_256_avx_by8) +SYM_FUNC_START(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_256 -ENDPROC(aes_ctr_enc_256_avx_by8) +SYM_FUNC_END(aes_ctr_enc_256_avx_by8) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 9d8d5f2296e1..d28503f99f58 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -1592,7 +1592,7 @@ _esb_loop_\@: * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ -ENTRY(aesni_gcm_dec) +SYM_FUNC_START(aesni_gcm_dec) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 @@ -1600,7 +1600,7 @@ ENTRY(aesni_gcm_dec) GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec) +SYM_FUNC_END(aesni_gcm_dec) /***************************************************************************** @@ -1680,7 +1680,7 @@ ENDPROC(aesni_gcm_dec) * * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ -ENTRY(aesni_gcm_enc) +SYM_FUNC_START(aesni_gcm_enc) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 @@ -1689,7 +1689,7 @@ ENTRY(aesni_gcm_enc) GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc) +SYM_FUNC_END(aesni_gcm_enc) /***************************************************************************** * void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1702,12 +1702,12 @@ ENDPROC(aesni_gcm_enc) * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len) // Length of AAD in bytes. */ -ENTRY(aesni_gcm_init) +SYM_FUNC_START(aesni_gcm_init) FUNC_SAVE GCM_INIT %arg3, %arg4,%arg5, %arg6 FUNC_RESTORE ret -ENDPROC(aesni_gcm_init) +SYM_FUNC_END(aesni_gcm_init) /***************************************************************************** * void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1717,12 +1717,12 @@ ENDPROC(aesni_gcm_init) * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ -ENTRY(aesni_gcm_enc_update) +SYM_FUNC_START(aesni_gcm_enc_update) FUNC_SAVE GCM_ENC_DEC enc FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update) +SYM_FUNC_END(aesni_gcm_enc_update) /***************************************************************************** * void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1732,12 +1732,12 @@ ENDPROC(aesni_gcm_enc_update) * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ -ENTRY(aesni_gcm_dec_update) +SYM_FUNC_START(aesni_gcm_dec_update) FUNC_SAVE GCM_ENC_DEC dec FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update) +SYM_FUNC_END(aesni_gcm_dec_update) /***************************************************************************** * void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1747,12 +1747,12 @@ ENDPROC(aesni_gcm_dec_update) * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. 
*/ -ENTRY(aesni_gcm_finalize) +SYM_FUNC_START(aesni_gcm_finalize) FUNC_SAVE GCM_COMPLETE %arg3 %arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize) +SYM_FUNC_END(aesni_gcm_finalize) #endif @@ -1830,7 +1830,7 @@ SYM_FUNC_END(_key_expansion_256b) * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, * unsigned int key_len) */ -ENTRY(aesni_set_key) +SYM_FUNC_START(aesni_set_key) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -1939,12 +1939,12 @@ ENTRY(aesni_set_key) #endif FRAME_END ret -ENDPROC(aesni_set_key) +SYM_FUNC_END(aesni_set_key) /* * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_enc) +SYM_FUNC_START(aesni_enc) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -1963,7 +1963,7 @@ ENTRY(aesni_enc) #endif FRAME_END ret -ENDPROC(aesni_enc) +SYM_FUNC_END(aesni_enc) /* * _aesni_enc1: internal ABI @@ -2133,7 +2133,7 @@ SYM_FUNC_END(_aesni_enc4) /* * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_dec) +SYM_FUNC_START(aesni_dec) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -2153,7 +2153,7 @@ ENTRY(aesni_dec) #endif FRAME_END ret -ENDPROC(aesni_dec) +SYM_FUNC_END(aesni_dec) /* * _aesni_dec1: internal ABI @@ -2324,7 +2324,7 @@ SYM_FUNC_END(_aesni_dec4) * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len) */ -ENTRY(aesni_ecb_enc) +SYM_FUNC_START(aesni_ecb_enc) FRAME_BEGIN #ifndef __x86_64__ pushl LEN @@ -2378,13 +2378,13 @@ ENTRY(aesni_ecb_enc) #endif FRAME_END ret -ENDPROC(aesni_ecb_enc) +SYM_FUNC_END(aesni_ecb_enc) /* * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len); */ -ENTRY(aesni_ecb_dec) +SYM_FUNC_START(aesni_ecb_dec) FRAME_BEGIN #ifndef __x86_64__ pushl LEN @@ -2439,13 +2439,13 @@ ENTRY(aesni_ecb_dec) #endif FRAME_END ret -ENDPROC(aesni_ecb_dec) +SYM_FUNC_END(aesni_ecb_dec) /* * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_cbc_enc) +SYM_FUNC_START(aesni_cbc_enc) FRAME_BEGIN #ifndef __x86_64__ pushl IVP @@ -2483,13 +2483,13 @@ ENTRY(aesni_cbc_enc) #endif FRAME_END ret -ENDPROC(aesni_cbc_enc) +SYM_FUNC_END(aesni_cbc_enc) /* * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_cbc_dec) +SYM_FUNC_START(aesni_cbc_dec) FRAME_BEGIN #ifndef __x86_64__ pushl IVP @@ -2576,7 +2576,7 @@ ENTRY(aesni_cbc_dec) #endif FRAME_END ret -ENDPROC(aesni_cbc_dec) +SYM_FUNC_END(aesni_cbc_dec) #ifdef __x86_64__ .pushsection .rodata @@ -2638,7 +2638,7 @@ SYM_FUNC_END(_aesni_inc) * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_ctr_enc) +SYM_FUNC_START(aesni_ctr_enc) FRAME_BEGIN cmp $16, LEN jb .Lctr_enc_just_ret @@ -2695,7 +2695,7 @@ ENTRY(aesni_ctr_enc) .Lctr_enc_just_ret: FRAME_END ret -ENDPROC(aesni_ctr_enc) +SYM_FUNC_END(aesni_ctr_enc) /* * _aesni_gf128mul_x_ble: internal ABI @@ -2719,7 +2719,7 @@ ENDPROC(aesni_ctr_enc) * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * bool enc, u8 *iv) */ -ENTRY(aesni_xts_crypt8) +SYM_FUNC_START(aesni_xts_crypt8) FRAME_BEGIN cmpb $0, %cl movl $0, %ecx @@ -2823,6 +2823,6 @@ ENTRY(aesni_xts_crypt8) FRAME_END ret -ENDPROC(aesni_xts_crypt8) +SYM_FUNC_END(aesni_xts_crypt8) #endif diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 91c039ab5699..bfa1c0b3e5b4 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -1775,12 +1775,12 
@@ _initial_blocks_done\@: # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_init_avx_gen2) +SYM_FUNC_START(aesni_gcm_init_avx_gen2) FUNC_SAVE INIT GHASH_MUL_AVX, PRECOMPUTE_AVX FUNC_RESTORE ret -ENDPROC(aesni_gcm_init_avx_gen2) +SYM_FUNC_END(aesni_gcm_init_avx_gen2) ############################################################################### #void aesni_gcm_enc_update_avx_gen2( @@ -1790,7 +1790,7 @@ ENDPROC(aesni_gcm_init_avx_gen2) # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_enc_update_avx_gen2) +SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2) FUNC_SAVE mov keysize, %eax cmp $32, %eax @@ -1809,7 +1809,7 @@ key_256_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update_avx_gen2) +SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2) ############################################################################### #void aesni_gcm_dec_update_avx_gen2( @@ -1819,7 +1819,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen2) # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_dec_update_avx_gen2) +SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -1838,7 +1838,7 @@ key_256_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update_avx_gen2) +SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2) ############################################################################### #void aesni_gcm_finalize_avx_gen2( @@ -1848,7 +1848,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen2) # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### -ENTRY(aesni_gcm_finalize_avx_gen2) +SYM_FUNC_START(aesni_gcm_finalize_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -1867,7 +1867,7 @@ key_256_finalize: GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize_avx_gen2) +SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) #endif /* CONFIG_AS_AVX */ @@ -2746,12 +2746,12 @@ _initial_blocks_done\@: # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_init_avx_gen4) +SYM_FUNC_START(aesni_gcm_init_avx_gen4) FUNC_SAVE INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 FUNC_RESTORE ret -ENDPROC(aesni_gcm_init_avx_gen4) +SYM_FUNC_END(aesni_gcm_init_avx_gen4) ############################################################################### #void aesni_gcm_enc_avx_gen4( @@ -2761,7 +2761,7 @@ ENDPROC(aesni_gcm_init_avx_gen4) # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. 
*/ ############################################################################### -ENTRY(aesni_gcm_enc_update_avx_gen4) +SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2780,7 +2780,7 @@ key_256_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update_avx_gen4) +SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4) ############################################################################### #void aesni_gcm_dec_update_avx_gen4( @@ -2790,7 +2790,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen4) # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_dec_update_avx_gen4) +SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2809,7 +2809,7 @@ key_256_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update_avx_gen4) +SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4) ############################################################################### #void aesni_gcm_finalize_avx_gen4( @@ -2819,7 +2819,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen4) # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### -ENTRY(aesni_gcm_finalize_avx_gen4) +SYM_FUNC_START(aesni_gcm_finalize_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2838,6 +2838,6 @@ key_256_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize_avx_gen4) +SYM_FUNC_END(aesni_gcm_finalize_avx_gen4) #endif /* CONFIG_AS_AVX2 */ diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 330db7a48af8..4222ac6d6584 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -103,7 +103,7 @@ bswapq RX0; \ xorq RX0, (RIO); -ENTRY(__blowfish_enc_blk) +SYM_FUNC_START(__blowfish_enc_blk) /* input: * %rdi: ctx * %rsi: dst @@ -139,9 +139,9 @@ ENTRY(__blowfish_enc_blk) .L__enc_xor: xor_block(); ret; -ENDPROC(__blowfish_enc_blk) +SYM_FUNC_END(__blowfish_enc_blk) -ENTRY(blowfish_dec_blk) +SYM_FUNC_START(blowfish_dec_blk) /* input: * %rdi: ctx * %rsi: dst @@ -171,7 +171,7 @@ ENTRY(blowfish_dec_blk) movq %r11, %r12; ret; -ENDPROC(blowfish_dec_blk) +SYM_FUNC_END(blowfish_dec_blk) /********************************************************************** 4-way blowfish, four blocks parallel @@ -283,7 +283,7 @@ ENDPROC(blowfish_dec_blk) bswapq RX3; \ xorq RX3, 24(RIO); -ENTRY(__blowfish_enc_blk_4way) +SYM_FUNC_START(__blowfish_enc_blk_4way) /* input: * %rdi: ctx * %rsi: dst @@ -330,9 +330,9 @@ ENTRY(__blowfish_enc_blk_4way) popq %rbx; popq %r12; ret; -ENDPROC(__blowfish_enc_blk_4way) +SYM_FUNC_END(__blowfish_enc_blk_4way) -ENTRY(blowfish_dec_blk_4way) +SYM_FUNC_START(blowfish_dec_blk_4way) /* input: * %rdi: ctx * %rsi: dst @@ -365,4 +365,4 @@ ENTRY(blowfish_dec_blk_4way) popq %r12; ret; -ENDPROC(blowfish_dec_blk_4way) +SYM_FUNC_END(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index f4408ca55fdb..d01ddd73de65 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ 
b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -893,7 +893,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16) jmp .Ldec_max24; SYM_FUNC_END(__camellia_dec_blk16) -ENTRY(camellia_ecb_enc_16way) +SYM_FUNC_START(camellia_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -916,9 +916,9 @@ ENTRY(camellia_ecb_enc_16way) FRAME_END ret; -ENDPROC(camellia_ecb_enc_16way) +SYM_FUNC_END(camellia_ecb_enc_16way) -ENTRY(camellia_ecb_dec_16way) +SYM_FUNC_START(camellia_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -946,9 +946,9 @@ ENTRY(camellia_ecb_dec_16way) FRAME_END ret; -ENDPROC(camellia_ecb_dec_16way) +SYM_FUNC_END(camellia_ecb_dec_16way) -ENTRY(camellia_cbc_dec_16way) +SYM_FUNC_START(camellia_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -997,7 +997,7 @@ ENTRY(camellia_cbc_dec_16way) FRAME_END ret; -ENDPROC(camellia_cbc_dec_16way) +SYM_FUNC_END(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ @@ -1005,7 +1005,7 @@ ENDPROC(camellia_cbc_dec_16way) vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; -ENTRY(camellia_ctr_16way) +SYM_FUNC_START(camellia_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1110,7 +1110,7 @@ ENTRY(camellia_ctr_16way) FRAME_END ret; -ENDPROC(camellia_ctr_16way) +SYM_FUNC_END(camellia_ctr_16way) #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ @@ -1256,7 +1256,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way) ret; SYM_FUNC_END(camellia_xts_crypt_16way) -ENTRY(camellia_xts_enc_16way) +SYM_FUNC_START(camellia_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1268,9 +1268,9 @@ ENTRY(camellia_xts_enc_16way) leaq __camellia_enc_blk16, %r9; jmp camellia_xts_crypt_16way; -ENDPROC(camellia_xts_enc_16way) +SYM_FUNC_END(camellia_xts_enc_16way) -ENTRY(camellia_xts_dec_16way) +SYM_FUNC_START(camellia_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1286,4 +1286,4 @@ ENTRY(camellia_xts_dec_16way) leaq __camellia_dec_blk16, %r9; jmp camellia_xts_crypt_16way; -ENDPROC(camellia_xts_dec_16way) +SYM_FUNC_END(camellia_xts_dec_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 72ae3edd0997..563ef6e83cdd 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -931,7 +931,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32) jmp .Ldec_max24; SYM_FUNC_END(__camellia_dec_blk32) -ENTRY(camellia_ecb_enc_32way) +SYM_FUNC_START(camellia_ecb_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -958,9 +958,9 @@ ENTRY(camellia_ecb_enc_32way) FRAME_END ret; -ENDPROC(camellia_ecb_enc_32way) +SYM_FUNC_END(camellia_ecb_enc_32way) -ENTRY(camellia_ecb_dec_32way) +SYM_FUNC_START(camellia_ecb_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -992,9 +992,9 @@ ENTRY(camellia_ecb_dec_32way) FRAME_END ret; -ENDPROC(camellia_ecb_dec_32way) +SYM_FUNC_END(camellia_ecb_dec_32way) -ENTRY(camellia_cbc_dec_32way) +SYM_FUNC_START(camellia_cbc_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1060,7 +1060,7 @@ ENTRY(camellia_cbc_dec_32way) FRAME_END ret; -ENDPROC(camellia_cbc_dec_32way) +SYM_FUNC_END(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ @@ -1076,7 +1076,7 @@ ENDPROC(camellia_cbc_dec_32way) vpslldq $8, tmp1, tmp1; \ vpsubq tmp1, x, x; -ENTRY(camellia_ctr_32way) +SYM_FUNC_START(camellia_ctr_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ 
-1200,7 +1200,7 @@ ENTRY(camellia_ctr_32way) FRAME_END ret; -ENDPROC(camellia_ctr_32way) +SYM_FUNC_END(camellia_ctr_32way) #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ @@ -1369,7 +1369,7 @@ SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way) ret; SYM_FUNC_END(camellia_xts_crypt_32way) -ENTRY(camellia_xts_enc_32way) +SYM_FUNC_START(camellia_xts_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1382,9 +1382,9 @@ ENTRY(camellia_xts_enc_32way) leaq __camellia_enc_blk32, %r9; jmp camellia_xts_crypt_32way; -ENDPROC(camellia_xts_enc_32way) +SYM_FUNC_END(camellia_xts_enc_32way) -ENTRY(camellia_xts_dec_32way) +SYM_FUNC_START(camellia_xts_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1400,4 +1400,4 @@ ENTRY(camellia_xts_dec_32way) leaq __camellia_dec_blk32, %r9; jmp camellia_xts_crypt_32way; -ENDPROC(camellia_xts_dec_32way) +SYM_FUNC_END(camellia_xts_dec_32way) diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 23528bc18fc6..1372e6408850 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -175,7 +175,7 @@ bswapq RAB0; \ movq RAB0, 4*2(RIO); -ENTRY(__camellia_enc_blk) +SYM_FUNC_START(__camellia_enc_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -220,9 +220,9 @@ ENTRY(__camellia_enc_blk) movq RR12, %r12; ret; -ENDPROC(__camellia_enc_blk) +SYM_FUNC_END(__camellia_enc_blk) -ENTRY(camellia_dec_blk) +SYM_FUNC_START(camellia_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -258,7 +258,7 @@ ENTRY(camellia_dec_blk) movq RR12, %r12; ret; -ENDPROC(camellia_dec_blk) +SYM_FUNC_END(camellia_dec_blk) /********************************************************************** 2-way camellia @@ -409,7 +409,7 @@ ENDPROC(camellia_dec_blk) bswapq RAB1; \ movq RAB1, 12*2(RIO); -ENTRY(__camellia_enc_blk_2way) +SYM_FUNC_START(__camellia_enc_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -456,9 +456,9 @@ ENTRY(__camellia_enc_blk_2way) movq RR12, %r12; popq %rbx; ret; -ENDPROC(__camellia_enc_blk_2way) +SYM_FUNC_END(__camellia_enc_blk_2way) -ENTRY(camellia_dec_blk_2way) +SYM_FUNC_START(camellia_dec_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -496,4 +496,4 @@ ENTRY(camellia_dec_blk_2way) movq RR12, %r12; movq RXOR, %rbx; ret; -ENDPROC(camellia_dec_blk_2way) +SYM_FUNC_END(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index ef86c6a966de..8a6181b08b59 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -359,7 +359,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16) jmp .L__dec_tail; SYM_FUNC_END(__cast5_dec_blk16) -ENTRY(cast5_ecb_enc_16way) +SYM_FUNC_START(cast5_ecb_enc_16way) /* input: * %rdi: ctx * %rsi: dst @@ -394,9 +394,9 @@ ENTRY(cast5_ecb_enc_16way) popq %r15; FRAME_END ret; -ENDPROC(cast5_ecb_enc_16way) +SYM_FUNC_END(cast5_ecb_enc_16way) -ENTRY(cast5_ecb_dec_16way) +SYM_FUNC_START(cast5_ecb_dec_16way) /* input: * %rdi: ctx * %rsi: dst @@ -432,9 +432,9 @@ ENTRY(cast5_ecb_dec_16way) popq %r15; FRAME_END ret; -ENDPROC(cast5_ecb_dec_16way) +SYM_FUNC_END(cast5_ecb_dec_16way) -ENTRY(cast5_cbc_dec_16way) +SYM_FUNC_START(cast5_cbc_dec_16way) /* input: * %rdi: ctx * %rsi: dst @@ -484,9 +484,9 @@ ENTRY(cast5_cbc_dec_16way) popq %r12; FRAME_END ret; -ENDPROC(cast5_cbc_dec_16way) +SYM_FUNC_END(cast5_cbc_dec_16way) -ENTRY(cast5_ctr_16way) +SYM_FUNC_START(cast5_ctr_16way) /* input: * %rdi: ctx * %rsi: dst @@ -560,4 +560,4 @@ ENTRY(cast5_ctr_16way) popq 
%r12; FRAME_END ret; -ENDPROC(cast5_ctr_16way) +SYM_FUNC_END(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index b080a7454e70..932a3ce32a88 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -341,7 +341,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8) ret; SYM_FUNC_END(__cast6_dec_blk8) -ENTRY(cast6_ecb_enc_8way) +SYM_FUNC_START(cast6_ecb_enc_8way) /* input: * %rdi: ctx * %rsi: dst @@ -362,9 +362,9 @@ ENTRY(cast6_ecb_enc_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_ecb_enc_8way) +SYM_FUNC_END(cast6_ecb_enc_8way) -ENTRY(cast6_ecb_dec_8way) +SYM_FUNC_START(cast6_ecb_dec_8way) /* input: * %rdi: ctx * %rsi: dst @@ -385,9 +385,9 @@ ENTRY(cast6_ecb_dec_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_ecb_dec_8way) +SYM_FUNC_END(cast6_ecb_dec_8way) -ENTRY(cast6_cbc_dec_8way) +SYM_FUNC_START(cast6_cbc_dec_8way) /* input: * %rdi: ctx * %rsi: dst @@ -411,9 +411,9 @@ ENTRY(cast6_cbc_dec_8way) popq %r12; FRAME_END ret; -ENDPROC(cast6_cbc_dec_8way) +SYM_FUNC_END(cast6_cbc_dec_8way) -ENTRY(cast6_ctr_8way) +SYM_FUNC_START(cast6_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -439,9 +439,9 @@ ENTRY(cast6_ctr_8way) popq %r12; FRAME_END ret; -ENDPROC(cast6_ctr_8way) +SYM_FUNC_END(cast6_ctr_8way) -ENTRY(cast6_xts_enc_8way) +SYM_FUNC_START(cast6_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -466,9 +466,9 @@ ENTRY(cast6_xts_enc_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_xts_enc_8way) +SYM_FUNC_END(cast6_xts_enc_8way) -ENTRY(cast6_xts_dec_8way) +SYM_FUNC_START(cast6_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -493,4 +493,4 @@ ENTRY(cast6_xts_dec_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_xts_dec_8way) +SYM_FUNC_END(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S index 831e4434fc20..ee9a40ab4109 100644 --- a/arch/x86/crypto/chacha-avx2-x86_64.S +++ b/arch/x86/crypto/chacha-avx2-x86_64.S @@ -34,7 +34,7 @@ CTR4BL: .octa 0x00000000000000000000000000000002 .text -ENTRY(chacha_2block_xor_avx2) +SYM_FUNC_START(chacha_2block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i @@ -224,9 +224,9 @@ ENTRY(chacha_2block_xor_avx2) lea -8(%r10),%rsp jmp .Ldone2 -ENDPROC(chacha_2block_xor_avx2) +SYM_FUNC_END(chacha_2block_xor_avx2) -ENTRY(chacha_4block_xor_avx2) +SYM_FUNC_START(chacha_4block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -529,9 +529,9 @@ ENTRY(chacha_4block_xor_avx2) lea -8(%r10),%rsp jmp .Ldone4 -ENDPROC(chacha_4block_xor_avx2) +SYM_FUNC_END(chacha_4block_xor_avx2) -ENTRY(chacha_8block_xor_avx2) +SYM_FUNC_START(chacha_8block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i @@ -1018,4 +1018,4 @@ ENTRY(chacha_8block_xor_avx2) jmp .Ldone8 -ENDPROC(chacha_8block_xor_avx2) +SYM_FUNC_END(chacha_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S index 848f9c75fd4f..bb193fde123a 100644 --- a/arch/x86/crypto/chacha-avx512vl-x86_64.S +++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S @@ -24,7 +24,7 @@ CTR8BL: .octa 0x00000003000000020000000100000000 .text -ENTRY(chacha_2block_xor_avx512vl) +SYM_FUNC_START(chacha_2block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i @@ -187,9 +187,9 
@@ ENTRY(chacha_2block_xor_avx512vl) jmp .Ldone2 -ENDPROC(chacha_2block_xor_avx512vl) +SYM_FUNC_END(chacha_2block_xor_avx512vl) -ENTRY(chacha_4block_xor_avx512vl) +SYM_FUNC_START(chacha_4block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -453,9 +453,9 @@ ENTRY(chacha_4block_xor_avx512vl) jmp .Ldone4 -ENDPROC(chacha_4block_xor_avx512vl) +SYM_FUNC_END(chacha_4block_xor_avx512vl) -ENTRY(chacha_8block_xor_avx512vl) +SYM_FUNC_START(chacha_8block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i @@ -833,4 +833,4 @@ ENTRY(chacha_8block_xor_avx512vl) jmp .Ldone8 -ENDPROC(chacha_8block_xor_avx512vl) +SYM_FUNC_END(chacha_8block_xor_avx512vl) diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index 361d2bfc253c..a38ab2512a6f 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -111,7 +111,7 @@ SYM_FUNC_START_LOCAL(chacha_permute) ret SYM_FUNC_END(chacha_permute) -ENTRY(chacha_block_xor_ssse3) +SYM_FUNC_START(chacha_block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 1 data block output, o # %rdx: up to 1 data block input, i @@ -197,9 +197,9 @@ ENTRY(chacha_block_xor_ssse3) lea -8(%r10),%rsp jmp .Ldone -ENDPROC(chacha_block_xor_ssse3) +SYM_FUNC_END(chacha_block_xor_ssse3) -ENTRY(hchacha_block_ssse3) +SYM_FUNC_START(hchacha_block_ssse3) # %rdi: Input state matrix, s # %rsi: output (8 32-bit words) # %edx: nrounds @@ -218,9 +218,9 @@ ENTRY(hchacha_block_ssse3) FRAME_END ret -ENDPROC(hchacha_block_ssse3) +SYM_FUNC_END(hchacha_block_ssse3) -ENTRY(chacha_4block_xor_ssse3) +SYM_FUNC_START(chacha_4block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -788,4 +788,4 @@ ENTRY(chacha_4block_xor_ssse3) jmp .Ldone4 -ENDPROC(chacha_4block_xor_ssse3) +SYM_FUNC_END(chacha_4block_xor_ssse3) diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 1c099dc08cc3..9fd28ff65bc2 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -103,7 +103,7 @@ * size_t len, uint crc32) */ -ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ +SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ movdqa (BUF), %xmm1 movdqa 0x10(BUF), %xmm2 movdqa 0x20(BUF), %xmm3 @@ -238,4 +238,4 @@ fold_64: PEXTRD 0x01, %xmm1, %eax ret -ENDPROC(crc32_pclmul_le_16) +SYM_FUNC_END(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index d9b734d0c8cc..0e6690e3618c 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -74,7 +74,7 @@ # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); .text -ENTRY(crc_pcl) +SYM_FUNC_START(crc_pcl) #define bufp %rdi #define bufp_dw %edi #define bufp_w %di @@ -311,7 +311,7 @@ do_return: popq %rdi popq %rbx ret -ENDPROC(crc_pcl) +SYM_FUNC_END(crc_pcl) .section .rodata, "a", @progbits ################################################################ diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index 3d873e67749d..b2533d63030e 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -95,7 +95,7 @@ # Assumes len >= 16. 
# .align 16 -ENTRY(crc_t10dif_pcl) +SYM_FUNC_START(crc_t10dif_pcl) movdqa .Lbswap_mask(%rip), BSWAP_MASK @@ -280,7 +280,7 @@ ENTRY(crc_t10dif_pcl) jge .Lfold_16_bytes_loop # 32 <= len <= 255 add $16, len jmp .Lhandle_partial_segment # 17 <= len <= 31 -ENDPROC(crc_t10dif_pcl) +SYM_FUNC_END(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index 7fca43099a5f..fac0fdc3f25d 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -162,7 +162,7 @@ movl left##d, (io); \ movl right##d, 4(io); -ENTRY(des3_ede_x86_64_crypt_blk) +SYM_FUNC_START(des3_ede_x86_64_crypt_blk) /* input: * %rdi: round keys, CTX * %rsi: dst @@ -244,7 +244,7 @@ ENTRY(des3_ede_x86_64_crypt_blk) popq %rbx; ret; -ENDPROC(des3_ede_x86_64_crypt_blk) +SYM_FUNC_END(des3_ede_x86_64_crypt_blk) /*********************************************************************** * 3-way 3DES @@ -418,7 +418,7 @@ ENDPROC(des3_ede_x86_64_crypt_blk) #define __movq(src, dst) \ movq src, dst; -ENTRY(des3_ede_x86_64_crypt_blk_3way) +SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) /* input: * %rdi: ctx, round keys * %rsi: dst (3 blocks) @@ -529,7 +529,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) popq %rbx; ret; -ENDPROC(des3_ede_x86_64_crypt_blk_3way) +SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) .section .rodata, "a", @progbits .align 16 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index e81da25a33ca..bb9735fbb865 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -90,7 +90,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) SYM_FUNC_END(__clmul_gf128mul_ble) /* void clmul_ghash_mul(char *dst, const u128 *shash) */ -ENTRY(clmul_ghash_mul) +SYM_FUNC_START(clmul_ghash_mul) FRAME_BEGIN movups (%rdi), DATA movups (%rsi), SHASH @@ -101,13 +101,13 @@ ENTRY(clmul_ghash_mul) movups DATA, (%rdi) FRAME_END ret -ENDPROC(clmul_ghash_mul) +SYM_FUNC_END(clmul_ghash_mul) /* * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, * const u128 *shash); */ -ENTRY(clmul_ghash_update) +SYM_FUNC_START(clmul_ghash_update) FRAME_BEGIN cmp $16, %rdx jb .Lupdate_just_ret # check length @@ -130,4 +130,4 @@ ENTRY(clmul_ghash_update) .Lupdate_just_ret: FRAME_END ret -ENDPROC(clmul_ghash_update) +SYM_FUNC_END(clmul_ghash_update) diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S index f7946ea1b704..b22c7b936272 100644 --- a/arch/x86/crypto/nh-avx2-x86_64.S +++ b/arch/x86/crypto/nh-avx2-x86_64.S @@ -69,7 +69,7 @@ * * It's guaranteed that message_len % 16 == 0. */ -ENTRY(nh_avx2) +SYM_FUNC_START(nh_avx2) vmovdqu 0x00(KEY), K0 vmovdqu 0x10(KEY), K1 @@ -154,4 +154,4 @@ ENTRY(nh_avx2) vpaddq T4, T0, T0 vmovdqu T0, (HASH) ret -ENDPROC(nh_avx2) +SYM_FUNC_END(nh_avx2) diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S index 51f52d4ab4bb..d7ae22dd6683 100644 --- a/arch/x86/crypto/nh-sse2-x86_64.S +++ b/arch/x86/crypto/nh-sse2-x86_64.S @@ -71,7 +71,7 @@ * * It's guaranteed that message_len % 16 == 0. 
*/ -ENTRY(nh_sse2) +SYM_FUNC_START(nh_sse2) movdqu 0x00(KEY), K0 movdqu 0x10(KEY), K1 @@ -120,4 +120,4 @@ ENTRY(nh_sse2) movdqu T0, 0x00(HASH) movdqu T1, 0x10(HASH) ret -ENDPROC(nh_sse2) +SYM_FUNC_END(nh_sse2) diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index 8b341bc29d41..d6063feda9da 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -79,7 +79,7 @@ ORMASK: .octa 0x00000000010000000000000001000000 #define d3 %r12 #define d4 %r13 -ENTRY(poly1305_4block_avx2) +SYM_FUNC_START(poly1305_4block_avx2) # %rdi: Accumulator h[5] # %rsi: 64 byte input block m # %rdx: Poly1305 key r[5] @@ -387,4 +387,4 @@ ENTRY(poly1305_4block_avx2) pop %r12 pop %rbx ret -ENDPROC(poly1305_4block_avx2) +SYM_FUNC_END(poly1305_4block_avx2) diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index 5578f846e622..d8ea29b96640 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -46,7 +46,7 @@ ORMASK: .octa 0x00000000010000000000000001000000 #define d3 %r11 #define d4 %r12 -ENTRY(poly1305_block_sse2) +SYM_FUNC_START(poly1305_block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] @@ -276,7 +276,7 @@ ENTRY(poly1305_block_sse2) pop %r12 pop %rbx ret -ENDPROC(poly1305_block_sse2) +SYM_FUNC_END(poly1305_block_sse2) #define u0 0x00(%r8) @@ -301,7 +301,7 @@ ENDPROC(poly1305_block_sse2) #undef d0 #define d0 %r13 -ENTRY(poly1305_2block_sse2) +SYM_FUNC_START(poly1305_2block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] @@ -587,4 +587,4 @@ ENTRY(poly1305_2block_sse2) pop %r12 pop %rbx ret -ENDPROC(poly1305_2block_sse2) +SYM_FUNC_END(poly1305_2block_sse2) diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index a098aa015784..ba9e4c1e7f5c 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -662,7 +662,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) ret; SYM_FUNC_END(__serpent_dec_blk8_avx) -ENTRY(serpent_ecb_enc_8way_avx) +SYM_FUNC_START(serpent_ecb_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -678,9 +678,9 @@ ENTRY(serpent_ecb_enc_8way_avx) FRAME_END ret; -ENDPROC(serpent_ecb_enc_8way_avx) +SYM_FUNC_END(serpent_ecb_enc_8way_avx) -ENTRY(serpent_ecb_dec_8way_avx) +SYM_FUNC_START(serpent_ecb_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -696,9 +696,9 @@ ENTRY(serpent_ecb_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_ecb_dec_8way_avx) +SYM_FUNC_END(serpent_ecb_dec_8way_avx) -ENTRY(serpent_cbc_dec_8way_avx) +SYM_FUNC_START(serpent_cbc_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -714,9 +714,9 @@ ENTRY(serpent_cbc_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_cbc_dec_8way_avx) +SYM_FUNC_END(serpent_cbc_dec_8way_avx) -ENTRY(serpent_ctr_8way_avx) +SYM_FUNC_START(serpent_ctr_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -734,9 +734,9 @@ ENTRY(serpent_ctr_8way_avx) FRAME_END ret; -ENDPROC(serpent_ctr_8way_avx) +SYM_FUNC_END(serpent_ctr_8way_avx) -ENTRY(serpent_xts_enc_8way_avx) +SYM_FUNC_START(serpent_xts_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -756,9 +756,9 @@ ENTRY(serpent_xts_enc_8way_avx) FRAME_END ret; -ENDPROC(serpent_xts_enc_8way_avx) +SYM_FUNC_END(serpent_xts_enc_8way_avx) -ENTRY(serpent_xts_dec_8way_avx) +SYM_FUNC_START(serpent_xts_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -778,4 +778,4 @@ 
ENTRY(serpent_xts_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_xts_dec_8way_avx) +SYM_FUNC_END(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index 6149ba80b4d1..c9648aeae705 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -668,7 +668,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16) ret; SYM_FUNC_END(__serpent_dec_blk16) -ENTRY(serpent_ecb_enc_16way) +SYM_FUNC_START(serpent_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -688,9 +688,9 @@ ENTRY(serpent_ecb_enc_16way) FRAME_END ret; -ENDPROC(serpent_ecb_enc_16way) +SYM_FUNC_END(serpent_ecb_enc_16way) -ENTRY(serpent_ecb_dec_16way) +SYM_FUNC_START(serpent_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -710,9 +710,9 @@ ENTRY(serpent_ecb_dec_16way) FRAME_END ret; -ENDPROC(serpent_ecb_dec_16way) +SYM_FUNC_END(serpent_ecb_dec_16way) -ENTRY(serpent_cbc_dec_16way) +SYM_FUNC_START(serpent_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -733,9 +733,9 @@ ENTRY(serpent_cbc_dec_16way) FRAME_END ret; -ENDPROC(serpent_cbc_dec_16way) +SYM_FUNC_END(serpent_cbc_dec_16way) -ENTRY(serpent_ctr_16way) +SYM_FUNC_START(serpent_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -758,9 +758,9 @@ ENTRY(serpent_ctr_16way) FRAME_END ret; -ENDPROC(serpent_ctr_16way) +SYM_FUNC_END(serpent_ctr_16way) -ENTRY(serpent_xts_enc_16way) +SYM_FUNC_START(serpent_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -784,9 +784,9 @@ ENTRY(serpent_xts_enc_16way) FRAME_END ret; -ENDPROC(serpent_xts_enc_16way) +SYM_FUNC_END(serpent_xts_enc_16way) -ENTRY(serpent_xts_dec_16way) +SYM_FUNC_START(serpent_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -810,4 +810,4 @@ ENTRY(serpent_xts_dec_16way) FRAME_END ret; -ENDPROC(serpent_xts_dec_16way) +SYM_FUNC_END(serpent_xts_dec_16way) diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S index 5e0b3a3e97af..efb6dc17dc90 100644 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S @@ -619,7 +619,7 @@ pxor t0, x3; \ movdqu x3, (3*4*4)(out); -ENTRY(__serpent_enc_blk_8way) +SYM_FUNC_START(__serpent_enc_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -682,9 +682,9 @@ ENTRY(__serpent_enc_blk_8way) xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_enc_blk_8way) +SYM_FUNC_END(__serpent_enc_blk_8way) -ENTRY(serpent_dec_blk_8way) +SYM_FUNC_START(serpent_dec_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -736,4 +736,4 @@ ENTRY(serpent_dec_blk_8way) write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; -ENDPROC(serpent_dec_blk_8way) +SYM_FUNC_END(serpent_dec_blk_8way) diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 9f712a7dfd79..6decc85ef7b7 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -634,7 +634,7 @@ _loop3: * param: function's name */ .macro SHA1_VECTOR_ASM name - ENTRY(\name) + SYM_FUNC_START(\name) push %rbx push %r12 @@ -676,7 +676,7 @@ _loop3: ret - ENDPROC(\name) + SYM_FUNC_END(\name) .endm .section .rodata diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index ebbdba72ae07..11efe3a45a1f 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -95,7 +95,7 @@ */ .text .align 32 -ENTRY(sha1_ni_transform) +SYM_FUNC_START(sha1_ni_transform) mov %rsp, RSPSAVE sub 
$FRAME_SIZE, %rsp and $~0xF, %rsp @@ -291,7 +291,7 @@ ENTRY(sha1_ni_transform) mov RSPSAVE, %rsp ret -ENDPROC(sha1_ni_transform) +SYM_FUNC_END(sha1_ni_transform) .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index 99c5b8c4dc38..5d03c1173690 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -67,7 +67,7 @@ * param: function's name */ .macro SHA1_VECTOR_ASM name - ENTRY(\name) + SYM_FUNC_START(\name) push %rbx push %r12 @@ -101,7 +101,7 @@ pop %rbx ret - ENDPROC(\name) + SYM_FUNC_END(\name) .endm /* diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 001bbcf93c79..22e14c8dd2e4 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -347,7 +347,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_avx) +SYM_FUNC_START(sha256_transform_avx) .align 32 pushq %rbx pushq %r12 @@ -460,7 +460,7 @@ done_hash: popq %r12 popq %rbx ret -ENDPROC(sha256_transform_avx) +SYM_FUNC_END(sha256_transform_avx) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 1420db15dcdd..519b551ad576 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -526,7 +526,7 @@ STACK_SIZE = _RSP + _RSP_SIZE ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_rorx) +SYM_FUNC_START(sha256_transform_rorx) .align 32 pushq %rbx pushq %r12 @@ -713,7 +713,7 @@ done_hash: popq %r12 popq %rbx ret -ENDPROC(sha256_transform_rorx) +SYM_FUNC_END(sha256_transform_rorx) .section .rodata.cst512.K256, "aM", @progbits, 512 .align 64 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index c6c05ed2c16a..69cc2f91dc4c 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -353,7 +353,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_ssse3) +SYM_FUNC_START(sha256_transform_ssse3) .align 32 pushq %rbx pushq %r12 @@ -471,7 +471,7 @@ done_hash: popq %rbx ret -ENDPROC(sha256_transform_ssse3) +SYM_FUNC_END(sha256_transform_ssse3) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index fb58f58ecfbc..7abade04a3a3 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -97,7 +97,7 @@ .text .align 32 -ENTRY(sha256_ni_transform) +SYM_FUNC_START(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash @@ -327,7 +327,7 @@ ENTRY(sha256_ni_transform) .Ldone_hash: ret -ENDPROC(sha256_ni_transform) +SYM_FUNC_END(sha256_ni_transform) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 39235fefe6f7..3704ddd7e5d5 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -277,7 +277,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. 
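#
# (For orientation, the glue code is assumed to call this as roughly
#	sha512_transform_avx(u64 *digest, const u8 *data, u64 nblocks);
#  -- a hedged spelling; only the symbol annotations change below.)
#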
# L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_avx) +SYM_FUNC_START(sha512_transform_avx) cmp $0, msglen je nowork @@ -365,7 +365,7 @@ updateblock: nowork: ret -ENDPROC(sha512_transform_avx) +SYM_FUNC_END(sha512_transform_avx) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index b16d56005162..80d830e7ee09 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -569,7 +569,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_rorx) +SYM_FUNC_START(sha512_transform_rorx) # Allocate Stack Space mov %rsp, %rax sub $frame_size, %rsp @@ -682,7 +682,7 @@ done_hash: # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp ret -ENDPROC(sha512_transform_rorx) +SYM_FUNC_END(sha512_transform_rorx) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index 66bbd9058a90..838f984e95d9 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -275,7 +275,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks. ######################################################################## -ENTRY(sha512_transform_ssse3) +SYM_FUNC_START(sha512_transform_ssse3) cmp $0, msglen je nowork @@ -364,7 +364,7 @@ updateblock: nowork: ret -ENDPROC(sha512_transform_ssse3) +SYM_FUNC_END(sha512_transform_ssse3) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index 588f0a2f63ab..a5151393bb2f 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -315,7 +315,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8) ret; SYM_FUNC_END(__twofish_dec_blk8) -ENTRY(twofish_ecb_enc_8way) +SYM_FUNC_START(twofish_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -333,9 +333,9 @@ ENTRY(twofish_ecb_enc_8way) FRAME_END ret; -ENDPROC(twofish_ecb_enc_8way) +SYM_FUNC_END(twofish_ecb_enc_8way) -ENTRY(twofish_ecb_dec_8way) +SYM_FUNC_START(twofish_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -353,9 +353,9 @@ ENTRY(twofish_ecb_dec_8way) FRAME_END ret; -ENDPROC(twofish_ecb_dec_8way) +SYM_FUNC_END(twofish_ecb_dec_8way) -ENTRY(twofish_cbc_dec_8way) +SYM_FUNC_START(twofish_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -378,9 +378,9 @@ ENTRY(twofish_cbc_dec_8way) FRAME_END ret; -ENDPROC(twofish_cbc_dec_8way) +SYM_FUNC_END(twofish_cbc_dec_8way) -ENTRY(twofish_ctr_8way) +SYM_FUNC_START(twofish_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -405,9 +405,9 @@ ENTRY(twofish_ctr_8way) FRAME_END ret; -ENDPROC(twofish_ctr_8way) +SYM_FUNC_END(twofish_ctr_8way) -ENTRY(twofish_xts_enc_8way) +SYM_FUNC_START(twofish_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -429,9 +429,9 @@ ENTRY(twofish_xts_enc_8way) FRAME_END ret; -ENDPROC(twofish_xts_enc_8way) +SYM_FUNC_END(twofish_xts_enc_8way) -ENTRY(twofish_xts_dec_8way) +SYM_FUNC_START(twofish_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -453,4 +453,4 @@ ENTRY(twofish_xts_dec_8way) FRAME_END 
ret; -ENDPROC(twofish_xts_dec_8way) +SYM_FUNC_END(twofish_xts_dec_8way) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index e495e07c7f1b..fc23552afe37 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -220,7 +220,7 @@ rorq $32, RAB2; \ outunpack3(mov, RIO, 2, RAB, 2); -ENTRY(__twofish_enc_blk_3way) +SYM_FUNC_START(__twofish_enc_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -267,9 +267,9 @@ ENTRY(__twofish_enc_blk_3way) popq %r12; popq %r13; ret; -ENDPROC(__twofish_enc_blk_3way) +SYM_FUNC_END(__twofish_enc_blk_3way) -ENTRY(twofish_dec_blk_3way) +SYM_FUNC_START(twofish_dec_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -302,4 +302,4 @@ ENTRY(twofish_dec_blk_3way) popq %r12; popq %r13; ret; -ENDPROC(twofish_dec_blk_3way) +SYM_FUNC_END(twofish_dec_blk_3way) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S index ecef2cb9f43f..d2e56232494a 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S @@ -202,7 +202,7 @@ xor %r8d, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +SYM_FUNC_START(twofish_enc_blk) pushq R1 /* %rdi contains the ctx address */ @@ -253,9 +253,9 @@ ENTRY(twofish_enc_blk) popq R1 movl $1,%eax ret -ENDPROC(twofish_enc_blk) +SYM_FUNC_END(twofish_enc_blk) -ENTRY(twofish_dec_blk) +SYM_FUNC_START(twofish_dec_blk) pushq R1 /* %rdi contains the ctx address */ @@ -305,4 +305,4 @@ ENTRY(twofish_dec_blk) popq R1 movl $1,%eax ret -ENDPROC(twofish_dec_blk) +SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 13e4fe378e5a..d58c01239457 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -15,7 +15,7 @@ * at the top of the kernel process stack. * * Some macro usage: - * - ENTRY/END: Define functions in the symbol table. + * - SYM_FUNC_START/END:Define functions in the symbol table. * - TRACE_IRQ_*: Trace hardirq state for lock debugging. * - idtentry: Define exception entry points. */ @@ -1040,7 +1040,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 * Reload gs selector with exception handling * edi: new selector */ -ENTRY(native_load_gs_index) +SYM_FUNC_START(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) @@ -1054,7 +1054,7 @@ ENTRY(native_load_gs_index) popfq FRAME_END ret -ENDPROC(native_load_gs_index) +SYM_FUNC_END(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, .Lbad_gs) @@ -1075,7 +1075,7 @@ SYM_CODE_END(.Lbad_gs) .previous /* Call softirq on interrupt stack. Interrupts are off. */ -ENTRY(do_softirq_own_stack) +SYM_FUNC_START(do_softirq_own_stack) pushq %rbp mov %rsp, %rbp ENTER_IRQ_STACK regs=0 old_rsp=%r11 @@ -1083,7 +1083,7 @@ ENTRY(do_softirq_own_stack) LEAVE_IRQ_STACK regs=0 leaveq ret -ENDPROC(do_softirq_own_stack) +SYM_FUNC_END(do_softirq_own_stack) #ifdef CONFIG_XEN_PV idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index da296435676e..f1d3ccae5dd5 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -46,7 +46,7 @@ * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_compat) +SYM_FUNC_START(entry_SYSENTER_compat) /* Interrupts are off on entry. 
*/ SWAPGS @@ -147,7 +147,7 @@ ENTRY(entry_SYSENTER_compat) popfq jmp .Lsysenter_flags_fixed SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) -ENDPROC(entry_SYSENTER_compat) +SYM_FUNC_END(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 462a20f386e0..c8daa92f38dc 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -14,7 +14,7 @@ /* * Hooray, we are in Long 64-bit mode (but still running in low memory) */ -ENTRY(wakeup_long64) +SYM_FUNC_START(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax @@ -40,9 +40,9 @@ ENTRY(wakeup_long64) movq saved_rip, %rax jmp *%rax -ENDPROC(wakeup_long64) +SYM_FUNC_END(wakeup_long64) -ENTRY(do_suspend_lowlevel) +SYM_FUNC_START(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax @@ -125,7 +125,7 @@ ENTRY(do_suspend_lowlevel) addq $8, %rsp FRAME_END jmp restore_processor_state -ENDPROC(do_suspend_lowlevel) +SYM_FUNC_END(do_suspend_lowlevel) .data saved_rbp: .quad 0 diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 60f894b018e0..16deae706950 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -132,11 +132,11 @@ EXPORT_SYMBOL(__fentry__) #ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(function_hook) +SYM_FUNC_START(function_hook) retq -ENDPROC(function_hook) +SYM_FUNC_END(function_hook) -ENTRY(ftrace_caller) +SYM_FUNC_START(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs @@ -170,9 +170,9 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) */ WEAK(ftrace_stub) retq -ENDPROC(ftrace_caller) +SYM_FUNC_END(ftrace_caller) -ENTRY(ftrace_regs_caller) +SYM_FUNC_START(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq @@ -243,12 +243,12 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) jmp ftrace_epilogue -ENDPROC(ftrace_regs_caller) +SYM_FUNC_END(ftrace_regs_caller) #else /* ! 
CONFIG_DYNAMIC_FTRACE */ -ENTRY(function_hook) +SYM_FUNC_START(function_hook) cmpq $ftrace_stub, ftrace_trace_function jnz trace @@ -279,11 +279,11 @@ trace: restore_mcount_regs jmp fgraph_trace -ENDPROC(function_hook) +SYM_FUNC_END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) +SYM_FUNC_START(ftrace_graph_caller) /* Saves rbp into %rdx and fills first parameter */ save_mcount_regs @@ -294,7 +294,7 @@ ENTRY(ftrace_graph_caller) restore_mcount_regs retq -ENDPROC(ftrace_graph_caller) +SYM_FUNC_END(ftrace_graph_caller) SYM_CODE_START(return_to_handler) UNWIND_HINT_EMPTY diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S index ddeeaac8adda..0db0375235b4 100644 --- a/arch/x86/kernel/irqflags.S +++ b/arch/x86/kernel/irqflags.S @@ -7,20 +7,20 @@ /* * unsigned long native_save_fl(void) */ -ENTRY(native_save_fl) +SYM_FUNC_START(native_save_fl) pushf pop %_ASM_AX ret -ENDPROC(native_save_fl) +SYM_FUNC_END(native_save_fl) EXPORT_SYMBOL(native_save_fl) /* * void native_restore_fl(unsigned long flags) * %eax/%rdi: flags */ -ENTRY(native_restore_fl) +SYM_FUNC_START(native_restore_fl) push %_ASM_ARG1 popf ret -ENDPROC(native_restore_fl) +SYM_FUNC_END(native_restore_fl) EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 751a384c2eb0..81ada2ce99e7 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -43,7 +43,7 @@ * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump * to vmx_vmexit. */ -ENTRY(vmx_vmenter) +SYM_FUNC_START(vmx_vmenter) /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ je 2f @@ -65,7 +65,7 @@ ENTRY(vmx_vmenter) _ASM_EXTABLE(1b, 5b) _ASM_EXTABLE(2b, 5b) -ENDPROC(vmx_vmenter) +SYM_FUNC_END(vmx_vmenter) /** * vmx_vmexit - Handle a VMX VM-Exit @@ -77,7 +77,7 @@ ENDPROC(vmx_vmenter) * here after hardware loads the host's state, i.e. this is the destination * referred to by VMCS.HOST_RIP. */ -ENTRY(vmx_vmexit) +SYM_FUNC_START(vmx_vmexit) #ifdef CONFIG_RETPOLINE ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE /* Preserve guest's RAX, it's used to stuff the RSB. */ @@ -90,7 +90,7 @@ ENTRY(vmx_vmexit) .Lvmexit_skip_rsb: #endif ret -ENDPROC(vmx_vmexit) +SYM_FUNC_END(vmx_vmexit) /** * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode @@ -101,7 +101,7 @@ ENDPROC(vmx_vmexit) * Returns: * 0 on VM-Exit, 1 on VM-Fail */ -ENTRY(__vmx_vcpu_run) +SYM_FUNC_START(__vmx_vcpu_run) push %_ASM_BP mov %_ASM_SP, %_ASM_BP #ifdef CONFIG_X86_64 @@ -233,4 +233,4 @@ ENTRY(__vmx_vcpu_run) /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. 
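 * (The "2:" path below returns 1, matching the "1 on VM-Fail" return
 *  value documented above.)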
*/ 2: mov $1, %eax jmp 1b -ENDPROC(__vmx_vcpu_run) +SYM_FUNC_END(__vmx_vcpu_run) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4df90c9ea383..74256bd193da 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -280,7 +280,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, #define ARGBASE 16 #define FP 12 -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) subl $4,%esp pushl %edi pushl %esi @@ -398,7 +398,7 @@ DST( movb %cl, (%edi) ) popl %edi popl %ecx # equivalent to addl $4,%esp ret -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) #else @@ -416,7 +416,7 @@ ENDPROC(csum_partial_copy_generic) #define ARGBASE 12 -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) pushl %ebx pushl %edi pushl %esi @@ -483,7 +483,7 @@ DST( movb %dl, (%edi) ) popl %edi popl %ebx ret -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) #undef ROUND #undef ROUND1 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index 75a5a4515fa7..c4c7dd115953 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -13,15 +13,15 @@ * Zero a page. * %rdi - page */ -ENTRY(clear_page_rep) +SYM_FUNC_START(clear_page_rep) movl $4096/8,%ecx xorl %eax,%eax rep stosq ret -ENDPROC(clear_page_rep) +SYM_FUNC_END(clear_page_rep) EXPORT_SYMBOL_GPL(clear_page_rep) -ENTRY(clear_page_orig) +SYM_FUNC_START(clear_page_orig) xorl %eax,%eax movl $4096/64,%ecx .p2align 4 @@ -40,13 +40,13 @@ ENTRY(clear_page_orig) jnz .Lloop nop ret -ENDPROC(clear_page_orig) +SYM_FUNC_END(clear_page_orig) EXPORT_SYMBOL_GPL(clear_page_orig) -ENTRY(clear_page_erms) +SYM_FUNC_START(clear_page_erms) movl $4096,%ecx xorl %eax,%eax rep stosb ret -ENDPROC(clear_page_erms) +SYM_FUNC_END(clear_page_erms) EXPORT_SYMBOL_GPL(clear_page_erms) diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S index d63185698a23..3542502faa3b 100644 --- a/arch/x86/lib/cmpxchg16b_emu.S +++ b/arch/x86/lib/cmpxchg16b_emu.S @@ -13,7 +13,7 @@ * %rcx : high 64 bits of new value * %al : Operation successful */ -ENTRY(this_cpu_cmpxchg16b_emu) +SYM_FUNC_START(this_cpu_cmpxchg16b_emu) # # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not @@ -44,4 +44,4 @@ ENTRY(this_cpu_cmpxchg16b_emu) xor %al,%al ret -ENDPROC(this_cpu_cmpxchg16b_emu) +SYM_FUNC_END(this_cpu_cmpxchg16b_emu) diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S index 691d80e97488..ca01ed6029f4 100644 --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -13,7 +13,7 @@ * %ebx : low 32 bits of new value * %ecx : high 32 bits of new value */ -ENTRY(cmpxchg8b_emu) +SYM_FUNC_START(cmpxchg8b_emu) # # Emulate 'cmpxchg8b (%esi)' on UP except we don't @@ -42,5 +42,5 @@ ENTRY(cmpxchg8b_emu) popfl ret -ENDPROC(cmpxchg8b_emu) +SYM_FUNC_END(cmpxchg8b_emu) EXPORT_SYMBOL(cmpxchg8b_emu) diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index f505870bd93b..2402d4c489d2 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -13,12 +13,12 @@ * prefetch distance based on SMP/UP. 
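 * (C-side sketch, assumed from the x86 headers: void copy_page(void *to,
 *  void *from). With X86_FEATURE_REP_GOOD the ALTERNATIVE below NOPs out
 *  the jmp and the rep movsq path runs; otherwise copy_page_regs is used.)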
*/ ALIGN -ENTRY(copy_page) +SYM_FUNC_START(copy_page) ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD movl $4096/8, %ecx rep movsq ret -ENDPROC(copy_page) +SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) SYM_FUNC_START_LOCAL(copy_page_regs) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 4a12b3c120bf..816f128a6d52 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -53,7 +53,7 @@ * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_generic_unrolled) +SYM_FUNC_START(copy_user_generic_unrolled) ASM_STAC cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ @@ -136,7 +136,7 @@ ENTRY(copy_user_generic_unrolled) _ASM_EXTABLE_UA(19b, 40b) _ASM_EXTABLE_UA(21b, 50b) _ASM_EXTABLE_UA(22b, 50b) -ENDPROC(copy_user_generic_unrolled) +SYM_FUNC_END(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) /* Some CPUs run faster using the string copy instructions. @@ -157,7 +157,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled) * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_generic_string) +SYM_FUNC_START(copy_user_generic_string) ASM_STAC cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ @@ -182,7 +182,7 @@ ENTRY(copy_user_generic_string) _ASM_EXTABLE_UA(1b, 11b) _ASM_EXTABLE_UA(3b, 12b) -ENDPROC(copy_user_generic_string) +SYM_FUNC_END(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) /* @@ -197,7 +197,7 @@ EXPORT_SYMBOL(copy_user_generic_string) * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_enhanced_fast_string) +SYM_FUNC_START(copy_user_enhanced_fast_string) ASM_STAC cmpl $64,%edx jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ @@ -214,7 +214,7 @@ ENTRY(copy_user_enhanced_fast_string) .previous _ASM_EXTABLE_UA(1b, 12b) -ENDPROC(copy_user_enhanced_fast_string) +SYM_FUNC_END(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* @@ -249,7 +249,7 @@ SYM_CODE_END(.Lcopy_user_handle_tail) * - Require 8-byte alignment when size is 8 bytes or larger. * - Require 4-byte alignment when size is 4 bytes. 
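 *
 * (Assumed C-side declaration, per the uaccess headers:
 *	long __copy_user_nocache(void *dst, const void __user *src,
 *				 unsigned size, int zerorest);
 *  only the ELF annotations change below.)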
*/ -ENTRY(__copy_user_nocache) +SYM_FUNC_START(__copy_user_nocache) ASM_STAC /* If size is less than 8 bytes, go to 4-byte copy */ @@ -388,5 +388,5 @@ ENTRY(__copy_user_nocache) _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy) _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy) _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy) -ENDPROC(__copy_user_nocache) +SYM_FUNC_END(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index a4a379e79259..3394a8ff7fd0 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -49,7 +49,7 @@ .endm -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) cmpl $3*64, %edx jle .Lignore @@ -225,4 +225,4 @@ ENTRY(csum_partial_copy_generic) jz .Lende movl $-EFAULT, (%rax) jmp .Lende -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index f9f59eb85635..c8a85b512796 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -36,7 +36,7 @@ #include .text -ENTRY(__get_user_1) +SYM_FUNC_START(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user @@ -47,10 +47,10 @@ ENTRY(__get_user_1) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_1) +SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) -ENTRY(__get_user_2) +SYM_FUNC_START(__get_user_2) add $1,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX @@ -63,10 +63,10 @@ ENTRY(__get_user_2) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_2) +SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) -ENTRY(__get_user_4) +SYM_FUNC_START(__get_user_4) add $3,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX @@ -79,10 +79,10 @@ ENTRY(__get_user_4) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_4) +SYM_FUNC_END(__get_user_4) EXPORT_SYMBOL(__get_user_4) -ENTRY(__get_user_8) +SYM_FUNC_START(__get_user_8) #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user @@ -111,7 +111,7 @@ ENTRY(__get_user_8) ASM_CLAC ret #endif -ENDPROC(__get_user_8) +SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index a14f9939c365..dbf8cc97b7f5 100644 --- a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -8,7 +8,7 @@ * unsigned int __sw_hweight32(unsigned int w) * %rdi: w */ -ENTRY(__sw_hweight32) +SYM_FUNC_START(__sw_hweight32) #ifdef CONFIG_X86_64 movl %edi, %eax # w @@ -33,10 +33,10 @@ ENTRY(__sw_hweight32) shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) ret -ENDPROC(__sw_hweight32) +SYM_FUNC_END(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) -ENTRY(__sw_hweight64) +SYM_FUNC_START(__sw_hweight64) #ifdef CONFIG_X86_64 pushq %rdi pushq %rdx @@ -79,5 +79,5 @@ ENTRY(__sw_hweight64) popl %ecx ret #endif -ENDPROC(__sw_hweight64) +SYM_FUNC_END(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64) diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S index a9bdf0805be0..cb5a1964506b 100644 --- a/arch/x86/lib/iomap_copy_64.S +++ b/arch/x86/lib/iomap_copy_64.S @@ -8,8 +8,8 @@ /* * override generic version in lib/iomap_copy.c */ -ENTRY(__iowrite32_copy) +SYM_FUNC_START(__iowrite32_copy) movl %edx,%ecx rep movsd ret -ENDPROC(__iowrite32_copy) +SYM_FUNC_END(__iowrite32_copy) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 3265b21e86c0..56b243b14c3a 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -193,7 +193,7 @@ MCSAFE_TEST_CTL * Note that we only catch machine 
checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ -ENTRY(__memcpy_mcsafe) +SYM_FUNC_START(__memcpy_mcsafe) cmpl $8, %edx /* Less than 8 bytes? Go to byte copy loop */ jb .L_no_whole_words @@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe) xorl %eax, %eax .L_done: ret -ENDPROC(__memcpy_mcsafe) +SYM_FUNC_END(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) .section .fixup, "ax" diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 50c1648311b3..337830d7a59c 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -27,7 +27,7 @@ .weak memmove SYM_FUNC_START_ALIAS(memmove) -ENTRY(__memmove) +SYM_FUNC_START(__memmove) /* Handle more 32 bytes in loop */ mov %rdi, %rax @@ -207,7 +207,7 @@ ENTRY(__memmove) movb %r11b, (%rdi) 13: retq -ENDPROC(__memmove) +SYM_FUNC_END(__memmove) SYM_FUNC_END_ALIAS(memmove) EXPORT_SYMBOL(__memmove) EXPORT_SYMBOL(memmove) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 564abf9ecedb..9ff15ee404a4 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -20,7 +20,7 @@ * rax original destination */ SYM_FUNC_START_ALIAS(memset) -ENTRY(__memset) +SYM_FUNC_START(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended * to use it when possible. If not available, use fast string instructions. @@ -43,7 +43,7 @@ ENTRY(__memset) rep stosb movq %r9,%rax ret -ENDPROC(__memset) +SYM_FUNC_END(__memset) SYM_FUNC_END_ALIAS(memset) EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset) diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S index ed33cbab3958..a2b9caa5274c 100644 --- a/arch/x86/lib/msr-reg.S +++ b/arch/x86/lib/msr-reg.S @@ -12,7 +12,7 @@ * */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +SYM_FUNC_START(\op\()_safe_regs) pushq %rbx pushq %r12 movq %rdi, %r10 /* Save pointer */ @@ -41,13 +41,13 @@ ENTRY(\op\()_safe_regs) jmp 2b _ASM_EXTABLE(1b, 3b) -ENDPROC(\op\()_safe_regs) +SYM_FUNC_END(\op\()_safe_regs) .endm #else /* X86_32 */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +SYM_FUNC_START(\op\()_safe_regs) pushl %ebx pushl %ebp pushl %esi @@ -83,7 +83,7 @@ ENTRY(\op\()_safe_regs) jmp 2b _ASM_EXTABLE(1b, 3b) -ENDPROC(\op\()_safe_regs) +SYM_FUNC_END(\op\()_safe_regs) .endm #endif diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index a9391d772c81..7c7c92db8497 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -34,7 +34,7 @@ #define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX .text -ENTRY(__put_user_1) +SYM_FUNC_START(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae .Lbad_put_user @@ -43,10 +43,10 @@ ENTRY(__put_user_1) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_1) +SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) -ENTRY(__put_user_2) +SYM_FUNC_START(__put_user_2) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX @@ -57,10 +57,10 @@ ENTRY(__put_user_2) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_2) +SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) -ENTRY(__put_user_4) +SYM_FUNC_START(__put_user_4) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX @@ -71,10 +71,10 @@ ENTRY(__put_user_4) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_4) +SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) -ENTRY(__put_user_8) +SYM_FUNC_START(__put_user_8) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX @@ -88,7 +88,7 @@ ENTRY(__put_user_8) xor %eax,%eax ASM_CLAC RET -ENDPROC(__put_user_8) +SYM_FUNC_END(__put_user_8) 
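/*
 * (A rough sketch of what the paired macros are expected to emit for the
 * symbol above, assuming the generic linkage.h definitions -- alignment
 * and type spelling can be overridden per architecture:
 *
 *	.globl __put_user_8			# SYM_L_GLOBAL
 *	ALIGN					# SYM_A_ALIGN
 *	__put_user_8:
 *		...
 *	.type __put_user_8 STT_FUNC		# emitted by SYM_FUNC_END
 *	.size __put_user_8, .-__put_user_8
 *
 * so tooling now sees a bounded STT_FUNC symbol for every function.)
 */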
EXPORT_SYMBOL(__put_user_8) SYM_CODE_START_LOCAL(.Lbad_put_user_clac) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index c909961e678a..363ec132df7e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,11 +11,11 @@ .macro THUNK reg .section .text.__x86.indirect_thunk -ENTRY(__x86_indirect_thunk_\reg) +SYM_FUNC_START(__x86_indirect_thunk_\reg) CFI_STARTPROC JMP_NOSPEC %\reg CFI_ENDPROC -ENDPROC(__x86_indirect_thunk_\reg) +SYM_FUNC_END(__x86_indirect_thunk_\reg) .endm /* diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S index 6d71481a1e70..106ead05bbe3 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -16,7 +16,7 @@ .text .code64 -ENTRY(sme_encrypt_execute) +SYM_FUNC_START(sme_encrypt_execute) /* * Entry parameters: @@ -66,9 +66,9 @@ ENTRY(sme_encrypt_execute) pop %rbp ret -ENDPROC(sme_encrypt_execute) +SYM_FUNC_END(sme_encrypt_execute) -ENTRY(__enc_copy) +SYM_FUNC_START(__enc_copy) /* * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since @@ -153,4 +153,4 @@ ENTRY(__enc_copy) ret .L__enc_copy_end: -ENDPROC(__enc_copy) +SYM_FUNC_END(__enc_copy) diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 74628ec78f29..b1d2313fe3bf 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -39,7 +39,7 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp -ENTRY(efi_call) +SYM_FUNC_START(efi_call) pushq %rbp movq %rsp, %rbp SAVE_XMM @@ -55,4 +55,4 @@ ENTRY(efi_call) RESTORE_XMM popq %rbp ret -ENDPROC(efi_call) +SYM_FUNC_END(efi_call) diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index d677a7eb2d0a..3189f1394701 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -25,7 +25,7 @@ .text .code64 -ENTRY(efi64_thunk) +SYM_FUNC_START(efi64_thunk) push %rbp push %rbx @@ -60,7 +60,7 @@ ENTRY(efi64_thunk) pop %rbx pop %rbp retq -ENDPROC(efi64_thunk) +SYM_FUNC_END(efi64_thunk) /* * We run this function from the 1:1 mapping. diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S index 4057cd5af7e2..7918b8415f13 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -22,7 +22,7 @@ #include #include -ENTRY(swsusp_arch_suspend) +SYM_FUNC_START(swsusp_arch_suspend) movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) @@ -50,7 +50,7 @@ ENTRY(swsusp_arch_suspend) call swsusp_save FRAME_END ret -ENDPROC(swsusp_arch_suspend) +SYM_FUNC_END(swsusp_arch_suspend) SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ @@ -102,7 +102,7 @@ SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE -ENTRY(restore_registers) +SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movq %r9, %cr3 @@ -144,4 +144,4 @@ ENTRY(restore_registers) movq %rax, in_suspend(%rip) ret -ENDPROC(restore_registers) +SYM_FUNC_END(restore_registers) diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index be104eef80be..508fe204520b 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -19,7 +19,7 @@ * event status with one and operation. If there are pending events, * then enter the hypervisor to get them handled. 
*/ -ENTRY(xen_irq_enable_direct) +SYM_FUNC_START(xen_irq_enable_direct) FRAME_BEGIN /* Unmask events */ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask @@ -38,17 +38,17 @@ ENTRY(xen_irq_enable_direct) 1: FRAME_END ret - ENDPROC(xen_irq_enable_direct) +SYM_FUNC_END(xen_irq_enable_direct) /* * Disabling events is simply a matter of making the event mask * non-zero. */ -ENTRY(xen_irq_disable_direct) +SYM_FUNC_START(xen_irq_disable_direct) movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask ret -ENDPROC(xen_irq_disable_direct) +SYM_FUNC_END(xen_irq_disable_direct) /* * (xen_)save_fl is used to get the current interrupt enable status. @@ -59,12 +59,12 @@ ENDPROC(xen_irq_disable_direct) * undefined. We need to toggle the state of the bit, because Xen and * x86 use opposite senses (mask vs enable). */ -ENTRY(xen_save_fl_direct) +SYM_FUNC_START(xen_save_fl_direct) testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask setz %ah addb %ah, %ah ret - ENDPROC(xen_save_fl_direct) +SYM_FUNC_END(xen_save_fl_direct) /* @@ -74,7 +74,7 @@ ENTRY(xen_save_fl_direct) * interrupt mask state, it checks for unmasked pending events and * enters the hypervisor to get them delivered if so. */ -ENTRY(xen_restore_fl_direct) +SYM_FUNC_START(xen_restore_fl_direct) FRAME_BEGIN #ifdef CONFIG_X86_64 testw $X86_EFLAGS_IF, %di @@ -95,14 +95,14 @@ ENTRY(xen_restore_fl_direct) 1: FRAME_END ret - ENDPROC(xen_restore_fl_direct) +SYM_FUNC_END(xen_restore_fl_direct) /* * Force an event check by making a hypercall, but preserve regs * before making the call. */ -ENTRY(check_events) +SYM_FUNC_START(check_events) FRAME_BEGIN #ifdef CONFIG_X86_32 push %eax @@ -135,19 +135,19 @@ ENTRY(check_events) #endif FRAME_END ret -ENDPROC(check_events) +SYM_FUNC_END(check_events) -ENTRY(xen_read_cr2) +SYM_FUNC_START(xen_read_cr2) FRAME_BEGIN _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX FRAME_END ret - ENDPROC(xen_read_cr2); +SYM_FUNC_END(xen_read_cr2); -ENTRY(xen_read_cr2_direct) +SYM_FUNC_START(xen_read_cr2_direct) FRAME_BEGIN _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX FRAME_END ret - ENDPROC(xen_read_cr2_direct); +SYM_FUNC_END(xen_read_cr2_direct); diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S index 0060120f51dd..0a0fd168683a 100644 --- a/arch/x86/xen/xen-asm_64.S +++ b/arch/x86/xen/xen-asm_64.S @@ -126,7 +126,7 @@ SYM_CODE_END(xen_sysret64) */ /* Normal 64-bit system call target */ -ENTRY(xen_syscall_target) +SYM_FUNC_START(xen_syscall_target) popq %rcx popq %r11 @@ -139,12 +139,12 @@ ENTRY(xen_syscall_target) movq $__USER_CS, 1*8(%rsp) jmp entry_SYSCALL_64_after_hwframe -ENDPROC(xen_syscall_target) +SYM_FUNC_END(xen_syscall_target) #ifdef CONFIG_IA32_EMULATION /* 32-bit compat syscall target */ -ENTRY(xen_syscall32_target) +SYM_FUNC_START(xen_syscall32_target) popq %rcx popq %r11 @@ -157,25 +157,25 @@ ENTRY(xen_syscall32_target) movq $__USER32_CS, 1*8(%rsp) jmp entry_SYSCALL_compat_after_hwframe -ENDPROC(xen_syscall32_target) +SYM_FUNC_END(xen_syscall32_target) /* 32-bit compat sysenter target */ -ENTRY(xen_sysenter_target) +SYM_FUNC_START(xen_sysenter_target) mov 0*8(%rsp), %rcx mov 1*8(%rsp), %r11 mov 5*8(%rsp), %rsp jmp entry_SYSENTER_compat -ENDPROC(xen_sysenter_target) +SYM_FUNC_END(xen_sysenter_target) #else /* !CONFIG_IA32_EMULATION */ SYM_FUNC_START_ALIAS(xen_syscall32_target) -ENTRY(xen_sysenter_target) +SYM_FUNC_START(xen_sysenter_target) lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax pushq $0 jmp 
hypercall_iret -ENDPROC(xen_sysenter_target) +SYM_FUNC_END(xen_sysenter_target) SYM_FUNC_END_ALIAS(xen_syscall32_target) #endif /* CONFIG_IA32_EMULATION */ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index cb1108dde385..19f3d796ab5b 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -114,11 +114,13 @@ #endif #endif +#ifndef CONFIG_X86_64 #ifndef ENTRY /* deprecated, use SYM_FUNC_START */ #define ENTRY(name) \ SYM_FUNC_START(name) #endif +#endif /* CONFIG_X86_64 */ #endif /* LINKER_SCRIPT */ #ifndef WEAK @@ -133,6 +135,7 @@ .size name, .-name #endif +#ifndef CONFIG_X86_64 /* If symbol 'name' is treated as a subroutine (gets called, and returns) * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of * static analysis tools such as stack depth analyzer. @@ -142,6 +145,7 @@ #define ENDPROC(name) \ SYM_FUNC_END(name) #endif +#endif /* CONFIG_X86_64 */ /* === generic annotations === */ -- cgit v1.2.3 From 5e63306f1629527799e34a9814dd8035df6ca854 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:06 +0200 Subject: x86/asm/32: Change all ENTRY+END to SYM_CODE_* Change all assembly code which is marked using END (and not ENDPROC) to appropriate new markings SYM_CODE_START and SYM_CODE_END. And since the last user of END on X86 is gone now, make sure that END is not defined there. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: linux-arch@vger.kernel.org Cc: Mark Rutland Cc: Peter Zijlstra Cc: "Rafael J. Wysocki" Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-27-jslaby@suse.cz --- arch/x86/entry/entry_32.S | 104 ++++++++++++++++++++++---------------------- arch/x86/kernel/ftrace_32.S | 8 ++-- include/linux/linkage.h | 2 + 3 files changed, 58 insertions(+), 56 deletions(-) (limited to 'include') diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 64fe7aa50ad2..0ecc12fcfc05 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -709,7 +709,7 @@ * %eax: prev task * %edx: next task */ -ENTRY(__switch_to_asm) +SYM_CODE_START(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame @@ -748,7 +748,7 @@ ENTRY(__switch_to_asm) popl %ebp jmp __switch_to -END(__switch_to_asm) +SYM_CODE_END(__switch_to_asm) /* * The unwinder expects the last frame on the stack to always be at the same @@ -774,7 +774,7 @@ ENDPROC(schedule_tail_wrapper) * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx @@ -797,7 +797,7 @@ ENTRY(ret_from_fork) */ movl $0, PT_EAX(%esp) jmp 2b -END(ret_from_fork) +SYM_CODE_END(ret_from_fork) /* * Return to user mode is not as complex as all this looks, @@ -1161,7 +1161,7 @@ ENDPROC(entry_INT80_32) * We pack 1 stub into every 8-byte block. 
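 * (Encoding each vector as ~vector+0x80 keeps the push immediate in
 *  signed-byte range, so every push+jmp stub fits its 8-byte slot.)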
*/ .align 8 -ENTRY(irq_entries_start) +SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1169,11 +1169,11 @@ ENTRY(irq_entries_start) jmp common_interrupt .align 8 .endr -END(irq_entries_start) +SYM_CODE_END(irq_entries_start) #ifdef CONFIG_X86_LOCAL_APIC .align 8 -ENTRY(spurious_entries_start) +SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1181,7 +1181,7 @@ ENTRY(spurious_entries_start) jmp common_spurious .align 8 .endr -END(spurious_entries_start) +SYM_CODE_END(spurious_entries_start) SYM_CODE_START_LOCAL(common_spurious) ASM_CLAC @@ -1230,14 +1230,14 @@ ENDPROC(name) /* The include is where all of the SMP etc. interrupts come from */ #include -ENTRY(coprocessor_error) +SYM_CODE_START(coprocessor_error) ASM_CLAC pushl $0 pushl $do_coprocessor_error jmp common_exception -END(coprocessor_error) +SYM_CODE_END(coprocessor_error) -ENTRY(simd_coprocessor_error) +SYM_CODE_START(simd_coprocessor_error) ASM_CLAC pushl $0 #ifdef CONFIG_X86_INVD_BUG @@ -1249,96 +1249,96 @@ ENTRY(simd_coprocessor_error) pushl $do_simd_coprocessor_error #endif jmp common_exception -END(simd_coprocessor_error) +SYM_CODE_END(simd_coprocessor_error) -ENTRY(device_not_available) +SYM_CODE_START(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp common_exception -END(device_not_available) +SYM_CODE_END(device_not_available) #ifdef CONFIG_PARAVIRT -ENTRY(native_iret) +SYM_CODE_START(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) -END(native_iret) +SYM_CODE_END(native_iret) #endif -ENTRY(overflow) +SYM_CODE_START(overflow) ASM_CLAC pushl $0 pushl $do_overflow jmp common_exception -END(overflow) +SYM_CODE_END(overflow) -ENTRY(bounds) +SYM_CODE_START(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp common_exception -END(bounds) +SYM_CODE_END(bounds) -ENTRY(invalid_op) +SYM_CODE_START(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp common_exception -END(invalid_op) +SYM_CODE_END(invalid_op) -ENTRY(coprocessor_segment_overrun) +SYM_CODE_START(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp common_exception -END(coprocessor_segment_overrun) +SYM_CODE_END(coprocessor_segment_overrun) -ENTRY(invalid_TSS) +SYM_CODE_START(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp common_exception -END(invalid_TSS) +SYM_CODE_END(invalid_TSS) -ENTRY(segment_not_present) +SYM_CODE_START(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp common_exception -END(segment_not_present) +SYM_CODE_END(segment_not_present) -ENTRY(stack_segment) +SYM_CODE_START(stack_segment) ASM_CLAC pushl $do_stack_segment jmp common_exception -END(stack_segment) +SYM_CODE_END(stack_segment) -ENTRY(alignment_check) +SYM_CODE_START(alignment_check) ASM_CLAC pushl $do_alignment_check jmp common_exception -END(alignment_check) +SYM_CODE_END(alignment_check) -ENTRY(divide_error) +SYM_CODE_START(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp common_exception -END(divide_error) +SYM_CODE_END(divide_error) #ifdef CONFIG_X86_MCE -ENTRY(machine_check) +SYM_CODE_START(machine_check) ASM_CLAC pushl $0 pushl machine_check_vector jmp common_exception -END(machine_check) +SYM_CODE_END(machine_check) #endif -ENTRY(spurious_interrupt_bug) +SYM_CODE_START(spurious_interrupt_bug) ASM_CLAC pushl $0 
pushl $do_spurious_interrupt_bug jmp common_exception -END(spurious_interrupt_bug) +SYM_CODE_END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV ENTRY(xen_hypervisor_callback) @@ -1442,11 +1442,11 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, #endif /* CONFIG_HYPERV */ -ENTRY(page_fault) +SYM_CODE_START(page_fault) ASM_CLAC pushl $do_page_fault jmp common_exception_read_cr2 -END(page_fault) +SYM_CODE_END(page_fault) SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2) /* the function address is in %gs's slot on the stack */ @@ -1495,7 +1495,7 @@ SYM_CODE_START_LOCAL_NOALIGN(common_exception) jmp ret_from_exception SYM_CODE_END(common_exception) -ENTRY(debug) +SYM_CODE_START(debug) /* * Entry from sysenter is now handled in common_exception */ @@ -1503,7 +1503,7 @@ ENTRY(debug) pushl $-1 # mark this as an int pushl $do_debug jmp common_exception -END(debug) +SYM_CODE_END(debug) /* * NMI is doubly nasty. It can happen on the first instruction of @@ -1512,7 +1512,7 @@ END(debug) * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ -ENTRY(nmi) +SYM_CODE_START(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 @@ -1577,9 +1577,9 @@ ENTRY(nmi) lss 12+4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif -END(nmi) +SYM_CODE_END(nmi) -ENTRY(int3) +SYM_CODE_START(int3) ASM_CLAC pushl $-1 # mark this as an int @@ -1590,22 +1590,22 @@ ENTRY(int3) movl %esp, %eax # pt_regs pointer call do_int3 jmp ret_from_exception -END(int3) +SYM_CODE_END(int3) -ENTRY(general_protection) +SYM_CODE_START(general_protection) pushl $do_general_protection jmp common_exception -END(general_protection) +SYM_CODE_END(general_protection) #ifdef CONFIG_KVM_GUEST -ENTRY(async_page_fault) +SYM_CODE_START(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp common_exception_read_cr2 -END(async_page_fault) +SYM_CODE_END(async_page_fault) #endif -ENTRY(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. 
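 * (Zeroing %ebp below terminates the frame-pointer chain, which is what
 *  stops a naive unwinder.)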
*/ xorl %ebp, %ebp @@ -1614,4 +1614,4 @@ ENTRY(rewind_stack_do_exit) call do_exit 1: jmp 1b -END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_do_exit) diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index a43ed4c0402d..b4f495bbd5a1 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -25,7 +25,7 @@ SYM_FUNC_START(function_hook) ret SYM_FUNC_END(function_hook) -ENTRY(ftrace_caller) +SYM_CODE_START(ftrace_caller) #ifdef CONFIG_FRAME_POINTER /* @@ -87,7 +87,7 @@ ftrace_graph_call: /* This is weak to keep gas from relaxing the jumps */ WEAK(ftrace_stub) ret -END(ftrace_caller) +SYM_CODE_END(ftrace_caller) SYM_CODE_START(ftrace_regs_caller) /* @@ -166,7 +166,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) +SYM_CODE_START(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx @@ -180,7 +180,7 @@ ENTRY(ftrace_graph_caller) popl %ecx popl %eax ret -END(ftrace_graph_caller) +SYM_CODE_END(ftrace_graph_caller) .globl return_to_handler return_to_handler: diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 19f3d796ab5b..5ffcf72c8f87 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -129,11 +129,13 @@ SYM_FUNC_START_WEAK(name) #endif +#ifndef CONFIG_X86 #ifndef END /* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */ #define END(name) \ .size name, .-name #endif +#endif /* CONFIG_X86 */ #ifndef CONFIG_X86_64 /* If symbol 'name' is treated as a subroutine (gets called, and returns) -- cgit v1.2.3 From 6d685e5318e51b843ca50adeca50dc6300bf2cbb Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:07 +0200 Subject: x86/asm/32: Change all ENTRY+ENDPROC to SYM_FUNC_* These are all functions which are invoked from elsewhere, so annotate them as global using the new SYM_FUNC_START and their ENDPROC's by SYM_FUNC_END. Now, ENTRY/ENDPROC can be forced to be undefined on X86, so do so. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Allison Randal Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Ard Biesheuvel Cc: Bill Metzenthen Cc: Boris Ostrovsky Cc: Darren Hart Cc: "David S. Miller" Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Cc: linux-crypto@vger.kernel.org Cc: linux-efi Cc: linux-efi@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: Mark Rutland Cc: Matt Fleming Cc: Pavel Machek Cc: platform-driver-x86@vger.kernel.org Cc: "Rafael J. 
Wysocki" Cc: Thomas Gleixner Cc: Will Deacon Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-28-jslaby@suse.cz --- arch/x86/boot/compressed/efi_stub_32.S | 4 ++-- arch/x86/boot/compressed/head_32.S | 12 +++++------ arch/x86/crypto/serpent-sse2-i586-asm_32.S | 8 ++++---- arch/x86/crypto/twofish-i586-asm_32.S | 8 ++++---- arch/x86/entry/entry_32.S | 24 +++++++++++----------- arch/x86/kernel/head_32.S | 16 +++++++-------- arch/x86/lib/atomic64_386_32.S | 4 ++-- arch/x86/lib/atomic64_cx8_32.S | 32 +++++++++++++++--------------- arch/x86/lib/checksum_32.S | 8 ++++---- arch/x86/math-emu/div_Xsig.S | 4 ++-- arch/x86/math-emu/div_small.S | 4 ++-- arch/x86/math-emu/mul_Xsig.S | 12 +++++------ arch/x86/math-emu/polynom_Xsig.S | 4 ++-- arch/x86/math-emu/reg_norm.S | 8 ++++---- arch/x86/math-emu/reg_round.S | 4 ++-- arch/x86/math-emu/reg_u_add.S | 4 ++-- arch/x86/math-emu/reg_u_div.S | 4 ++-- arch/x86/math-emu/reg_u_mul.S | 4 ++-- arch/x86/math-emu/reg_u_sub.S | 4 ++-- arch/x86/math-emu/round_Xsig.S | 8 ++++---- arch/x86/math-emu/shr_Xsig.S | 4 ++-- arch/x86/math-emu/wm_shrx.S | 8 ++++---- arch/x86/math-emu/wm_sqrt.S | 4 ++-- arch/x86/platform/efi/efi_stub_32.S | 4 ++-- arch/x86/power/hibernate_asm_32.S | 8 ++++---- include/linux/linkage.h | 8 ++------ 26 files changed, 104 insertions(+), 108 deletions(-) (limited to 'include') diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S index 257e341fd2c8..ed6c351d34ed 100644 --- a/arch/x86/boot/compressed/efi_stub_32.S +++ b/arch/x86/boot/compressed/efi_stub_32.S @@ -24,7 +24,7 @@ */ .text -ENTRY(efi_call_phys) +SYM_FUNC_START(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found @@ -77,7 +77,7 @@ ENTRY(efi_call_phys) movl saved_return_addr(%edx), %ecx pushl %ecx ret -ENDPROC(efi_call_phys) +SYM_FUNC_END(efi_call_phys) .previous .data diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index f9e2a80bd699..f2dfd6d083ef 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -61,7 +61,7 @@ .hidden _egot __HEAD -ENTRY(startup_32) +SYM_FUNC_START(startup_32) cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking @@ -142,14 +142,14 @@ ENTRY(startup_32) */ leal .Lrelocated(%ebx), %eax jmp *%eax -ENDPROC(startup_32) +SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_STUB /* * We don't need the return address, so set up the stack so efi_main() can find * its arguments. 
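 * (Hence the "add $0x4, %esp" just below, which discards the return
 *  address so the arguments sit at the top of the stack.)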
*/ -ENTRY(efi_pe_entry) +SYM_FUNC_START(efi_pe_entry) add $0x4, %esp call 1f @@ -174,9 +174,9 @@ ENTRY(efi_pe_entry) pushl %eax pushl %ecx jmp 2f /* Skip efi_config initialization */ -ENDPROC(efi_pe_entry) +SYM_FUNC_END(efi_pe_entry) -ENTRY(efi32_stub_entry) +SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp popl %ecx popl %edx @@ -205,7 +205,7 @@ fail: movl BP_code32_start(%esi), %eax leal startup_32(%eax), %eax jmp *%eax -ENDPROC(efi32_stub_entry) +SYM_FUNC_END(efi32_stub_entry) #endif .text diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S index e5c4a4690ca9..6379b99cb722 100644 --- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S +++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S @@ -497,7 +497,7 @@ pxor t0, x3; \ movdqu x3, (3*4*4)(out); -ENTRY(__serpent_enc_blk_4way) +SYM_FUNC_START(__serpent_enc_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst @@ -559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way) xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); ret; -ENDPROC(__serpent_enc_blk_4way) +SYM_FUNC_END(__serpent_enc_blk_4way) -ENTRY(serpent_dec_blk_4way) +SYM_FUNC_START(serpent_dec_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst @@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way) write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); ret; -ENDPROC(serpent_dec_blk_4way) +SYM_FUNC_END(serpent_dec_blk_4way) diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S index 290cc4e9a6fe..a6f09e4f2e46 100644 --- a/arch/x86/crypto/twofish-i586-asm_32.S +++ b/arch/x86/crypto/twofish-i586-asm_32.S @@ -207,7 +207,7 @@ xor %esi, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +SYM_FUNC_START(twofish_enc_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -261,9 +261,9 @@ ENTRY(twofish_enc_blk) pop %ebp mov $1, %eax ret -ENDPROC(twofish_enc_blk) +SYM_FUNC_END(twofish_enc_blk) -ENTRY(twofish_dec_blk) +SYM_FUNC_START(twofish_dec_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -318,4 +318,4 @@ ENTRY(twofish_dec_blk) pop %ebp mov $1, %eax ret -ENDPROC(twofish_dec_blk) +SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 0ecc12fcfc05..a987b62fc591 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -757,7 +757,7 @@ SYM_CODE_END(__switch_to_asm) * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. */ -ENTRY(schedule_tail_wrapper) +SYM_FUNC_START(schedule_tail_wrapper) FRAME_BEGIN pushl %eax @@ -766,7 +766,7 @@ ENTRY(schedule_tail_wrapper) FRAME_END ret -ENDPROC(schedule_tail_wrapper) +SYM_FUNC_END(schedule_tail_wrapper) /* * A newly forked process directly context switches into this address. * @@ -885,7 +885,7 @@ SYM_CODE_END(xen_sysenter_target) * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_32) +SYM_FUNC_START(entry_SYSENTER_32) /* * On entry-stack with all userspace-regs live - save and * restore eflags and %eax to use it as scratch-reg for the cr3 @@ -1013,7 +1013,7 @@ ENTRY(entry_SYSENTER_32) popfl jmp .Lsysenter_flags_fixed SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) -ENDPROC(entry_SYSENTER_32) +SYM_FUNC_END(entry_SYSENTER_32) /* * 32-bit legacy system call entry. 
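A sketch of the bare label the SYM_ENTRY(..., SYM_L_GLOBAL, SYM_A_NONE)
line above is expected to produce, assuming the generic definitions:
global linkage, no alignment, and no .type/.size, since it only marks a
region boundary rather than a function.

	.globl __end_SYSENTER_singlestep_region
	__end_SYSENTER_singlestep_region: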
@@ -1043,7 +1043,7 @@ ENDPROC(entry_SYSENTER_32) * edi arg5 * ebp arg6 */ -ENTRY(entry_INT80_32) +SYM_FUNC_START(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ @@ -1120,7 +1120,7 @@ SYM_CODE_START(iret_exc) SYM_CODE_END(iret_exc) .previous _ASM_EXTABLE(.Lirq_return, iret_exc) -ENDPROC(entry_INT80_32) +SYM_FUNC_END(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* @@ -1213,7 +1213,7 @@ SYM_CODE_START_LOCAL(common_interrupt) SYM_CODE_END(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ -ENTRY(name) \ +SYM_FUNC_START(name) \ ASM_CLAC; \ pushl $~(nr); \ SAVE_ALL switch_stacks=1; \ @@ -1222,7 +1222,7 @@ ENTRY(name) \ movl %esp, %eax; \ call fn; \ jmp ret_from_intr; \ -ENDPROC(name) +SYM_FUNC_END(name) #define BUILD_INTERRUPT(name, nr) \ BUILD_INTERRUPT3(name, nr, smp_##name); \ @@ -1341,7 +1341,7 @@ SYM_CODE_START(spurious_interrupt_bug) SYM_CODE_END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV -ENTRY(xen_hypervisor_callback) +SYM_FUNC_START(xen_hypervisor_callback) pushl $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL ENCODE_FRAME_POINTER @@ -1369,7 +1369,7 @@ SYM_INNER_LABEL_ALIGN(xen_do_upcall, SYM_L_GLOBAL) call xen_maybe_preempt_hcall #endif jmp ret_from_intr -ENDPROC(xen_hypervisor_callback) +SYM_FUNC_END(xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -1383,7 +1383,7 @@ ENDPROC(xen_hypervisor_callback) * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by maintaining a status value in EAX. */ -ENTRY(xen_failsafe_callback) +SYM_FUNC_START(xen_failsafe_callback) pushl %eax movl $1, %eax 1: mov 4(%esp), %ds @@ -1420,7 +1420,7 @@ ENTRY(xen_failsafe_callback) _ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(4b, 9b) -ENDPROC(xen_failsafe_callback) +SYM_FUNC_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ #ifdef CONFIG_XEN_PVHVM diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 7029bbaccc41..ea24aa5465fd 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -180,12 +180,12 @@ SYM_CODE_END(startup_32) * up already except stack. We just set up stack here. Then call * start_secondary(). 
*/ -ENTRY(start_cpu0) +SYM_FUNC_START(start_cpu0) movl initial_stack, %ecx movl %ecx, %esp call *(initial_code) 1: jmp 1b -ENDPROC(start_cpu0) +SYM_FUNC_END(start_cpu0) #endif /* @@ -196,7 +196,7 @@ ENDPROC(start_cpu0) * If cpu hotplug is not supported then this code can go in init section * which will be freed later */ -ENTRY(startup_32_smp) +SYM_FUNC_START(startup_32_smp) cld movl $(__BOOT_DS),%eax movl %eax,%ds @@ -363,7 +363,7 @@ ENTRY(startup_32_smp) call *(initial_code) 1: jmp 1b -ENDPROC(startup_32_smp) +SYM_FUNC_END(startup_32_smp) #include "verify_cpu.S" @@ -393,7 +393,7 @@ setup_once: andl $0,setup_once_ref /* Once is enough, thanks */ ret -ENTRY(early_idt_handler_array) +SYM_FUNC_START(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip @@ -408,7 +408,7 @@ ENTRY(early_idt_handler_array) i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handler_array) +SYM_FUNC_END(early_idt_handler_array) SYM_CODE_START_LOCAL(early_idt_handler_common) /* @@ -464,7 +464,7 @@ SYM_CODE_START_LOCAL(early_idt_handler_common) SYM_CODE_END(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ -ENTRY(early_ignore_irq) +SYM_FUNC_START(early_ignore_irq) cld #ifdef CONFIG_PRINTK pushl %eax @@ -499,7 +499,7 @@ ENTRY(early_ignore_irq) hlt_loop: hlt jmp hlt_loop -ENDPROC(early_ignore_irq) +SYM_FUNC_END(early_ignore_irq) __INITDATA .align 4 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index e0788bade5ab..3b6544111ac9 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -20,10 +20,10 @@ #define BEGIN(op) \ .macro endp; \ -ENDPROC(atomic64_##op##_386); \ +SYM_FUNC_END(atomic64_##op##_386); \ .purgem endp; \ .endm; \ -ENTRY(atomic64_##op##_386); \ +SYM_FUNC_START(atomic64_##op##_386); \ LOCK v; #define ENDP endp diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 843d978ee341..1c5c81c16b06 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -16,12 +16,12 @@ cmpxchg8b (\reg) .endm -ENTRY(atomic64_read_cx8) +SYM_FUNC_START(atomic64_read_cx8) read64 %ecx ret -ENDPROC(atomic64_read_cx8) +SYM_FUNC_END(atomic64_read_cx8) -ENTRY(atomic64_set_cx8) +SYM_FUNC_START(atomic64_set_cx8) 1: /* we don't need LOCK_PREFIX since aligned 64-bit writes * are atomic on 586 and newer */ @@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8) jne 1b ret -ENDPROC(atomic64_set_cx8) +SYM_FUNC_END(atomic64_set_cx8) -ENTRY(atomic64_xchg_cx8) +SYM_FUNC_START(atomic64_xchg_cx8) 1: LOCK_PREFIX cmpxchg8b (%esi) jne 1b ret -ENDPROC(atomic64_xchg_cx8) +SYM_FUNC_END(atomic64_xchg_cx8) .macro addsub_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebp pushl %ebx pushl %esi @@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8) popl %ebx popl %ebp ret -ENDPROC(atomic64_\func\()_return_cx8) +SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm addsub_return add add adc addsub_return sub sub sbb .macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebx read64 %esi @@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8) movl %ecx, %edx popl %ebx ret -ENDPROC(atomic64_\func\()_return_cx8) +SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb -ENTRY(atomic64_dec_if_positive_cx8) +SYM_FUNC_START(atomic64_dec_if_positive_cx8) pushl %ebx read64 %esi @@ -119,9 +119,9 @@ 
ENTRY(atomic64_dec_if_positive_cx8) movl %ecx, %edx popl %ebx ret -ENDPROC(atomic64_dec_if_positive_cx8) +SYM_FUNC_END(atomic64_dec_if_positive_cx8) -ENTRY(atomic64_add_unless_cx8) +SYM_FUNC_START(atomic64_add_unless_cx8) pushl %ebp pushl %ebx /* these just push these two parameters on the stack */ @@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8) jne 2b xorl %eax, %eax jmp 3b -ENDPROC(atomic64_add_unless_cx8) +SYM_FUNC_END(atomic64_add_unless_cx8) -ENTRY(atomic64_inc_not_zero_cx8) +SYM_FUNC_START(atomic64_inc_not_zero_cx8) pushl %ebx read64 %esi @@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8) 3: popl %ebx ret -ENDPROC(atomic64_inc_not_zero_cx8) +SYM_FUNC_END(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 74256bd193da..4742e8fa7ee7 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ -46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ -ENTRY(csum_partial) +SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum @@ -128,13 +128,13 @@ ENTRY(csum_partial) popl %ebx popl %esi ret -ENDPROC(csum_partial) +SYM_FUNC_END(csum_partial) #else /* Version for PentiumII/PPro */ -ENTRY(csum_partial) +SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum @@ -246,7 +246,7 @@ ENTRY(csum_partial) popl %ebx popl %esi ret -ENDPROC(csum_partial) +SYM_FUNC_END(csum_partial) #endif EXPORT_SYMBOL(csum_partial) diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S index ee08449d20fd..951da2ad54bb 100644 --- a/arch/x86/math-emu/div_Xsig.S +++ b/arch/x86/math-emu/div_Xsig.S @@ -75,7 +75,7 @@ FPU_result_1: .text -ENTRY(div_Xsig) +SYM_FUNC_START(div_Xsig) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -364,4 +364,4 @@ L_bugged_2: pop %ebx jmp L_exit #endif /* PARANOID */ -ENDPROC(div_Xsig) +SYM_FUNC_END(div_Xsig) diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S index 8f5025c80ee0..d047d1816abe 100644 --- a/arch/x86/math-emu/div_small.S +++ b/arch/x86/math-emu/div_small.S @@ -19,7 +19,7 @@ #include "fpu_emu.h" .text -ENTRY(FPU_div_small) +SYM_FUNC_START(FPU_div_small) pushl %ebp movl %esp,%ebp @@ -45,4 +45,4 @@ ENTRY(FPU_div_small) leave ret -ENDPROC(FPU_div_small) +SYM_FUNC_END(FPU_div_small) diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S index 3e489122a2b0..4afc7b1fa6e9 100644 --- a/arch/x86/math-emu/mul_Xsig.S +++ b/arch/x86/math-emu/mul_Xsig.S @@ -25,7 +25,7 @@ #include "fpu_emu.h" .text -ENTRY(mul32_Xsig) +SYM_FUNC_START(mul32_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -63,10 +63,10 @@ ENTRY(mul32_Xsig) popl %esi leave ret -ENDPROC(mul32_Xsig) +SYM_FUNC_END(mul32_Xsig) -ENTRY(mul64_Xsig) +SYM_FUNC_START(mul64_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -116,11 +116,11 @@ ENTRY(mul64_Xsig) popl %esi leave ret -ENDPROC(mul64_Xsig) +SYM_FUNC_END(mul64_Xsig) -ENTRY(mul_Xsig_Xsig) +SYM_FUNC_START(mul_Xsig_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -176,4 +176,4 @@ ENTRY(mul_Xsig_Xsig) popl %esi leave ret -ENDPROC(mul_Xsig_Xsig) +SYM_FUNC_END(mul_Xsig_Xsig) diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S index 604f0b2d17e8..702315eecb86 100644 --- a/arch/x86/math-emu/polynom_Xsig.S +++ b/arch/x86/math-emu/polynom_Xsig.S @@ -37,7 +37,7 @@ #define OVERFLOWED -16(%ebp) /* 
addition overflow flag */ .text -ENTRY(polynomial_Xsig) +SYM_FUNC_START(polynomial_Xsig) pushl %ebp movl %esp,%ebp subl $32,%esp @@ -134,4 +134,4 @@ L_accum_done: popl %esi leave ret -ENDPROC(polynomial_Xsig) +SYM_FUNC_END(polynomial_Xsig) diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S index 7f6b4392a15d..cad1d60b1e84 100644 --- a/arch/x86/math-emu/reg_norm.S +++ b/arch/x86/math-emu/reg_norm.S @@ -22,7 +22,7 @@ .text -ENTRY(FPU_normalize) +SYM_FUNC_START(FPU_normalize) pushl %ebp movl %esp,%ebp pushl %ebx @@ -95,12 +95,12 @@ L_overflow: call arith_overflow pop %ebx jmp L_exit -ENDPROC(FPU_normalize) +SYM_FUNC_END(FPU_normalize) /* Normalise without reporting underflow or overflow */ -ENTRY(FPU_normalize_nuo) +SYM_FUNC_START(FPU_normalize_nuo) pushl %ebp movl %esp,%ebp pushl %ebx @@ -147,4 +147,4 @@ L_exit_nuo_zero: popl %ebx leave ret -ENDPROC(FPU_normalize_nuo) +SYM_FUNC_END(FPU_normalize_nuo) diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S index 04563421ee7d..11a1f798451b 100644 --- a/arch/x86/math-emu/reg_round.S +++ b/arch/x86/math-emu/reg_round.S @@ -109,7 +109,7 @@ FPU_denormal: .globl fpu_Arith_exit /* Entry point when called from C */ -ENTRY(FPU_round) +SYM_FUNC_START(FPU_round) pushl %ebp movl %esp,%ebp pushl %esi @@ -708,4 +708,4 @@ L_exception_exit: jmp fpu_reg_round_special_exit #endif /* PARANOID */ -ENDPROC(FPU_round) +SYM_FUNC_END(FPU_round) diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S index 50fe9f8c893c..9c9e2c810afe 100644 --- a/arch/x86/math-emu/reg_u_add.S +++ b/arch/x86/math-emu/reg_u_add.S @@ -32,7 +32,7 @@ #include "control_w.h" .text -ENTRY(FPU_u_add) +SYM_FUNC_START(FPU_u_add) pushl %ebp movl %esp,%ebp pushl %esi @@ -166,4 +166,4 @@ L_exit: leave ret #endif /* PARANOID */ -ENDPROC(FPU_u_add) +SYM_FUNC_END(FPU_u_add) diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S index 94d545e118e4..e2fb5c2644c5 100644 --- a/arch/x86/math-emu/reg_u_div.S +++ b/arch/x86/math-emu/reg_u_div.S @@ -75,7 +75,7 @@ FPU_ovfl_flag: #define DEST PARAM3 .text -ENTRY(FPU_u_div) +SYM_FUNC_START(FPU_u_div) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -471,4 +471,4 @@ L_exit: ret #endif /* PARANOID */ -ENDPROC(FPU_u_div) +SYM_FUNC_END(FPU_u_div) diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S index 21cde47fb3e5..0c779c87ac5b 100644 --- a/arch/x86/math-emu/reg_u_mul.S +++ b/arch/x86/math-emu/reg_u_mul.S @@ -45,7 +45,7 @@ FPU_accum_1: .text -ENTRY(FPU_u_mul) +SYM_FUNC_START(FPU_u_mul) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -147,4 +147,4 @@ L_exit: ret #endif /* PARANOID */ -ENDPROC(FPU_u_mul) +SYM_FUNC_END(FPU_u_mul) diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S index f05dea7dec38..e9bb7c248649 100644 --- a/arch/x86/math-emu/reg_u_sub.S +++ b/arch/x86/math-emu/reg_u_sub.S @@ -33,7 +33,7 @@ #include "control_w.h" .text -ENTRY(FPU_u_sub) +SYM_FUNC_START(FPU_u_sub) pushl %ebp movl %esp,%ebp pushl %esi @@ -271,4 +271,4 @@ L_exit: popl %esi leave ret -ENDPROC(FPU_u_sub) +SYM_FUNC_END(FPU_u_sub) diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S index 226a51e991f1..d9d7de8dbd7b 100644 --- a/arch/x86/math-emu/round_Xsig.S +++ b/arch/x86/math-emu/round_Xsig.S @@ -23,7 +23,7 @@ .text -ENTRY(round_Xsig) +SYM_FUNC_START(round_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ @@ -79,11 +79,11 @@ L_exit: popl %ebx leave ret -ENDPROC(round_Xsig) +SYM_FUNC_END(round_Xsig) 
-ENTRY(norm_Xsig) +SYM_FUNC_START(norm_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ @@ -139,4 +139,4 @@ L_n_exit: popl %ebx leave ret -ENDPROC(norm_Xsig) +SYM_FUNC_END(norm_Xsig) diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S index 96f4779aa9c1..726af985f758 100644 --- a/arch/x86/math-emu/shr_Xsig.S +++ b/arch/x86/math-emu/shr_Xsig.S @@ -22,7 +22,7 @@ #include "fpu_emu.h" .text -ENTRY(shr_Xsig) +SYM_FUNC_START(shr_Xsig) push %ebp movl %esp,%ebp pushl %esi @@ -86,4 +86,4 @@ L_more_than_95: popl %esi leave ret -ENDPROC(shr_Xsig) +SYM_FUNC_END(shr_Xsig) diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S index d588874eb6fb..4fc89174caf0 100644 --- a/arch/x86/math-emu/wm_shrx.S +++ b/arch/x86/math-emu/wm_shrx.S @@ -33,7 +33,7 @@ | Results returned in the 64 bit arg and eax. | +---------------------------------------------------------------------------*/ -ENTRY(FPU_shrx) +SYM_FUNC_START(FPU_shrx) push %ebp movl %esp,%ebp pushl %esi @@ -93,7 +93,7 @@ L_more_than_95: popl %esi leave ret -ENDPROC(FPU_shrx) +SYM_FUNC_END(FPU_shrx) /*---------------------------------------------------------------------------+ @@ -112,7 +112,7 @@ ENDPROC(FPU_shrx) | part which has been shifted out of the arg. | | Results returned in the 64 bit arg and eax. | +---------------------------------------------------------------------------*/ -ENTRY(FPU_shrxs) +SYM_FUNC_START(FPU_shrxs) push %ebp movl %esp,%ebp pushl %esi @@ -204,4 +204,4 @@ Ls_more_than_95: popl %esi leave ret -ENDPROC(FPU_shrxs) +SYM_FUNC_END(FPU_shrxs) diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index f031c0e19356..3b2b58164ec1 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -75,7 +75,7 @@ FPU_fsqrt_arg_0: .text -ENTRY(wm_sqrt) +SYM_FUNC_START(wm_sqrt) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -469,4 +469,4 @@ sqrt_more_prec_large: /* Our estimate is too large */ movl $0x7fffff00,%eax jmp sqrt_round_result -ENDPROC(wm_sqrt) +SYM_FUNC_END(wm_sqrt) diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index ab2e91e76894..eed8b5b441f8 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -22,7 +22,7 @@ */ .text -ENTRY(efi_call_phys) +SYM_FUNC_START(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. 
In EFI, I found @@ -114,7 +114,7 @@ ENTRY(efi_call_phys) movl (%edx), %ecx pushl %ecx ret -ENDPROC(efi_call_phys) +SYM_FUNC_END(efi_call_phys) .previous .data diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index a19ed3d23185..8786653ad3c0 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -16,7 +16,7 @@ .text -ENTRY(swsusp_arch_suspend) +SYM_FUNC_START(swsusp_arch_suspend) movl %esp, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp @@ -33,7 +33,7 @@ ENTRY(swsusp_arch_suspend) call swsusp_save FRAME_END ret -ENDPROC(swsusp_arch_suspend) +SYM_FUNC_END(swsusp_arch_suspend) SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ @@ -82,7 +82,7 @@ SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE -ENTRY(restore_registers) +SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movl %ebp, %cr3 movl mmu_cr4_features, %ecx @@ -109,4 +109,4 @@ ENTRY(restore_registers) movl %eax, in_suspend ret -ENDPROC(restore_registers) +SYM_FUNC_END(restore_registers) diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 5ffcf72c8f87..331a2306312c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -112,15 +112,13 @@ .globl name ASM_NL \ name: #endif -#endif -#ifndef CONFIG_X86_64 #ifndef ENTRY /* deprecated, use SYM_FUNC_START */ #define ENTRY(name) \ SYM_FUNC_START(name) #endif -#endif /* CONFIG_X86_64 */ +#endif /* CONFIG_X86 */ #endif /* LINKER_SCRIPT */ #ifndef WEAK @@ -135,9 +133,7 @@ #define END(name) \ .size name, .-name #endif -#endif /* CONFIG_X86 */ -#ifndef CONFIG_X86_64 /* If symbol 'name' is treated as a subroutine (gets called, and returns) * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of * static analysis tools such as stack depth analyzer. @@ -147,7 +143,7 @@ #define ENDPROC(name) \ SYM_FUNC_END(name) #endif -#endif /* CONFIG_X86_64 */ +#endif /* CONFIG_X86 */ /* === generic annotations === */ -- cgit v1.2.3 From 13fbe784ef6e58d0267a6e183f90ce7826d7d885 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Fri, 11 Oct 2019 13:51:08 +0200 Subject: x86/asm: Replace WEAK uses by SYM_INNER_LABEL_ALIGN Use the new SYM_INNER_LABEL_ALIGN for WEAK entries in the middle of x86 assembly functions. And make sure WEAK is not defined for x86 anymore as these were the last users. Signed-off-by: Jiri Slaby Signed-off-by: Borislav Petkov Cc: Andrey Ryabinin Cc: Boris Ostrovsky Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: Josh Poimboeuf Cc: linux-arch@vger.kernel.org Cc: Mark Rutland Cc: Peter Zijlstra Cc: "Rafael J. 
Wysocki" Cc: "Steven Rostedt (VMware)" Cc: Thomas Gleixner Cc: x86-ml Link: https://lkml.kernel.org/r/20191011115108.12392-29-jslaby@suse.cz --- arch/x86/kernel/ftrace_32.S | 2 +- arch/x86/kernel/ftrace_64.S | 2 +- arch/x86/kernel/head_32.S | 2 +- include/linux/linkage.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index b4f495bbd5a1..8ed1f5d371f0 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -85,7 +85,7 @@ ftrace_graph_call: #endif /* This is weak to keep gas from relaxing the jumps */ -WEAK(ftrace_stub) +SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) ret SYM_CODE_END(ftrace_caller) diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 16deae706950..69c8d1b9119e 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -168,7 +168,7 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) * This is weak to keep gas from relaxing the jumps. * It is also used to copy the retq for trampolines. */ -WEAK(ftrace_stub) +SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) retq SYM_FUNC_END(ftrace_caller) diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index ea24aa5465fd..3fe7d2008b7a 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -156,7 +156,7 @@ SYM_CODE_START(startup_32) jmp *%eax .Lbad_subarch: -WEAK(xen_entry) +SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK) /* Unknown implementation; there's really nothing we can do at this point. */ ud2a diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 331a2306312c..9280209d1f62 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -121,13 +121,13 @@ #endif /* CONFIG_X86 */ #endif /* LINKER_SCRIPT */ +#ifndef CONFIG_X86 #ifndef WEAK /* deprecated, use SYM_FUNC_START_WEAK* */ #define WEAK(name) \ SYM_FUNC_START_WEAK(name) #endif -#ifndef CONFIG_X86 #ifndef END /* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */ #define END(name) \ -- cgit v1.2.3 From 441110a547f86a2fd0c40bf04b274853622c53cc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:30 -0700 Subject: vmlinux.lds.h: Provide EMIT_PT_NOTE to indicate export of .notes In preparation for moving NOTES into RO_DATA, provide a mechanism for architectures that want to emit a PT_NOTE Program Header to do so. 
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-9-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 3 +++ arch/ia64/kernel/vmlinux.lds.S | 2 ++ arch/mips/kernel/vmlinux.lds.S | 12 ++++++------ arch/powerpc/kernel/vmlinux.lds.S | 1 + arch/s390/kernel/vmlinux.lds.S | 2 ++ arch/x86/kernel/vmlinux.lds.S | 2 ++ include/asm-generic/vmlinux.lds.h | 8 ++++++++ 7 files changed, 24 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 781090cacc96..363a60ba7c31 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -1,4 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#define EMITS_PT_NOTE + #include #include #include diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 2c4f23c390ad..7cf4958b732d 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -5,6 +5,8 @@ #include #include +#define EMITS_PT_NOTE + #include OUTPUT_FORMAT("elf64-ia64-little") diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 33ee0d18fb0a..1c95612eb800 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -10,6 +10,11 @@ */ #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) +/* Cavium Octeon should not have a separate PT_NOTE Program Header. */ +#ifndef CONFIG_CAVIUM_OCTEON_SOC +#define EMITS_PT_NOTE +#endif + #include #undef mips @@ -76,12 +81,7 @@ SECTIONS __stop___dbe_table = .; } -#ifdef CONFIG_CAVIUM_OCTEON_SOC -#define NOTES_HEADER -#else /* CONFIG_CAVIUM_OCTEON_SOC */ -#define NOTES_HEADER :note -#endif /* CONFIG_CAVIUM_OCTEON_SOC */ - NOTES :text NOTES_HEADER + NOTES NOTES_HEADERS .dummy : { *(.dummy) } :text _sdata = .; /* Start of data section */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index e184a63aa5b0..7e26e20c8324 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -6,6 +6,7 @@ #endif #define BSS_FIRST_SECTIONS *(.bss.prominit) +#define EMITS_PT_NOTE #include #include diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 13294fef473e..646d939346df 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -15,6 +15,8 @@ /* Handle ro_after_init data on our own. 
*/ #define RO_AFTER_INIT_DATA +#define EMITS_PT_NOTE + #include #include diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 788e78978030..2e18bf5c1aed 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -21,6 +21,8 @@ #define LOAD_OFFSET __START_KERNEL_map #endif +#define EMITS_PT_NOTE + #include #include #include diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dae64600ccbf..f5dd45ce73f1 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -54,6 +54,14 @@ #define LOAD_OFFSET 0 #endif +/* + * Only some architectures want to have the .notes segment visible in + * a separate PT_NOTE ELF Program Header. + */ +#ifdef EMITS_PT_NOTE +#define NOTES_HEADERS :text :note +#endif + /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) -- cgit v1.2.3 From fbe6a8e618a2d70621cff277e24f6eb338d3d149 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:31 -0700 Subject: vmlinux.lds.h: Move Program Header restoration into NOTES macro In preparation for moving NOTES into RO_DATA, make the Program Header assignment restoration be part of the NOTES macro itself. Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-10-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 5 +---- arch/ia64/kernel/vmlinux.lds.S | 4 +--- arch/mips/kernel/vmlinux.lds.S | 3 +-- arch/powerpc/kernel/vmlinux.lds.S | 4 +--- arch/s390/kernel/vmlinux.lds.S | 4 +--- arch/x86/kernel/vmlinux.lds.S | 3 +-- include/asm-generic/vmlinux.lds.h | 13 +++++++++++-- 7 files changed, 17 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 363a60ba7c31..cdfdc91ce64c 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,10 +34,7 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - NOTES :text :note - .dummy : { - *(.dummy) - } :text + NOTES RODATA EXCEPTION_TABLE(16) diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 7cf4958b732d..bfc937ec168c 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -70,9 +70,7 @@ SECTIONS { /* * Read-only data */ - NOTES :text :note /* put .notes in text and mark in PT_NOTE */ - code_continues : { - } :text /* switch back to regular program... 
*/ + NOTES EXCEPTION_TABLE(16) diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 1c95612eb800..6a22f531d815 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -81,8 +81,7 @@ SECTIONS __stop___dbe_table = .; } - NOTES NOTES_HEADERS - .dummy : { *(.dummy) } :text + NOTES _sdata = .; /* Start of data section */ RODATA diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 7e26e20c8324..4f19d814d592 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -164,9 +164,7 @@ SECTIONS #endif EXCEPTION_TABLE(0) - NOTES :text :note - /* Restore program header away from PT_NOTE. */ - .dummy : { *(.dummy) } :text + NOTES /* * Init sections discarded at runtime diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 646d939346df..f88eedeb915a 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -52,9 +52,7 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x0700 - NOTES :text :note - - .dummy : { *(.dummy) } :text + NOTES RO_DATA_SECTION(PAGE_SIZE) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2e18bf5c1aed..8be25b09c2b7 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -148,8 +148,7 @@ SECTIONS _etext = .; } :text = 0x9090 - NOTES :text :note - .dummy : { *(.dummy) } :text + NOTES EXCEPTION_TABLE(16) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f5dd45ce73f1..97d4299f14dc 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -56,10 +56,18 @@ /* * Only some architectures want to have the .notes segment visible in - * a separate PT_NOTE ELF Program Header. + * a separate PT_NOTE ELF Program Header. When this happens, it needs + * to be visible in both the kernel text's PT_LOAD and the PT_NOTE + * Program Headers. In this case, though, the PT_LOAD needs to be made + * the default again so that all the following sections don't also end + * up in the PT_NOTE Program Header. */ #ifdef EMITS_PT_NOTE #define NOTES_HEADERS :text :note +#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text +#else +#define NOTES_HEADERS +#define NOTES_HEADERS_RESTORE #endif /* Align . to a 8 byte boundary equals to maximum function alignment. */ @@ -798,7 +806,8 @@ __start_notes = .; \ KEEP(*(.note.*)) \ __stop_notes = .; \ - } + } NOTES_HEADERS \ + NOTES_HEADERS_RESTORE #define INIT_SETUP(initsetup_align) \ . = ALIGN(initsetup_align); \ -- cgit v1.2.3 From eaf937075c9a42eb8ba51eb3050773d7205d3595 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:32 -0700 Subject: vmlinux.lds.h: Move NOTES into RO_DATA The .notes section should be non-executable read-only data. As such, move it to the RO_DATA macro instead of being per-architecture defined. 
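[As of the previous patch, the generic NOTES macro expands to roughly the following when EMITS_PT_NOTE is defined — simplified from the hunks above; the AT()/LOAD_OFFSET addressing is assumed here from the pattern of the neighboring read-only sections, and the real macro's backslash line continuations are dropped:

        .notes : AT(ADDR(.notes) - LOAD_OFFSET) {
                __start_notes = .;
                KEEP(*(.note.*))
                __stop_notes = .;
        } :text :note                                   /* NOTES_HEADERS */
        __restore_ph : { *(.__restore_ph) } :text       /* NOTES_HEADERS_RESTORE */

This patch moves that whole group inside RO_DATA so .notes lands with the rest of the read-only data; the empty __restore_ph output section exists only to make the text PT_LOAD segment the default Program Header again, keeping the sections that follow .notes out of PT_NOTE.]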
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-11-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 -- arch/arc/kernel/vmlinux.lds.S | 2 -- arch/arm/kernel/vmlinux-xip.lds.S | 2 -- arch/arm/kernel/vmlinux.lds.S | 2 -- arch/arm64/kernel/vmlinux.lds.S | 1 - arch/c6x/kernel/vmlinux.lds.S | 1 - arch/csky/kernel/vmlinux.lds.S | 1 - arch/h8300/kernel/vmlinux.lds.S | 1 - arch/hexagon/kernel/vmlinux.lds.S | 1 - arch/ia64/kernel/vmlinux.lds.S | 2 -- arch/microblaze/kernel/vmlinux.lds.S | 1 - arch/mips/kernel/vmlinux.lds.S | 2 -- arch/nds32/kernel/vmlinux.lds.S | 1 - arch/nios2/kernel/vmlinux.lds.S | 1 - arch/openrisc/kernel/vmlinux.lds.S | 1 - arch/parisc/kernel/vmlinux.lds.S | 1 - arch/powerpc/kernel/vmlinux.lds.S | 2 -- arch/riscv/kernel/vmlinux.lds.S | 1 - arch/s390/kernel/vmlinux.lds.S | 2 -- arch/sh/kernel/vmlinux.lds.S | 1 - arch/sparc/kernel/vmlinux.lds.S | 1 - arch/um/include/asm/common.lds.S | 1 - arch/unicore32/kernel/vmlinux.lds.S | 1 - arch/x86/kernel/vmlinux.lds.S | 2 -- arch/xtensa/kernel/vmlinux.lds.S | 1 - include/asm-generic/vmlinux.lds.h | 9 +++++---- 26 files changed, 5 insertions(+), 38 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index cdfdc91ce64c..bf28043485f6 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,8 +34,6 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - NOTES - RODATA EXCEPTION_TABLE(16) diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 6c693a9d29b6..1d6eef4b6976 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -118,8 +118,6 @@ SECTIONS /DISCARD/ : { *(.eh_frame) } #endif - NOTES - . = ALIGN(PAGE_SIZE); _end = . ; diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 8c74037ade22..d2a9651c24ad 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -70,8 +70,6 @@ SECTIONS ARM_UNWIND_SECTIONS #endif - NOTES - _etext = .; /* End of text and rodata section */ ARM_VECTORS diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 23150c0f0f4d..068db6860867 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -81,8 +81,6 @@ SECTIONS ARM_UNWIND_SECTIONS #endif - NOTES - #ifdef CONFIG_STRICT_KERNEL_RWX . = ALIGN(1<<SECTION_SHIFT); [...] -- cgit v1.2.3 From ... Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:33 -0700 Subject: vmlinux.lds.h: Replace RODATA with RO_DATA There's no reason to keep the RODATA macro: replace the callers with the expected RO_DATA macro.
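[The conversion is mechanical: as the vmlinux.lds.h hunk at the end of this patch shows, the alias being removed was just "#define RODATA RO_DATA_SECTION(4096)", so each caller picks up the explicit alignment. The per-arch pattern, mirroring the alpha hunk below:

        -       RODATA
        +       RO_DATA(4096)
]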
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-12-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 +- arch/ia64/kernel/vmlinux.lds.S | 2 +- arch/microblaze/kernel/vmlinux.lds.S | 2 +- arch/mips/kernel/vmlinux.lds.S | 2 +- arch/um/include/asm/common.lds.S | 2 +- arch/xtensa/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 4 +--- 7 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index bf28043485f6..af411817dd7d 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,7 +34,7 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - RODATA + RO_DATA(4096) EXCEPTION_TABLE(16) /* Will be freed after init */ diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index fae077595756..11d5115bc44d 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -104,7 +104,7 @@ SECTIONS { code_continues2 : { } :text - RODATA + RO_DATA(4096) .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index d008e50bb212..2299694748ea 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -51,7 +51,7 @@ SECTIONS { } . = ALIGN(16); - RODATA + RO_DATA(4096) EXCEPTION_TABLE(16) /* diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 91e566defc16..a5f00ec73ea6 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -82,7 +82,7 @@ SECTIONS } _sdata = .; /* Start of data section */ - RODATA + RO_DATA(4096) /* writeable */ .data : { /* Data */ diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index 91aca356095f..7145ce699982 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -9,7 +9,7 @@ _sdata = .; PROVIDE (sdata = .); - RODATA + RO_DATA(4096) .unprotected : { *(.unprotected) } . = ALIGN(4096); diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index a0a843745695..b97e5798b9cf 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -124,7 +124,7 @@ SECTIONS . = ALIGN(16); - RODATA + RO_DATA(4096) /* Relocation table */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dc3390ec6b60..a0a989fbe411 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -518,9 +518,7 @@ . = ALIGN((align)); \ __end_rodata = .; -/* RODATA & RO_DATA provided for backward compatibility. 
- * All archs are supposed to use RO_DATA() */ -#define RODATA RO_DATA_SECTION(4096) +/* All archs are supposed to use RO_DATA() */ #define RO_DATA(align) RO_DATA_SECTION(align) /* -- cgit v1.2.3 From 93240b327929ff03c1878ea8badc5c6bd86f053f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:34 -0700 Subject: vmlinux.lds.h: Replace RO_DATA_SECTION with RO_DATA Finish renaming RO_DATA_SECTION to RO_DATA. (Calling this a "section" is a lie, since it's multiple sections and section flags cannot be applied to the macro.) Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Acked-by: Geert Uytterhoeven # m68k Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-13-keescook@chromium.org --- arch/arc/kernel/vmlinux.lds.S | 2 +- arch/c6x/kernel/vmlinux.lds.S | 2 +- arch/csky/kernel/vmlinux.lds.S | 2 +- arch/h8300/kernel/vmlinux.lds.S | 2 +- arch/hexagon/kernel/vmlinux.lds.S | 2 +- arch/m68k/kernel/vmlinux-nommu.lds | 2 +- arch/nds32/kernel/vmlinux.lds.S | 2 +- arch/nios2/kernel/vmlinux.lds.S | 2 +- arch/openrisc/kernel/vmlinux.lds.S | 4 ++-- arch/parisc/kernel/vmlinux.lds.S | 4 ++-- arch/riscv/kernel/vmlinux.lds.S | 2 +- arch/s390/kernel/vmlinux.lds.S | 2 +- arch/unicore32/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 7 ++----- 14 files changed, 17 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 1d6eef4b6976..7d1d27066deb 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -95,7 +95,7 @@ SECTIONS _etext = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) /* * 1. this is .data essentially diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index d6e3802536b3..a3547f9d415b 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -82,7 +82,7 @@ SECTIONS EXCEPTION_TABLE(16) - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) .const : { *(.const .const.* .gnu.linkonce.r.*) diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 75dd31412242..8598bd7a7bcd 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 88776e785245..d3247d33b115 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -38,7 +38,7 @@ SECTIONS _etext = . ; } EXCEPTION_TABLE(16) - RO_DATA_SECTION(4) + RO_DATA(4) ROMEND = .; #if defined(CONFIG_ROMKERNEL) . 
= RAMTOP; diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 6a6e8fc422ee..0145251fa317 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE) - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index cf6edda38971..de80f8b8ae78 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -60,7 +60,7 @@ SECTIONS { #endif _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index c4f1c5a604c3..10ff570ba95b 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -53,7 +53,7 @@ SECTIONS _etext = .; /* End of text and rodata section */ _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 20e4078b3477..318804a2c7a1 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS __init_end = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 142c51c994f5..f73e0d3ea09f 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -67,8 +67,8 @@ SECTIONS _sdata = .; - /* Page alignment required for RO_DATA_SECTION */ - RO_DATA_SECTION(PAGE_SIZE) + /* Page alignment required for RO_DATA */ + RO_DATA(PAGE_SIZE) _e_kernel_ro = .; /* Whatever comes after _e_kernel_ro had better be page-aligend, too */ diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 168d12b2ebb8..e1c563c7dca1 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -109,7 +109,7 @@ SECTIONS _sdata = .; /* Architecturally we need to keep __gp below 0x1000000 and thus - * in front of RO_DATA_SECTION() which stores lots of tracepoint + * in front of RO_DATA() which stores lots of tracepoint * and ftrace symbols. */ #ifdef CONFIG_64BIT . = ALIGN(16); @@ -127,7 +127,7 @@ SECTIONS } #endif - RO_DATA_SECTION(8) + RO_DATA(8) /* RO because of BUILDTIME_EXTABLE_SORT */ EXCEPTION_TABLE(8) diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index df5229c4034d..66dc17d24dd9 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -52,7 +52,7 @@ SECTIONS /* Start of data section */ _sdata = .; - RO_DATA_SECTION(L1_CACHE_BYTES) + RO_DATA(L1_CACHE_BYTES) .srodata : { *(.srodata*) } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index beb4df053e20..b33c4823f8b5 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -52,7 +52,7 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x0700 - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) . 
= ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S index 78c4c56057b0..367c80313bec 100644 --- a/arch/unicore32/kernel/vmlinux.lds.S +++ b/arch/unicore32/kernel/vmlinux.lds.S @@ -43,7 +43,7 @@ SECTIONS _etext = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a0a989fbe411..061e57c609f6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -23,7 +23,7 @@ * _etext = .; * * _sdata = .; - * RO_DATA_SECTION(PAGE_SIZE) + * RO_DATA(PAGE_SIZE) * RW_DATA_SECTION(...) * _edata = .; * @@ -363,7 +363,7 @@ /* * Read only Data */ -#define RO_DATA_SECTION(align) \ +#define RO_DATA(align) \ . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ @@ -518,9 +518,6 @@ . = ALIGN((align)); \ __end_rodata = .; -/* All archs are supposed to use RO_DATA() */ -#define RO_DATA(align) RO_DATA_SECTION(align) - /* * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map -- cgit v1.2.3 From c9174047b48d700a785b633319dd7d27288b86be Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:35 -0700 Subject: vmlinux.lds.h: Replace RW_DATA_SECTION with RW_DATA Rename RW_DATA_SECTION to RW_DATA. (Calling this a "section" is a lie, since it's multiple sections and section flags cannot be applied to the macro.) Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Acked-by: Geert Uytterhoeven # m68k Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-14-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 +- arch/arc/kernel/vmlinux.lds.S | 2 +- arch/arm/kernel/vmlinux-xip.lds.S | 2 +- arch/arm/kernel/vmlinux.lds.S | 2 +- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/csky/kernel/vmlinux.lds.S | 2 +- arch/h8300/kernel/vmlinux.lds.S | 2 +- arch/hexagon/kernel/vmlinux.lds.S | 2 +- arch/m68k/kernel/vmlinux-nommu.lds | 2 +- arch/m68k/kernel/vmlinux-std.lds | 2 +- arch/m68k/kernel/vmlinux-sun3.lds | 2 +- arch/microblaze/kernel/vmlinux.lds.S | 2 +- arch/nds32/kernel/vmlinux.lds.S | 2 +- arch/nios2/kernel/vmlinux.lds.S | 2 +- arch/openrisc/kernel/vmlinux.lds.S | 2 +- arch/parisc/kernel/vmlinux.lds.S | 2 +- arch/riscv/kernel/vmlinux.lds.S | 2 +- arch/s390/kernel/vmlinux.lds.S | 2 +- arch/sh/kernel/vmlinux.lds.S | 2 +- arch/sparc/kernel/vmlinux.lds.S | 2 +- arch/unicore32/kernel/vmlinux.lds.S | 2 +- arch/xtensa/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 4 ++-- 23 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index af411817dd7d..edc45f45523b 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; /* Start of rw data section */ _data = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, 
THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .got : { *(.got) diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 7d1d27066deb..54139a6f469b 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -101,7 +101,7 @@ SECTIONS * 1. this is .data essentially * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned */ - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index d2a9651c24ad..21b8b271c80d 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -112,7 +112,7 @@ SECTIONS . = ALIGN(THREAD_SIZE); _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { *(.data..ro_after_init) } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 068db6860867..319ccb10846a 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -141,7 +141,7 @@ SECTIONS __init_end = .; _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; BSS_SECTION(0, 0, 0) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e7dafc29b1fa..a4b3e6c0680c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -214,7 +214,7 @@ SECTIONS _data = .; _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) /* * Data written with the MMU off but read with the MMU on requires diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 8598bd7a7bcd..2ff37beaf2bf 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(L1_CACHE_BYTES) diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index d3247d33b115..2ac7bdcd2fe0 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -47,7 +47,7 @@ SECTIONS #endif _sdata = . ; __data_start = . 
; - RW_DATA_SECTION(0, PAGE_SIZE, THREAD_SIZE) + RW_DATA(0, PAGE_SIZE, THREAD_SIZE) #if defined(CONFIG_ROMKERNEL) #undef ADDR #endif diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 0145251fa317..0ca2471ddb9f 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS INIT_DATA_SECTION(PAGE_SIZE) _sdata = .; - RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE) + RW_DATA(32,PAGE_SIZE,_THREAD_SIZE) RO_DATA(PAGE_SIZE) _edata = .; diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index de80f8b8ae78..7b975420c3d9 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -61,7 +61,7 @@ SECTIONS { _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 625a5785804f..6e7eb49ed985 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -33,7 +33,7 @@ SECTIONS RODATA - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) BSS_SECTION(0, 0, 0) diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 9868270b0984..1a0ad6b6dd8c 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -30,7 +30,7 @@ SECTIONS EXCEPTION_TABLE(16) :data _sdata = .; /* Start of rw data section */ - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) :data + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) :data /* End of data goes *here* so that freeing init code works properly. */ _edata = .; NOTES diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 2299694748ea..b8efb08204a1 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -69,7 +69,7 @@ SECTIONS { } _sdata = . ; - RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) + RW_DATA(32, PAGE_SIZE, THREAD_SIZE) _edata = . ; /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index 10ff570ba95b..f679d3397436 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -54,7 +54,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 318804a2c7a1..c55a7cfa1075 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; BSS_SECTION(0, 0, 0) diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index f73e0d3ea09f..60449fd7f16f 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -74,7 +74,7 @@ SECTIONS /* Whatever comes after _e_kernel_ro had better be page-aligend, too */ /* 32 here is cacheline size... 
recheck this */ - RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE) + RW_DATA(32, PAGE_SIZE, PAGE_SIZE) _edata = .; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index e1c563c7dca1..12b3d7d5e9e4 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -148,7 +148,7 @@ SECTIONS data_start = .; /* Data */ - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) /* PA-RISC locks requires 16-byte alignment */ . = ALIGN(16); diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 66dc17d24dd9..12f42f96d46e 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -57,7 +57,7 @@ SECTIONS *(.srodata*) } - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .sdata : { __global_pointer$ = . + 0x800; *(.sdata*) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index b33c4823f8b5..37695499717d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -67,7 +67,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __end_ro_after_init = .; - RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) + RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) BOOT_DATA_PRESERVED _edata = .; /* End of data section */ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index fef39054cc70..c60b19958c35 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -51,7 +51,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; DWARF_EH_FRAME diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 8929fbc35a80..7ec79918b566 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -67,7 +67,7 @@ SECTIONS .data1 : { *(.data1) } - RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE) + RW_DATA(SMP_CACHE_BYTES, 0, THREAD_SIZE) /* End of data section */ _edata = .; diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S index 367c80313bec..6fb320b337ef 100644 --- a/arch/unicore32/kernel/vmlinux.lds.S +++ b/arch/unicore32/kernel/vmlinux.lds.S @@ -44,7 +44,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(L1_CACHE_BYTES) diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index b97e5798b9cf..bdbd7c4056c1 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -134,7 +134,7 @@ SECTIONS /* Data section */ _sdata = .; - RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) + RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; /* Initialization code and data: */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 061e57c609f6..356078e50a5c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -24,7 +24,7 @@ * * _sdata = .; * RO_DATA(PAGE_SIZE) - * RW_DATA_SECTION(...) + * RW_DATA(...) * _edata = .; * * EXCEPTION_TABLE(...) @@ -975,7 +975,7 @@ * matches the requirement of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */ -#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ +#define RW_DATA(cacheline, pagealigned, inittask) \ . 
= ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK_DATA(inittask) \ -- cgit v1.2.3 From b8c2f776164c8f74ac31c5e370ca3f029be0aa19 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:36 -0700 Subject: vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA Many architectures have an EXCEPTION_TABLE that needs to be only readable. As such, it should live in RO_DATA. Create a macro to identify this case for the architectures that can move EXCEPTION_TABLE into RO_DATA. Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Will Deacon Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-s390@vger.kernel.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-15-keescook@chromium.org --- include/asm-generic/vmlinux.lds.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 356078e50a5c..9867d8e41eed 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -69,6 +69,17 @@ #define NOTES_HEADERS_RESTORE #endif +/* + * Some architectures have non-executable read-only exception tables. + * They can be added to the RO_DATA segment by specifying their desired + * alignment. + */ +#ifdef RO_EXCEPTION_TABLE_ALIGN +#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) +#else +#define RO_EXCEPTION_TABLE +#endif + /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) @@ -513,6 +524,7 @@ __stop___modver = .; \ } \ \ + RO_EXCEPTION_TABLE \ NOTES \ \ . = ALIGN((align)); \ -- cgit v1.2.3
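[A sketch of how an architecture would opt in — illustrative only; no architecture is actually converted by this patch, and "foo" is made up: define the desired alignment before including the generic header, then drop the standalone EXCEPTION_TABLE() from the SECTIONS body, since RO_DATA() now emits it via RO_EXCEPTION_TABLE:

        /* arch/foo/kernel/vmlinux.lds.S -- hypothetical opt-in */
        #define RO_EXCEPTION_TABLE_ALIGN        16
        #include <asm-generic/vmlinux.lds.h>

        SECTIONS
        {
                ...
                RO_DATA(PAGE_SIZE)      /* now also lays out EXCEPTION_TABLE(16) */
                /* the separate EXCEPTION_TABLE(16) line is no longer needed */
                ...
        }

Architectures that leave RO_EXCEPTION_TABLE_ALIGN undefined get an empty RO_EXCEPTION_TABLE and keep placing their exception table themselves.]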