author     Michael Ellerman <mpe@ellerman.id.au>    2022-12-08 23:57:47 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>    2022-12-08 23:57:47 +1100
commit     f24f21c4122e837fa031cc512a7f20eb8c554c5e (patch)
tree       b19acb396850fbfb36218329659b926446c24914 /arch/powerpc
parent     64fdcbcc064966bbf261bb455876dffa58858d32 (diff)
parent     a39818a3fb2bf12ae945a7c5fba8c5d9048a0e96 (diff)
download   linux-f24f21c4122e837fa031cc512a7f20eb8c554c5e.tar.bz2
Merge branch 'topic/objtool' into next
Merge the powerpc objtool support, which we were keeping in a topic
branch in case of any merge conflicts.
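
Most of the churn in the diff below is mechanical: bare local labels in .S files become SYM_FUNC_START_LOCAL()/SYM_FUNC_END() pairs from <linux/linkage.h>, whose SYM_FUNC_END half emits the .type and .size directives that give objtool a properly bounded function symbol to analyse. A minimal sketch of the pattern, using a hypothetical helper name (my_helper is illustrative, not from the diff):

    /* Before: a bare label has no ELF symbol type or size, so
     * objtool cannot tell where the code for my_helper ends. */
    my_helper:
            mfspr   r11, SPRN_HID0
            blr

    /* After: the macros mark my_helper as a local function symbol
     * with an explicit size, visible to objtool. */
    SYM_FUNC_START_LOCAL(my_helper)
            mfspr   r11, SPRN_HID0
            blr
    SYM_FUNC_END(my_helper)

SYM_FUNC_START() (without _LOCAL) is used instead where the label was previously exported with .globl, as with __kuep_lock in entry_32.S below.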
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |  2
-rw-r--r--  arch/powerpc/include/asm/asm.h                |  7
-rw-r--r--  arch/powerpc/include/asm/bug.h                |  3
-rw-r--r--  arch/powerpc/include/asm/linkage.h            |  3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S           | 26
-rw-r--r--  arch/powerpc/kernel/cpu_setup_e500.S          |  8
-rw-r--r--  arch/powerpc/kernel/entry_32.S                |  9
-rw-r--r--  arch/powerpc/kernel/entry_64.S                |  2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S          |  4
-rw-r--r--  arch/powerpc/kernel/head_40x.S                |  5
-rw-r--r--  arch/powerpc/kernel/head_64.S                 |  7
-rw-r--r--  arch/powerpc/kernel/head_85xx.S               |  5
-rw-r--r--  arch/powerpc/kernel/head_8xx.S                |  5
-rw-r--r--  arch/powerpc/kernel/head_book3s_32.S          | 29
-rw-r--r--  arch/powerpc/kernel/misc_64.S                 |  4
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S               |  5
-rw-r--r--  arch/powerpc/kernel/vdso/Makefile             |  2
-rw-r--r--  arch/powerpc/kernel/vector.S                  |  4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S       |  4
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S       | 25
-rw-r--r--  arch/powerpc/kvm/fpu.S                        | 17
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_sleep.S  | 15
22 files changed, 142 insertions, 49 deletions
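
Besides the symbol annotations, the diff below also touches the Spectre link-stack flush loops in entry_64.S and book3s_hv_rmhandlers.S. Each `bl .+4` there is a call that lands on the next instruction and never returns through the link register, so it is tagged with ANNOTATE_INTRA_FUNCTION_CALL from <linux/objtool.h>, which records that the call stays inside the current function rather than targeting an unannotated one. A hedged sketch of the idiom (the repeat count of 64 matches the entry_64.S hunk; the KVM handlers use 32):

    #include <linux/objtool.h>

            // Flush the branch predictor's link stack by pushing
            // return addresses that are never consumed. Each bl is
            // annotated so objtool treats it as an intra-function
            // call instead of a call to another function.
            .rept 64
            ANNOTATE_INTRA_FUNCTION_CALL
            bl      .+4
            .endr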
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 65952f62ea4b..b8c4ac56bddc 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -241,6 +241,8 @@ config PPC
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NMI				if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
 	select HAVE_OPTPROBES
+	select HAVE_OBJTOOL			if PPC32 || MPROFILE_KERNEL
+	select HAVE_OBJTOOL_MCOUNT		if HAVE_OBJTOOL
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_EVENTS_NMI		if PPC64
 	select HAVE_PERF_REGS
diff --git a/arch/powerpc/include/asm/asm.h b/arch/powerpc/include/asm/asm.h
new file mode 100644
index 000000000000..86f46b604e9a
--- /dev/null
+++ b/arch/powerpc/include/asm/asm.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_ASM_H
+#define _ASM_POWERPC_ASM_H
+
+#define _ASM_PTR	" .long "
+
+#endif /* _ASM_POWERPC_ASM_H */
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 61a4736355c2..ef42adb44aa3 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -99,7 +99,8 @@
 	__label__ __label_warn_on;				\
 								\
 	WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \
-	unreachable();						\
+	barrier_before_unreachable();				\
+	__builtin_unreachable();				\
 								\
 __label_warn_on:						\
 	break;							\
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index b71b9582e754..b88d1d2cf304 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -4,6 +4,9 @@
 
 #include <asm/types.h>
 
+#define __ALIGN		.align 2
+#define __ALIGN_STR	".align 2"
+
 #ifdef CONFIG_PPC64_ELF_ABI_V1
 #define cond_syscall(x) \
 	asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f8b5ff64b604..f29ce3dd6140 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -4,6 +4,8 @@
  * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
  */
 
+#include <linux/linkage.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
@@ -81,7 +83,7 @@ _GLOBAL(__setup_cpu_745x)
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
-setup_common_caches:
+SYM_FUNC_START_LOCAL(setup_common_caches)
 	mfspr	r11,SPRN_HID0
 	andi.	r0,r11,HID0_DCE
 	ori	r11,r11,HID0_ICE|HID0_DCE
@@ -95,11 +97,12 @@ setup_common_caches:
 	sync
 	isync
 	blr
+SYM_FUNC_END(setup_common_caches)
 
 /* 604, 604e, 604ev, ...
  * Enable superscalar execution & branch history table
  */
-setup_604_hid0:
+SYM_FUNC_START_LOCAL(setup_604_hid0)
 	mfspr	r11,SPRN_HID0
 	ori	r11,r11,HID0_SIED|HID0_BHTE
 	ori	r8,r11,HID0_BTCD
@@ -110,6 +113,7 @@ setup_604_hid0:
 	sync
 	isync
 	blr
+SYM_FUNC_END(setup_604_hid0)
 
 /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
  * erratas we work around here.
@@ -125,13 +129,14 @@ setup_604_hid0:
  * needed once we have applied workaround #5 (though it's
  * not set by Apple's firmware at least).
  */
-setup_7400_workarounds:
+SYM_FUNC_START_LOCAL(setup_7400_workarounds)
 	mfpvr	r3
 	rlwinm	r3,r3,0,20,31
 	cmpwi	0,r3,0x0207
 	ble	1f
 	blr
-setup_7410_workarounds:
+SYM_FUNC_END(setup_7400_workarounds)
+SYM_FUNC_START_LOCAL(setup_7410_workarounds)
 	mfpvr	r3
 	rlwinm	r3,r3,0,20,31
 	cmpwi	0,r3,0x0100
@@ -151,6 +156,7 @@ setup_7410_workarounds:
 	sync
 	isync
 	blr
+SYM_FUNC_END(setup_7410_workarounds)
 
 /* 740/750/7400/7410
  * Enable Store Gathering (SGE), Address Broadcast (ABE),
@@ -158,7 +164,7 @@ setup_7410_workarounds:
  * Dynamic Power Management (DPM), Speculative (SPD)
  * Clear Instruction cache throttling (ICTC)
  */
-setup_750_7400_hid0:
+SYM_FUNC_START_LOCAL(setup_750_7400_hid0)
 	mfspr	r11,SPRN_HID0
 	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
 	oris	r11,r11,HID0_DPM@h
@@ -177,12 +183,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 	sync
 	isync
 	blr
+SYM_FUNC_END(setup_750_7400_hid0)
 
 /* 750cx specific
  * Looks like we have to disable NAP feature for some PLL settings...
  * (waiting for confirmation)
  */
-setup_750cx:
+SYM_FUNC_START_LOCAL(setup_750cx)
 	mfspr	r10, SPRN_HID1
 	rlwinm	r10,r10,4,28,31
 	cmpwi	cr0,r10,7
@@ -196,11 +203,13 @@ setup_750cx:
 	andc	r6,r6,r7
 	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
+SYM_FUNC_END(setup_750cx)
 
 /* 750fx specific
  */
-setup_750fx:
+SYM_FUNC_START_LOCAL(setup_750fx)
 	blr
+SYM_FUNC_END(setup_750fx)
 
 /* MPC 745x
  * Enable Store Gathering (SGE), Branch Folding (FOLD)
@@ -212,7 +221,7 @@ setup_750fx:
  * Clear Instruction cache throttling (ICTC)
  * Enable L2 HW prefetch
 */
-setup_745x_specifics:
+SYM_FUNC_START_LOCAL(setup_745x_specifics)
 	/* We check for the presence of an L3 cache setup by
 	 * the firmware. If any, we disable NAP capability as
 	 * it's known to be bogus on rev 2.1 and earlier
@@ -270,6 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
 	sync
 	isync
 	blr
+SYM_FUNC_END(setup_745x_specifics)
 
 /*
  * Initialize the FPU registers. This is needed to work around an errata
diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S
index 2ab25161b0ad..077cfccc3461 100644
--- a/arch/powerpc/kernel/cpu_setup_e500.S
+++ b/arch/powerpc/kernel/cpu_setup_e500.S
@@ -8,6 +8,8 @@
  * Benjamin Herrenschmidt <benh@kernel.crashing.org>
  */
 
+#include <linux/linkage.h>
+
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
@@ -274,7 +276,7 @@ _GLOBAL(flush_dcache_L1)
 
 	blr
 
-has_L2_cache:
+SYM_FUNC_START_LOCAL(has_L2_cache)
 	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
 	mfspr	r3, SPRN_SVR
 	/* shift right by 8 bits and clear E bit of SVR */
@@ -290,9 +292,10 @@ has_L2_cache:
 1:
 	li	r3, 0
 	blr
+SYM_FUNC_END(has_L2_cache)
 
 /* flush backside L2 cache */
-flush_backside_L2_cache:
+SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
 	mflr	r10
 	bl	has_L2_cache
 	mtlr	r10
@@ -313,6 +316,7 @@ flush_backside_L2_cache:
 	bne	1b
 2:
 	blr
+SYM_FUNC_END(flush_backside_L2_cache)
 
 _GLOBAL(cpu_down_flush_e500v2)
 	mflr	r0
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 6e99ec10be89..5604c9a1ac22 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -18,6 +18,8 @@
 #include <linux/err.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
+#include <linux/linkage.h>
+
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -74,17 +76,18 @@ _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
 #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
 
 #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
-	.globl	__kuep_lock
-__kuep_lock:
+SYM_FUNC_START(__kuep_lock)
 	lwz	r9, THREAD+THSR0(r2)
 	update_user_segments_by_4 r9, r10, r11, r12
 	blr
+SYM_FUNC_END(__kuep_lock)
 
-__kuep_unlock:
+SYM_FUNC_START_LOCAL(__kuep_unlock)
 	lwz	r9, THREAD+THSR0(r2)
 	rlwinm	r9,r9,0,~SR_NX
 	update_user_segments_by_4 r9, r10, r11, r12
 	blr
+SYM_FUNC_END(__kuep_unlock)
 
 .macro	kuep_lock
 	bl	__kuep_lock
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 3e2e37e6ecab..1bf1121e17f1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -14,6 +14,7 @@
  * code, and exception/interrupt return code for PowerPC.
  */
 
+#include <linux/objtool.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <asm/cache.h>
@@ -73,6 +74,7 @@ flush_branch_caches:
 
 	// Flush the link stack
 	.rept 64
+	ANNOTATE_INTRA_FUNCTION_CALL
 	bl	.+4
 	.endr
 	b	1f
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 429096b037d7..6441a1ba57ac 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/linkage.h>
 #include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
@@ -3140,7 +3141,7 @@ _GLOBAL(enable_machine_check)
 	blr
 
 /* MSR[RI] should be clear because this uses SRR[01] */
-disable_machine_check:
+SYM_FUNC_START_LOCAL(disable_machine_check)
 	mflr	r0
 	bcl	20,31,$+4
 0:	mflr	r3
@@ -3153,3 +3154,4 @@ disable_machine_check:
 	RFI_TO_KERNEL
 1:	mtlr	r0
 	blr
+SYM_FUNC_END(disable_machine_check)
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 918547b93b5e..3f68a1624646 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -28,6 +28,8 @@
 #include <linux/init.h>
 #include <linux/pgtable.h>
 #include <linux/sizes.h>
+#include <linux/linkage.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -662,7 +664,7 @@ start_here:
 * kernel initialization. This maps the first 32 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
 	tlbia			/* Invalidate all TLB entries */
 	isync
 
@@ -711,6 +713,7 @@ initial_mmu:
 	mtspr	SPRN_EVPR,r0
 
 	blr
+SYM_FUNC_END(initial_mmu)
 
 _GLOBAL(abort)
 	mfspr	r13,SPRN_DBCR0
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b513d13bf79e..7558ba4eb864 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -18,6 +18,7 @@
  * variants.
  */
 
+#include <linux/linkage.h>
 #include <linux/threads.h>
 #include <linux/init.h>
 #include <asm/reg.h>
@@ -462,7 +463,7 @@ generic_secondary_common_init:
  * Assumes we're mapped EA == RA if the MMU is on.
  */
 #ifdef CONFIG_PPC_BOOK3S
-__mmu_off:
+SYM_FUNC_START_LOCAL(__mmu_off)
 	mfmsr	r3
 	andi.	r0,r3,MSR_IR|MSR_DR
 	beqlr
@@ -473,6 +474,7 @@ __mmu_off:
 	sync
 	rfid
 	b	.	/* prevent speculative execution */
+SYM_FUNC_END(__mmu_off)
 #endif
 
 
@@ -869,7 +871,7 @@ _GLOBAL(start_secondary_resume)
 /*
  * This subroutine clobbers r11 and r12
  */
-enable_64b_mode:
+SYM_FUNC_START_LOCAL(enable_64b_mode)
 	mfmsr	r11			/* grab the current MSR */
 #ifdef CONFIG_PPC_BOOK3E_64
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
@@ -881,6 +883,7 @@ enable_64b_mode:
 	isync
 #endif
 	blr
+SYM_FUNC_END(enable_64b_mode)
 
 /*
  * This puts the TOC pointer into r2, offset by 0x8000 (as expected
diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S
index d9bd377dec91..d438ca74e96c 100644
--- a/arch/powerpc/kernel/head_85xx.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -29,6 +29,8 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -885,7 +887,7 @@ KernelSPE:
 * Translate the effec addr in r3 to phys addr. The phys addr will be put
 * into r3(higher 32bit) and r4(lower 32bit)
 */
-get_phys_addr:
+SYM_FUNC_START_LOCAL(get_phys_addr)
 	mfmsr	r8
 	mfspr	r9,SPRN_PID
 	rlwinm	r9,r9,16,0x3fff0000	/* turn PID into MAS6[SPID] */
@@ -907,6 +909,7 @@ get_phys_addr:
 	mfspr	r3,SPRN_MAS7
 #endif
 	blr
+SYM_FUNC_END(get_phys_addr)
 
 /*
  * Global functions
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index cf546d0e5c40..a79751e05781 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -18,6 +18,8 @@
 #include <linux/magic.h>
 #include <linux/pgtable.h>
 #include <linux/sizes.h>
+#include <linux/linkage.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -625,7 +627,7 @@ start_here:
 * 24 Mbytes of data, and the 512k IMMR space. Anything not covered by
 * these mappings is mapped by page tables.
 */
-initial_mmu:
+SYM_FUNC_START_LOCAL(initial_mmu)
 	li	r8, 0
 	mtspr	SPRN_MI_CTR, r8		/* remove PINNED ITLB entries */
 	lis	r10, MD_TWAM@h
@@ -686,6 +688,7 @@ initial_mmu:
 #endif
 	mtspr	SPRN_DER, r8
 	blr
+SYM_FUNC_END(initial_mmu)
 
 _GLOBAL(mmu_pin_tlb)
 	lis	r9, (1f - PAGE_OFFSET)@h
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 40854d092dd3..c51f28b5abc0 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -18,6 +18,8 @@
 
 #include <linux/init.h>
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
+
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -877,7 +879,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
 * Load stuff into the MMU. Intended to be called with
 * IR=0 and DR=0.
 */
-early_hash_table:
+SYM_FUNC_START_LOCAL(early_hash_table)
 	sync			/* Force all PTE updates to finish */
 	isync
 	tlbia			/* Clear all TLB entries */
@@ -888,8 +890,9 @@ early_hash_table:
 	ori	r6, r6, 3	/* 256kB table */
 	mtspr	SPRN_SDR1, r6
 	blr
+SYM_FUNC_END(early_hash_table)
 
-load_up_mmu:
+SYM_FUNC_START_LOCAL(load_up_mmu)
 	sync			/* Force all PTE updates to finish */
 	isync
 	tlbia			/* Clear all TLB entries */
@@ -918,6 +921,7 @@ BEGIN_MMU_FTR_SECTION
 	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 	blr
+SYM_FUNC_END(load_up_mmu)
 
 _GLOBAL(load_segment_registers)
 	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
@@ -1028,7 +1032,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
 * this makes sure it's done.
 *  -- Cort
 */
-clear_bats:
+SYM_FUNC_START_LOCAL(clear_bats)
 	li	r10,0
 
 	mtspr	SPRN_DBAT0U,r10
@@ -1072,6 +1076,7 @@ BEGIN_MMU_FTR_SECTION
 	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 	blr
+SYM_FUNC_END(clear_bats)
 
 _GLOBAL(update_bats)
 	lis	r4, 1f@h
@@ -1108,15 +1113,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 	mtspr	SPRN_SRR1, r6
 	rfi
 
-flush_tlbs:
+SYM_FUNC_START_LOCAL(flush_tlbs)
 	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
 	tlbie	r10
 	bgt	1b
 	sync
 	blr
+SYM_FUNC_END(flush_tlbs)
 
-mmu_off:
+SYM_FUNC_START_LOCAL(mmu_off)
 	addi	r4, r3, __after_mmu_off - _start
 	mfmsr	r3
 	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
@@ -1128,9 +1134,10 @@ mmu_off:
 	mtspr	SPRN_SRR1,r3
 	sync
 	rfi
+SYM_FUNC_END(mmu_off)
 
 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
-initial_bats:
+SYM_FUNC_START_LOCAL(initial_bats)
 	lis	r11,PAGE_OFFSET@h
 	tophys(r8,r11)
 #ifdef CONFIG_SMP
@@ -1146,9 +1153,10 @@ initial_bats:
 	mtspr	SPRN_IBAT0U,r11
 	isync
 	blr
+SYM_FUNC_END(initial_bats)
 
 #ifdef CONFIG_BOOTX_TEXT
-setup_disp_bat:
+SYM_FUNC_START_LOCAL(setup_disp_bat)
 	/*
 	 * setup the display bat prepared for us in prom.c
 	 */
@@ -1164,10 +1172,11 @@ setup_disp_bat:
 	mtspr	SPRN_DBAT3L,r8
 	mtspr	SPRN_DBAT3U,r11
 	blr
+SYM_FUNC_END(setup_disp_bat)
 #endif /* CONFIG_BOOTX_TEXT */
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
-setup_cpm_bat:
+SYM_FUNC_START_LOCAL(setup_cpm_bat)
 	lis	r8, 0xf000
 	ori	r8, r8, 0x002a
 	mtspr	SPRN_DBAT1L, r8
@@ -1177,10 +1186,11 @@ setup_cpm_bat:
 
 	mtspr	SPRN_DBAT1U, r11
 	blr
+SYM_FUNC_END(setup_cpm_bat)
 #endif
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
-setup_usbgecko_bat:
+SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
 	/* prepare a BAT for early io */
 #if defined(CONFIG_GAMECUBE)
 	lis	r8, 0x0c00
@@ -1199,6 +1209,7 @@ setup_usbgecko_bat:
 	mtspr	SPRN_DBAT1L, r8
 	mtspr	SPRN_DBAT1U, r11
 	blr
+SYM_FUNC_END(setup_usbgecko_bat)
 #endif
 
 	.data
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4bb6dd30c556..c39c07a4c06e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -9,6 +9,7 @@
  * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
  */
 
+#include <linux/linkage.h>
 #include <linux/sys.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
@@ -353,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
-real_mode:	/* assume normal blr return */
+SYM_FUNC_START_LOCAL(real_mode)	/* assume normal blr return */
 #ifdef CONFIG_PPC_BOOK3E_64
 	/* Create an identity mapping. */
 	b	kexec_create_tlb
@@ -370,6 +371,7 @@ real_mode:	/* assume normal blr return */
 	mtspr	SPRN_SRR0,r11
 	rfid
 #endif
+SYM_FUNC_END(real_mode)
 
 /*
 * kexec_sequence(newstack, start, image, control, clear_all(),
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index e0cbd63007f2..ffb79326483c 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -1,5 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/threads.h>
+#include <linux/linkage.h>
+
 #include <asm/processor.h>
 #include <asm/page.h>
 #include <asm/cputable.h>
@@ -400,7 +402,7 @@ _ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
 /* FIXME:This construct is actually not useful since we don't shut
 * down the instruction MMU, we could just flip back MSR-DR on.
 */
-turn_on_mmu:
+SYM_FUNC_START_LOCAL(turn_on_mmu)
 	mflr	r4
 	mtsrr0	r4
 	mtsrr1	r3
@@ -408,4 +410,5 @@ turn_on_mmu:
 	isync
 	rfi
_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
+SYM_FUNC_END(turn_on_mmu)
 
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index a2e7b0ce5b19..6a977b0d8ffc 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -102,3 +102,5 @@ quiet_cmd_vdso64ld_and_check = VDSO64L $@
       cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check)
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $<
+
+OBJECT_FILES_NON_STANDARD := y
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 5cf64740edb8..ffe5d90abe17 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,4 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
@@ -185,7 +186,7 @@ fphalf:
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
-fpenable:
+SYM_FUNC_START_LOCAL(fpenable)
 #ifdef CONFIG_PPC32
 	stwu	r1,-64(r1)
 #else
@@ -202,6 +203,7 @@ fpenable:
 	mffs	fr31
 	MTFSF_L(fr1)
 	blr
+SYM_FUNC_END(fpenable)
 
 fpdisable:
 	mtlr	r12
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 59d89e4b154a..c0deeea7eef3 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -9,6 +9,7 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#include <linux/linkage.h>
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
@@ -107,7 +108,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 /*
 * void kvmhv_save_host_pmu(void)
 */
-kvmhv_save_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_host_pmu)
BEGIN_FTR_SECTION
 	/* Work around P8 PMAE bug */
 	li	r3, -1
@@ -154,3 +155,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	stw	r8, HSTATE_PMC5(r13)
 	stw	r9, HSTATE_PMC6(r13)
31:	blr
+SYM_FUNC_END(kvmhv_save_host_pmu)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a9e162a1deec..acf80915f406 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -10,6 +10,8 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
+#include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/ppc_asm.h>
 #include <asm/code-patching-asm.h>
 #include <asm/kvm_asm.h>
@@ -1522,12 +1524,14 @@ kvm_flush_link_stack:
 
 	/* Flush the link stack. On Power8 it's up to 32 entries in size. */
 	.rept 32
+	ANNOTATE_INTRA_FUNCTION_CALL
 	bl	.+4
 	.endr
 
 	/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
 	.rept 32
+	ANNOTATE_INTRA_FUNCTION_CALL
 	bl	.+4
 	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2358,7 +2362,7 @@ hmi_realmode:
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
-kvmppc_check_wake_reason:
+SYM_FUNC_START_LOCAL(kvmppc_check_wake_reason)
 	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
 	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
@@ -2427,6 +2431,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	addi	r1, r1, PPC_MIN_STKFRM
 	mtlr	r0
 	blr
+SYM_FUNC_END(kvmppc_check_wake_reason)
 
 /*
 * Save away FP, VMX and VSX registers.
@@ -2434,7 +2439,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
-kvmppc_save_fp:
+SYM_FUNC_START_LOCAL(kvmppc_save_fp)
 	mflr	r30
 	mr	r31,r3
 	mfmsr	r5
@@ -2462,6 +2467,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
 	blr
+SYM_FUNC_END(kvmppc_save_fp)
 
 /*
 * Load up FP, VMX and VSX registers
@@ -2469,7 +2475,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
-kvmppc_load_fp:
+SYM_FUNC_START_LOCAL(kvmppc_load_fp)
 	mflr	r30
 	mr	r31,r4
 	mfmsr	r9
@@ -2498,6 +2504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mtlr	r30
 	mr	r4,r31
 	blr
+SYM_FUNC_END(kvmppc_load_fp)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 /*
@@ -2746,7 +2753,7 @@ kvmppc_bad_host_intr:
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
-kvmppc_msr_interrupt:
+SYM_FUNC_START_LOCAL(kvmppc_msr_interrupt)
 	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
 	cmpwi	r0, 2 /* Check if we are in transactional state..  */
 	ld	r11, VCPU_INTR_MSR(r9)
@@ -2755,13 +2762,14 @@ kvmppc_msr_interrupt:
 	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
 	blr
+SYM_FUNC_END(kvmppc_msr_interrupt)
 
 /*
 * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
 *
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
-kvmhv_load_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_guest_pmu)
 	mr	r4, r3
 	mflr	r0
 	li	r3, 1
@@ -2811,13 +2819,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	isync
 	mtlr	r0
 	blr
+SYM_FUNC_END(kvmhv_load_guest_pmu)
 
 /*
 * void kvmhv_load_host_pmu(void)
 *
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
-kvmhv_load_host_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_load_host_pmu)
 	mflr	r0
 	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
 	cmpwi	r4, 0
@@ -2859,6 +2868,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	isync
 	mtlr	r0
23:	blr
+SYM_FUNC_END(kvmhv_load_host_pmu)
 
 /*
 * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
@@ -2866,7 +2876,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
-kvmhv_save_guest_pmu:
+SYM_FUNC_START_LOCAL(kvmhv_save_guest_pmu)
 	mr	r9, r3
 	mr	r8, r4
BEGIN_FTR_SECTION
@@ -2942,6 +2952,7 @@ BEGIN_FTR_SECTION
 	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
+SYM_FUNC_END(kvmhv_save_guest_pmu)
 
 /*
 * This works around a hardware bug on POWER8E processors, where
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 315c94946bad..b68e7f26a81f 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -6,6 +6,8 @@
  */
 
 #include <linux/pgtable.h>
+#include <linux/linkage.h>
+
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -110,18 +112,22 @@ FPS_THREE_IN(fsel)
 * R8 = (double*)&param3 [load_three]
 * LR = instruction call function
 */
-fpd_load_three:
+SYM_FUNC_START_LOCAL(fpd_load_three)
 	lfd	2,0(r8)			/* load param3 */
-fpd_load_two:
+SYM_FUNC_START_LOCAL(fpd_load_two)
 	lfd	1,0(r7)			/* load param2 */
-fpd_load_one:
+SYM_FUNC_START_LOCAL(fpd_load_one)
 	lfd	0,0(r6)			/* load param1 */
-fpd_load_none:
+SYM_FUNC_START_LOCAL(fpd_load_none)
 	lfd	3,0(r3)			/* load up fpscr value */
 	MTFSF_L(3)
 	lwz	r6, 0(r4)		/* load cr */
 	mtcr	r6
 	blr
+SYM_FUNC_END(fpd_load_none)
+SYM_FUNC_END(fpd_load_one)
+SYM_FUNC_END(fpd_load_two)
+SYM_FUNC_END(fpd_load_three)
 
 /*
 * End of double instruction processing
@@ -131,13 +137,14 @@ fpd_load_none:
 * R5 = (double*)&result
 * LR = caller of instruction call function
 */
-fpd_return:
+SYM_FUNC_START_LOCAL(fpd_return)
 	mfcr	r6
 	stfd	0,0(r5)			/* save result */
 	mffs	0
 	stfd	0,0(r3)			/* save new fpscr value */
 	stw	r6,0(r4)		/* save new cr value */
 	blr
+SYM_FUNC_END(fpd_return)
 
 /*
 * Double operation with no input operand
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index afee8b1515a8..0b12647e7b42 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+
 #include <asm/reg.h>
 #include <asm/ppc_asm.h>
 #include <asm/processor.h>
@@ -178,7 +180,8 @@ sram_code:
 
 
 	/* local udelay in sram is needed */
-  udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
+SYM_FUNC_START_LOCAL(udelay)
+	/* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
 	mullw	r12, r12, r11
 	mftb	r13	/* start */
 	add	r12, r13, r12 /* end */
@@ -187,6 +190,7 @@ sram_code:
 	cmp	cr0, r13, r12
 	blt	1b
 	blr
+SYM_FUNC_END(udelay)
 
 sram_code_end:
 
@@ -271,7 +275,7 @@ _ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
 	SAVE_SR(n+2, addr+2); \
 	SAVE_SR(n+3, addr+3);
 
-save_regs:
+SYM_FUNC_START_LOCAL(save_regs)
 	stw	r0, 0(r4)
 	stw	r1, 0x4(r4)
 	stw	r2, 0x8(r4)
@@ -317,6 +321,7 @@ save_regs:
 	SAVE_SPRN(TBRU,  0x5b)
 
 	blr
+SYM_FUNC_END(save_regs)
 
 
 /* restore registers */
@@ -336,7 +341,7 @@ save_regs:
 	LOAD_SR(n+2, addr+2); \
 	LOAD_SR(n+3, addr+3);
 
-restore_regs:
+SYM_FUNC_START_LOCAL(restore_regs)
 	lis	r4, registers@h
 	ori	r4, r4, registers@l
 
@@ -393,6 +398,7 @@ restore_regs:
 
 	blr
_ASM_NOKPROBE_SYMBOL(restore_regs)
+SYM_FUNC_END(restore_regs)
 
 
 
@@ -403,7 +409,7 @@ _ASM_NOKPROBE_SYMBOL(restore_regs)
 * Flush data cache
 * Do this by just reading lots of stuff into the cache.
 */
-flush_data_cache:
+SYM_FUNC_START_LOCAL(flush_data_cache)
 	lis	r3,CONFIG_KERNEL_START@h
 	ori	r3,r3,CONFIG_KERNEL_START@l
 	li	r4,NUM_CACHE_LINES
@@ -413,3 +419,4 @@ flush_data_cache:
 	addi	r3,r3,L1_CACHE_BYTES	/* Next line, please */
 	bdnz	1b
 	blr
+SYM_FUNC_END(flush_data_cache)