From 58aedccb1907f05f702f0f6d8f8a57e8efe485b7 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 23 Apr 2014 10:02:04 +1000 Subject: powerpc: Don't build assembly files with ABIv2 We avoid ABIv2 when building C files since commit b2ca8c89 (powerpc: Don't use ELFv2 ABI to build the kernel). Do the same for assembly files. Signed-off-by: Anton Blanchard --- arch/powerpc/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 4c0cedf4e2c7..e8dd01af504d 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -115,6 +115,7 @@ endif CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) @@ -151,7 +152,7 @@ endif CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) KBUILD_CPPFLAGS += -Iarch/$(ARCH) -KBUILD_AFLAGS += -Iarch/$(ARCH) +KBUILD_AFLAGS += -Iarch/$(ARCH) $(AFLAGS-y) KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) CPP = $(CC) -E $(KBUILD_CFLAGS) -- cgit v1.2.3 From b1576fec7f4dd4657694fefc97fda4cf28ec68e9 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:04:35 +1100 Subject: powerpc: No need to use dot symbols when branching to a function binutils is smart enough to know that a branch to a function descriptor is actually a branch to the function's text address. Alan tells me that binutils has been doing this for 9 years. Signed-off-by: Anton Blanchard --- arch/powerpc/boot/util.S | 4 +- arch/powerpc/include/asm/context_tracking.h | 4 +- arch/powerpc/include/asm/exception-64e.h | 6 +- arch/powerpc/include/asm/exception-64s.h | 2 +- arch/powerpc/include/asm/irqflags.h | 4 +- arch/powerpc/include/asm/ppc_asm.h | 2 +- arch/powerpc/kernel/cpu_setup_fsl_booke.S | 28 +++--- arch/powerpc/kernel/entry_64.S | 86 ++++++++--------- arch/powerpc/kernel/exceptions-64e.S | 128 ++++++++++++------------- arch/powerpc/kernel/exceptions-64s.S | 140 ++++++++++++++-------------- arch/powerpc/kernel/head_64.S | 66 ++++++------- arch/powerpc/kernel/idle_book3e.S | 2 +- arch/powerpc/kernel/idle_power4.S | 2 +- arch/powerpc/kernel/idle_power7.S | 4 +- arch/powerpc/kernel/misc_64.S | 10 +- arch/powerpc/kvm/book3s_hv_interrupts.S | 2 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 6 +- arch/powerpc/lib/copypage_64.S | 2 +- arch/powerpc/lib/copypage_power7.S | 4 +- arch/powerpc/lib/copyuser_power7.S | 8 +- arch/powerpc/lib/hweight_64.S | 8 +- arch/powerpc/lib/mem_64.S | 4 +- arch/powerpc/lib/memcpy_power7.S | 6 +- arch/powerpc/mm/hash_low_64.S | 8 +- arch/powerpc/platforms/pasemi/powersave.S | 2 +- arch/powerpc/platforms/pseries/hvCall.S | 4 +- 26 files changed, 271 insertions(+), 271 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index 6636b1d7821b..243b8497d58b 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -45,7 +45,7 @@ udelay: mfspr r4,SPRN_PVR srwi r4,r4,16 cmpwi 0,r4,1 /* 601 ? */ - bne .udelay_not_601 + bne .Ludelay_not_601 00: li r0,86 /* Instructions / microsecond?
*/ mtctr r0 10: addi r0,r0,0 /* NOP */ @@ -54,7 +54,7 @@ udelay: bne 00b blr -.udelay_not_601: +.Ludelay_not_601: mulli r4,r3,1000 /* nanoseconds */ /* Change r4 to be the number of ticks using: * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h index b6f5a33b8ee2..40014921ffff 100644 --- a/arch/powerpc/include/asm/context_tracking.h +++ b/arch/powerpc/include/asm/context_tracking.h @@ -2,9 +2,9 @@ #define _ASM_POWERPC_CONTEXT_TRACKING_H #ifdef CONFIG_CONTEXT_TRACKING -#define SCHEDULE_USER bl .schedule_user +#define SCHEDULE_USER bl schedule_user #else -#define SCHEDULE_USER bl .schedule +#define SCHEDULE_USER bl schedule #endif #endif diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a563d9afd179..a8b52b61043f 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -174,10 +174,10 @@ exc_##label##_book3e: mtlr r16; #define TLB_MISS_STATS_D(name) \ addi r9,r13,MMSTAT_DSTATS+name; \ - bl .tlb_stat_inc; + bl tlb_stat_inc; #define TLB_MISS_STATS_I(name) \ addi r9,r13,MMSTAT_ISTATS+name; \ - bl .tlb_stat_inc; + bl tlb_stat_inc; #define TLB_MISS_STATS_X(name) \ ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \ cmpdi cr2,r8,-1; \ @@ -185,7 +185,7 @@ exc_##label##_book3e: addi r9,r13,MMSTAT_DSTATS+name; \ b 62f; \ 61: addi r9,r13,MMSTAT_ISTATS+name; \ -62: bl .tlb_stat_inc; +62: bl tlb_stat_inc; #define TLB_MISS_STATS_SAVE_INFO \ std r14,EX_TLB_ESR(r12); /* save ESR */ #define TLB_MISS_STATS_SAVE_INFO_BOLTED \ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index aeaa56cd9b54..8f35cd7d59cc 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -517,7 +517,7 @@ label##_relon_hv: \ #define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11) #define ADD_NVGPRS \ - bl .save_nvgprs + bl save_nvgprs #define RUNLATCH_ON \ BEGIN_FTR_SECTION \ diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index f51a5580bfd0..f62c056e75bf 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -36,8 +36,8 @@ * have to call a C function so call a wrapper that saves all the * C-clobbered registers. */ -#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) -#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) +#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on) +#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off) /* * This is used by assembly code to soft-disable interrupts first and diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6586a40a46ce..3128ba3ba7a0 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -57,7 +57,7 @@ BEGIN_FW_FTR_SECTION; \ LDX_BE r10,0,r10; /* get log write index */ \ cmpd cr1,r11,r10; \ beq+ cr1,33f; \ - bl .accumulate_stolen_time; \ + bl accumulate_stolen_time; \ ld r12,_MSR(r1); \ andi. 
r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \ 33: \ diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index cc2d8962e090..4f1393d20079 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle) _GLOBAL(__setup_cpu_e6500) mflr r6 #ifdef CONFIG_PPC64 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: #endif bl setup_pw20_idle @@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500) #ifdef CONFIG_PPC_BOOK3E_64 _GLOBAL(__restore_cpu_e6500) mflr r5 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: - bl .setup_pw20_idle - bl .setup_altivec_idle + bl setup_pw20_idle + bl setup_altivec_idle bl __restore_cpu_e5500 mtlr r5 blr @@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500) mflr r4 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors 1: mtlr r4 blr @@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500) mflr r5 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors b 2f 1: ld r10,CPU_SPEC_FEATURES(r4) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 662c6dd98072..b629198b072c 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION LDX_BE r10,0,r10 /* get log write index */ cmpd cr1,r11,r10 beq+ cr1,33f - bl .accumulate_stolen_time + bl accumulate_stolen_time REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) std r10,SOFTE(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall + bl do_show_syscall REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -181,7 +181,7 @@ system_call: /* label this so stack traces look sane */ syscall_exit: std r3,RESULT(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall_exit + bl do_show_syscall_exit ld r3,RESULT(r1) #endif CURRENT_THREAD_INFO(r12, r1) @@ -248,9 +248,9 @@ syscall_error: /* Traced system call support */ syscall_dotrace: - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_enter + bl do_syscall_trace_enter /* * Restore argument registers possibly just changed. * We use the return value of do_syscall_trace_enter @@ -308,7 +308,7 @@ syscall_exit_work: 4: /* Anything else left to do? */ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. 
r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) - beq .ret_from_except_lite + beq ret_from_except_lite /* Re-enable interrupts */ #ifdef CONFIG_PPC_BOOK3E @@ -319,10 +319,10 @@ syscall_exit_work: mtmsrd r10,1 #endif /* CONFIG_PPC_BOOK3E */ - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_leave - b .ret_from_except + bl do_syscall_trace_leave + b ret_from_except /* Save non-volatile GPRs, if not already saved. */ _GLOBAL(save_nvgprs) @@ -345,38 +345,38 @@ _GLOBAL(save_nvgprs) */ _GLOBAL(ppc_fork) - bl .save_nvgprs - bl .sys_fork + bl save_nvgprs + bl sys_fork b syscall_exit _GLOBAL(ppc_vfork) - bl .save_nvgprs - bl .sys_vfork + bl save_nvgprs + bl sys_vfork b syscall_exit _GLOBAL(ppc_clone) - bl .save_nvgprs - bl .sys_clone + bl save_nvgprs + bl sys_clone b syscall_exit _GLOBAL(ppc32_swapcontext) - bl .save_nvgprs - bl .compat_sys_swapcontext + bl save_nvgprs + bl compat_sys_swapcontext b syscall_exit _GLOBAL(ppc64_swapcontext) - bl .save_nvgprs - bl .sys_swapcontext + bl save_nvgprs + bl sys_swapcontext b syscall_exit _GLOBAL(ret_from_fork) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) li r3,0 b syscall_exit _GLOBAL(ret_from_kernel_thread) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) ld r14, 0(r14) mtlr r14 @@ -611,7 +611,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) _GLOBAL(ret_from_except) ld r11,_TRAP(r1) andi. r0,r11,1 - bne .ret_from_except_lite + bne ret_from_except_lite REST_NVGPRS(r1) _GLOBAL(ret_from_except_lite) @@ -661,23 +661,23 @@ _GLOBAL(ret_from_except_lite) #endif 1: andi. r0,r4,_TIF_NEED_RESCHED beq 2f - bl .restore_interrupts + bl restore_interrupts SCHEDULE_USER - b .ret_from_except_lite + b ret_from_except_lite 2: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM bne 3f /* only restore TM if nothing else to do */ addi r3,r1,STACK_FRAME_OVERHEAD - bl .restore_tm_state + bl restore_tm_state b restore 3: #endif - bl .save_nvgprs - bl .restore_interrupts + bl save_nvgprs + bl restore_interrupts addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_notify_resume - b .ret_from_except + bl do_notify_resume + b ret_from_except resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ @@ -730,7 +730,7 @@ resume_kernel: * sure we are soft-disabled first and reconcile irq state. */ RECONCILE_IRQ_STATE(r3,r4) -1: bl .preempt_schedule_irq +1: bl preempt_schedule_irq /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) @@ -792,7 +792,7 @@ restore_no_replay: */ do_restore: #ifdef CONFIG_PPC_BOOK3E - b .exception_return_book3e + b exception_return_book3e #else /* * Clear the reservation. If we know the CPU tracks the address of @@ -907,7 +907,7 @@ restore_check_irq_replay: * * Still, this might be useful for things like hash_page */ - bl .__check_irq_replay + bl __check_irq_replay cmpwi cr0,r3,0 beq restore_no_replay @@ -928,13 +928,13 @@ restore_check_irq_replay: cmpwi cr0,r3,0x500 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .do_IRQ - b .ret_from_except + bl do_IRQ + b ret_from_except 1: cmpwi cr0,r3,0x900 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .timer_interrupt - b .ret_from_except + bl timer_interrupt + b ret_from_except #ifdef CONFIG_PPC_DOORBELL 1: #ifdef CONFIG_PPC_BOOK3E @@ -948,14 +948,14 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .doorbell_exception - b .ret_from_except + bl doorbell_exception + b ret_from_except #endif /* CONFIG_PPC_DOORBELL */ -1: b .ret_from_except /* What else to do here ? 
*/ +1: b ret_from_except /* What else to do here ? */ unrecov_restore: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b unrecov_restore #ifdef CONFIG_PPC_RTAS @@ -1238,7 +1238,7 @@ _GLOBAL(ftrace_graph_caller) ld r11, 112(r1) addi r3, r11, 16 - bl .prepare_ftrace_return + bl prepare_ftrace_return nop ld r0, 128(r1) @@ -1254,7 +1254,7 @@ _GLOBAL(return_to_handler) mr r31, r1 stdu r1, -112(r1) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ @@ -1284,7 +1284,7 @@ _GLOBAL(mod_return_to_handler) */ ld r2, PACATOC(r13) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c1bee3ce9d1f..5e37338c2e5c 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -499,7 +499,7 @@ exc_##n##_bad_stack: \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ - b .ret_from_except_lite; + b ret_from_except_lite; /* This value is used to mark exception frames on the stack. */ .section ".toc","aw" @@ -550,11 +550,11 @@ interrupt_end_book3e: CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x100) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Machine Check Interrupt */ @@ -562,11 +562,11 @@ interrupt_end_book3e: MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_MC(0x000) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception + bl machine_check_exception b ret_from_mc_except /* Data Storage Interrupt */ @@ -612,9 +612,9 @@ interrupt_end_book3e: std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) - bl .save_nvgprs - bl .program_check_exception - b .ret_from_except + bl save_nvgprs + bl program_check_exception + b ret_from_except /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); @@ -625,13 +625,13 @@ interrupt_end_book3e: ld r12,_MSR(r1) andi. r0,r12,MSR_PR; beq- 1f - bl .load_up_fpu + bl load_up_fpu b fast_exception_return 1: INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception - b .ret_from_except + bl kernel_fp_unavailable_exception + b ret_from_except /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); @@ -644,16 +644,16 @@ BEGIN_FTR_SECTION ld r12,_MSR(r1) andi. 
r0,r12,MSR_PR; beq- 1f - bl .load_up_altivec + bl load_up_altivec b fast_exception_return 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except /* AltiVec Assist */ START_EXCEPTION(altivec_assist); @@ -662,16 +662,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION - bl .altivec_assist_exception + bl altivec_assist_exception END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #else - bl .unknown_exception + bl unknown_exception #endif - b .ret_from_except + b ret_from_except /* Decrementer Interrupt */ @@ -687,14 +687,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x9f0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_BOOKE_WDT - bl .WatchdogException + bl WatchdogException #else - bl .unknown_exception + bl unknown_exception #endif b ret_from_crit_except @@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); @@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mr r4,r14 ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except kernel_dbg_exc: b . 
/* NYI */ @@ -839,9 +839,9 @@ kernel_dbg_exc: mr r4,r14 ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except START_EXCEPTION(perfmon); NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, @@ -850,8 +850,8 @@ kernel_dbg_exc: INTS_DISABLE CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD - bl .performance_monitor_exception - b .ret_from_except_lite + bl performance_monitor_exception + b ret_from_except_lite /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, @@ -862,11 +862,11 @@ kernel_dbg_exc: CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2a0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* @@ -878,21 +878,21 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2e0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Hypervisor call */ @@ -901,10 +901,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); @@ -912,10 +912,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* LRAT Error interrupt */ START_EXCEPTION(lrat_error); @@ -1014,16 +1014,16 @@ storage_fault_common: mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .do_page_fault + bl do_page_fault cmpdi r3,0 bne- 1f - b .ret_from_except_lite -1: bl .save_nvgprs + b ret_from_except_lite +1: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* * Alignment exception doesn't fit entirely in the 0x100 bytes so it @@ -1035,10 +1035,10 @@ alignment_more: addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except /* * We branch here from entry_64.S for the last stage of the exception @@ -1172,7 +1172,7 @@ bad_stack_book3e: std r12,0(r11) ld r2,PACATOC(r13) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e) * and always use AS 0, so we just set it up to match our link * address and never use 0 based addresses. 
*/ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* Init global core bits */ - bl .init_core_book3e + bl init_core_book3e /* Init per-thread bits */ - bl .init_thread_book3e + bl init_thread_book3e /* Return to common init code */ tovirt(r28,r28) @@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e) */ _GLOBAL(book3e_secondary_core_init_tlb_set) li r4,1 - b .generic_secondary_smp_init + b generic_secondary_smp_init _GLOBAL(book3e_secondary_core_init) mflr r28 @@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init) bne 2f /* Setup TLB for this core */ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* We can return from the above running at a different * address, so recalculate r2 (TOC) */ - bl .relative_toc + bl relative_toc /* Init global core bits */ -2: bl .init_core_book3e +2: bl init_core_book3e /* Init per-thread bits */ -3: bl .init_thread_book3e +3: bl init_thread_book3e /* Return to common init code at proper virtual address. * diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3afd3915921a..28391e048120 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -132,12 +132,12 @@ BEGIN_FTR_SECTION #endif beq cr1,2f - b .power7_wakeup_noloss -2: b .power7_wakeup_loss + b power7_wakeup_noloss +2: b power7_wakeup_loss /* Fast Sleep wakeup on PowerNV */ 8: GET_PACA(r13) - b .power7_wakeup_tb_loss + b power7_wakeup_tb_loss 9: END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) @@ -211,7 +211,7 @@ data_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* * We can't just use a direct branch to .slb_miss_realmode @@ -243,7 +243,7 @@ instruction_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) @@ -829,7 +829,7 @@ data_access_slb_relon_pSeries: mfspr r3,SPRN_DAR mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* * We can't just use a direct branch to .slb_miss_realmode @@ -854,7 +854,7 @@ instruction_access_slb_relon_pSeries: mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) @@ -966,7 +966,7 @@ system_call_entry: b system_call_common ppc64_runlatch_on_trampoline: - b .__ppc64_runlatch_on + b __ppc64_runlatch_on /* * Here we have detected that the kernel stack pointer is bad. @@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r12,RESULT(r1) std r11,STACK_FRAME_OVERHEAD-16(r1) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1046,7 +1046,7 @@ data_access_common: ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) li r5,0x300 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ .align 7 .globl h_data_storage_common @@ -1056,11 +1056,11 @@ h_data_storage_common: mfspr r10,SPRN_HDSISR stw r10,PACA_EXGEN+EX_DSISR(r13) EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except .align 7 .globl instruction_access_common @@ -1071,7 +1071,7 @@ instruction_access_common: ld r3,_NIP(r1) andis. 
r4,r12,0x5820 li r5,0x400 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) @@ -1088,7 +1088,7 @@ slb_miss_user_common: stw r9,PACA_EXGEN+EX_CCR(r13) std r10,PACA_EXGEN+EX_LR(r13) std r11,PACA_EXGEN+EX_SRR0(r13) - bl .slb_allocate_user + bl slb_allocate_user ld r10,PACA_EXGEN+EX_LR(r13) ld r3,PACA_EXGEN+EX_R3(r13) @@ -1131,9 +1131,9 @@ slb_miss_fault: unrecov_user_slb: EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b #endif /* __DISABLED__ */ @@ -1158,10 +1158,10 @@ machine_check_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception - b .ret_from_except + bl machine_check_exception + b ret_from_except .align 7 .globl alignment_common @@ -1175,31 +1175,31 @@ alignment_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except .align 7 .globl program_check_common program_check_common: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .program_check_exception - b .ret_from_except + bl program_check_exception + b ret_from_except .align 7 .globl fp_unavailable_common fp_unavailable_common: EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) bne 1f /* if from user, just load it up */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception + bl kernel_fp_unavailable_exception BUG_OPCODE 1: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif - bl .load_up_fpu + bl load_up_fpu b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .fp_unavailable_tm - b .ret_from_except + bl fp_unavailable_tm + b ret_from_except #endif .align 7 .globl altivec_unavailable_common @@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - bl .load_up_altivec + bl load_up_altivec b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_tm - b .ret_from_except + bl altivec_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except .align 7 .globl vsx_unavailable_common @@ -1272,23 +1272,23 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - b .load_up_vsx + b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .vsx_unavailable_tm - b .ret_from_except + bl vsx_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - bl 
.save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .vsx_unavailable_exception - b .ret_from_except + bl vsx_unavailable_exception + b ret_from_except STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) @@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler) machine_check_handle_early: std r0,GPR0(r1) /* Save r0 */ EXCEPTION_PROLOG_COMMON_3(0x200) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_early + bl machine_check_early ld r12,_MSR(r1) #ifdef CONFIG_PPC_P7_NAP /* @@ -1408,11 +1408,11 @@ machine_check_handle_early: /* Supervisor state loss */ li r0,1 stb r0,PACA_NAPSTATELOST(r13) -3: bl .machine_check_queue_event +3: bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP GET_PACA(r13) ld r1,PACAR1(r13) - b .power7_enter_nap_mode + b power7_enter_nap_mode 4: #endif /* @@ -1444,7 +1444,7 @@ machine_check_handle_early: andi. r11,r12,MSR_RI bne 2f 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b 2: /* @@ -1452,7 +1452,7 @@ machine_check_handle_early: * Queue up the MCE event so that we can log it later, while * returning from kernel or opal call. */ - bl .machine_check_queue_event + bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP rfid 9: @@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - bl .slb_allocate_realmode + bl slb_allocate_realmode /* All done -- return from exception. */ @@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode) unrecov_slb: EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b @@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) * * at return r3 = 0 for success, 1 for page fault, negative for error */ - bl .hash_page /* build HPTE if possible */ + bl hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ /* Success */ @@ -1587,35 +1587,35 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_page_fault + bl do_page_fault cmpdi r3,0 beq+ 12f - bl .save_nvgprs + bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* We have a data breakpoint exception - handle it */ handle_dabr_fault: - bl .save_nvgprs + bl save_nvgprs ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_break -12: b .ret_from_except_lite + bl do_break +12: b ret_from_except_lite /* We have a page fault that hash_page could handle but HV refused * the PTE insertion */ -13: bl .save_nvgprs +13: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .low_hash_fault - b .ret_from_except + bl low_hash_fault + b ret_from_except /* * We come here as a result of a DSI at a point where we don't want @@ -1624,16 +1624,16 @@ handle_dabr_fault: * were soft-disabled. We want to invoke the exception handler for * the access, or panic if there isn't a handler. 
*/ -77: bl .save_nvgprs +77: bl save_nvgprs mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD li r5,SIGSEGV - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* here we have a segment miss */ do_ste_alloc: - bl .ste_allocate /* try to insert stab entry */ + bl ste_allocate /* try to insert stab entry */ cmpdi r3,0 bne- handle_page_fault b fast_exception_return diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b7363bd42452..afcfd631bf7f 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -70,7 +70,7 @@ _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION FIXUP_ENDIAN - b .__start_initialization_multiplatform + b __start_initialization_multiplatform END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ @@ -186,16 +186,16 @@ _GLOBAL(generic_secondary_thread_init) mr r24,r3 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 - bl .book3e_secondary_thread_init + bl book3e_secondary_thread_init #endif b generic_secondary_common_init @@ -214,17 +214,17 @@ _GLOBAL(generic_secondary_smp_init) mr r25,r4 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 mr r4,r25 - bl .book3e_secondary_core_init + bl book3e_secondary_core_init #endif generic_secondary_common_init: @@ -236,7 +236,7 @@ generic_secondary_common_init: ld r13,0(r13) /* Get base vaddr of paca array */ #ifndef CONFIG_SMP addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ - b .kexec_wait /* wait for next kernel if !SMP */ + b kexec_wait /* wait for next kernel if !SMP */ #else LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ lwz r7,0(r7) /* also the max paca allocated */ @@ -250,7 +250,7 @@ generic_secondary_common_init: blt 1b mr r3,r24 /* not found, copy phys to r3 */ - b .kexec_wait /* next kernel might do better */ + b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) #ifdef CONFIG_PPC_BOOK3E @@ -326,10 +326,10 @@ _STATIC(__mmu_off) */ _GLOBAL(__start_initialization_multiplatform) /* Make sure we are running in 64 bits mode */ - bl .enable_64b_mode + bl enable_64b_mode /* Get TOC pointer (current runtime address) */ - bl .relative_toc + bl relative_toc /* find out where we are now */ bcl 20,31,$+4 @@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform) */ cmpldi cr0,r5,0 beq 1f - b .__boot_from_prom /* yes -> prom */ + b __boot_from_prom /* yes -> prom */ 1: /* Save parameters */ mr r31,r3 @@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform) #endif #ifdef CONFIG_PPC_BOOK3E - bl .start_initialization_book3e - b .__after_prom_start + bl start_initialization_book3e + b __after_prom_start #else /* Setup some critical 970 SPRs before switching MMU off */ mfspr r0,SPRN_PVR @@ -368,12 +368,12 @@ _GLOBAL(__start_initialization_multiplatform) beq 1f cmpwi r0,0x45 /* 970GX */ bne 2f -1: bl .__cpu_preinit_ppc970 +1: bl __cpu_preinit_ppc970 2: /* Switch off MMU if not already off */ - bl .__mmu_off - b .__after_prom_start + bl __mmu_off + b __after_prom_start #endif /* CONFIG_PPC_BOOK3E */ _INIT_STATIC(__boot_from_prom) @@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) #ifdef CONFIG_RELOCATABLE /* Relocate code for where 
we are now */ mr r3,r26 - bl .relocate + bl relocate #endif /* Restore parameters */ @@ -407,7 +407,7 @@ _INIT_STATIC(__boot_from_prom) /* Do all of the interaction with OF client interface */ mr r8,r26 - bl .prom_init + bl prom_init #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ /* We never return. We also hit that trap if trying to boot @@ -424,7 +424,7 @@ _STATIC(__after_prom_start) bne 1f add r25,r25,r26 1: mr r3,r25 - bl .relocate + bl relocate #endif /* @@ -464,7 +464,7 @@ _STATIC(__after_prom_start) lis r5,(copy_to_here - _stext)@ha addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ - bl .copy_and_flush /* copy the first n bytes */ + bl copy_and_flush /* copy the first n bytes */ /* this includes the code being */ /* executed here. */ addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ @@ -478,9 +478,9 @@ p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ addis r5,r26,(p_end - _stext)@ha ld r5,(p_end - _stext)@l(r5) /* get _end */ -5: bl .copy_and_flush /* copy the rest */ +5: bl copy_and_flush /* copy the rest */ -9: b .start_here_multiplatform +9: b start_here_multiplatform /* * Copy routine used to copy the kernel to start at physical address 0 @@ -544,7 +544,7 @@ __secondary_start_pmac_0: _GLOBAL(pmac_secondary_start) /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode li r0,0 mfspr r3,SPRN_HID4 @@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) slbia /* get TOC pointer (real address) */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Copy some CPU settings from CPU 0 */ - bl .__restore_cpu_ppc970 + bl __restore_cpu_ppc970 /* pSeries do that early though I don't think we really need it */ mfmsr r3 @@ -619,7 +619,7 @@ __secondary_start: std r14,PACAKSAVE(r13) /* Do early setup for that CPU (stab, slb, hash table pointer) */ - bl .early_setup_secondary + bl early_setup_secondary /* * setup the new stack pointer, but *don't* use this until @@ -656,7 +656,7 @@ _GLOBAL(start_secondary_prolog) ld r2,PACATOC(r13) li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . /* * Reset stack pointer and call start_secondary @@ -667,7 +667,7 @@ _GLOBAL(start_secondary_resume) ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . #endif @@ -717,7 +717,7 @@ p_toc: .llong __toc_start + 0x8000 - 0b */ _INIT_STATIC(start_here_multiplatform) /* set up the TOC */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Clear out the BSS. 
It may have been done in prom_init, @@ -776,7 +776,7 @@ _INIT_STATIC(start_here_multiplatform) /* Restore parameters passed from prom_init/kexec */ mr r3,r31 - bl .early_setup /* also sets r13 and SPRG_PACA */ + bl early_setup /* also sets r13 and SPRG_PACA */ LOAD_REG_ADDR(r3, .start_here_common) ld r4,PACAKMSR(r13) @@ -794,7 +794,7 @@ _INIT_GLOBAL(start_here_common) ld r2,PACATOC(r13) /* Do more system initializations in virtual mode */ - bl .setup_system + bl setup_system /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) @@ -805,7 +805,7 @@ _INIT_GLOBAL(start_here_common) stb r0,PACAIRQHAPPENED(r13) /* Generic kernel entry */ - bl .start_kernel + bl start_kernel /* Not reached */ BUG_OPCODE diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index bfb73cc209ce..48c21acef915 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -43,7 +43,7 @@ _GLOBAL(\name) */ #ifdef CONFIG_TRACE_IRQFLAGS stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 #endif li r0,1 diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index e3edaa189911..f57a19348bdd 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) mflr r0 std r0,16(r1) stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 ld r0,16(r1) mtlr r0 diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c3ab86975614..dca6e16c2436 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common) /* Make sure FPU, VSX etc... 
are flushed as we may lose * state when going to nap mode */ - bl .discard_lazy_cpu_state + bl discard_lazy_cpu_state #endif /* CONFIG_SMP */ /* Hard disable interrupts */ @@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss) _GLOBAL(power7_wakeup_noloss) lbz r0,PACA_NAPSTATELOST(r13) cmpwi r0,0 - bne .power7_wakeup_loss + bne power7_wakeup_loss ld r1,PACAR1(r13) ld r4,_MSR(r1) ld r5,_NIP(r1) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3d0249599d52..b39cf4afad4b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 - bl .__do_softirq + bl __do_softirq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 - bl .__do_irq + bl __do_irq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) stb r4,PACAKEXECSTATE(r13) SYNC - b .kexec_wait + b kexec_wait /* * switch to real mode (turn mmu off) @@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) /* copy dest pages, flush whole dest image */ mr r3,r29 - bl .kexec_copy_flush /* (image) */ + bl kexec_copy_flush /* (image) */ /* turn off mmu */ bl real_mode @@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) mr r4,r30 /* start, aka phys mem offset */ li r5,0x100 li r6,0 - bl .copy_and_flush /* (dest, src, copy limit, start offset) */ + bl copy_and_flush /* (dest, src, copy limit, start offset) */ 1: /* assume normal blr return */ /* release other cpus to the new kernel secondary start at 0x60 */ diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index e18e3cfc32de..8c86422a1e37 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) #endif /* CONFIG_SMP */ /* Jump to partition switch code */ - bl .kvmppc_hv_entry_trampoline + bl kvmppc_hv_entry_trampoline nop /* diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ffbb871c2bd8..7cfabe3881d8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1647,7 +1647,7 @@ kvmppc_hdsi: /* Search the hash table. */ mr r3, r9 /* vcpu pointer */ li r7, 1 /* data fault */ - bl .kvmppc_hpte_hv_fault + bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) @@ -1721,7 +1721,7 @@ kvmppc_hisi: mr r4, r10 mr r6, r11 li r7, 0 /* instruction fault */ - bl .kvmppc_hpte_hv_fault + bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) @@ -2099,7 +2099,7 @@ kvm_cede_exit: /* Try to handle a machine check in real mode */ machine_check_realmode: mr r3, r9 /* get vcpu pointer */ - bl .kvmppc_realmode_machine_check + bl kvmppc_realmode_machine_check nop cmpdi r3, 0 /* continue exiting from guest? 
*/ ld r9, HSTATE_KVM_VCPU(r13) diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 9f9434a85264..e59c9c2ebe98 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -20,7 +20,7 @@ _GLOBAL(copy_page) BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h FTR_SECTION_ELSE - b .copypage_power7 + b copypage_power7 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 395c594722a2..0f1e2398f83c 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -60,7 +60,7 @@ _GLOBAL(copypage_power7) std r4,56(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy + bl enter_vmx_copy cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) @@ -103,7 +103,7 @@ _GLOBAL(copypage_power7) addi r3,r3,128 bdnz 1b - b .exit_vmx_copy /* tail call optimise */ + b exit_vmx_copy /* tail call optimise */ #else li r0,(PAGE_SIZE/128) diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index e8e9c36dc784..62f0540418b9 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -66,7 +66,7 @@ ld r15,STK_REG(R15)(r1) ld r14,STK_REG(R14)(r1) .Ldo_err3: - bl .exit_vmx_usercopy + bl exit_vmx_usercopy ld r0,STACKFRAMESIZE+16(r1) mtlr r0 b .Lexit @@ -295,7 +295,7 @@ err1; stb r0,0(r3) mflr r0 std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_usercopy + bl enter_vmx_usercopy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) @@ -514,7 +514,7 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_usercopy /* tail call optimise */ + b exit_vmx_usercopy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ @@ -717,5 +717,5 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_usercopy /* tail call optimise */ + b exit_vmx_usercopy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index 9b96ff2ecd4d..19e66001a4f9 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S @@ -24,7 +24,7 @@ _GLOBAL(__arch_hweight8) BEGIN_FTR_SECTION - b .__sw_hweight8 + b __sw_hweight8 nop nop FTR_SECTION_ELSE @@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight16) BEGIN_FTR_SECTION - b .__sw_hweight16 + b __sw_hweight16 nop nop nop @@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight32) BEGIN_FTR_SECTION - b .__sw_hweight32 + b __sw_hweight32 nop nop nop @@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight64) BEGIN_FTR_SECTION - b .__sw_hweight64 + b __sw_hweight64 nop nop nop diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index f4fcb0bc6563..0738f96befbf 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -79,8 +79,8 @@ _GLOBAL(memset) _GLOBAL(memmove) cmplw 0,r3,r4 - bgt .backwards_memcpy - b .memcpy + bgt backwards_memcpy + b memcpy _GLOBAL(backwards_memcpy) rlwinm. 
r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index e4177dbea6bd..bae3f214c2d9 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -230,7 +230,7 @@ _GLOBAL(memcpy_power7) std r5,64(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy + bl enter_vmx_copy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) @@ -448,7 +448,7 @@ _GLOBAL(memcpy_power7) 15: addi r1,r1,STACKFRAMESIZE ld r3,48(r1) - b .exit_vmx_copy /* tail call optimise */ + b exit_vmx_copy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ @@ -652,5 +652,5 @@ _GLOBAL(memcpy_power7) 15: addi r1,r1,STACKFRAMESIZE ld r3,48(r1) - b .exit_vmx_copy /* tail call optimise */ + b exit_vmx_copy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 1136d26a95ae..8bf7537a7f53 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in @@ -471,7 +471,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in @@ -588,7 +588,7 @@ htab_inval_old_hpte: li r6,MMU_PAGE_64K /* psize */ ld r7,STK_PARAM(R9)(r1) /* ssize */ ld r8,STK_PARAM(R8)(r1) /* local */ - bl .flush_hash_page + bl flush_hash_page /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ lis r0,_PAGE_HPTE_SUB@h ori r0,r0,_PAGE_HPTE_SUB@l @@ -812,7 +812,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in diff --git a/arch/powerpc/platforms/pasemi/powersave.S b/arch/powerpc/platforms/pasemi/powersave.S index 56f45adcd089..81ab555aa491 100644 --- a/arch/powerpc/platforms/pasemi/powersave.S +++ b/arch/powerpc/platforms/pasemi/powersave.S @@ -66,7 +66,7 @@ sleep_common: std r3, 48(r1) /* Only do power savings when in astate 0 */ - bl .check_astate + bl check_astate cmpwi r3,0 bne 1f diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 444fe7759e55..7891a86066e8 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \ std r0,16(r1); \ addi r4,r1,STK_PARAM(FIRST_REG); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ - bl .__trace_hcall_entry; \ + bl __trace_hcall_entry; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ ld r3,STK_PARAM(R3)(r1); \ @@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \ mr r3,r6; \ std r0,16(r1); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ - bl .__trace_hcall_exit; \ + bl __trace_hcall_exit; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ ld r3,STK_PARAM(R3)(r1); \ -- cgit v1.2.3 From ad0289e4acf2bd6989e745cff3b4f0781a919e30 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:04:52 +1100 Subject: powerpc: Remove superfluous
function descriptors in assembly only code We have a number of places where we load the text address of a local function and indirectly branch to it in assembly. Since it is an indirect branch, binutils will not know to use the function text address, so that trick won't work. There is no need for these functions to have a function descriptor, so we can replace it with a label and remove the dot symbol. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/entry_64.S | 10 +++++----- arch/powerpc/kernel/exceptions-64s.S | 18 +++++++++--------- arch/powerpc/kernel/head_64.S | 9 +++++---- arch/powerpc/platforms/powernv/opal-wrappers.S | 4 ++-- 4 files changed, 21 insertions(+), 20 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index b629198b072c..2d92eeb08b76 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1021,7 +1021,7 @@ _GLOBAL(enter_rtas) std r6,PACASAVEDMSR(r13) /* Setup our real return addr */ - LOAD_REG_ADDR(r4,.rtas_return_loc) + LOAD_REG_ADDR(r4,rtas_return_loc) clrldi r4,r4,2 /* convert to realmode address */ mtlr r4 @@ -1045,7 +1045,7 @@ _GLOBAL(enter_rtas) rfid b . /* prevent speculative execution */ -_STATIC(rtas_return_loc) +rtas_return_loc: FIXUP_ENDIAN /* relocation is off at this point */ @@ -1054,7 +1054,7 @@ _STATIC(rtas_return_loc) bcl 20,31,$+4 0: mflr r3 - ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ + ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ mfmsr r6 li r0,MSR_RI @@ -1071,9 +1071,9 @@ _STATIC(rtas_return_loc) b . /* prevent speculative execution */ .align 3 -1: .llong .rtas_restore_regs +1: .llong rtas_restore_regs -_STATIC(rtas_restore_regs) +rtas_restore_regs: /* relocation is on at this point */ REST_GPR(2, r1) /* Restore the TOC */ REST_GPR(13, r1) /* Restore paca */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 28391e048120..f2f9d6144ae1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -214,13 +214,13 @@ data_access_slb_pSeries: b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put. */ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -247,7 +247,7 @@ instruction_access_slb_pSeries: #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -524,7 +524,7 @@ do_stab_bolted_pSeries: std r12,PACA_EXSLB+EX_R12(r13) GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) + EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) @@ -832,13 +832,13 @@ data_access_slb_relon_pSeries: b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put.
*/ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -858,7 +858,7 @@ instruction_access_slb_relon_pSeries: #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -1468,7 +1468,7 @@ machine_check_handle_early: * r3 is saved in paca->slb_r3 * We assume we aren't going to take any exceptions during this procedure. */ -_GLOBAL(slb_miss_realmode) +slb_miss_realmode: mflr r10 #ifdef CONFIG_RELOCATABLE mtctr r11 @@ -1646,7 +1646,7 @@ do_ste_alloc: * We assume (DAR >> 60) == 0xc. */ .align 7 -_GLOBAL(do_stab_bolted) +do_stab_bolted: stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ mfspr r11,SPRN_DAR /* ea */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index afcfd631bf7f..8d7c868e5a43 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -639,7 +639,7 @@ __secondary_start: stb r0,PACAIRQHAPPENED(r13) /* enable MMU and jump to start_secondary */ - LOAD_REG_ADDR(r3, .start_secondary_prolog) + LOAD_REG_ADDR(r3, start_secondary_prolog) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) mtspr SPRN_SRR0,r3 @@ -652,7 +652,7 @@ __secondary_start: * zero the stack back-chain pointer and get the TOC virtual address * before going into C code. */ -_GLOBAL(start_secondary_prolog) +start_secondary_prolog: ld r2,PACATOC(r13) li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ @@ -778,7 +778,7 @@ _INIT_STATIC(start_here_multiplatform) mr r3,r31 bl early_setup /* also sets r13 and SPRG_PACA */ - LOAD_REG_ADDR(r3, .start_here_common) + LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 @@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform) b . /* prevent speculative execution */ /* This is where all platforms converge execution */ -_INIT_GLOBAL(start_here_common) + +start_here_common: /* relocation is on at this point */ std r1,PACAKSAVE(r13) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index f531ffe35b3e..b5ebc545a373 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -32,7 +32,7 @@ std r12,PACASAVEDMSR(r13); \ andc r12,r12,r0; \ mtmsrd r12,1; \ - LOAD_REG_ADDR(r0,.opal_return); \ + LOAD_REG_ADDR(r0,opal_return); \ mtlr r0; \ li r0,MSR_DR|MSR_IR|MSR_LE;\ andc r12,r12,r0; \ @@ -44,7 +44,7 @@ mtspr SPRN_HSRR0,r12; \ hrfid -_STATIC(opal_return) +opal_return: /* * Fixup endian on OPAL return... we should be able to simplify * this by instead converting the below trampoline to a set of -- cgit v1.2.3 From c857c43b34ecbfd686d860d4e85281d3750e3b47 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:05:53 +1100 Subject: powerpc: Don't use a function descriptor for system call table There is no need to create a function descriptor for the system call table. By using one we force the system call table into the text section and it really belongs in the rodata section. This also removes another use of dot symbols. 
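[Illustrative aside, not part of the original commit message: under the ELFv1 ABI, a symbol emitted by _GLOBAL() points at a three-doubleword function descriptor in the ".opd" data section rather than at the code itself; the ppc_asm.h macros removed later in this series show the exact layout. Hand-expanded, the old _GLOBAL(sys_call_table) was roughly equivalent to the following sketch:

	.section ".opd","aw"		/* descriptors live in a data section */
sys_call_table:				/* the "normal" symbol names the descriptor */
	.quad	.sys_call_table		/* entry point: the dot symbol in .text */
	.quad	.TOC.@tocbase		/* TOC base the code expects in r2 */
	.quad	0			/* environment pointer, unused by the kernel */
	.previous

A table of syscall pointers is data, not code, so it gains nothing from a descriptor; a bare label in .rodata is enough.]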
Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/entry_64.S | 6 +++--- arch/powerpc/kernel/systbl.S | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2d92eeb08b76..2662f02ba6cf 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -39,8 +39,8 @@ * System calls. */ .section ".toc","aw" -.SYS_CALL_TABLE: - .tc .sys_call_table[TC],.sys_call_table +SYS_CALL_TABLE: + .tc sys_call_table[TC],sys_call_table /* This value is used to mark exception frames on the stack. */ exception_marker: @@ -162,7 +162,7 @@ system_call: /* label this so stack traces look sane */ * Need to vector to 32 Bit or default sys_call_table here, * based on caller's run-mode / personality. */ - ld r11,.SYS_CALL_TABLE@toc(2) + ld r11,SYS_CALL_TABLE@toc(2) andi. r10,r10,_TIF_32BIT beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 93219c34af32..75822f97bfea 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -36,6 +36,8 @@ #define PPC_SYS_SPU(func) PPC_SYS(func) #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) +.section .rodata,"a" + #ifdef CONFIG_PPC64 #define sys_sigpending sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall @@ -43,5 +45,7 @@ .p2align 3 #endif -_GLOBAL(sys_call_table) +.globl sys_call_table +sys_call_table: + #include -- cgit v1.2.3 From 6a3bab90cf78bc579638525cb76ac240f8253803 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:06:11 +1100 Subject: powerpc: Remove some unnecessary uses of _GLOBAL() and _STATIC() There is no need to create a function descriptor for functions called locally out of assembly. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/exceptions-64e.S | 4 ++-- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_64.S | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5e37338c2e5c..c8ac8a09b0f8 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init) mflr r28 b 3b -_STATIC(init_core_book3e) +init_core_book3e: /* Establish the interrupt vector base */ LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) mtspr SPRN_IVPR,r3 sync blr -_STATIC(init_thread_book3e) +init_thread_book3e: lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h mtspr SPRN_EPCR,r3 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f2f9d6144ae1..a0741b65f658 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1536,7 +1536,7 @@ power4_fixup_nap: * Hash table stuff */ .align 7 -_STATIC(do_hash_page) +do_hash_page: std r3,_DAR(r1) std r4,_DSISR(r1) diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 8d7c868e5a43..82841242dc26 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -299,7 +299,7 @@ generic_secondary_common_init: * Assumes we're mapped EA == RA if the MMU is on. */ #ifdef CONFIG_PPC_BOOK3S -_STATIC(__mmu_off) +__mmu_off: mfmsr r3 andi. 
r0,r3,MSR_IR|MSR_DR beqlr @@ -324,7 +324,7 @@ _STATIC(__mmu_off) * DT block, r4 is a physical pointer to the kernel itself * */ -_GLOBAL(__start_initialization_multiplatform) +__start_initialization_multiplatform: /* Make sure we are running in 64 bits mode */ bl enable_64b_mode @@ -376,7 +376,7 @@ _GLOBAL(__start_initialization_multiplatform) b __after_prom_start #endif /* CONFIG_PPC_BOOK3E */ -_INIT_STATIC(__boot_from_prom) +__boot_from_prom: #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE /* Save parameters */ mr r31,r3 @@ -414,7 +414,7 @@ _INIT_STATIC(__boot_from_prom) * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ trap -_STATIC(__after_prom_start) +__after_prom_start: #ifdef CONFIG_RELOCATABLE /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ @@ -674,7 +674,7 @@ _GLOBAL(start_secondary_resume) /* * This subroutine clobbers r11 and r12 */ -_GLOBAL(enable_64b_mode) +enable_64b_mode: mfmsr r11 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3E oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ @@ -715,7 +715,7 @@ p_toc: .llong __toc_start + 0x8000 - 0b /* * This is where the main kernel code starts. */ -_INIT_STATIC(start_here_multiplatform) +start_here_multiplatform: /* set up the TOC */ bl relative_toc tovirt(r2,r2) -- cgit v1.2.3 From a0e971ffb9d9dae3b9892fb548bd2497db758f60 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:06:25 +1100 Subject: powerpc: Remove _INIT_GLOBAL(), _STATIC() and _INIT_STATIC() Now there are no users of _INIT_GLOBAL(), _STATIC() and _INIT_STATIC() we can remove them. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ppc_asm.h | 38 -------------------------------------- 1 file changed, 38 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 3128ba3ba7a0..35b23a6584cb 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -209,20 +209,6 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): -#define _INIT_GLOBAL(name) \ - __REF; \ - .align 2 ; \ - .globl name; \ - .globl GLUE(.,name); \ - .section ".opd","aw"; \ -name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): - #define _KPROBE(name) \ .section ".kprobes.text","a"; \ .align 2 ; \ @@ -237,30 +223,6 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): -#define _STATIC(name) \ - .section ".text"; \ - .align 2 ; \ - .section ".opd","aw"; \ -name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): - -#define _INIT_STATIC(name) \ - __REF; \ - .align 2 ; \ - .section ".opd","aw"; \ -name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): - #else /* 32-bit */ #define _ENTRY(n) \ -- cgit v1.2.3 From 354255014a9042b9204e5bed22704110326d5ecf Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:06:46 +1100 Subject: powerpc: Remove dot symbol usage in exception macros STD_EXCEPTION_COMMON, STD_EXCEPTION_COMMON_ASYNC and MASKABLE_EXCEPTION branch to the handler, so we can remove the explicit dot symbol and binutils will do the right thing. 
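As background for the macro change, a sketch of the symbol pair ABIv1 creates per function; the handler name is just an example:

	/*
	 * On ABIv1, _GLOBAL(unknown_exception) defines two symbols:
	 *
	 *   unknown_exception    descriptor in .opd: { text address, TOC, 0 }
	 *   .unknown_exception   the first instruction, in .text
	 *
	 * A branch relocation against the descriptor symbol is resolved
	 * to the text symbol, so the macro arguments can drop the dot.
	 */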
Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/exceptions-64e.S | 8 +++---- arch/powerpc/kernel/exceptions-64s.S | 46 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c8ac8a09b0f8..771b4e92e5d9 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -591,7 +591,7 @@ interrupt_end_book3e: /* External Input Interrupt */ MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, - external_input, .do_IRQ, ACK_NONE) + external_input, do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); @@ -676,11 +676,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Decrementer Interrupt */ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, - decrementer, .timer_interrupt, ACK_DEC) + decrementer, timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, - fixed_interval, .unknown_exception, ACK_FIT) + fixed_interval, unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); @@ -855,7 +855,7 @@ kernel_dbg_exc: /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, - doorbell, .doorbell_exception, ACK_NONE) + doorbell, doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a0741b65f658..20f11eb4dff7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -769,38 +769,38 @@ kvmppc_skip_Hinterrupt: /*** Common interrupt handlers ***/ - STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) + STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception) STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) - STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) - STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) + STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception) #endif - STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) - STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) - STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) + STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) + STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) + STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception) #endif - STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) - 
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) - STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception) + STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) + STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception) #ifdef CONFIG_ALTIVEC - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception) #else - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception) #endif #ifdef CONFIG_CBE_RAS - STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) - STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) - STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) + STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) + STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) + STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception) #endif /* CONFIG_CBE_RAS */ /* @@ -1073,7 +1073,7 @@ instruction_access_common: li r5,0x400 b do_hash_page /* Try to handle as hpte fault */ - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) /* * Here is the common SLB miss user that is used when going to virtual @@ -1290,8 +1290,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) bl vsx_unavailable_exception b ret_from_except - STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) - STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception) .align 7 .globl __end_handlers -- cgit v1.2.3 From c1fb019477c27bfe309be282d178a08e56f05249 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:07:01 +1100 Subject: powerpc: Create DOTSYM to wrap dot symbol usage There are a few places we have to use dot symbols with the current ABI - the syscall table and the kvm hcall table. Wrap both of these with a new macro called DOTSYM so it will be easy to transition away from dot symbols in a future ABI. 
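A short illustrative note on why these two tables are special (paraphrasing the dispatch code, not part of the patch):

	/*
	 * The tables hold raw code addresses that are loaded and
	 * branched to directly; the syscall path does roughly:
	 *
	 *	ldx	r12,r11,r0	# fetch table entry
	 *	mtctr	r12
	 *	bctrl
	 *
	 * No linker fixup is applied to a loaded value, so on ABIv1
	 * each entry must already be a text address: DOTSYM(sys_foo)
	 * expands to .sys_foo there, and to plain sys_foo on ABIv2.
	 */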
Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ppc_asm.h | 2 ++ arch/powerpc/include/asm/systbl.h | 6 +++--- arch/powerpc/kernel/systbl.S | 12 ++++++------ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 28 ++++++++++++++-------------- 4 files changed, 25 insertions(+), 23 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 35b23a6584cb..61992d8f99df 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -223,6 +223,8 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): +#define DOTSYM(a) GLUE(.,a) + #else /* 32-bit */ #define _ENTRY(n) \ diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 3ddf70276706..ac062f504736 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -62,7 +62,7 @@ COMPAT_SYS_SPU(fcntl) SYSCALL(ni_syscall) SYSCALL_SPU(setpgid) SYSCALL(ni_syscall) -SYSX(sys_ni_syscall,sys_olduname, sys_olduname) +SYSX(sys_ni_syscall,sys_olduname,sys_olduname) SYSCALL_SPU(umask) SYSCALL_SPU(chroot) COMPAT_SYS(ustat) @@ -258,7 +258,7 @@ SYSCALL_SPU(tgkill) COMPAT_SYS_SPU(utimes) COMPAT_SYS_SPU(statfs64) COMPAT_SYS_SPU(fstatfs64) -SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64) +SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64) PPC_SYS_SPU(rtas) OLDSYS(debug_setcontext) SYSCALL(ni_syscall) @@ -295,7 +295,7 @@ SYSCALL_SPU(mkdirat) SYSCALL_SPU(mknodat) SYSCALL_SPU(fchownat) COMPAT_SYS_SPU(futimesat) -SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64) +SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64) SYSCALL_SPU(unlinkat) SYSCALL_SPU(renameat) SYSCALL_SPU(linkat) diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 75822f97bfea..895c50ca943c 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -17,12 +17,12 @@ #include #ifdef CONFIG_PPC64 -#define SYSCALL(func) .llong .sys_##func,.sys_##func -#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func -#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func -#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall -#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func -#define SYSX(f, f3264, f32) .llong .f,.f3264 +#define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func) +#define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func) +#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) +#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) +#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) +#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) #else #define SYSCALL(func) .long sys_##func #define COMPAT_SYS(func) .long sys_##func diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7cfabe3881d8..e9593f58a501 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1795,16 +1795,16 @@ hcall_real_fallback: .globl hcall_real_table hcall_real_table: .long 0 /* 0 - unused */ - .long .kvmppc_h_remove - hcall_real_table - .long .kvmppc_h_enter - hcall_real_table - .long .kvmppc_h_read - hcall_real_table + .long DOTSYM(kvmppc_h_remove) - hcall_real_table + .long DOTSYM(kvmppc_h_enter) - hcall_real_table + .long DOTSYM(kvmppc_h_read) - hcall_real_table .long 0 /* 0x10 - H_CLEAR_MOD */ .long 0 /* 0x14 - H_CLEAR_REF */ - .long .kvmppc_h_protect - hcall_real_table - .long 
.kvmppc_h_get_tce - hcall_real_table - .long .kvmppc_h_put_tce - hcall_real_table + .long DOTSYM(kvmppc_h_protect) - hcall_real_table + .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table + .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table .long 0 /* 0x24 - H_SET_SPRG0 */ - .long .kvmppc_h_set_dabr - hcall_real_table + .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table .long 0 /* 0x2c */ .long 0 /* 0x30 */ .long 0 /* 0x34 */ @@ -1820,11 +1820,11 @@ hcall_real_table: .long 0 /* 0x5c */ .long 0 /* 0x60 */ #ifdef CONFIG_KVM_XICS - .long .kvmppc_rm_h_eoi - hcall_real_table - .long .kvmppc_rm_h_cppr - hcall_real_table - .long .kvmppc_rm_h_ipi - hcall_real_table + .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table + .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table + .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table .long 0 /* 0x70 - H_IPOLL */ - .long .kvmppc_rm_h_xirr - hcall_real_table + .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ .long 0 /* 0x68 - H_CPPR */ @@ -1858,7 +1858,7 @@ hcall_real_table: .long 0 /* 0xd4 */ .long 0 /* 0xd8 */ .long 0 /* 0xdc */ - .long .kvmppc_h_cede - hcall_real_table + .long DOTSYM(kvmppc_h_cede) - hcall_real_table .long 0 /* 0xe4 */ .long 0 /* 0xe8 */ .long 0 /* 0xec */ @@ -1875,11 +1875,11 @@ hcall_real_table: .long 0 /* 0x118 */ .long 0 /* 0x11c */ .long 0 /* 0x120 */ - .long .kvmppc_h_bulk_remove - hcall_real_table + .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table .long 0 /* 0x128 */ .long 0 /* 0x12c */ .long 0 /* 0x130 */ - .long .kvmppc_h_set_xdabr - hcall_real_table + .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table hcall_real_table_end: ignore_hdec: -- cgit v1.2.3 From 7167af7cebedc7c2051184fef0e165aeb67d0b9d Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:07:20 +1100 Subject: powerpc: Remove function descriptors and dot symbols on new ABI ABIv2 doesn't have function descriptors or dot symbols. One new thing it does add is a function global and a local entry point, so add that to our _GLOBAL macro. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ftrace.h | 2 ++ arch/powerpc/include/asm/linkage.h | 2 ++ arch/powerpc/include/asm/ppc_asm.h | 22 ++++++++++++++++++++++ 3 files changed, 26 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 169d039ed402..e3661872fbea 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -61,6 +61,7 @@ struct dyn_arch_ftrace { #endif #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { @@ -72,6 +73,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name */ return !strcmp(sym + 4, name + 3); } +#endif #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h index b36f650a13ff..e3ad5c72724a 100644 --- a/arch/powerpc/include/asm/linkage.h +++ b/arch/powerpc/include/asm/linkage.h @@ -2,6 +2,7 @@ #define _ASM_POWERPC_LINKAGE_H #ifdef CONFIG_PPC64 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #define cond_syscall(x) \ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ "\t.weak ." #x "\n\t.set ." 
#x ", .sys_ni_syscall\n") @@ -9,5 +10,6 @@ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) #endif +#endif #endif /* _ASM_POWERPC_LINKAGE_H */ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 61992d8f99df..5394d41a7140 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -192,6 +192,26 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define __STK_PARAM(i) (48 + ((i)-3)*8) #define STK_PARAM(i) __STK_PARAM(__REG_##i) +#if defined(_CALL_ELF) && _CALL_ELF == 2 + +#define _GLOBAL(name) \ + .section ".text"; \ + .align 2 ; \ + .type name,@function; \ + .globl name; \ +name: + +#define _KPROBE(name) \ + .section ".kprobes.text","a"; \ + .align 2 ; \ + .type name,@function; \ + .globl name; \ +name: + +#define DOTSYM(a) a + +#else + #define XGLUE(a,b) a##b #define GLUE(a,b) XGLUE(a,b) @@ -225,6 +245,8 @@ GLUE(.,name): #define DOTSYM(a) GLUE(.,a) +#endif + #else /* 32-bit */ #define _ENTRY(n) \ -- cgit v1.2.3 From cc7efbf91933a4b59c20e60115c336b26dfc1195 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:07:47 +1100 Subject: powerpc: ABIv2 function calls must place target address in r12 To establish addressability quickly, ABIv2 requires the target address of the function being called to be in r12. Fix a number of places in assembly code that we do indirect function calls. We need to avoid function descriptors on ABIv2 too. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/entry_64.S | 4 ++-- arch/powerpc/kernel/head_64.S | 20 +++++++++++--------- arch/powerpc/kernel/misc_64.S | 8 ++++++-- 3 files changed, 19 insertions(+), 13 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 2662f02ba6cf..d23d7526d37a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -174,8 +174,8 @@ system_call: /* label this so stack traces look sane */ clrldi r8,r8,32 15: slwi r0,r0,4 - ldx r10,r11,r0 /* Fetch system call handler [ptr] */ - mtctr r10 + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ + mtctr r12 bctrl /* Call handler */ syscall_exit: diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 82841242dc26..97329a19c76b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -140,16 +140,18 @@ __secondary_hold: tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start. */ -100: ld r4,__secondary_hold_spinloop-_stext(r26) - cmpdi 0,r4,0 +100: ld r12,__secondary_hold_spinloop-_stext(r26) + cmpdi 0,r12,0 beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) #ifdef CONFIG_PPC_BOOK3E - tovirt(r4,r4) + tovirt(r12,r12) #endif - ld r4,0(r4) /* deref function descriptor */ - mtctr r4 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r12) /* deref function descriptor */ +#endif + mtctr r12 mr r3,r24 /* * it may be the case that other platforms have r4 right to @@ -267,8 +269,8 @@ generic_secondary_common_init: ld r23,CPU_SPEC_RESTORE(r23) cmpdi 0,r23,0 beq 3f - ld r23,0(r23) - mtctr r23 + ld r12,0(r23) + mtctr r12 bctrl 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ @@ -468,8 +470,8 @@ __after_prom_start: /* this includes the code being */ /* executed here. 
*/ addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ - addi r8,r8,(4f - _stext)@l /* that we just made */ - mtctr r8 + addi r12,r8,(4f - _stext)@l /* that we just made */ + mtctr r12 bctr .balign 8 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index b39cf4afad4b..bda85a193abf 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence) stw r6,kexec_flag-1b(5) /* clear out hardware hash page table and tlb */ - ld r5,0(r27) /* deref function descriptor */ - mtctr r5 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r27) /* deref function descriptor */ +#else + mr r12,r27 +#endif + mtctr r12 bctrl /* ppc_md.hpte_clear_all(void); */ /* -- cgit v1.2.3 From 814e4cd98f777c7fa3b42e0468030cd341fb8b6b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:08:02 +1100 Subject: powerpc: Ignore .TOC. relocations The linker fixes up .TOC. relocations, so prom_init_check.sh should ignore them. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/prom_init_check.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index b0c263da219a..77aa1e95e904 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 reloc_got2 kernstart_addr memstart_addr linux_banner _stext opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry boot_command_line __prom_init_toc_start __prom_init_toc_end -btext_setup_display" +btext_setup_display TOC." NM="$1" OBJ="$2" -- cgit v1.2.3 From c71b7eff426fa7d8fd33e0964a7f79a3b41faff9 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:09:15 +1100 Subject: powerpc: Add ABIv2 support to ppc_function_entry Skip over the well known global entry point code for ABIv2. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/code-patching.h | 40 ++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 97e02f985df8..37991e154ef8 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -42,15 +42,47 @@ void __patch_exception(int exc, unsigned long addr); } while (0) #endif +#define OP_RT_RA_MASK 0xffff0000UL +#define LIS_R2 0x3c020000UL +#define ADDIS_R2_R12 0x3c4c0000UL +#define ADDI_R2_R2 0x38420000UL + static inline unsigned long ppc_function_entry(void *func) { -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) +#if defined(_CALL_ELF) && _CALL_ELF == 2 + u32 *insn = func; + + /* + * A PPC64 ABIv2 function may have a local and a global entry + * point. We need to use the local entry point when patching + * functions, so identify and step over the global entry point + * sequence. + * + * The global entry point sequence is always of the form: + * + * addis r2,r12,XXXX + * addi r2,r2,XXXX + * + * A linker optimisation may convert the addis to lis: + * + * lis r2,XXXX + * addi r2,r2,XXXX + */ + if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) || + ((*insn & OP_RT_RA_MASK) == LIS_R2)) && + ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2)) + return (unsigned long)(insn + 2); + else + return (unsigned long)func; +#else /* - * On PPC64 the function pointer actually points to the function's - * descriptor. 
The first entry in the descriptor is the address - * of the function text. + * On PPC64 ABIv1 the function pointer actually points to the + * function's descriptor. The first entry in the descriptor is the + * address of the function text. */ return ((func_descr_t *)func)->entry; +#endif #else return (unsigned long)func; #endif -- cgit v1.2.3 From 26f920605680b69e484a114b3dcb47ce11df9827 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 09:40:26 +1100 Subject: powerpc: Use ppc_function_entry instead of open coding it Replace FUNCTION_TEXT with ppc_function_entry which can handle both ABIv1 and ABIv2. Signed-off-by: Anton Blanchard --- arch/powerpc/mm/hash_utils_64.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index d766d6ee33fe..49fc935ee807 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -603,8 +603,6 @@ int remove_section_mapping(unsigned long start, unsigned long end) } #endif /* CONFIG_MEMORY_HOTPLUG */ -#define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) - static void __init htab_finish_init(void) { extern unsigned int *htab_call_hpte_insert1; @@ -619,31 +617,31 @@ static void __init htab_finish_init(void) extern unsigned int *ht64_call_hpte_updatepp; patch_branch(ht64_call_hpte_insert1, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_insert2, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_remove, - FUNCTION_TEXT(ppc_md.hpte_remove), + ppc_function_entry(ppc_md.hpte_remove), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_updatepp, - FUNCTION_TEXT(ppc_md.hpte_updatepp), + ppc_function_entry(ppc_md.hpte_updatepp), BRANCH_SET_LINK); #endif /* CONFIG_PPC_HAS_HASH_64K */ patch_branch(htab_call_hpte_insert1, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(htab_call_hpte_insert2, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(htab_call_hpte_remove, - FUNCTION_TEXT(ppc_md.hpte_remove), + ppc_function_entry(ppc_md.hpte_remove), BRANCH_SET_LINK); patch_branch(htab_call_hpte_updatepp, - FUNCTION_TEXT(ppc_md.hpte_updatepp), + ppc_function_entry(ppc_md.hpte_updatepp), BRANCH_SET_LINK); } -- cgit v1.2.3 From b86206e4c32cbe6ac3de1c6dc52c2d64bcf461cb Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 09:44:22 +1100 Subject: powerpc: Fix branch patching code for ABIv2 The MMU hashtable and SLB branch patching code uses function pointers for the update sites. This creates a difference between ABIv1 and ABIv2 because we don't have function descriptors on ABIv2. Get rid of the function pointer and just point at the update sites directly. This works on both ABIs. 
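A compact illustration of the declaration change (declarations as in the patch, shown as alternatives; the commentary is an editorial gloss):

	/* Old style: the site was a _GLOBAL() symbol, i.e. on ABIv1 a
	 * descriptor in .opd.  Reading this variable's value fetched
	 * the first doubleword of the descriptor -- the text address --
	 * which is what patch_branch() needed, but only on ABIv1:
	 */
	extern unsigned int *htab_call_hpte_insert1;

	/* New style: a plain .globl label declared as an array of
	 * instruction words.  The expression decays to the symbol's
	 * own address, the instruction to patch, on both ABIs:
	 */
	extern u32 htab_call_hpte_insert1[];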
Signed-off-by: Anton Blanchard --- arch/powerpc/mm/hash_low_64.S | 36 ++++++++++++++++++++++++------------ arch/powerpc/mm/hash_utils_64.c | 20 +++++++++----------- arch/powerpc/mm/slb.c | 12 ++++++------ arch/powerpc/mm/slb_low.S | 12 ++++++++---- 4 files changed, 47 insertions(+), 33 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 8bf7537a7f53..057cbbb4c576 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -201,7 +201,8 @@ htab_insert_pte: li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert1) +.globl htab_call_hpte_insert1 +htab_call_hpte_insert1: bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 bge htab_pte_insert_ok /* Insertion successful */ @@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1) li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert2) +.globl htab_call_hpte_insert2 +htab_call_hpte_insert2: bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ htab_pte_insert_ok /* Insertion successful */ @@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(htab_call_hpte_remove) +.globl htab_call_hpte_remove +htab_call_hpte_remove: bl . /* Patched by htab_finish_init() */ /* Try all again */ @@ -296,7 +299,8 @@ htab_modify_pte: li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(htab_call_hpte_updatepp) +.globl htab_call_hpte_updatepp +htab_call_hpte_updatepp: bl . /* Patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here @@ -526,7 +530,8 @@ htab_special_pfn: li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert1) +.globl htab_call_hpte_insert1 +htab_call_hpte_insert1: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge htab_pte_insert_ok /* Insertion successful */ @@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1) li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert2) +.globl htab_call_hpte_insert2 +htab_call_hpte_insert2: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ htab_pte_insert_ok /* Insertion successful */ @@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(htab_call_hpte_remove) +.globl htab_call_hpte_remove +htab_call_hpte_remove: bl . /* patched by htab_finish_init() */ /* Try all again */ @@ -660,7 +667,8 @@ htab_modify_pte: li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(htab_call_hpte_updatepp) +.globl htab_call_hpte_updatepp +htab_call_hpte_updatepp: bl . /* patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here @@ -857,7 +865,8 @@ ht64_insert_pte: li r8,MMU_PAGE_64K li r9,MMU_PAGE_64K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(ht64_call_hpte_insert1) +.globl ht64_call_hpte_insert1 +ht64_call_hpte_insert1: bl . 
/* patched by htab_finish_init() */ cmpdi 0,r3,0 bge ht64_pte_insert_ok /* Insertion successful */ @@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1) li r8,MMU_PAGE_64K li r9,MMU_PAGE_64K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(ht64_call_hpte_insert2) +.globl ht64_call_hpte_insert2 +ht64_call_hpte_insert2: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ ht64_pte_insert_ok /* Insertion successful */ @@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(ht64_call_hpte_remove) +.globl ht64_call_hpte_remove +ht64_call_hpte_remove: bl . /* patched by htab_finish_init() */ /* Try all again */ @@ -952,7 +963,8 @@ ht64_modify_pte: li r7,MMU_PAGE_64K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(ht64_call_hpte_updatepp) +.globl ht64_call_hpte_updatepp +ht64_call_hpte_updatepp: bl . /* patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 49fc935ee807..d685dff382c9 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -603,19 +603,18 @@ int remove_section_mapping(unsigned long start, unsigned long end) } #endif /* CONFIG_MEMORY_HOTPLUG */ +extern u32 htab_call_hpte_insert1[]; +extern u32 htab_call_hpte_insert2[]; +extern u32 htab_call_hpte_remove[]; +extern u32 htab_call_hpte_updatepp[]; +extern u32 ht64_call_hpte_insert1[]; +extern u32 ht64_call_hpte_insert2[]; +extern u32 ht64_call_hpte_remove[]; +extern u32 ht64_call_hpte_updatepp[]; + static void __init htab_finish_init(void) { - extern unsigned int *htab_call_hpte_insert1; - extern unsigned int *htab_call_hpte_insert2; - extern unsigned int *htab_call_hpte_remove; - extern unsigned int *htab_call_hpte_updatepp; - #ifdef CONFIG_PPC_HAS_HASH_64K - extern unsigned int *ht64_call_hpte_insert1; - extern unsigned int *ht64_call_hpte_insert2; - extern unsigned int *ht64_call_hpte_remove; - extern unsigned int *ht64_call_hpte_updatepp; - patch_branch(ht64_call_hpte_insert1, ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); @@ -628,7 +627,6 @@ static void __init htab_finish_init(void) patch_branch(ht64_call_hpte_updatepp, ppc_function_entry(ppc_md.hpte_updatepp), BRANCH_SET_LINK); - #endif /* CONFIG_PPC_HAS_HASH_64K */ patch_branch(htab_call_hpte_insert1, diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 9d1d33cd2be5..4623366f82e9 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -256,10 +256,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr, patch_instruction(insn_addr, insn); } +extern u32 slb_compare_rr_to_size[]; +extern u32 slb_miss_kernel_load_linear[]; +extern u32 slb_miss_kernel_load_io[]; +extern u32 slb_compare_rr_to_size[]; +extern u32 slb_miss_kernel_load_vmemmap[]; + void slb_set_size(u16 size) { - extern unsigned int *slb_compare_rr_to_size; - if (mmu_slb_size == size) return; @@ -272,11 +276,7 @@ void slb_initialize(void) unsigned long linear_llp, vmalloc_llp, io_llp; unsigned long lflags, vflags; static int slb_encoding_inited; - extern unsigned int *slb_miss_kernel_load_linear; - extern unsigned int *slb_miss_kernel_load_io; - extern unsigned int *slb_compare_rr_to_size; #ifdef CONFIG_SPARSEMEM_VMEMMAP - extern unsigned int *slb_miss_kernel_load_vmemmap; unsigned long vmemmap_llp; #endif diff --git 
a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 17aa6dfceb34..28cffb68c2e1 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode) /* Linear mapping encoding bits, the "li" instruction below will * be patched by the kernel at boot */ -_GLOBAL(slb_miss_kernel_load_linear) +.globl slb_miss_kernel_load_linear +slb_miss_kernel_load_linear: li r11,0 /* * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 @@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) /* Check virtual memmap region. To be patches at kernel boot */ cmpldi cr0,r9,0xf bne 1f -_GLOBAL(slb_miss_kernel_load_vmemmap) +.globl slb_miss_kernel_load_vmemmap +slb_miss_kernel_load_vmemmap: li r11,0 b 6f 1: @@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) b 6f 5: /* IO mapping */ - _GLOBAL(slb_miss_kernel_load_io) +.globl slb_miss_kernel_load_io +slb_miss_kernel_load_io: li r11,0 6: /* @@ -250,7 +253,8 @@ slb_finish_load: 7: ld r10,PACASTABRR(r13) addi r10,r10,1 /* This gets soft patched on boot. */ -_GLOBAL(slb_compare_rr_to_size) +.globl slb_compare_rr_to_size +slb_compare_rr_to_size: cmpldi r10,0 blt+ 4f -- cgit v1.2.3 From 7cedd6014bfe353d4b552ed8d54d63f6e06e26ba Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:08:51 +1100 Subject: powerpc: Fix kernel thread creation on ABIv2 Change how we setup registers for ret_from_kernel_thread. In ABIv1, instead of passing a function descriptor in, dereference it and pass the target in directly. Use ppc_global_function_entry to get it right on both ABIv1 and ABIv2. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/entry_64.S | 4 +++- arch/powerpc/kernel/process.c | 17 +++++------------ 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index d23d7526d37a..cf4f6e693437 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -378,9 +378,11 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl schedule_tail REST_NVGPRS(r1) - ld r14, 0(r14) mtlr r14 mr r3,r15 +#if defined(_CALL_ELF) && _CALL_ELF == 2 + mr r12,r14 +#endif blrl li r3,0 b syscall_exit diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 31d021506d21..2ae1b99166c6 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -54,6 +54,7 @@ #ifdef CONFIG_PPC64 #include #endif +#include #include #include @@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, struct thread_info *ti = (void *)task_stack_page(p); memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); - childregs->gpr[14] = usp; /* function */ + /* function */ + if (usp) + childregs->gpr[14] = ppc_function_entry((void *)usp); #ifdef CONFIG_PPC64 clear_tsk_thread_flag(p, TIF_32BIT); childregs->softe = 1; @@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (cpu_has_feature(CPU_FTR_HAS_PPR)) p->thread.ppr = INIT_PPR; #endif - /* - * The PPC64 ABI makes use of a TOC to contain function - * pointers. The function (ret_from_except) is actually a pointer - * to the TOC entry. The first entry is a pointer to the actual - * function. 
- */ -#ifdef CONFIG_PPC64 - kregs->nip = *((unsigned long *)f); -#else - kregs->nip = (unsigned long)f; -#endif + kregs->nip = ppc_function_entry(f); return 0; } -- cgit v1.2.3 From b37c10d128a2fa3256d4e67c184177270eac4b86 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 4 Feb 2014 16:09:02 +1100 Subject: powerpc: Fix ABIv2 issues with stack offsets in assembly code Fix STK_PARAM and use it instead of hardcoding ABIv1 offsets. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ppc_asm.h | 4 ++++ arch/powerpc/lib/copypage_power7.S | 8 ++++---- arch/powerpc/lib/copyuser_power7.S | 24 ++++++++++++------------ arch/powerpc/lib/memcpy_64.S | 8 ++++---- arch/powerpc/lib/memcpy_power7.S | 20 ++++++++++---------- 5 files changed, 34 insertions(+), 30 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 5394d41a7140..3185d11b6691 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -189,7 +189,11 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define __STK_REG(i) (112 + ((i)-14)*8) #define STK_REG(i) __STK_REG(__REG_##i) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define __STK_PARAM(i) (32 + ((i)-3)*8) +#else #define __STK_PARAM(i) (48 + ((i)-3)*8) +#endif #define STK_PARAM(i) __STK_PARAM(__REG_##i) #if defined(_CALL_ELF) && _CALL_ELF == 2 diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 0f1e2398f83c..affc6d308e13 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -56,15 +56,15 @@ _GLOBAL(copypage_power7) #ifdef CONFIG_ALTIVEC mflr r0 - std r3,48(r1) - std r4,56(r1) + std r3,STK_PARAM(R3)(r1) + std r4,STK_PARAM(R4)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_copy cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) + ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) + ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) mtlr r0 li r0,(PAGE_SIZE/128) diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 62f0540418b9..db0fcbcc1d60 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -85,9 +85,9 @@ .Lexit: addi r1,r1,STACKFRAMESIZE .Ldo_err1: - ld r3,48(r1) - ld r4,56(r1) - ld r5,64(r1) + ld r3,STK_PARAM(R3)(r1) + ld r4,STK_PARAM(R4)(r1) + ld r5,STK_PARAM(R5)(r1) b __copy_tofrom_user_base @@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) + std r3,STK_PARAM(R3)(r1) + std r4,STK_PARAM(R4)(r1) + std r5,STK_PARAM(R5)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) + std r3,STK_PARAM(R3)(r1) + std r4,STK_PARAM(R4)(r1) + std r5,STK_PARAM(R5)(r1) blt .Lshort_copy #endif @@ -298,9 +298,9 @@ err1; stb r0,0(r3) bl enter_vmx_usercopy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) - ld r5,STACKFRAMESIZE+64(r1) + ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) + ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) + ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1) mtlr r0 /* diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 72ad055168a3..01da956a52fb 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -12,7 +12,7 @@ .align 7 _GLOBAL(memcpy) BEGIN_FTR_SECTION - std r3,48(r1) /* save destination pointer for return value */ + std r3,STK_PARAM(R3)(r1) /* save destination 
pointer for return value */ FTR_SECTION_ELSE #ifndef SELFTEST b memcpy_power7 @@ -73,7 +73,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f lbz r9,8(r4) stb r9,0(r3) -3: ld r3,48(r1) /* return dest pointer */ +3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ blr .Lsrc_unaligned: @@ -156,7 +156,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f rotldi r9,r9,8 stb r9,0(r3) -3: ld r3,48(r1) /* return dest pointer */ +3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ blr .Ldst_unaligned: @@ -201,5 +201,5 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 3: bf cr7*4+3,4f lbz r0,0(r4) stb r0,0(r3) -4: ld r3,48(r1) /* return dest pointer */ +4: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ blr diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index bae3f214c2d9..87d8eeccd4b7 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,48(r1) + std r3,STK_PARAM(R1)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,48(r1) + std r3,STK_PARAM(R1)(r1) blt .Lshort_copy #endif @@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7) lbz r0,0(r4) stb r0,0(r3) -15: ld r3,48(r1) +15: ld r3,STK_PARAM(R3)(r1) blr .Lunwind_stack_nonvmx_copy: @@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7) #ifdef CONFIG_ALTIVEC .Lvmx_copy: mflr r0 - std r4,56(r1) - std r5,64(r1) + std r4,STK_PARAM(R4)(r1) + std r5,STK_PARAM(R5)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_copy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) - ld r5,STACKFRAMESIZE+64(r1) + ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) + ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) + ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1) mtlr r0 /* @@ -447,7 +447,7 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,48(r1) + ld r3,STK_PARAM(R3)(r1) b exit_vmx_copy /* tail call optimise */ .Lvmx_unaligned_copy: @@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,48(r1) + ld r3,STK_PARAM(R3)(r1) b exit_vmx_copy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ -- cgit v1.2.3 From 752a6422fec3c0f5f9d4ac43d92f5dd13e22fde4 Mon Sep 17 00:00:00 2001 From: Ulrich Weigand Date: Fri, 14 Feb 2014 19:21:03 +0100 Subject: powerpc: Fix unsafe accesses to parameter area in ELFv2 Some of the assembler files in lib/ make use of the fact that in the ELFv1 ABI, the caller guarantees to provide stack space to save the parameter registers r3 ... r10. This guarantee is no longer present in ELFv2 for functions that have no variable argument list and no more than 8 arguments. Change the affected routines to temporarily store registers in the red zone and/or the top of their own stack frame (in the space provided to save r31 .. r29, which is actually not used in these routines). In opal_query_takeover, simply always allocate a stack frame; the routine is not performance critical. 
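As background for this and the preceding stack-offset patches, an illustrative summary of the two caller frame headers (offsets from r1, consistent with the __STK_PARAM definitions earlier in the series):

	/*
	 * ELFv1 caller frame:  0 back chain, 8 CR save, 16 LR save,
	 *   24 compiler dword, 32 linker dword, 40 TOC save,
	 *   48+ parameter save area (always allocated by the caller).
	 *
	 * ELFv2 caller frame:  0 back chain, 8 CR save, 16 LR save,
	 *   24 TOC save, 32+ parameter save area (only guaranteed for
	 *   varargs calls or more than eight doubleword arguments).
	 *
	 * Hence the parameter save area starts at 48 vs 32, and
	 * routines that can no longer rely on the caller providing it
	 * spill to the red zone or their own frame instead.
	 */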
Signed-off-by: Ulrich Weigand Signed-off-by: Anton Blanchard --- arch/powerpc/lib/copypage_power7.S | 8 ++++---- arch/powerpc/lib/copyuser_power7.S | 24 ++++++++++++------------ arch/powerpc/lib/memcpy_64.S | 8 ++++---- arch/powerpc/lib/memcpy_power7.S | 20 ++++++++++---------- arch/powerpc/platforms/powernv/opal-takeover.S | 2 ++ 5 files changed, 32 insertions(+), 30 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index affc6d308e13..d7dafb3777ac 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -56,15 +56,15 @@ _GLOBAL(copypage_power7) #ifdef CONFIG_ALTIVEC mflr r0 - std r3,STK_PARAM(R3)(r1) - std r4,STK_PARAM(R4)(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_copy cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) - ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) mtlr r0 li r0,(PAGE_SIZE/128) diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index db0fcbcc1d60..c46c876ac96a 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -85,9 +85,9 @@ .Lexit: addi r1,r1,STACKFRAMESIZE .Ldo_err1: - ld r3,STK_PARAM(R3)(r1) - ld r4,STK_PARAM(R4)(r1) - ld r5,STK_PARAM(R5)(r1) + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1) b __copy_tofrom_user_base @@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,STK_PARAM(R3)(r1) - std r4,STK_PARAM(R4)(r1) - std r5,STK_PARAM(R5)(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,STK_PARAM(R3)(r1) - std r4,STK_PARAM(R4)(r1) - std r5,STK_PARAM(R5)(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) blt .Lshort_copy #endif @@ -298,9 +298,9 @@ err1; stb r0,0(r3) bl enter_vmx_usercopy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) - ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) - ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) + ld r5,STK_REG(R29)(r1) mtlr r0 /* diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 01da956a52fb..9d3960c16fde 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -12,7 +12,7 @@ .align 7 _GLOBAL(memcpy) BEGIN_FTR_SECTION - std r3,STK_PARAM(R3)(r1) /* save destination pointer for return value */ + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ FTR_SECTION_ELSE #ifndef SELFTEST b memcpy_power7 @@ -73,7 +73,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f lbz r9,8(r4) stb r9,0(r3) -3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ +3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Lsrc_unaligned: @@ -156,7 +156,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f rotldi r9,r9,8 stb r9,0(r3) -3: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ +3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Ldst_unaligned: @@ -201,5 +201,5 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 3: bf cr7*4+3,4f lbz r0,0(r4) stb 
r0,0(r3) -4: ld r3,STK_PARAM(R3)(r1) /* return dest pointer */ +4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 87d8eeccd4b7..2ff5c142f87b 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,STK_PARAM(R1)(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,STK_PARAM(R1)(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blt .Lshort_copy #endif @@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7) lbz r0,0(r4) stb r0,0(r3) -15: ld r3,STK_PARAM(R3)(r1) +15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blr .Lunwind_stack_nonvmx_copy: @@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7) #ifdef CONFIG_ALTIVEC .Lvmx_copy: mflr r0 - std r4,STK_PARAM(R4)(r1) - std r5,STK_PARAM(R5)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl enter_vmx_copy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+STK_PARAM(R3)(r1) - ld r4,STACKFRAMESIZE+STK_PARAM(R4)(r1) - ld r5,STACKFRAMESIZE+STK_PARAM(R5)(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) + ld r5,STK_REG(R29)(r1) mtlr r0 /* @@ -447,7 +447,7 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,STK_PARAM(R3)(r1) + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) b exit_vmx_copy /* tail call optimise */ .Lvmx_unaligned_copy: @@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,STK_PARAM(R3)(r1) + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) b exit_vmx_copy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S index 3cd262897c27..11a3169ee583 100644 --- a/arch/powerpc/platforms/powernv/opal-takeover.S +++ b/arch/powerpc/platforms/powernv/opal-takeover.S @@ -21,11 +21,13 @@ _GLOBAL(opal_query_takeover) mfcr r0 stw r0,8(r1) + stdu r1,-STACKFRAMESIZE(r1) std r3,STK_PARAM(R3)(r1) std r4,STK_PARAM(R4)(r1) li r3,H_HAL_TAKEOVER li r4,H_HAL_TAKEOVER_QUERY_MAGIC HVSC + addi r1,r1,STACKFRAMESIZE ld r10,STK_PARAM(R3)(r1) std r4,0(r10) ld r10,STK_PARAM(R4)(r1) -- cgit v1.2.3 From c2e31bdc120d992a90b75d94c7fa403fea362e43 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 10:48:44 +1100 Subject: powerpc/tm: Use STK_PARAM Get rid of the tm specific STACK_PARAM and use STK_PARAM Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/tm.S | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 03567c05950a..27aad248c002 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -41,7 +41,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ /* Stack frame offsets for local variables. */ #define TM_FRAME_L0 TM_FRAME_SIZE-16 #define TM_FRAME_L1 TM_FRAME_SIZE-8 -#define STACK_PARAM(x) (48+((x)*8)) /* In order to access the TM SPRs, TM must be enabled. So, do so: */ @@ -113,7 +112,7 @@ _GLOBAL(tm_reclaim) /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ - std r3, STACK_PARAM(0)(r1) + std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) /* We need to setup MSR for VSX register save instructions. 
Here we @@ -202,7 +201,7 @@ dont_backup_fp: /* Now get some more GPRS free */ std r7, GPR7(r1) /* Temporary stash */ std r12, GPR12(r1) /* '' '' '' */ - ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ + ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ -- cgit v1.2.3 From 6403105bfda4d6934b39aeb85ff818b185b42de8 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 10:52:17 +1100 Subject: powerpc/tm: Fix GOT save offset for ABIv2 The r2 TOC/GOT save offset is 40 on ABIv1 and 24 on ABIv2. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ppc_asm.h | 2 ++ arch/powerpc/kernel/tm.S | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 3185d11b6691..2cc2511ff076 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -190,8 +190,10 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define STK_REG(i) __STK_REG(__REG_##i) #if defined(_CALL_ELF) && _CALL_ELF == 2 +#define STK_GOT 24 #define __STK_PARAM(i) (32 + ((i)-3)*8) #else +#define STK_GOT 40 #define __STK_PARAM(i) (48 + ((i)-3)*8) #endif #define STK_PARAM(i) __STK_PARAM(__REG_##i) diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 27aad248c002..cf1027efca30 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -107,7 +107,7 @@ _GLOBAL(tm_reclaim) mflr r0 stw r6, 8(r1) std r0, 16(r1) - std r2, 40(r1) + std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ @@ -288,7 +288,7 @@ dont_backup_fp: ld r0, 16(r1) mtcr r4 mtlr r0 - ld r2, 40(r1) + ld r2, STK_GOT(r1) /* Load system default DSCR */ ld r4, DSCR_DEFAULT@toc(r2) @@ -311,7 +311,7 @@ _GLOBAL(__tm_recheckpoint) mflr r0 stw r5, 8(r1) std r0, 16(r1) - std r2, 40(r1) + std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. @@ -447,7 +447,7 @@ restore_gprs: ld r0, 16(r1) mtcr r4 mtlr r0 - ld r2, 40(r1) + ld r2, STK_GOT(r1) /* Load system default DSCR */ ld r4, DSCR_DEFAULT@toc(r2) -- cgit v1.2.3 From d51959d70ffc55d1c829e881a6121e6fbbfb29af Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 12:51:58 +1100 Subject: powerpc/tracing: TRACE_WITH_FRAME_BUFFER creates invalid stack frames TRACE_WITH_FRAME_BUFFER creates 32 byte stack frames. On ppc64 ABIv1 this is too small and a callee could corrupt the stack by writing to the parameter save area (starting at offset 48). Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/irqflags.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index f62c056e75bf..e20eb95429a8 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -20,9 +20,9 @@ */ #define TRACE_WITH_FRAME_BUFFER(func) \ mflr r0; \ - stdu r1, -32(r1); \ + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ std r0, 16(r1); \ - stdu r1, -32(r1); \ + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ bl func; \ ld r1, 0(r1); \ ld r1, 0(r1); -- cgit v1.2.3 From 2751b628c97e66e61f482935ca59148751972941 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 11 Mar 2014 11:54:06 +1100 Subject: powerpc: Fix SMP issues with ppc64le ABIv2 There is no need to put a function descriptor in __secondary_hold_spinloop. 
Use ppc_function_entry to get the instruction address and put it in __secondary_hold_spinloop instead. Also fix an issue where we assumed cur_cpu_spec held a function descriptor. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/head_64.S | 18 ++++++++---------- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/platforms/85xx/smp.c | 3 ++- arch/powerpc/platforms/cell/smp.c | 5 +++-- arch/powerpc/platforms/powernv/smp.c | 5 +++-- arch/powerpc/platforms/pseries/smp.c | 5 +++-- arch/powerpc/platforms/wsp/scom_smp.c | 3 ++- 7 files changed, 22 insertions(+), 19 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 97329a19c76b..a95145d7f61b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -76,10 +76,9 @@ END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ trap - /* Secondary processors spin on this value until it becomes nonzero. - * When it does it contains the real address of the descriptor - * of the function that the cpu should jump to to continue - * initialization. + /* Secondary processors spin on this value until it becomes non-zero. + * When non-zero, it contains the real address of the function the cpu + * should jump to. */ .balign 8 .globl __secondary_hold_spinloop @@ -147,9 +146,6 @@ __secondary_hold: #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) #ifdef CONFIG_PPC_BOOK3E tovirt(r12,r12) -#endif -#if !defined(_CALL_ELF) || _CALL_ELF != 2 - ld r12,0(r12) /* deref function descriptor */ #endif mtctr r12 mr r3,r24 @@ -266,10 +262,12 @@ generic_secondary_common_init: /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) - ld r23,CPU_SPEC_RESTORE(r23) - cmpdi 0,r23,0 + ld r12,CPU_SPEC_RESTORE(r23) + cmpdi 0,r12,0 beq 3f - ld r12,0(r23) +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r12) +#endif mtctr r12 bctrl diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index fbe24377eda3..90b532ace0d5 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -341,7 +341,7 @@ void smp_release_cpus(void) ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop - PHYSICAL_START); - *ptr = __pa(generic_secondary_smp_init); + *ptr = ppc_function_entry(generic_secondary_smp_init); /* And wait a bit for them to catch up */ for (i = 0; i < 100000; i++) { diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 6382098d6f8d..ba093f553678 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -267,7 +268,7 @@ out: flush_spin_table(spin_table); out_be32(&spin_table->pir, hw_cpu); out_be64((u64 *)(&spin_table->addr_h), - __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); + __pa(ppc_function_entry(generic_secondary_smp_init))); flush_spin_table(spin_table); #endif diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 90745eaa45fe..c8017a7bcabd 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "interrupt.h" #include @@ -70,8 +71,8 @@ static cpumask_t of_spin_map; static inline int smp_startup_cpu(unsigned int lcpu) { int status; - unsigned long start_here = __pa((u32)*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + 
__pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 908672bdcea6..b370b86263a6 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "powernv.h" @@ -49,8 +50,8 @@ static void pnv_smp_setup_cpu(int cpu) int pnv_smp_kick_cpu(int nr) { unsigned int pcpu = get_hard_smp_processor_id(nr); - unsigned long start_here = __pa(*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); long rc; BUG_ON(nr < 0 || nr >= NR_CPUS); diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 24f58cb0a543..a3555b10c1a5 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "pseries.h" #include "offline_states.h" @@ -96,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) static inline int smp_startup_cpu(unsigned int lcpu) { int status; - unsigned long start_here = __pa((u32)*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c index 268bc899c1f7..8c79ce016cf1 100644 --- a/arch/powerpc/platforms/wsp/scom_smp.c +++ b/arch/powerpc/platforms/wsp/scom_smp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "wsp.h" @@ -405,7 +406,7 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np) goto fail; } - start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init + start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init : generic_secondary_thread_init); pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); -- cgit v1.2.3 From 07de8377f7488f262f9694a1567ab93b4dda63bc Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 11 Mar 2014 12:15:27 +1100 Subject: powerpc: Fix ABIv2 issue with dereference_function_descriptor Don't try and dereference a function descriptor on ABIv2. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/sections.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index d0e784e0ff48..d1bb96d5a298 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -39,6 +39,7 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end) (unsigned long)_stext < end; } +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #undef dereference_function_descriptor static inline void *dereference_function_descriptor(void *ptr) { @@ -49,6 +50,7 @@ static inline void *dereference_function_descriptor(void *ptr) ptr = p; return ptr; } +#endif #endif -- cgit v1.2.3 From 0e60e46e2aa318c92bb224de29b68b6296bb0fde Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 17:35:28 +1030 Subject: powerpc: make module stub code endian independent By representing them as words, rather than chars, we can avoid endian ifdefs. 
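The word-based representation works because the CPU fetches instructions as 32-bit words in its native byte order: a u32 initializer is therefore correct on either endianness, while a char array pins one byte order at the source level and needs an #ifdef twin. A minimal standalone illustration (not part of the patch):

	#include <stdint.h>

	/* One encoding, correct on both endiannesses: the compiler
	 * stores the u32 in the build's native byte order, which is
	 * also the order instructions are fetched in. */
	static const uint32_t insn_word = 0x3d820000;	/* addis r12,r2,0 */

	/* The old approach: this byte array is the correct memory
	 * image only on big-endian, so a little-endian build needed
	 * an #ifdef __LITTLE_ENDIAN__ variant with the bytes swapped. */
	static const uint8_t insn_bytes_be[4] = { 0x3d, 0x82, 0x00, 0x00 };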
Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 42 +++++++++++------------------------------ 1 file changed, 11 insertions(+), 31 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 12664c130d73..7c16b2eefd95 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -47,8 +47,8 @@ struct ppc64_stub_entry { /* 28 byte jump instruction sequence (7 instructions) */ - unsigned char jump[28]; - unsigned char unused[4]; + u32 jump[7]; + u32 unused; /* Data for the above code */ struct ppc64_opd_entry opd; }; @@ -61,25 +61,14 @@ struct ppc64_stub_entry r2) into the stub. */ static struct ppc64_stub_entry ppc64_stub = { .jump = { -#ifdef __LITTLE_ENDIAN__ - 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, */ - 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, */ + 0x3d820000, /* addis r12,r2, */ + 0x398c0000, /* addi r12,r12, */ /* Save current r2 value in magic place on the stack. */ - 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ - 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ - 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ - 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ - 0x20, 0x04, 0x80, 0x4e /* bctr */ -#else - 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, */ - 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, */ - /* Save current r2 value in magic place on the stack. */ - 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */ - 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */ - 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ - 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ - 0x4e, 0x80, 0x04, 0x20 /* bctr */ -#endif + 0xf8410028, /* std r2,40(r1) */ + 0xe96c0020, /* ld r11,32(r12) */ + 0xe84c0028, /* ld r2,40(r12) */ + 0x7d6903a6, /* mtctr r11 */ + 0x4e800420 /* bctr */ } }; /* Count how many different 24-bit relocations (different symbol, @@ -274,19 +263,10 @@ static inline int create_stub(Elf64_Shdr *sechdrs, struct ppc64_opd_entry *opd, struct module *me) { - Elf64_Half *loc1, *loc2; long reladdr; *entry = ppc64_stub; -#ifdef __LITTLE_ENDIAN__ - loc1 = (Elf64_Half *)&entry->jump[0]; - loc2 = (Elf64_Half *)&entry->jump[4]; -#else - loc1 = (Elf64_Half *)&entry->jump[2]; - loc2 = (Elf64_Half *)&entry->jump[6]; -#endif - /* Stub uses address relative to r2. */ reladdr = (unsigned long)entry - my_r2(sechdrs, me); if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { @@ -296,8 +276,8 @@ static inline int create_stub(Elf64_Shdr *sechdrs, } DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); - *loc1 = PPC_HA(reladdr); - *loc2 = PPC_LO(reladdr); + entry->jump[0] |= PPC_HA(reladdr); + entry->jump[1] |= PPC_LO(reladdr); entry->opd.funcaddr = opd->funcaddr; entry->opd.r2 = opd->r2; return 1; -- cgit v1.2.3 From d247da0a8ebcc4ebb4c766487de6af5df560adac Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 17:36:28 +1030 Subject: powerpc: modules implement R_PPC64_TOCSAVE relocation. 
Signed-off-by: Rusty Russell --- arch/powerpc/include/uapi/asm/elf.h | 5 ++++- arch/powerpc/kernel/module_64.c | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index 7e39c9146a71..0341109e4395 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -291,9 +291,12 @@ do { \ #define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ #define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ #define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ +#define R_PPC64_TLSGD 107 +#define R_PPC64_TLSLD 108 +#define R_PPC64_TOCSAVE 109 /* Keep this the last entry. */ -#define R_PPC64_NUM 107 +#define R_PPC64_NUM 110 /* There's actually a third entry here, but it's unused */ struct ppc64_opd_entry diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7c16b2eefd95..a8694d462079 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -454,6 +454,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, *location = value - (unsigned long)location; break; + case R_PPC64_TOCSAVE: + /* + * Marker reloc indicates we don't have to save r2. + * That would only save us one instruction, so ignore + * it. + */ + break; + default: printk("%s: Unknown ADD relocation: %lu\n", me->name, -- cgit v1.2.3 From 9baeaef64095eab00c232f55df2e7c2d8e89845d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 17:37:28 +1030 Subject: powerpc: EXPORT_SYMBOL(.TOC.) For the ELFv2 ABI, powerpc introduces a magic symbol ".TOC.". depmod then complains that this doesn't resolve (so does modpost, but we could easily fix that). To export this, we need to use asm. modpost and depmod both strip "." from symbols for the old PPC64 ELFv1 ABI, so we actually export a "TOC.". Signed-off-by: Rusty Russell --- arch/powerpc/kernel/misc_64.S | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index bda85a193abf..b80fafbfab2f 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -634,3 +634,22 @@ _GLOBAL(kexec_sequence) li r5,0 blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC */ + +#ifdef CONFIG_MODULES +#if defined(_CALL_ELF) && _CALL_ELF == 2 +/* + * Export a fake .TOC. since both modpost and depmod will complain otherwise. + * Both modpost and depmod strip the leading . so we do the same here. + */ +.section "__ksymtab_strings","a" +__kstrtab_TOC.: + .asciz "TOC." + +.section "___ksymtab+TOC.","a" +/* This symbol name is important: it's used by modpost to find exported syms */ +.globl __ksymtab_TOC. +__ksymtab_TOC.: + .llong 0 /* .value */ + .llong __kstrtab_TOC. +#endif /* ELFv2 */ +#endif /* MODULES */ -- cgit v1.2.3 From 71ec7c55ed91e2352c00d51d171fccaa7cef5a00 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 19:59:11 +1030 Subject: powerpc: module: handle MODVERSION for .TOC. For the ELFv2 ABI, powerpc introduces a magic symbol ".TOC.". If we don't create a CRC for it (minus the leading ".", since we strip that) we get a modpost warning about missing CRC and the CRC array seems to be displaced by 1 so other CRCs mismatch too. 
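For background, each module carries a table of the symbol versions it was built against, one record per symbol; this is the structure the module loader's dedotify_versions() (touched later in this series) walks, abridged here from include/linux/module.h:

	/* Compared against the exporter's CRCs at module load time. */
	struct modversion_info {
		unsigned long crc;
		char name[64 - sizeof(unsigned long)];	/* MODULE_NAME_LEN */
	};

On the exporting side the CRCs live in sections laid out parallel to the export table, so one exported name with no __crc_ entry shifts every later CRC onto the wrong neighbour, consistent with the displaced-by-one mismatches described above.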
Signed-off-by: Rusty Russell --- arch/powerpc/kernel/misc_64.S | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index b80fafbfab2f..4e314b90c75d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -637,6 +637,15 @@ _GLOBAL(kexec_sequence) #ifdef CONFIG_MODULES #if defined(_CALL_ELF) && _CALL_ELF == 2 + +#ifdef CONFIG_MODVERSIONS +.weak __crc_TOC. +.section "___kcrctab+TOC.","a" +.globl __kcrctab_TOC. +__kcrctab_TOC.: + .llong __crc_TOC. +#endif + /* * Export a fake .TOC. since both modpost and depmod will complain otherwise. * Both modpost and depmod strip the leading . so we do the same here. -- cgit v1.2.3 From 4edebbeae3085e71f75584b6582495459e2e6cb2 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 19:59:26 +1030 Subject: powerpc: Fix up TOC. for modules. The kernel resolved the '.TOC.' to a fake symbol, so we need to fix it up to point to our .toc section plus 0x8000. Signed-off-by: Rusty Russell --- arch/powerpc/include/asm/module.h | 1 + arch/powerpc/kernel/module_64.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 49fa55bfbac4..c9c7aaaf95f5 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -35,6 +35,7 @@ struct mod_arch_specific { #ifdef __powerpc64__ unsigned int stubs_section; /* Index of stubs section in module */ unsigned int toc_section; /* What section is the TOC? */ + bool toc_fixed; /* Have we fixed up .TOC.? */ #ifdef CONFIG_DYNAMIC_FTRACE unsigned long toc; unsigned long tramp; diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index a8694d462079..f6544d7071d6 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -196,6 +196,24 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) } } +static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex) +{ + unsigned int i, numsyms; + Elf64_Sym *syms; + + syms = (Elf64_Sym *)sechdrs[symindex].sh_addr; + numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); + + for (i = 1; i < numsyms; i++) { + if (syms[i].st_shndx == SHN_UNDEF + && strcmp(strtab + syms[i].st_name, ".TOC.") == 0) + return &syms[i]; + } + return NULL; +} + int module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, char *secstrings, @@ -337,6 +355,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, DEBUGP("Applying ADD relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); + + /* First time we're called, we can fix up .TOC. */ + if (!me->arch.toc_fixed) { + sym = find_dot_toc(sechdrs, strtab, symindex); + /* It's theoretically possible that a module doesn't want a + * .TOC. so don't fail it just for that. */ + if (sym) + sym->st_value = my_r2(sechdrs, me); + me->arch.toc_fixed = true; + } + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr -- cgit v1.2.3 From 0906584a0a4b689f6e80307f699247621321670a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 19:59:27 +1030 Subject: powerpc: Handle new ELFv2 module relocations The new ELF ABI tends to use R_PPC64_REL16_LO and R_PPC64_REL16_HA relocations (PC-relative), so implement them. 
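The @ha/@lo arithmetic implemented by these handlers is the standard PowerPC split of a 32-bit displacement across an instruction pair: the low half is consumed sign-extended, so the high half must be pre-rounded by 0x8000. A self-contained sketch with hypothetical helper names, mirroring the kernel's PPC_HA()/PPC_LO():

	#include <stdint.h>

	/* High-adjusted and low halves of a 32-bit displacement. */
	static uint16_t ha16(int32_t v)
	{
		return (uint16_t)(((uint32_t)v + 0x8000) >> 16);
	}

	static uint16_t lo16(int32_t v)
	{
		return (uint16_t)v;
	}

	/* For any v: (ha16(v) << 16) + (int16_t)lo16(v) == v.
	 * The +0x8000 rounding in ha16() exactly cancels the
	 * sign-extension of the low half, which is why the
	 * R_PPC64_REL16_HA handler below adds 0x8000 before
	 * shifting. */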
Signed-off-by: Rusty Russell --- arch/powerpc/include/uapi/asm/elf.h | 7 ++++++- arch/powerpc/kernel/module_64.c | 17 +++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index 0341109e4395..59dad113897b 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -295,8 +295,13 @@ do { \ #define R_PPC64_TLSLD 108 #define R_PPC64_TOCSAVE 109 +#define R_PPC64_REL16 249 +#define R_PPC64_REL16_LO 250 +#define R_PPC64_REL16_HI 251 +#define R_PPC64_REL16_HA 252 + /* Keep this the last entry. */ -#define R_PPC64_NUM 110 +#define R_PPC64_NUM 253 /* There's actually a third entry here, but it's unused */ struct ppc64_opd_entry diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f6544d7071d6..34ba326ccc30 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -491,6 +491,23 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, */ break; + case R_PPC64_REL16_HA: + /* Subtract location pointer */ + value -= (unsigned long)location; + value = ((value + 0x8000) >> 16); + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + + case R_PPC64_REL16_LO: + /* Subtract location pointer */ + value -= (unsigned long)location; + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + default: printk("%s: Unknown ADD relocation: %lu\n", me->name, -- cgit v1.2.3 From 5b12c5c69415b184aadb930660a47a8af4c6deb5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 20:11:28 +1030 Subject: powerpc: modules: comment about de-dotifying symbols when using the ELFv2 ABI. ELFv2 doesn't use function descriptors, so we don't expect symbols to start with ".". But because depmod and modpost strip ".", and we have the special symbol ".TOC.", we still need to do it. Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 34ba326ccc30..05b27a5efc7e 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -172,6 +172,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, return relocs * sizeof(struct ppc64_stub_entry); } +/* Still needed for ELFv2, for .TOC. */ static void dedotify_versions(struct modversion_info *vers, unsigned long size) { @@ -182,7 +183,7 @@ static void dedotify_versions(struct modversion_info *vers, memmove(vers->name, vers->name+1, strlen(vers->name)); } -/* Undefined symbols which refer to .funcname, hack to funcname */ +/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) { unsigned int i; -- cgit v1.2.3 From d2fae548039987e0c64957ede44822305fdafb66 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 20:12:44 +1030 Subject: powerpc: modules: change r2 save/restore offset for ELFv2 ABI. ELFv2 uses a different stack offset (24 vs 40) to save r2. 
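The two offsets fall directly out of the ABIs' stack frame headers: ELFv2 dropped the compiler and linker doublewords, moving the TOC save slot from 40 up to 24. For reference, a sketch of the frame header layouts alongside the STK_GOT definition introduced earlier in this series:

	/* Stack frame header, offsets from r1:
	 *
	 *   ELFv1                      ELFv2
	 *    0: back chain              0: back chain
	 *    8: CR save                 8: CR save
	 *   16: LR save                16: LR save
	 *   24: compiler doubleword    24: TOC (r2) save
	 *   32: linker doubleword
	 *   40: TOC (r2) save
	 */
	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define STK_GOT		24
	#else
	#define STK_GOT		40
	#endif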
Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 05b27a5efc7e..8bfcf1b8b6d4 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -41,6 +41,12 @@ #define DEBUGP(fmt , ...) #endif +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define R2_STACK_OFFSET 24 +#else +#define R2_STACK_OFFSET 40 +#endif + /* Like PPC32, we need little trampolines to do > 24-bit jumps (into the kernel itself). But on PPC64, these need to be used for every jump, actually, to reset r2 (TOC+0x8000). */ @@ -61,14 +67,14 @@ struct ppc64_stub_entry r2) into the stub. */ static struct ppc64_stub_entry ppc64_stub = { .jump = { - 0x3d820000, /* addis r12,r2, */ - 0x398c0000, /* addi r12,r12, */ + 0x3d820000, /* addis r12,r2, */ + 0x398c0000, /* addi r12,r12, */ /* Save current r2 value in magic place on the stack. */ - 0xf8410028, /* std r2,40(r1) */ - 0xe96c0020, /* ld r11,32(r12) */ - 0xe84c0028, /* ld r2,40(r12) */ - 0x7d6903a6, /* mtctr r11 */ - 0x4e800420 /* bctr */ + 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ + 0xe96c0020, /* ld r11,32(r12) */ + 0xe84c0028, /* ld r2,40(r12) */ + 0x7d6903a6, /* mtctr r11 */ + 0x4e800420 /* bctr */ } }; /* Count how many different 24-bit relocations (different symbol, @@ -338,7 +344,8 @@ static int restore_r2(u32 *instruction, struct module *me) me->name, *instruction); return 0; } - *instruction = 0xe8410028; /* ld r2,40(r1) */ + /* ld r2,R2_STACK_OFFSET(r1) */ + *instruction = 0xe8410000 | R2_STACK_OFFSET; return 1; } -- cgit v1.2.3 From b1ce369e820aaca3d91e9d9bbaaf860794d9ab01 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 20:12:59 +1030 Subject: powerpc: modules: use r12 for stub jump address. In ELFv2, r12 is supposed to equal the PC on entry to a function. Our stubs use r11, so swap that with r12. Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 8bfcf1b8b6d4..f8b6d28784ef 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -67,13 +67,13 @@ struct ppc64_stub_entry r2) into the stub. */ static struct ppc64_stub_entry ppc64_stub = { .jump = { - 0x3d820000, /* addis r12,r2, */ - 0x398c0000, /* addi r12,r12, */ + 0x3d620000, /* addis r11,r2, */ + 0x396b0000, /* addi r11,r11, */ /* Save current r2 value in magic place on the stack. */ 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ - 0xe96c0020, /* ld r11,32(r12) */ - 0xe84c0028, /* ld r2,40(r12) */ - 0x7d6903a6, /* mtctr r11 */ + 0xe98b0020, /* ld r12,32(r11) */ + 0xe84b0026, /* ld r2,40(r11) */ + 0x7d8903a6, /* mtctr r12 */ 0x4e800420 /* bctr */ } }; -- cgit v1.2.3 From 5c729a115e4727fd71308e4d68846f64fa460ead Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 18 Mar 2014 20:13:03 +1030 Subject: powerpc: modules: skip r2 setup for ELFv2 ELFv2 doesn't need to set up r2 when calling a function.
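This is the function-descriptor difference at work: an ELFv1 "function pointer" points at a descriptor holding the entry address and the callee's TOC, so the stub must load r2 itself, whereas an ELFv2 pointer is the entry address and the callee derives its own TOC from r12. For reference, the ELFv1 descriptor layout (the kernel's struct ppc64_opd_entry keeps the first two fields; the ABI defines a third, which C code never uses):

	/* What an ELFv1 function pointer actually points at. */
	struct func_descriptor {
		unsigned long funcaddr;	/* first instruction of the function */
		unsigned long r2;	/* TOC value the function expects */
		unsigned long env;	/* environment pointer, unused by C */
	};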
Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index f8b6d28784ef..d7222495e24c 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -59,12 +59,19 @@ struct ppc64_stub_entry struct ppc64_opd_entry opd; }; -/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external) - function which may be more than 24-bits away. We could simply - patch the new r2 value and function pointer into the stub, but it's - significantly shorter to put these values at the end of the stub - code, and patch the stub address (32-bits relative to the TOC ptr, - r2) into the stub. */ +/* + * PPC64 uses 24 bit jumps, but we need to jump into other modules or + * the kernel which may be further. So we jump to a stub. + * + * For ELFv1 we need to use this to set up the new r2 value (aka TOC + * pointer). For ELFv2 it's the callee's responsibility to set up the + * new r2, but for both we need to save the old r2. + * + * We could simply patch the new r2 value and function pointer into + * the stub, but it's significantly shorter to put these values at the + * end of the stub code, and patch the stub address (32-bits relative + * to the TOC ptr, r2) into the stub. + */ static struct ppc64_stub_entry ppc64_stub = { .jump = { 0x3d620000, /* addis r11,r2, */ @@ -72,7 +79,10 @@ static struct ppc64_stub_entry ppc64_stub = /* Save current r2 value in magic place on the stack. */ 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ 0xe98b0020, /* ld r12,32(r11) */ +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + /* Set up new r2 from function descriptor */ 0xe84b0026, /* ld r2,40(r11) */ +#endif 0x7d8903a6, /* mtctr r12 */ 0x4e800420 /* bctr */ } }; -- cgit v1.2.3 From 008d7a914efee6ee5afe59bcc46d3d6b60657598 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 19 Mar 2014 10:42:22 +1030 Subject: powerpc: modules: implement stubs for ELFv2 ABI. ELFv2 doesn't use function descriptors, because it doesn't need to load a new r2 when calling into a function. On the other hand, you're supposed to use a local entry point for R_PPC_REL24 branches. Signed-off-by: Rusty Russell --- arch/powerpc/kernel/module_64.c | 73 ++++++++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 12 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index d7222495e24c..042360135260 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -43,8 +43,58 @@ #if defined(_CALL_ELF) && _CALL_ELF == 2 #define R2_STACK_OFFSET 24 + +/* An address is simply the address of the function. */ +typedef unsigned long func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return addr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func; +} + +/* PowerPC64 specific values for the Elf64_Sym st_other field. 
*/ +#define STO_PPC64_LOCAL_BIT 5 +#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT) +#define PPC64_LOCAL_ENTRY_OFFSET(other) \ + (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2) + +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + /* sym->st_other indicates offset to local entry point + * (otherwise it will assume r12 is the address of the start + * of function and try to derive r2 from it). */ + return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other); +} #else #define R2_STACK_OFFSET 40 + +/* An address is address of the OPD entry, which contains address of fn. */ +typedef struct ppc64_opd_entry func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return *(struct ppc64_opd_entry *)addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return func_desc(addr).funcaddr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func.funcaddr; +} +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + return 0; +} #endif /* Like PPC32, we need little trampolines to do > 24-bit jumps (into @@ -56,7 +106,7 @@ struct ppc64_stub_entry u32 jump[7]; u32 unused; /* Data for the above code */ - struct ppc64_opd_entry opd; + func_desc_t funcdata; }; /* @@ -225,7 +275,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, for (i = 1; i < numsyms; i++) { if (syms[i].st_shndx == SHN_UNDEF - && strcmp(strtab + syms[i].st_name, ".TOC.") == 0) + && strcmp(strtab + syms[i].st_name, "TOC.") == 0) return &syms[i]; } return NULL; @@ -295,7 +345,7 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me) /* Patch stub to reference function and correct r2 value. */ static inline int create_stub(Elf64_Shdr *sechdrs, struct ppc64_stub_entry *entry, - struct ppc64_opd_entry *opd, + unsigned long addr, struct module *me) { long reladdr; @@ -313,33 +363,31 @@ static inline int create_stub(Elf64_Shdr *sechdrs, entry->jump[0] |= PPC_HA(reladdr); entry->jump[1] |= PPC_LO(reladdr); - entry->opd.funcaddr = opd->funcaddr; - entry->opd.r2 = opd->r2; + entry->funcdata = func_desc(addr); return 1; } -/* Create stub to jump to function described in this OPD: we need the +/* Create stub to jump to function described in this OPD/ptr: we need the stub to set up the TOC ptr (r2) for the function. */ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, - unsigned long opdaddr, + unsigned long addr, struct module *me) { struct ppc64_stub_entry *stubs; - struct ppc64_opd_entry *opd = (void *)opdaddr; unsigned int i, num_stubs; num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); /* Find this stub, or if that fails, the next avail. 
entry */ stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; - for (i = 0; stubs[i].opd.funcaddr; i++) { + for (i = 0; stub_func_addr(stubs[i].funcdata); i++) { BUG_ON(i >= num_stubs); - if (stubs[i].opd.funcaddr == opd->funcaddr) + if (stub_func_addr(stubs[i].funcdata) == func_addr(addr)) return (unsigned long)&stubs[i]; } - if (!create_stub(sechdrs, &stubs[i], opd, me)) + if (!create_stub(sechdrs, &stubs[i], addr, me)) return 0; return (unsigned long)&stubs[i]; @@ -480,7 +528,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return -ENOENT; if (!restore_r2((u32 *)location + 1, me)) return -ENOEXEC; - } + } else + value += local_entry_offset(sym); /* Convert value to relative */ value -= (unsigned long)location; -- cgit v1.2.3 From 169c7cee3131cdf5e2f2d2a6c722c7db0283bcd5 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 3 Apr 2014 16:01:11 +1100 Subject: powerpc: Add _GLOBAL_TOC for ABIv2 assembly functions exported to modules If an assembly function that calls back into c code is exported to modules, we need to ensure r2 is setup correctly. There are only two places crazy enough to do it (two of which are my fault). Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/ppc_asm.h | 12 ++++++++++++ arch/powerpc/lib/copyuser_64.S | 2 +- arch/powerpc/lib/memcpy_64.S | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 2cc2511ff076..6400f1814fe8 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -207,6 +207,16 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) .globl name; \ name: +#define _GLOBAL_TOC(name) \ + .section ".text"; \ + .align 2 ; \ + .type name,@function; \ + .globl name; \ +name: \ +0: addis r2,r12,(.TOC.-0b)@ha; \ + addi r2,r2,(.TOC.-0b)@l; \ + .localentry name,.-name + #define _KPROBE(name) \ .section ".kprobes.text","a"; \ .align 2 ; \ @@ -235,6 +245,8 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): +#define _GLOBAL_TOC(name) _GLOBAL(name) + #define _KPROBE(name) \ .section ".kprobes.text","a"; \ .align 2 ; \ diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 596a285c0755..0860ee46013c 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -18,7 +18,7 @@ #endif .align 7 -_GLOBAL(__copy_tofrom_user) +_GLOBAL_TOC(__copy_tofrom_user) BEGIN_FTR_SECTION nop FTR_SECTION_ELSE diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 9d3960c16fde..bc9a2ca591c3 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -10,7 +10,7 @@ #include .align 7 -_GLOBAL(memcpy) +_GLOBAL_TOC(memcpy) BEGIN_FTR_SECTION std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ FTR_SECTION_ELSE -- cgit v1.2.3 From 5e66684fe4c71e4d62d6a5d313057185ac0890cc Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 4 Apr 2014 09:06:33 +1100 Subject: powerpc: ftrace_caller, _mcount is exported to modules so needs _GLOBAL_TOC() When testing the ftrace function tracer, I realised that ftrace_caller and mcount are called from modules and they both call into C, therefore they need the ABIv2 global entry point to establish r2. 
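The two instructions _GLOBAL_TOC() emits above form the ELFv2 global entry point: a caller that cannot guarantee r2, such as a module, arrives there with r12 holding the entry address and the function rebuilds its own TOC pointer; a same-TOC caller can instead branch to the local entry point recorded by .localentry, skipping that prologue. A C-side sketch of the choice, reusing the PPC64_LOCAL_ENTRY_OFFSET() macro from the module loader patch above (entry_for_call() itself is hypothetical):

	static unsigned long entry_for_call(unsigned long addr,
					    unsigned char st_other,
					    int caller_shares_toc)
	{
		/* The local entry point assumes a valid r2, which only
		 * a caller sharing the callee's TOC can provide; anyone
		 * else must use the global entry with r12 = addr. */
		return caller_shares_toc
			? addr + PPC64_LOCAL_ENTRY_OFFSET(st_other)
			: addr;
	}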
Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/entry_64.S | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index cf4f6e693437..9fde8a1bf1e1 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1175,7 +1175,7 @@ _GLOBAL(mcount) _GLOBAL(_mcount) blr -_GLOBAL(ftrace_caller) +_GLOBAL_TOC(ftrace_caller) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -1199,10 +1199,7 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr #else -_GLOBAL(mcount) - blr - -_GLOBAL(_mcount) +_GLOBAL_TOC(_mcount) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) -- cgit v1.2.3 From 47f86b4e07afd4652ab0b092cbf493bf8b96559e Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 3 Apr 2014 16:08:38 +1100 Subject: powerpc/kprobes: Fix ABIv2 issues with kprobe_lookup_name Use ppc_function_entry in places where we previously assumed function descriptors exist. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/kprobes.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 7b6feab6fd26..af15d4d8d604 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -30,6 +30,7 @@ #include #include #include +#include #define __ARCH_WANT_KPROBES_INSN_SLOT @@ -56,9 +57,9 @@ typedef ppc_opcode_t kprobe_opcode_t; if ((colon = strchr(name, ':')) != NULL) { \ colon++; \ if (*colon != '\0' && *colon != '.') \ - addr = *(kprobe_opcode_t **)addr; \ + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ } else if (name[0] != '.') \ - addr = *(kprobe_opcode_t **)addr; \ + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ } else { \ char dot_name[KSYM_NAME_LEN]; \ dot_name[0] = '.'; \ -- cgit v1.2.3 From 83775b85668a85036973c71264a959236e7becbd Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 3 Apr 2014 20:00:43 +1100 Subject: powerpc/modules: Create is_module_trampoline() ftrace has way too much knowledge of our kernel module trampoline layout hidden inside it. Create is_module_trampoline() that can abstract this away inside the module loader code. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/module.h | 1 + arch/powerpc/kernel/module_64.c | 51 +++++++++++++++++++++++++++++++++++---- 2 files changed, 47 insertions(+), 5 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index c9c7aaaf95f5..f2711f0eb873 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -78,6 +78,7 @@ struct mod_arch_specific { # endif /* MODULE */ #endif +bool is_module_trampoline(u32 *insns); struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 042360135260..4db5ecdc06e6 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -102,7 +103,9 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym) jump, actually, to reset r2 (TOC+0x8000). */ struct ppc64_stub_entry { - /* 28 byte jump instruction sequence (7 instructions) */ + /* 28 byte jump instruction sequence (7 instructions). 
We only + * need 6 instructions on ABIv2 but we always allocate 7 so + * so we don't have to modify the trampoline load instruction. */ u32 jump[7]; u32 unused; /* Data for the above code */ @@ -122,8 +125,8 @@ struct ppc64_stub_entry * end of the stub code, and patch the stub address (32-bits relative * to the TOC ptr, r2) into the stub. */ -static struct ppc64_stub_entry ppc64_stub = -{ .jump = { + +static u32 ppc64_stub_insns[] = { 0x3d620000, /* addis r11,r2, */ 0x396b0000, /* addi r11,r11, */ /* Save current r2 value in magic place on the stack. */ @@ -135,7 +138,45 @@ static struct ppc64_stub_entry ppc64_stub = #endif 0x7d8903a6, /* mtctr r12 */ 0x4e800420 /* bctr */ -} }; +}; + +#ifdef CONFIG_DYNAMIC_FTRACE + +static u32 ppc64_stub_mask[] = { + 0xffff0000, + 0xffff0000, + 0xffffffff, + 0xffffffff, +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + 0xffffffff, +#endif + 0xffffffff, + 0xffffffff +}; + +bool is_module_trampoline(u32 *p) +{ + unsigned int i; + u32 insns[ARRAY_SIZE(ppc64_stub_insns)]; + + BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask)); + + if (probe_kernel_read(insns, p, sizeof(insns))) + return -EFAULT; + + for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { + u32 insna = insns[i]; + u32 insnb = ppc64_stub_insns[i]; + u32 mask = ppc64_stub_mask[i]; + + if ((insna & mask) != (insnb & mask)) + return false; + } + + return true; +} + +#endif /* Count how many different 24-bit relocations (different symbol, different addend) */ @@ -350,7 +391,7 @@ static inline int create_stub(Elf64_Shdr *sechdrs, { long reladdr; - *entry = ppc64_stub; + memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); /* Stub uses address relative to r2. */ reladdr = (unsigned long)entry - my_r2(sechdrs, me); -- cgit v1.2.3 From dd9fa162505c07e1917c96a1a12ca117b1afe55a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 4 Apr 2014 15:58:42 +1100 Subject: powerpc/modules: Create module_trampoline_target() ftrace has way too much knowledge of our kernel module trampoline layout hidden inside it. Create module_trampoline_target() that gives the target address of a kernel module trampoline. Signed-off-by: Anton Blanchard --- arch/powerpc/include/asm/module.h | 2 ++ arch/powerpc/kernel/module_64.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) (limited to 'arch/powerpc') diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index f2711f0eb873..dcfcad139bcc 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -79,6 +79,8 @@ struct mod_arch_specific { #endif bool is_module_trampoline(u32 *insns); +int module_trampoline_target(struct module *mod, u32 *trampoline, + unsigned long *target); struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 4db5ecdc06e6..ef349d077129 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -176,6 +176,35 @@ bool is_module_trampoline(u32 *p) return true; } +int module_trampoline_target(struct module *mod, u32 *trampoline, + unsigned long *target) +{ + u32 buf[2]; + u16 upper, lower; + long offset; + void *toc_entry; + + if (probe_kernel_read(buf, trampoline, sizeof(buf))) + return -EFAULT; + + upper = buf[0] & 0xffff; + lower = buf[1] & 0xffff; + + /* perform the addis/addi, both signed */ + offset = ((short)upper << 16) + (short)lower; + + /* + * Now get the address this trampoline jumps to. 
This + * is always 32 bytes into our trampoline stub. + */ + toc_entry = (void *)mod->arch.toc + offset + 32; + + if (probe_kernel_read(target, toc_entry, sizeof(*target))) + return -EFAULT; + + return 0; +} + #endif /* Count how many different 24-bit relocations (different symbol, -- cgit v1.2.3 From 62c9da6a8b394eb9336a255fc23457202d6b9755 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 4 Apr 2014 16:52:58 +1100 Subject: powerpc/ftrace: Use module loader helpers to parse trampolines Now we have is_module_trampoline() and module_trampoline_target() we can remove a bunch of intimate kernel module trampoline knowledge from ftrace. Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/ftrace.c | 97 +++++++++----------------------------------- 1 file changed, 20 insertions(+), 77 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 6a014c763cc7..78cdd7fbecd0 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned int op; - unsigned int jmp[5]; unsigned long ptr; unsigned long ip = rec->ip; - unsigned long tramp; - int offset; + void *tramp; /* read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) @@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod, } /* lets find where the pointer goes */ - tramp = find_bl_target(ip, op); - - /* - * On PPC64 the trampoline looks like: - * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, - * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, - * Where the bytes 2,3,6 and 7 make up the 32bit offset - * to the TOC that holds the pointer. - * to jump to. - * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) - * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) - * The actually address is 32 bytes from the offset - * into the TOC. 
- * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) - */ + tramp = (void *)find_bl_target(ip, op); - pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); + pr_devel("ip:%lx jumps to %p", ip, tramp); - /* Find where the trampoline jumps to */ - if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { - printk(KERN_ERR "Failed to read %lx\n", tramp); - return -EFAULT; - } - - pr_devel(" %08x %08x", jmp[0], jmp[1]); - - /* verify that this is what we expect it to be */ - if (((jmp[0] & 0xffff0000) != 0x3d820000) || - ((jmp[1] & 0xffff0000) != 0x398c0000) || - (jmp[2] != 0xf8410028) || - (jmp[3] != 0xe96c0020) || - (jmp[4] != 0xe84c0028)) { + if (!is_module_trampoline(tramp)) { printk(KERN_ERR "Not a trampoline\n"); return -EINVAL; } - /* The bottom half is signed extended */ - offset = ((unsigned)((unsigned short)jmp[0]) << 16) + - (int)((short)jmp[1]); - - pr_devel(" %x ", offset); - - /* get the address this jumps too */ - tramp = mod->arch.toc + offset + 32; - pr_devel("toc: %lx", tramp); - - if (probe_kernel_read(jmp, (void *)tramp, 8)) { - printk(KERN_ERR "Failed to read %lx\n", tramp); + if (module_trampoline_target(mod, tramp, &ptr)) { + printk(KERN_ERR "Failed to get trampoline target\n"); return -EFAULT; } - pr_devel(" %08x %08x\n", jmp[0], jmp[1]); - -#ifdef __LITTLE_ENDIAN__ - ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; -#else - ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; -#endif + pr_devel("trampoline target %lx", ptr); /* This should match what was called */ if (ptr != ppc_function_entry((void *)addr)) { - printk(KERN_ERR "addr does not match %lx\n", ptr); + printk(KERN_ERR "addr %lx does not match expected %lx\n", + ptr, ppc_function_entry((void *)addr)); return -EINVAL; } /* - * We want to nop the line, but the next line is - * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) - * This needs to be turned to a nop too. - */ - if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) - return -EFAULT; - - if (op != 0xe8410028) { - printk(KERN_ERR "Next line is not ld! (%08x)\n", op); - return -EINVAL; - } - - /* - * Milton Miller pointed out that we can not blindly do nops. - * If a task was preempted when calling a trace function, - * the nops will remove the way to restore the TOC in r2 - * and the r2 TOC will get corrupted. - */ - - /* - * Replace: - * bl <==== will be replaced with "b 1f" - * ld r2,40(r1) - * 1: + * Our original call site looks like: + * + * bl + * ld r2,XX(r1) + * + * Milton Miller pointed out that we can not simply nop the branch. + * If a task was preempted when calling a trace function, the nops + * will remove the way to restore the TOC in r2 and the r2 TOC will + * get corrupted. + * + * Use a b +8 to jump over the load. */ op = 0x48000008; /* b +8 */ -- cgit v1.2.3 From 24a1bdc358bf3c533f7d575202e92aaca0f91761 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 4 Apr 2014 16:54:04 +1100 Subject: powerpc/ftrace: Fix ABIv2 issues with __ftrace_make_call __ftrace_make_call assumed ABIv1 TOC stack offsets, so it broke on ABIv2. While we are here, we can simplify the instruction modification code. Since we always update one instruction there is no need to probe_kernel_write and flush_icache_range, just use patch_branch. 
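patch_branch() bundles exactly the steps being hand-rolled before: encode the branch, store the single word, and flush it so instruction fetch observes the change. A simplified sketch of what the helper in arch/powerpc/lib/code-patching.c does (not its exact source):

	/* create_branch() returns 0 when the target is outside the
	 * +/-32MB reach of an I-form branch; a 4-byte aligned store
	 * of one instruction word is atomic with respect to fetch. */
	static int patch_branch_sketch(unsigned int *addr,
				       unsigned long target, int flags)
	{
		unsigned int insn = create_branch(addr, target, flags);

		if (!insn)
			return -ERANGE;
		*addr = insn;
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + 4);
		return 0;
	}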
Signed-off-by: Anton Blanchard --- arch/powerpc/kernel/ftrace.c | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 78cdd7fbecd0..f202d0731b06 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -292,19 +292,24 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int op[2]; - unsigned long ip = rec->ip; + void *ip = (void *)rec->ip; /* read where this goes */ - if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) + if (probe_kernel_read(op, ip, sizeof(op))) return -EFAULT; /* - * It should be pointing to two nops or - * b +8; ld r2,40(r1) + * We expect to see: + * + * b +8 + * ld r2,XX(r1) + * + * The load offset is different depending on the ABI. For simplicity + * just mask it out when doing the compare. */ - if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && - ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { - printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); + if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) { + printk(KERN_ERR "Unexpected call sequence: %x %x\n", + op[0], op[1]); return -EINVAL; } @@ -314,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* create the branch to the trampoline */ - op[0] = create_branch((unsigned int *)ip, - rec->arch.mod->arch.tramp, BRANCH_SET_LINK); - if (!op[0]) { - printk(KERN_ERR "REL24 out of range!\n"); + /* Ensure branch is within 24 bits */ + if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "Branch out of range\n"); return -EINVAL; } - /* ld r2,40(r1) */ - op[1] = 0xe8410028; - - pr_devel("write to %lx\n", rec->ip); - - if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) - return -EPERM; - - flush_icache_range(ip, ip + 8); + if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } return 0; } -- cgit v1.2.3 From 721aeaa9fdf35a672eef8ebdc4cd04bde38c3cea Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 10 Mar 2014 21:06:12 +1100 Subject: powerpc: Build little endian ppc64 kernel with ABIv2 Build the little endian ppc64 kernel with ABIv2 if the toolchain supports it. We can identify an ABIv2 capable toolchain by the -mabi=elfv2 compiler flag. Signed-off-by: Anton Blanchard --- arch/powerpc/Makefile | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/powerpc') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index e8dd01af504d..5ba603b2fb4a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -113,9 +113,13 @@ else endif endif -CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc -CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) -AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) +CFLAGS-$(CONFIG_PPC64) := -mtraceback=no +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,-mcall-aixdesc) +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) +else +CFLAGS-$(CONFIG_PPC64) += -mcall-aixdesc +endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) -- cgit v1.2.3
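A closing note on how the two ABIs coexist at the source level: the compiler advertises the ABI through the _CALL_ELF macro, which is what all of the #if tests in this series key off. A tiny illustrative check (not from the patches):

	/* _CALL_ELF is defined to 2 by an ELFv2 compiler (e.g. one
	 * invoked with -mabi=elfv2); ELFv1 compilers define it as 1
	 * or not at all, hence the defensive two-part test. */
	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define KERNEL_ABI_NAME	"ELFv2 (ABIv2)"
	#else
	#define KERNEL_ABI_NAME	"ELFv1"
	#endif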