author     Linus Torvalds <torvalds@linux-foundation.org>   2021-11-06 14:54:03 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-11-06 14:54:03 -0700
commit     00f178e15095fbcf04db00486378a6fa416a125e (patch)
tree       a66dd51d66f2737abac8a14eae2ce8a78828aa2c /arch/xtensa/kernel
parent     0b707e572a1955b892dfcb32e7b573fab78767d9 (diff)
parent     bd47cdb78997f83bd170c389ef59de9eec65976a (diff)
download   linux-00f178e15095fbcf04db00486378a6fa416a125e.tar.bz2
Merge tag 'xtensa-20211105' of git://github.com/jcmvbkbc/linux-xtensa
Pull xtensa updates from Max Filippov:
- add support for xtensa cores without windowed registers option (a sketch of the ABI helper macros this relies on follows the patch list below)
* tag 'xtensa-20211105' of git://github.com/jcmvbkbc/linux-xtensa:
xtensa: move section symbols to asm/sections.h
xtensa: remove unused variable wmask
xtensa: only build windowed register support code when needed
xtensa: use register window specific opcodes only when present
xtensa: implement call0 ABI support in assembly
xtensa: definitions for call0 ABI
xtensa: don't use a12 in __xtensa_copy_user in call0 ABI
xtensa: don't use a12 in strncpy_user
xtensa: use a14 instead of a15 in inline assembly
xtensa: move _SimulateUserKernelVectorException out of WindowVectors
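
The series makes most of the entry and exception code ABI-neutral by hiding call4/callx4 and the windowed argument registers behind assembler aliases (abi_call, abi_callx, abi_arg0..abi_arg5, abi_rv, abi_saved0/abi_saved1) and by prefixing windowed-only instructions with the UABI_W/KABI_W markers. The sketch below shows one way such aliases could be defined; the macro names are taken from the diff, but the definitions themselves are an assumption made for illustration, not a copy of the kernel's asm/asmmacro.h.

    /*
     * Illustrative sketch only: names match the diff below, definitions are
     * assumptions about how the helpers could be spelled for each kernel ABI.
     */
    #if defined(__XTENSA_WINDOWED_ABI__)

    #define abi_call     call4   /* call a C function */
    #define abi_callx    callx4  /* indirect call */
    #define abi_arg0     a6      /* first argument, as seen by the caller */
    #define abi_arg1     a7
    #define abi_rv       a6      /* return value, as seen by the caller */
    #define abi_saved0   a2      /* values that survive an abi_call */
    #define abi_saved1   a3
    #define KABI_W               /* kernel ABI is windowed: emit the insn */

    #elif defined(__XTENSA_CALL0_ABI__)

    #define abi_call     call0
    #define abi_callx    callx0
    #define abi_arg0     a2      /* call0 passes arguments in a2..a7 */
    #define abi_arg1     a3
    #define abi_rv       a2
    #define abi_saved0   a12     /* call0 callee-saved registers */
    #define abi_saved1   a13
    #define KABI_W       #       /* windowed-only insn: comment it out */

    #else
    #error Unsupported Xtensa ABI
    #endif

With aliases like these, kernel .S files can write "abi_call schedule" or "KABI_W movi a0, PS_WOE_MASK" once and assemble unchanged for either ABI, which is exactly the pattern visible throughout the entry.S hunks below.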
Diffstat (limited to 'arch/xtensa/kernel')
-rw-r--r--   arch/xtensa/kernel/align.S        |   2
-rw-r--r--   arch/xtensa/kernel/entry.S        | 216
-rw-r--r--   arch/xtensa/kernel/head.S         |  24
-rw-r--r--   arch/xtensa/kernel/mcount.S       |  38
-rw-r--r--   arch/xtensa/kernel/process.c      |  27
-rw-r--r--   arch/xtensa/kernel/setup.c        | 102
-rw-r--r--   arch/xtensa/kernel/signal.c       |  12
-rw-r--r--   arch/xtensa/kernel/traps.c        |   6
-rw-r--r--   arch/xtensa/kernel/vectors.S      |  55
-rw-r--r--   arch/xtensa/kernel/vmlinux.lds.S  |  12
10 files changed, 305 insertions, 189 deletions
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index 9301452e521e..d062c732ef18 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -58,7 +58,9 @@ * BE shift left / mask 0 0 X X */ +#if XCHAL_HAVE_WINDOWED #define UNALIGNED_USER_EXCEPTION +#endif #if XCHAL_HAVE_BE diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 647b162f959b..99ab3c1a3387 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -158,6 +158,7 @@ _user_exception: /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ +#if defined(USER_SUPPORT_WINDOWED) rsr a2, windowbase rsr a3, windowstart ssr a2 @@ -167,24 +168,33 @@ _user_exception: src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for restoring registers +#else + movi a2, 0 + movi a3, 1 + s32i a2, a1, PT_WINDOWBASE + s32i a3, a1, PT_WINDOWSTART + s32i a3, a1, PT_WMASK +#endif /* Save only live registers. */ - _bbsi.l a2, 1, 1f +UABI_W _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 - _bbsi.l a2, 2, 1f +UABI_W _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 - _bbsi.l a2, 3, 1f +UABI_W _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 + +#if defined(USER_SUPPORT_WINDOWED) _bnei a2, 1, 1f # only one valid frame? /* Only one valid frame, skip saving regs. */ @@ -239,7 +249,7 @@ _user_exception: rsync /* We are back to the original stack pointer (a1) */ - +#endif 2: /* Now, jump to the common exception handler. */ j common_exception @@ -295,6 +305,7 @@ _kernel_exception: s32i a3, a1, PT_SAR s32i a2, a1, PT_ICOUNTLEVEL +#if defined(__XTENSA_WINDOWED_ABI__) /* Rotate ws so that the current windowbase is at bit0. */ /* Assume ws = xxwww1yyyy. 
Rotate ws right, so that a2 = yyyyxxwww1 */ @@ -305,27 +316,28 @@ _kernel_exception: src a2, a3, a2 srli a2, a2, 32-WSBITS s32i a2, a1, PT_WMASK # needed for kernel_exception_exit +#endif /* Save only the live window-frame */ - _bbsi.l a2, 1, 1f +KABI_W _bbsi.l a2, 1, 1f s32i a4, a1, PT_AREG4 s32i a5, a1, PT_AREG5 s32i a6, a1, PT_AREG6 s32i a7, a1, PT_AREG7 - _bbsi.l a2, 2, 1f +KABI_W _bbsi.l a2, 2, 1f s32i a8, a1, PT_AREG8 s32i a9, a1, PT_AREG9 s32i a10, a1, PT_AREG10 s32i a11, a1, PT_AREG11 - _bbsi.l a2, 3, 1f +KABI_W _bbsi.l a2, 3, 1f s32i a12, a1, PT_AREG12 s32i a13, a1, PT_AREG13 s32i a14, a1, PT_AREG14 s32i a15, a1, PT_AREG15 +#ifdef __XTENSA_WINDOWED_ABI__ _bnei a2, 1, 1f - /* Copy spill slots of a0 and a1 to imitate movsp * in order to keep exception stack continuous */ @@ -333,6 +345,7 @@ _kernel_exception: l32i a0, a1, PT_SIZE + 4 s32e a3, a1, -16 s32e a0, a1, -12 +#endif 1: l32i a0, a1, PT_AREG0 # restore saved a0 wsr a0, depc @@ -419,16 +432,16 @@ common_exception: movi a3, LOCKLEVEL .Lexception: - movi a0, PS_WOE_MASK - or a3, a3, a0 +KABI_W movi a0, PS_WOE_MASK +KABI_W or a3, a3, a0 #else addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT movi a0, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt - movi a2, PS_WOE_MASK - or a3, a3, a2 +KABI_W movi a2, PS_WOE_MASK +KABI_W or a3, a3, a2 rsr a2, exccause #endif @@ -461,14 +474,14 @@ common_exception: */ rsr a4, excsave1 - mov a6, a1 # pass stack frame - mov a7, a2 # pass EXCCAUSE addx4 a4, a2, a4 l32i a4, a4, EXC_TABLE_DEFAULT # load handler + mov abi_arg1, a2 # pass EXCCAUSE + mov abi_arg0, a1 # pass stack frame /* Call the second-level handler */ - callx4 a4 + abi_callx a4 /* Jump here for exception exit */ .global common_exception_return @@ -482,15 +495,15 @@ common_exception_return: 1: irq_save a2, a3 #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_off + abi_call trace_hardirqs_off #endif /* Jump if we are returning from kernel exceptions. 
*/ - l32i a3, a1, PT_PS + l32i abi_saved1, a1, PT_PS GET_THREAD_INFO(a2, a1) l32i a4, a2, TI_FLAGS - _bbci.l a3, PS_UM_BIT, 6f + _bbci.l abi_saved1, PS_UM_BIT, 6f /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, @@ -509,20 +522,20 @@ common_exception_return: /* Call do_signal() */ #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_on + abi_call trace_hardirqs_on #endif rsil a2, 0 - mov a6, a1 - call4 do_notify_resume # int do_notify_resume(struct pt_regs*) + mov abi_arg0, a1 + abi_call do_notify_resume # int do_notify_resume(struct pt_regs*) j 1b 3: /* Reschedule */ #ifdef CONFIG_TRACE_IRQFLAGS - call4 trace_hardirqs_on + abi_call trace_hardirqs_on #endif rsil a2, 0 - call4 schedule # void schedule (void) + abi_call schedule # void schedule (void) j 1b #ifdef CONFIG_PREEMPTION @@ -533,33 +546,33 @@ common_exception_return: l32i a4, a2, TI_PRE_COUNT bnez a4, 4f - call4 preempt_schedule_irq + abi_call preempt_schedule_irq j 4f #endif #if XTENSA_FAKE_NMI .LNMIexit: - l32i a3, a1, PT_PS - _bbci.l a3, PS_UM_BIT, 4f + l32i abi_saved1, a1, PT_PS + _bbci.l abi_saved1, PS_UM_BIT, 4f #endif 5: #ifdef CONFIG_HAVE_HW_BREAKPOINT _bbci.l a4, TIF_DB_DISABLED, 7f - call4 restore_dbreak + abi_call restore_dbreak 7: #endif #ifdef CONFIG_DEBUG_TLB_SANITY l32i a4, a1, PT_DEPC bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f - call4 check_tlb_sanity + abi_call check_tlb_sanity #endif 6: 4: #ifdef CONFIG_TRACE_IRQFLAGS - extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH + extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH bgei a4, LOCKLEVEL, 1f - call4 trace_hardirqs_on + abi_call trace_hardirqs_on 1: #endif /* Restore optional registers. */ @@ -572,14 +585,15 @@ common_exception_return: l32i a2, a1, PT_SCOMPARE1 wsr a2, scompare1 #endif - wsr a3, ps /* disable interrupts */ + wsr abi_saved1, ps /* disable interrupts */ - _bbci.l a3, PS_UM_BIT, kernel_exception_exit + _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit user_exception_exit: /* Restore the state of the task and return from the exception. */ +#if defined(USER_SUPPORT_WINDOWED) /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ l32i a2, a1, PT_WINDOWBASE @@ -634,8 +648,10 @@ user_exception_exit: * frame where we had loaded a2), or at least the lower 4 bits * (if we have restored WSBITS-1 frames). */ - 2: +#else + movi a2, 1 +#endif #if XCHAL_HAVE_THREADPTR l32i a3, a1, PT_THREADPTR wur a3, threadptr @@ -650,6 +666,7 @@ user_exception_exit: kernel_exception_exit: +#if defined(__XTENSA_WINDOWED_ABI__) /* Check if we have to do a movsp. 
* * We only have to do a movsp if the previous window-frame has @@ -702,6 +719,9 @@ kernel_exception_exit: * * Note: We expect a2 to hold PT_WMASK */ +#else + movi a2, 1 +#endif common_exception_exit: @@ -920,14 +940,16 @@ unrecoverable_text: ENTRY(unrecoverable_exception) +#if XCHAL_HAVE_WINDOWED movi a0, 1 movi a1, 0 wsr a0, windowstart wsr a1, windowbase rsync +#endif - movi a1, PS_WOE_MASK | LOCKLEVEL + movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a1, ps rsync @@ -935,8 +957,8 @@ ENTRY(unrecoverable_exception) movi a0, 0 addi a1, a1, PT_REGS_OFFSET - movi a6, unrecoverable_text - call4 panic + movi abi_arg0, unrecoverable_text + abi_call panic 1: j 1b @@ -947,6 +969,7 @@ ENDPROC(unrecoverable_exception) __XTENSA_HANDLER .literal_position +#ifdef SUPPORT_WINDOWED /* * Fast-handler for alloca exceptions * @@ -1010,6 +1033,7 @@ ENTRY(fast_alloca) 8: j _WindowUnderflow8 4: j _WindowUnderflow4 ENDPROC(fast_alloca) +#endif #ifdef CONFIG_USER_ABI_CALL0_PROBE /* @@ -1206,7 +1230,8 @@ ENDPROC(fast_syscall_xtensa) * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. */ -#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS +#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \ + defined(USER_SUPPORT_WINDOWED) ENTRY(fast_syscall_spill_registers) @@ -1403,12 +1428,12 @@ ENTRY(fast_syscall_spill_registers) rsr a3, excsave1 l32i a1, a3, EXC_TABLE_KSTK - movi a4, PS_WOE_MASK | LOCKLEVEL + movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL wsr a4, ps rsync - movi a6, SIGSEGV - call4 do_exit + movi abi_arg0, SIGSEGV + abi_call do_exit /* shouldn't return, so panic */ @@ -1887,57 +1912,77 @@ ENDPROC(fast_store_prohibited) ENTRY(system_call) +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry(12) + + s32i a0, sp, 0 + s32i abi_saved0, sp, 4 + s32i abi_saved1, sp, 8 + mov abi_saved0, a2 +#else +#error Unsupported Xtensa ABI +#endif /* regs->syscall = regs->areg[2] */ - l32i a7, a2, PT_AREG2 - s32i a7, a2, PT_SYSCALL + l32i a7, abi_saved0, PT_AREG2 + s32i a7, abi_saved0, PT_SYSCALL GET_THREAD_INFO(a4, a1) - l32i a3, a4, TI_FLAGS + l32i abi_saved1, a4, TI_FLAGS movi a4, _TIF_WORK_MASK - and a3, a3, a4 - beqz a3, 1f + and abi_saved1, abi_saved1, a4 + beqz abi_saved1, 1f - mov a6, a2 - call4 do_syscall_trace_enter - beqz a6, .Lsyscall_exit - l32i a7, a2, PT_SYSCALL + mov abi_arg0, abi_saved0 + abi_call do_syscall_trace_enter + beqz abi_rv, .Lsyscall_exit + l32i a7, abi_saved0, PT_SYSCALL 1: /* syscall = sys_call_table[syscall_nr] */ movi a4, sys_call_table movi a5, __NR_syscalls - movi a6, -ENOSYS + movi abi_rv, -ENOSYS bgeu a7, a5, 1f addx4 a4, a7, a4 - l32i a4, a4, 0 + l32i abi_tmp0, a4, 0 /* Load args: arg0 - arg5 are passed via regs. 
*/ - l32i a6, a2, PT_AREG6 - l32i a7, a2, PT_AREG3 - l32i a8, a2, PT_AREG4 - l32i a9, a2, PT_AREG5 - l32i a10, a2, PT_AREG8 - l32i a11, a2, PT_AREG9 + l32i abi_arg0, abi_saved0, PT_AREG6 + l32i abi_arg1, abi_saved0, PT_AREG3 + l32i abi_arg2, abi_saved0, PT_AREG4 + l32i abi_arg3, abi_saved0, PT_AREG5 + l32i abi_arg4, abi_saved0, PT_AREG8 + l32i abi_arg5, abi_saved0, PT_AREG9 - callx4 a4 + abi_callx abi_tmp0 1: /* regs->areg[2] = return_value */ - s32i a6, a2, PT_AREG2 - bnez a3, 1f + s32i abi_rv, abi_saved0, PT_AREG2 + bnez abi_saved1, 1f .Lsyscall_exit: +#if defined(__XTENSA_WINDOWED_ABI__) abi_ret_default +#elif defined(__XTENSA_CALL0_ABI__) + l32i a0, sp, 0 + l32i abi_saved0, sp, 4 + l32i abi_saved1, sp, 8 + abi_ret(12) +#else +#error Unsupported Xtensa ABI +#endif 1: - mov a6, a2 - call4 do_syscall_trace_leave - abi_ret_default + mov abi_arg0, abi_saved0 + abi_call do_syscall_trace_leave + j .Lsyscall_exit ENDPROC(system_call) @@ -1988,8 +2033,18 @@ ENDPROC(system_call) ENTRY(_switch_to) +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry(XTENSA_SPILL_STACK_RESERVE) +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry(16) + s32i a12, sp, 0 + s32i a13, sp, 4 + s32i a14, sp, 8 + s32i a15, sp, 12 +#else +#error Unsupported Xtensa ABI +#endif mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO @@ -2033,7 +2088,9 @@ ENTRY(_switch_to) /* Flush register file. */ +#if defined(__XTENSA_WINDOWED_ABI__) spill_registers_kernel +#endif /* Set kernel stack (and leave critical section) * Note: It's save to set it here. The stack will not be overwritten @@ -2055,34 +2112,43 @@ ENTRY(_switch_to) wsr a14, ps rsync +#if defined(__XTENSA_WINDOWED_ABI__) abi_ret(XTENSA_SPILL_STACK_RESERVE) +#elif defined(__XTENSA_CALL0_ABI__) + l32i a12, sp, 0 + l32i a13, sp, 4 + l32i a14, sp, 8 + l32i a15, sp, 12 + abi_ret(16) +#else +#error Unsupported Xtensa ABI +#endif ENDPROC(_switch_to) ENTRY(ret_from_fork) /* void schedule_tail (struct task_struct *prev) - * Note: prev is still in a6 (return value from fake call4 frame) + * Note: prev is still in abi_arg0 (return value from fake call frame) */ - call4 schedule_tail - - mov a6, a1 - call4 do_syscall_trace_leave + abi_call schedule_tail - j common_exception_return + mov abi_arg0, a1 + abi_call do_syscall_trace_leave + j common_exception_return ENDPROC(ret_from_fork) /* * Kernel thread creation helper - * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg - * left from _switch_to: a6 = prev + * On entry, set up by copy_thread: abi_saved0 = thread_fn, + * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev */ ENTRY(ret_from_kernel_thread) - call4 schedule_tail - mov a6, a3 - callx4 a2 - j common_exception_return + abi_call schedule_tail + mov abi_arg0, abi_saved1 + abi_callx abi_saved0 + j common_exception_return ENDPROC(ret_from_kernel_thread) diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index b9b81e76beea..8484294bc623 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -15,6 +15,7 @@ * Kevin Chea */ +#include <asm/asmmacro.h> #include <asm/processor.h> #include <asm/page.h> #include <asm/cacheasm.h> @@ -66,11 +67,13 @@ _SetupOCD: * xt-gdb to single step via DEBUG exceptions received directly * by ocd. 
*/ +#if XCHAL_HAVE_WINDOWED movi a1, 1 movi a0, 0 wsr a1, windowstart wsr a0, windowbase rsync +#endif movi a1, LOCKLEVEL wsr a1, ps @@ -193,9 +196,10 @@ ENTRY(_startup) movi a1, start_info l32i a1, a1, 0 - movi a2, PS_WOE_MASK | LOCKLEVEL - # WOE=1, INTLEVEL=LOCKLEVEL, UM=0 - wsr a2, ps # (enable reg-windows; progmode stack) + /* Disable interrupts. */ + /* Enable window exceptions if kernel is built with windowed ABI. */ + movi a2, KERNEL_PS_WOE_MASK | LOCKLEVEL + wsr a2, ps rsync #ifdef CONFIG_SMP @@ -267,13 +271,13 @@ ENTRY(_startup) l32i a1, a1, 0 #endif - movi a6, 0 - xsr a6, excsave1 + movi abi_arg0, 0 + xsr abi_arg0, excsave1 /* init_arch kick-starts the linux kernel */ - call4 init_arch - call4 start_kernel + abi_call init_arch + abi_call start_kernel should_never_return: j should_never_return @@ -297,10 +301,10 @@ should_never_return: s32i a3, a2, 0 memw - movi a6, 0 - wsr a6, excsave1 + movi abi_arg0, 0 + wsr abi_arg0, excsave1 - call4 secondary_start_kernel + abi_call secondary_start_kernel j should_never_return #endif /* CONFIG_SMP */ diff --git a/arch/xtensa/kernel/mcount.S b/arch/xtensa/kernel/mcount.S index 5e4619f52858..51daaf4e0b82 100644 --- a/arch/xtensa/kernel/mcount.S +++ b/arch/xtensa/kernel/mcount.S @@ -17,11 +17,16 @@ /* * Entry condition: * - * a2: a0 of the caller + * a2: a0 of the caller in windowed ABI + * a10: a0 of the caller in call0 ABI + * + * In call0 ABI the function _mcount is called with the special ABI: + * its argument is in a10 and all the usual argument registers (a2 - a7) + * must be preserved in addition to callee-saved a12 - a15. */ ENTRY(_mcount) - +#if defined(__XTENSA_WINDOWED_ABI__) abi_entry_default movi a4, ftrace_trace_function @@ -42,7 +47,36 @@ ENTRY(_mcount) callx4 a4 abi_ret_default +#elif defined(__XTENSA_CALL0_ABI__) + abi_entry_default + + movi a9, ftrace_trace_function + l32i a9, a9, 0 + movi a11, ftrace_stub + bne a9, a11, 1f + abi_ret_default +1: abi_entry(28) + s32i a0, sp, 0 + s32i a2, sp, 4 + s32i a3, sp, 8 + s32i a4, sp, 12 + s32i a5, sp, 16 + s32i a6, sp, 20 + s32i a7, sp, 24 + addi a2, a10, -MCOUNT_INSN_SIZE + callx0 a9 + l32i a0, sp, 0 + l32i a2, sp, 4 + l32i a3, sp, 8 + l32i a4, sp, 12 + l32i a5, sp, 16 + l32i a6, sp, 20 + l32i a7, sp, 24 + abi_ret(28) +#else +#error Unsupported Xtensa ABI +#endif ENDPROC(_mcount) ENTRY(ftrace_stub) diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 47f933fed870..bd80df890b1e 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -211,11 +211,18 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, struct thread_info *ti; #endif +#if defined(__XTENSA_WINDOWED_ABI__) /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ SPILL_SLOT(childregs, 1) = (unsigned long)childregs; SPILL_SLOT(childregs, 0) = 0; p->thread.sp = (unsigned long)childregs; +#elif defined(__XTENSA_CALL0_ABI__) + /* Reserve 16 bytes for the _switch_to stack frame. */ + p->thread.sp = (unsigned long)childregs - 16; +#else +#error Unsupported Xtensa ABI +#endif if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { struct pt_regs *regs = current_pt_regs(); @@ -272,11 +279,25 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn, p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); - /* pass parameters to ret_from_kernel_thread: - * a2 = thread_fn, a3 = thread_fn arg + /* pass parameters to ret_from_kernel_thread: */ +#if defined(__XTENSA_WINDOWED_ABI__) + /* + * a2 = thread_fn, a3 = thread_fn arg. 
+ * Window underflow will load registers from the + * spill slots on the stack on return from _switch_to. */ - SPILL_SLOT(childregs, 3) = thread_fn_arg; SPILL_SLOT(childregs, 2) = usp_thread_fn; + SPILL_SLOT(childregs, 3) = thread_fn_arg; +#elif defined(__XTENSA_CALL0_ABI__) + /* + * a12 = thread_fn, a13 = thread_fn arg. + * _switch_to epilogue will load registers from the stack. + */ + ((unsigned long *)p->thread.sp)[0] = usp_thread_fn; + ((unsigned long *)p->thread.sp)[1] = thread_fn_arg; +#else +#error Unsupported Xtensa ABI +#endif /* Childregs are only used when we're going to userspace * in which case start_thread will set them up. diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index ee9082a142fe..8db20cfb44ab 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -37,14 +37,15 @@ #include <asm/bootparam.h> #include <asm/kasan.h> #include <asm/mmu_context.h> -#include <asm/processor.h> -#include <asm/timex.h> -#include <asm/platform.h> #include <asm/page.h> -#include <asm/setup.h> #include <asm/param.h> +#include <asm/platform.h> +#include <asm/processor.h> +#include <asm/sections.h> +#include <asm/setup.h> #include <asm/smp.h> #include <asm/sysmem.h> +#include <asm/timex.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) struct screen_info screen_info = { @@ -271,49 +272,6 @@ void __init init_arch(bp_tag_t *bp_start) * Initialize system. Setup memory and reserve regions. */ -extern char _end[]; -extern char _stext[]; -extern char _WindowVectors_text_start; -extern char _WindowVectors_text_end; -extern char _DebugInterruptVector_text_start; -extern char _DebugInterruptVector_text_end; -extern char _KernelExceptionVector_text_start; -extern char _KernelExceptionVector_text_end; -extern char _UserExceptionVector_text_start; -extern char _UserExceptionVector_text_end; -extern char _DoubleExceptionVector_text_start; -extern char _DoubleExceptionVector_text_end; -extern char _exception_text_start; -extern char _exception_text_end; -#if XCHAL_EXCM_LEVEL >= 2 -extern char _Level2InterruptVector_text_start; -extern char _Level2InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 3 -extern char _Level3InterruptVector_text_start; -extern char _Level3InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 4 -extern char _Level4InterruptVector_text_start; -extern char _Level4InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 5 -extern char _Level5InterruptVector_text_start; -extern char _Level5InterruptVector_text_end; -#endif -#if XCHAL_EXCM_LEVEL >= 6 -extern char _Level6InterruptVector_text_start; -extern char _Level6InterruptVector_text_end; -#endif -#ifdef CONFIG_SMP -extern char _SecondaryResetVector_text_start; -extern char _SecondaryResetVector_text_end; -#endif -#ifdef CONFIG_XIP_KERNEL -extern char _xip_start[]; -extern char _xip_end[]; -#endif - static inline int __init_memblock mem_reserve(unsigned long start, unsigned long end) { @@ -349,49 +307,51 @@ void __init setup_arch(char **cmdline_p) #endif #ifdef CONFIG_VECTORS_ADDR - mem_reserve(__pa(&_WindowVectors_text_start), - __pa(&_WindowVectors_text_end)); +#ifdef SUPPORT_WINDOWED + mem_reserve(__pa(_WindowVectors_text_start), + __pa(_WindowVectors_text_end)); +#endif - mem_reserve(__pa(&_DebugInterruptVector_text_start), - __pa(&_DebugInterruptVector_text_end)); + mem_reserve(__pa(_DebugInterruptVector_text_start), + __pa(_DebugInterruptVector_text_end)); - mem_reserve(__pa(&_KernelExceptionVector_text_start), - __pa(&_KernelExceptionVector_text_end)); + 
mem_reserve(__pa(_KernelExceptionVector_text_start), + __pa(_KernelExceptionVector_text_end)); - mem_reserve(__pa(&_UserExceptionVector_text_start), - __pa(&_UserExceptionVector_text_end)); + mem_reserve(__pa(_UserExceptionVector_text_start), + __pa(_UserExceptionVector_text_end)); - mem_reserve(__pa(&_DoubleExceptionVector_text_start), - __pa(&_DoubleExceptionVector_text_end)); + mem_reserve(__pa(_DoubleExceptionVector_text_start), + __pa(_DoubleExceptionVector_text_end)); - mem_reserve(__pa(&_exception_text_start), - __pa(&_exception_text_end)); + mem_reserve(__pa(_exception_text_start), + __pa(_exception_text_end)); #if XCHAL_EXCM_LEVEL >= 2 - mem_reserve(__pa(&_Level2InterruptVector_text_start), - __pa(&_Level2InterruptVector_text_end)); + mem_reserve(__pa(_Level2InterruptVector_text_start), + __pa(_Level2InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 3 - mem_reserve(__pa(&_Level3InterruptVector_text_start), - __pa(&_Level3InterruptVector_text_end)); + mem_reserve(__pa(_Level3InterruptVector_text_start), + __pa(_Level3InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 4 - mem_reserve(__pa(&_Level4InterruptVector_text_start), - __pa(&_Level4InterruptVector_text_end)); + mem_reserve(__pa(_Level4InterruptVector_text_start), + __pa(_Level4InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 5 - mem_reserve(__pa(&_Level5InterruptVector_text_start), - __pa(&_Level5InterruptVector_text_end)); + mem_reserve(__pa(_Level5InterruptVector_text_start), + __pa(_Level5InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 6 - mem_reserve(__pa(&_Level6InterruptVector_text_start), - __pa(&_Level6InterruptVector_text_end)); + mem_reserve(__pa(_Level6InterruptVector_text_start), + __pa(_Level6InterruptVector_text_end)); #endif #endif /* CONFIG_VECTORS_ADDR */ #ifdef CONFIG_SMP - mem_reserve(__pa(&_SecondaryResetVector_text_start), - __pa(&_SecondaryResetVector_text_end)); + mem_reserve(__pa(_SecondaryResetVector_text_start), + __pa(_SecondaryResetVector_text_end)); #endif parse_early_param(); bootmem_init(); diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index c4d77dbfb61a..f6c949895b3e 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -45,12 +45,13 @@ struct rt_sigframe unsigned int window[4]; }; -/* +#if defined(USER_SUPPORT_WINDOWED) +/* * Flush register windows stored in pt_regs to stack. * Returns 1 for errors. */ -int +static int flush_window_regs_user(struct pt_regs *regs) { const unsigned long ws = regs->windowstart; @@ -121,6 +122,13 @@ flush_window_regs_user(struct pt_regs *regs) errout: return err; } +#else +static int +flush_window_regs_user(struct pt_regs *regs) +{ + return 0; +} +#endif /* * Note: We don't copy double exception 'regs', we have to finish double exc. 
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 874b6efc6fb3..35a7d47f28cf 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -97,7 +97,9 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { /* EXCCAUSE_INSTRUCTION_FETCH unhandled */ /* EXCCAUSE_LOAD_STORE_ERROR unhandled*/ { EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, +#ifdef SUPPORT_WINDOWED { EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, +#endif /* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */ /* EXCCAUSE_PRIVILEGED unhandled */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION @@ -462,12 +464,10 @@ void secondary_trap_init(void) void show_regs(struct pt_regs * regs) { - int i, wmask; + int i; show_regs_print_info(KERN_DEFAULT); - wmask = regs->wmask & ~1; - for (i = 0; i < 16; i++) { if ((i % 8) == 0) pr_info("a%02d:", i); diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 1a7538ccfc5a..407ece204e7c 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -226,6 +226,7 @@ ENTRY(_DoubleExceptionVector) xsr a0, depc # get DEPC, save a0 +#ifdef SUPPORT_WINDOWED movi a2, WINDOW_VECTORS_VADDR _bltu a0, a2, .Lfixup addi a2, a2, WINDOW_VECTORS_SIZE @@ -275,6 +276,10 @@ _DoubleExceptionVector_WindowUnderflow: l32i a0, a0, EXC_TABLE_FAST_USER jx a0 +#else + j .Lfixup +#endif + /* * We only allow the ITLB miss exception if we are in kernel space. * All other exceptions are unexpected and thus unrecoverable! @@ -343,6 +348,7 @@ _DoubleExceptionVector_WindowUnderflow: l32i a0, a0, EXC_TABLE_FAST_USER jx a0 +#ifdef SUPPORT_WINDOWED /* * Restart window OVERFLOW exception. * Currently: @@ -475,9 +481,12 @@ _DoubleExceptionVector_handle_exception: rsr a0, depc rotw -3 j 1b +#endif ENDPROC(_DoubleExceptionVector) +#ifdef SUPPORT_WINDOWED + /* * Fixup handler for TLB miss in double exception handler for window owerflow. * We get here with windowbase set to the window that was being spilled and @@ -590,6 +599,8 @@ ENTRY(window_overflow_restore_a0_fixup) ENDPROC(window_overflow_restore_a0_fixup) +#endif + /* * Debug interrupt vector * @@ -650,6 +661,25 @@ ENTRY(_Level\level\()InterruptVector) irq_entry_level 5 irq_entry_level 6 +#if XCHAL_EXCM_LEVEL >= 2 + /* + * Continuation of medium priority interrupt dispatch code. + * On entry here, a0 contains PS, and EPC2 contains saved a0: + */ + __XTENSA_HANDLER + .align 4 +_SimulateUserKernelVectorException: + addi a0, a0, (1 << PS_EXCM_BIT) +#if !XTENSA_FAKE_NMI + wsr a0, ps +#endif + bbsi.l a0, PS_UM_BIT, 1f # branch if user mode + xsr a0, excsave2 # restore a0 + j _KernelExceptionVector # simulate kernel vector exception +1: xsr a0, excsave2 # restore a0 + j _UserExceptionVector # simulate user vector exception +#endif + /* Window overflow and underflow handlers. * The handlers must be 64 bytes apart, first starting with the underflow @@ -668,6 +698,8 @@ ENTRY(_Level\level\()InterruptVector) .section .WindowVectors.text, "ax" +#ifdef SUPPORT_WINDOWED + /* 4-Register Window Overflow Vector (Handler) */ ENTRY_ALIGN64(_WindowOverflow4) @@ -680,27 +712,6 @@ ENTRY_ALIGN64(_WindowOverflow4) ENDPROC(_WindowOverflow4) - -#if XCHAL_EXCM_LEVEL >= 2 - /* Not a window vector - but a convenient location - * (where we know there's space) for continuation of - * medium priority interrupt dispatch code. 
- * On entry here, a0 contains PS, and EPC2 contains saved a0: - */ - .align 4 -_SimulateUserKernelVectorException: - addi a0, a0, (1 << PS_EXCM_BIT) -#if !XTENSA_FAKE_NMI - wsr a0, ps -#endif - bbsi.l a0, PS_UM_BIT, 1f # branch if user mode - xsr a0, excsave2 # restore a0 - j _KernelExceptionVector # simulate kernel vector exception -1: xsr a0, excsave2 # restore a0 - j _UserExceptionVector # simulate user vector exception -#endif - - /* 4-Register Window Underflow Vector (Handler) */ ENTRY_ALIGN64(_WindowUnderflow4) @@ -789,4 +800,6 @@ ENTRY_ALIGN64(_WindowUnderflow12) ENDPROC(_WindowUnderflow12) +#endif + .text diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index d23a6e38f062..eee270a039a4 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -94,7 +94,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); _vecbase = .; +#ifdef SUPPORT_WINDOWED SECTION_VECTOR2 (.WindowVectors.text, WINDOW_VECTORS_VADDR) +#endif #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR2 (.Level2InterruptVector.text, INTLEVEL2_VECTOR_VADDR) #endif @@ -166,8 +168,10 @@ SECTIONS __boot_reloc_table_start = ABSOLUTE(.); #if !MERGED_VECTORS +#ifdef SUPPORT_WINDOWED RELOCATE_ENTRY(_WindowVectors_text, .WindowVectors.text); +#endif #if XCHAL_EXCM_LEVEL >= 2 RELOCATE_ENTRY(_Level2InterruptVector_text, .Level2InterruptVector.text); @@ -229,14 +233,18 @@ SECTIONS #if !MERGED_VECTORS /* The vectors are relocated to the real position at startup time */ +#ifdef SUPPORT_WINDOWED SECTION_VECTOR4 (_WindowVectors_text, .WindowVectors.text, WINDOW_VECTORS_VADDR, - .dummy) + LAST) +#undef LAST +#define LAST .WindowVectors.text +#endif SECTION_VECTOR4 (_DebugInterruptVector_text, .DebugInterruptVector.text, DEBUG_VECTOR_VADDR, - .WindowVectors.text) + LAST) #undef LAST #define LAST .DebugInterruptVector.text #if XCHAL_EXCM_LEVEL >= 2 |
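
The subtlest interaction in the series is how kernel threads are bootstrapped under the call0 ABI: the copy_thread() hunk above reserves a 16-byte frame below childregs and plants thread_fn and its argument where the _switch_to() epilogue will reload a12/a13, so ret_from_kernel_thread finds them in abi_saved0/abi_saved1. The condensed C sketch below restates that arrangement for the kernel-thread case; the helper function name is hypothetical, while the field accesses mirror the hunk above.

    /* Hypothetical helper condensing the copy_thread() logic shown above. */
    static void setup_kernel_thread_frame(struct task_struct *p,
                                          struct pt_regs *childregs,
                                          unsigned long usp_thread_fn,
                                          unsigned long thread_fn_arg)
    {
    #if defined(__XTENSA_WINDOWED_ABI__)
            /* Fake call4 frame: window underflow on return from _switch_to()
             * reloads a2 (thread_fn) and a3 (its argument) from the spill
             * slots below childregs. */
            SPILL_SLOT(childregs, 0) = 0;
            SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
            SPILL_SLOT(childregs, 2) = usp_thread_fn;
            SPILL_SLOT(childregs, 3) = thread_fn_arg;
            p->thread.sp = (unsigned long)childregs;
    #elif defined(__XTENSA_CALL0_ABI__)
            /* Reserve the 16-byte frame popped by _switch_to(); its epilogue
             * reloads a12/a13 from here, i.e. abi_saved0/abi_saved1 by the
             * time ret_from_kernel_thread runs. */
            p->thread.sp = (unsigned long)childregs - 16;
            ((unsigned long *)p->thread.sp)[0] = usp_thread_fn;  /* -> a12 */
            ((unsigned long *)p->thread.sp)[1] = thread_fn_arg;  /* -> a13 */
    #else
    #error Unsupported Xtensa ABI
    #endif
    }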