author    Linus Torvalds <torvalds@linux-foundation.org>  2022-03-23 17:35:57 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-03-23 17:35:57 -0700
commit    9c0e6a89b592f4c4e4d769dbc22d399ab0685159 (patch)
tree      54865d08ede844e868b3403670a9a91ad24bba82 /arch/arm/include
parent    e6aef3496a00a12e78a571f61d98300cf0a86e6a (diff)
parent    234a0f202a09a6144fd3c17ac6d018bdab9780bb (diff)
download  linux-9c0e6a89b592f4c4e4d769dbc22d399ab0685159.tar.bz2
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "Updates for IRQ stacks and virtually mapped stack support, and ftrace:

   - Support for IRQ and vmap'ed stacks

     This covers all the work related to implementing IRQ stacks and
     vmap'ed stacks for all 32-bit ARM systems that are currently
     supported by the Linux kernel, including RiscPC and Footbridge.
     It has been submitted for review in four different waves:

      - IRQ stacks support for v7 SMP systems [0]

      - vmap'ed stacks support for v7 SMP systems [1]

      - extending support for both IRQ stacks and vmap'ed stacks for
        all remaining configurations, including v6/v7 SMP multiplatform
        kernels and uniprocessor configurations including v7-M [2]

      - fixes and updates in [3]

   - ftrace fixes and cleanups

     Make all flavors of ftrace available on all builds, regardless of
     ISA choice, unwinder choice or compiler [4]:

      - use ADD not POP where possible
      - fix a couple of Thumb2 related issues
      - enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness
      - enable the graph tracer with the EABI unwinder
      - avoid clobbering frame pointer registers to make Clang happy

   - Fixes for the above"

[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: fix building NOMMU ARMv4/v5 kernels
  ARM: unwind: only permit stack switch when unwinding call_with_stack()
  ARM: Revert "unwind: dump exception stack from calling frame"
  ARM: entry: fix unwinder problems caused by IRQ stacks
  ARM: unwind: set frame.pc correctly for current-thread unwinding
  ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
  ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
  Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
  ARM: mach-bcm: disable ftrace in SMC invocation routines
  ARM: cacheflush: avoid clobbering the frame pointer
  ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
  ARM: ftrace: enable the graph tracer with the EABI unwinder
  ARM: unwind: track location of LR value in stack frame
  ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
  ARM: ftrace: avoid unnecessary literal loads
  ARM: ftrace: avoid redundant loads or clobbering IP
  ARM: ftrace: use trampolines to keep .init.text in branching range
  ARM: ftrace: use ADD not POP to counter PUSH at entry
  ARM: ftrace: ensure that ADR takes the Thumb bit into account
  ARM: make get_current() and __my_cpu_offset() __always_inline
  ...

Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h                  | 204
-rw-r--r--  arch/arm/include/asm/cacheflush.h                 |  12
-rw-r--r--  arch/arm/include/asm/current.h                    |  46
-rw-r--r--  arch/arm/include/asm/elf.h                        |   3
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S          |  40
-rw-r--r--  arch/arm/include/asm/ftrace.h                     |   4
-rw-r--r--  arch/arm/include/asm/hardware/entry-macro-iomd.S  | 131
-rw-r--r--  arch/arm/include/asm/insn.h                       |  17
-rw-r--r--  arch/arm/include/asm/irq.h                        |   1
-rw-r--r--  arch/arm/include/asm/mach/arch.h                  |   2
-rw-r--r--  arch/arm/include/asm/mmu.h                        |   2
-rw-r--r--  arch/arm/include/asm/mmu_context.h                |  22
-rw-r--r--  arch/arm/include/asm/page.h                       |   3
-rw-r--r--  arch/arm/include/asm/percpu.h                     |  35
-rw-r--r--  arch/arm/include/asm/smp.h                        |   5
-rw-r--r--  arch/arm/include/asm/stacktrace.h                 |   5
-rw-r--r--  arch/arm/include/asm/switch_to.h                  |   3
-rw-r--r--  arch/arm/include/asm/thread_info.h                |  35
-rw-r--r--  arch/arm/include/asm/tls.h                        |  31
-rw-r--r--  arch/arm/include/asm/v7m.h                        |   3
20 files changed, 319 insertions(+), 285 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index aee73ef5b3dc..34fe8d2dd5d1 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -86,6 +86,10 @@
#define IMM12_MASK 0xfff
+/* the frame pointer used for stack unwinding */
+ARM( fpreg .req r11 )
+THUMB( fpreg .req r7 )
+
/*
* Enable and disable interrupts
*/
@@ -209,43 +213,12 @@
.endm
.endr
- .macro get_current, rd
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
- mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
-#else
- get_thread_info \rd
- ldr \rd, [\rd, #TI_TASK]
-#endif
- .endm
-
- .macro set_current, rn
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
- mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
-#endif
- .endm
-
- .macro reload_current, t1:req, t2:req
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
- adr_l \t1, __entry_task @ get __entry_task base address
- mrc p15, 0, \t2, c13, c0, 4 @ get per-CPU offset
- ldr \t1, [\t1, \t2] @ load variable
- mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
-#endif
- .endm
-
/*
* Get current thread_info.
*/
.macro get_thread_info, rd
-#ifdef CONFIG_THREAD_INFO_IN_TASK
/* thread_info is the first member of struct task_struct */
get_current \rd
-#else
- ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
- THUMB( mov \rd, sp )
- THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
- mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
-#endif
.endm
/*
@@ -320,6 +293,80 @@
#define ALT_UP_B(label) b label
#endif
+ /*
+ * this_cpu_offset - load the per-CPU offset of this CPU into
+ * register 'rd'
+ */
+ .macro this_cpu_offset, rd:req
+#ifdef CONFIG_SMP
+ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L1_\@)
+.L0_\@:
+ .subsection 1
+.L1_\@: ldr_va \rd, __per_cpu_offset
+ b .L0_\@
+ .previous
+#endif
+#else
+ mov \rd, #0
+#endif
+ .endm
+
+ /*
+ * set_current - store the task pointer of this CPU's current task
+ */
+ .macro set_current, rn:req, tmp:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mcr p15, 0, \rn, c13, c0, 3 @ set TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: str_va \rn, __current, \tmp
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ str_va \rn, __current, \tmp
+#endif
+ .endm
+
+ /*
+ * get_current - load the task pointer of this CPU's current task
+ */
+ .macro get_current, rd:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+9998: mrc p15, 0, \rd, c13, c0, 3 @ get TPIDRURO register
+#ifdef CONFIG_CPU_V6
+ALT_UP_B(.L0_\@)
+ .subsection 1
+.L0_\@: ldr_va \rd, __current
+ b .L1_\@
+ .previous
+.L1_\@:
+#endif
+#else
+ ldr_va \rd, __current
+#endif
+ .endm
+
+ /*
+ * reload_current - reload the task pointer of this CPU's current task
+ * into the TLS register
+ */
+ .macro reload_current, t1:req, t2:req
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+#endif
+ ldr_this_cpu \t1, __entry_task, \t1, \t2
+ mcr p15, 0, \t1, c13, c0, 3 @ store in TPIDRURO
+.L0_\@:
+#endif
+ .endm
+
/*
* Instruction barrier
*/
@@ -576,12 +623,12 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
/*
* mov_l - move a constant value or [relocated] address into a register
*/
- .macro mov_l, dst:req, imm:req
+ .macro mov_l, dst:req, imm:req, cond
.if __LINUX_ARM_ARCH__ < 7
- ldr \dst, =\imm
+ ldr\cond \dst, =\imm
.else
- movw \dst, #:lower16:\imm
- movt \dst, #:upper16:\imm
+ movw\cond \dst, #:lower16:\imm
+ movt\cond \dst, #:upper16:\imm
.endif
.endm
@@ -619,6 +666,78 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
__adldst_l str, \src, \sym, \tmp, \cond
.endm
+ .macro __ldst_va, op, reg, tmp, sym, cond
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ mov_l \tmp, \sym, \cond
+ \op\cond \reg, [\tmp]
+#else
+ /*
+ * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
+ * with the appropriate relocations. The combined sequence has a range
+ * of -/+ 256 MiB, which should be sufficient for the core kernel and
+ * for modules loaded into the module region.
+ */
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+.L0_\@: sub\cond \tmp, pc, #8
+.L1_\@: sub\cond \tmp, \tmp, #4
+.L2_\@: \op\cond \reg, [\tmp, #0]
+#endif
+ .endm
+
+ /*
+ * ldr_va - load a 32-bit word from the virtual address of \sym
+ */
+ .macro ldr_va, rd:req, sym:req, cond
+ __ldst_va ldr, \rd, \rd, \sym, \cond
+ .endm
+
+ /*
+ * str_va - store a 32-bit word to the virtual address of \sym
+ */
+ .macro str_va, rn:req, sym:req, tmp:req, cond
+ __ldst_va str, \rn, \tmp, \sym, \cond
+ .endm
+
+ /*
+ * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
+ * without using a temp register. Supported in ARM mode
+ * only.
+ */
+ .macro ldr_this_cpu_armv6, rd:req, sym:req
+ this_cpu_offset \rd
+ .globl \sym
+ .reloc .L0_\@, R_ARM_ALU_PC_G0_NC, \sym
+ .reloc .L1_\@, R_ARM_ALU_PC_G1_NC, \sym
+ .reloc .L2_\@, R_ARM_LDR_PC_G2, \sym
+ add \rd, \rd, pc
+.L0_\@: sub \rd, \rd, #4
+.L1_\@: sub \rd, \rd, #0
+.L2_\@: ldr \rd, [\rd, #4]
+ .endm
+
+ /*
+ * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
+ * into register 'rd', which may be the stack pointer,
+ * using 't1' and 't2' as general temp registers. These
+ * are permitted to overlap with 'rd' if != sp
+ */
+ .macro ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
+#if __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ this_cpu_offset \t1
+ mov_l \t2, \sym
+ ldr \rd, [\t1, \t2]
+#else
+ ldr_this_cpu_armv6 \rd, \sym
+#endif
+ .endm
+
/*
* rev_l - byte-swap a 32-bit value
*
@@ -636,4 +755,19 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.endif
.endm
+ /*
+ * bl_r - branch and link to register
+ *
+ * @dst: target to branch to
+ * @c: conditional opcode suffix
+ */
+ .macro bl_r, dst:req, c
+ .if __LINUX_ARM_ARCH__ < 6
+ mov\c lr, pc
+ mov\c pc, \dst
+ .else
+ blx\c \dst
+ .endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
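
On v7, the new get_current/set_current macros above keep the task pointer in
the TPIDRURO register (CP15 c13/c0/3), falling back to the __current variable
on SMP-on-UP v6. A minimal C sketch of that fast path, assuming an ARMv7 build
where TPIDRURO is reserved for the kernel (illustrative only; the kernel
itself uses the assembler macros above):

    /* Sketch of the TPIDRURO fast path implemented by the macros above. */
    struct task_struct;

    static inline struct task_struct *sketch_get_current(void)
    {
            struct task_struct *cur;

            /* mrc p15, 0, <Rt>, c13, c0, 3 : read TPIDRURO */
            asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (cur));
            return cur;
    }

    static inline void sketch_set_current(struct task_struct *cur)
    {
            /* the "memory" clobber orders the write against other accesses */
            asm volatile("mcr p15, 0, %0, c13, c0, 3" :: "r" (cur) : "memory");
    }
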
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 5e56288e343b..a094f964c869 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -445,15 +445,10 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
* however some exceptions may exist. Caveat emptor.
*
* - The clobber list is dictated by the call to v7_flush_dcache_*.
- * fp is preserved to the stack explicitly prior disabling the cache
- * since adding it to the clobber list is incompatible with having
- * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
- * trampoline are inserted by the linker and to keep sp 64-bit aligned.
*/
#define v7_exit_coherency_flush(level) \
asm volatile( \
".arch armv7-a \n\t" \
- "stmfd sp!, {fp, ip} \n\t" \
"mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
"bic r0, r0, #"__stringify(CR_C)" \n\t" \
"mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
@@ -463,10 +458,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
"bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
"mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
"isb \n\t" \
- "dsb \n\t" \
- "ldmfd sp!, {fp, ip}" \
- : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
- "r9","r10","lr","memory" )
+ "dsb" \
+ : : : "r0","r1","r2","r3","r4","r5","r6", \
+ "r9","r10","ip","lr","memory" )
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
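
The v7_exit_coherency_flush() rework above replaces the explicit stmfd/ldmfd
of fp and ip with an ip entry in the clobber list; fp stays off the list
because clobbering the frame pointer register is incompatible with
CONFIG_FRAME_POINTER=y. A reduced sketch of the constraint shape (hypothetical
helper name; the mrc/bic/mcr sequence comes from the macro above, the rest of
the flush is omitted):

    /* Sketch: let the compiler save ip (r12) via the clobber list rather
     * than saving it by hand; never name fp as clobbered. Illustrative
     * subset of v7_exit_coherency_flush() only. */
    static inline void sctlr_clear_c_bit_sketch(void)
    {
            asm volatile(
                    "mrc p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t"
                    "bic r0, r0, #(1 << 2)       @ clear the C bit \n\t"
                    "mcr p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t"
                    "isb"
                    : : : "r0", "ip", "memory");
    }
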
diff --git a/arch/arm/include/asm/current.h b/arch/arm/include/asm/current.h
index 6bf0aad672c3..1e1178bf176d 100644
--- a/arch/arm/include/asm/current.h
+++ b/arch/arm/include/asm/current.h
@@ -8,25 +8,18 @@
#define _ASM_ARM_CURRENT_H
#ifndef __ASSEMBLY__
+#include <asm/insn.h>
struct task_struct;
-static inline void set_current(struct task_struct *cur)
-{
- if (!IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO))
- return;
-
- /* Set TPIDRURO */
- asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
-}
-
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+extern struct task_struct *__current;
-static inline struct task_struct *get_current(void)
+static __always_inline __attribute_const__ struct task_struct *get_current(void)
{
struct task_struct *cur;
#if __has_builtin(__builtin_thread_pointer) && \
+ defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) && \
!(defined(CONFIG_THUMB2_KERNEL) && \
defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 130001)
/*
@@ -39,16 +32,39 @@ static inline struct task_struct *get_current(void)
* https://github.com/ClangBuiltLinux/linux/issues/1485
*/
cur = __builtin_thread_pointer();
+#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+ asm("0: mrc p15, 0, %0, c13, c0, 3 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __current) " \n\t"
+ " b 1b \n\t"
#else
- asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur));
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __current \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r"(cur));
+#elif __LINUX_ARM_ARCH__ >= 7 || \
+ !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
+ (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ cur = __current;
+#else
+ asm(LOAD_SYM_ARMV6(%0, __current) : "=r"(cur));
#endif
return cur;
}
#define current get_current()
-#else
-#include <asm-generic/current.h>
-#endif /* CONFIG_CURRENT_POINTER_IN_TPIDRURO */
#endif /* __ASSEMBLY__ */
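
When the first branch above is taken, current is read through
__builtin_thread_pointer() instead of hand-written inline asm, which keeps the
register read visible to the optimizer. A hedged sketch, assuming a toolchain
where the builtin lowers to an MRC from TPIDRURO (per the version checks
above):

    /* Because the builtin is not an opaque asm block, consecutive uses of
     * current can share a single TPIDRURO read after CSE. */
    static __always_inline struct task_struct *cur_from_builtin(void)
    {
            return (struct task_struct *)__builtin_thread_pointer();
    }
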
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index b8102a6ddf16..d68101655b74 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -61,6 +61,9 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_MOVT_ABS 44
#define R_ARM_MOVW_PREL_NC 45
#define R_ARM_MOVT_PREL 46
+#define R_ARM_ALU_PC_G0_NC 57
+#define R_ARM_ALU_PC_G1_NC 59
+#define R_ARM_LDR_PC_G2 63
#define R_ARM_THM_CALL 10
#define R_ARM_THM_JUMP24 30
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
deleted file mode 100644
index dfc6bfa43012..000000000000
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm/assembler.h>
-
-/*
- * Interrupt handling. Preserves r7, r8, r9
- */
- .macro arch_irq_handler_default
- get_irqnr_preamble r6, lr
-1: get_irqnr_and_base r0, r2, r6, lr
- movne r1, sp
- @
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- @
- badrne lr, 1b
- bne asm_do_IRQ
-
-#ifdef CONFIG_SMP
- /*
- * XXX
- *
- * this macro assumes that irqstat (r2) and base (r6) are
- * preserved from get_irqnr_and_base above
- */
- ALT_SMP(test_for_ipi r0, r2, r6, lr)
- ALT_UP_B(9997f)
- movne r1, sp
- badrne lr, 1b
- bne do_IPI
-#endif
-9997:
- .endm
-
- .macro arch_irq_handler, symbol_name
- .align 5
- .global \symbol_name
-\symbol_name:
- mov r8, lr
- arch_irq_handler_default
- ret r8
- .endm
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index a4dbac07e4ef..7e9251ca29fe 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -2,6 +2,8 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
@@ -48,7 +50,7 @@ void *return_address(unsigned int);
static inline void *return_address(unsigned int level)
{
- return NULL;
+ return NULL;
}
#endif
diff --git a/arch/arm/include/asm/hardware/entry-macro-iomd.S b/arch/arm/include/asm/hardware/entry-macro-iomd.S
deleted file mode 100644
index f7692731e514..000000000000
--- a/arch/arm/include/asm/hardware/entry-macro-iomd.S
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-iomd.S
- *
- * Low-level IRQ helper macros for IOC/IOMD based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-/* IOC / IOMD based hardware */
-#include <asm/hardware/iomd.h>
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldrb \irqstat, [\base, #IOMD_IRQREQB] @ get high priority first
- ldr \tmp, =irq_prio_h
- teq \irqstat, #0
-#ifdef IOMD_BASE
- ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma
- addeq \tmp, \tmp, #256 @ irq_prio_h table size
- teqeq \irqstat, #0
- bne 2406f
-#endif
- ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority
- addeq \tmp, \tmp, #256 @ irq_prio_d table size
- teqeq \irqstat, #0
-#ifdef IOMD_IRQREQC
- ldrbeq \irqstat, [\base, #IOMD_IRQREQC]
- addeq \tmp, \tmp, #256 @ irq_prio_l table size
- teqeq \irqstat, #0
-#endif
-#ifdef IOMD_IRQREQD
- ldrbeq \irqstat, [\base, #IOMD_IRQREQD]
- addeq \tmp, \tmp, #256 @ irq_prio_lc table size
- teqeq \irqstat, #0
-#endif
-2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number
- .endm
-
-/*
- * Interrupt table (incorporates priority). Please note that we
- * rely on the order of these tables (see above code).
- */
- .align 5
-irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
- .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
-#ifdef IOMD_BASE
-irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
- .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
-#endif
-irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
-#ifdef IOMD_IRQREQC
-irq_prio_lc: .byte 24,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 28,24,25,24,26,26,26,26,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 30,30,30,30,30,30,30,30,27,27,27,27,27,27,27,27
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
- .byte 31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31
-#endif
-#ifdef IOMD_IRQREQD
-irq_prio_ld: .byte 40,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 44,40,41,40,42,42,42,42,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 46,46,46,46,46,46,46,46,43,43,43,43,43,43,43,43
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
- .byte 47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47
-#endif
-
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
index 5475cbf9fb6b..faf3d1c28368 100644
--- a/arch/arm/include/asm/insn.h
+++ b/arch/arm/include/asm/insn.h
@@ -2,6 +2,23 @@
#ifndef __ASM_ARM_INSN_H
#define __ASM_ARM_INSN_H
+#include <linux/types.h>
+
+/*
+ * Avoid a literal load by emitting a sequence of ADD/LDR instructions with the
+ * appropriate relocations. The combined sequence has a range of -/+ 256 MiB,
+ * which should be sufficient for the core kernel as well as modules loaded
+ * into the module region. (Not supported by LLD before release 14)
+ */
+#define LOAD_SYM_ARMV6(reg, sym) \
+ " .globl " #sym " \n\t" \
+ " .reloc 10f, R_ARM_ALU_PC_G0_NC, " #sym " \n\t" \
+ " .reloc 11f, R_ARM_ALU_PC_G1_NC, " #sym " \n\t" \
+ " .reloc 12f, R_ARM_LDR_PC_G2, " #sym " \n\t" \
+ "10: sub " #reg ", pc, #8 \n\t" \
+ "11: sub " #reg ", " #reg ", #4 \n\t" \
+ "12: ldr " #reg ", [" #reg ", #0] \n\t"
+
static inline unsigned long
arm_gen_nop(void)
{
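
LOAD_SYM_ARMV6() is the inline-asm counterpart of the ldr_va assembler macro
added in asm/assembler.h above; the three .reloc directives hand the
SUB/SUB/LDR triplet to the linker for fix-up. Its use from C mirrors the
get_current() ARMv6 fallback in asm/current.h:

    /* Usage sketch, as in asm/current.h above: load a kernel variable on
     * ARMv6 without emitting a literal pool entry. */
    struct task_struct;
    extern struct task_struct *__current;

    static inline struct task_struct *current_armv6_sketch(void)
    {
            struct task_struct *cur;

            asm(LOAD_SYM_ARMV6(%0, __current) : "=r" (cur));
            return cur;
    }
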
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 1cbcc462b07e..a7c2337b0c7d 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -26,7 +26,6 @@
struct irqaction;
struct pt_regs;
-extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
void init_IRQ(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index eec0c0bda766..9349e7a82c9c 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -56,9 +56,7 @@ struct machine_desc {
void (*init_time)(void);
void (*init_machine)(void);
void (*init_late)(void);
-#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_irq)(struct pt_regs *);
-#endif
void (*restart)(enum reboot_mode, const char *);
};
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 1592a4264488..e049723840d3 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -10,7 +10,7 @@ typedef struct {
#else
int switch_pending;
#endif
- unsigned int vmalloc_seq;
+ atomic_t vmalloc_seq;
unsigned long sigpage;
#ifdef CONFIG_VDSO
unsigned long vdso;
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 84e58956fcab..db2cb06aa8cf 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -23,6 +23,16 @@
void __check_vmalloc_seq(struct mm_struct *mm);
+#ifdef CONFIG_MMU
+static inline void check_vmalloc_seq(struct mm_struct *mm)
+{
+ if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
+ unlikely(atomic_read(&mm->context.vmalloc_seq) !=
+ atomic_read(&init_mm.context.vmalloc_seq)))
+ __check_vmalloc_seq(mm);
+}
+#endif
+
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
@@ -52,8 +62,7 @@ static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
static inline void check_and_switch_context(struct mm_struct *mm,
struct task_struct *tsk)
{
- if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
- __check_vmalloc_seq(mm);
+ check_vmalloc_seq(mm);
if (irqs_disabled())
/*
@@ -129,6 +138,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#endif
}
+#ifdef CONFIG_VMAP_STACK
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ if (mm != &init_mm)
+ check_vmalloc_seq(mm);
+}
+#define enter_lazy_tlb enter_lazy_tlb
+#endif
+
#include <asm-generic/mmu_context.h>
#endif
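
check_vmalloc_seq() is the consumer half of a sequence-counter protocol:
init_mm's vmalloc_seq advances whenever kernel page tables covering vmalloc
space change (which is what the ARCH_PAGE_TABLE_SYNC_MASK addition in
asm/page.h below arranges), and a user mm copies the missing PGD entries in
__check_vmalloc_seq() when its counter lags. A hedged sketch of the producer
side (simplified; in this series the bump is expected to happen from the
arch's arch_sync_kernel_mappings() path):

    /* Producer sketch: bump init_mm's counter when kernel page-table
     * entries change, so lazily-synced mms notice on their next use. */
    static inline void vmalloc_seq_bump_sketch(void)
    {
            atomic_inc(&init_mm.context.vmalloc_seq);
    }
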
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 11b058a72a5b..5fcc8a600e36 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -147,6 +147,9 @@ extern void copy_page(void *to, const void *from);
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
+#ifdef CONFIG_VMAP_STACK
+#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
+#endif
#endif
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index e2fcb3cfd3de..7545c87c251f 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -5,20 +5,27 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
+#include <asm/insn.h>
+
register unsigned long current_stack_pointer asm ("sp");
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
*/
-#if defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6)
+#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
+ extern unsigned int smp_on_up;
+
+ if (IS_ENABLED(CONFIG_CPU_V6) && !smp_on_up)
+ return;
+
/* Set TPIDRPRW */
asm volatile("mcr p15, 0, %0, c13, c0, 4" : : "r" (off) : "memory");
}
-static inline unsigned long __my_cpu_offset(void)
+static __always_inline unsigned long __my_cpu_offset(void)
{
unsigned long off;
@@ -27,8 +34,28 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
- : "Q" (*(const unsigned long *)current_stack_pointer));
+ asm("0: mrc p15, 0, %0, c13, c0, 4 \n\t"
+#ifdef CONFIG_CPU_V6
+ "1: \n\t"
+ " .subsection 1 \n\t"
+#if defined(CONFIG_ARM_HAS_GROUP_RELOCS) && \
+ !(defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
+ "2: " LOAD_SYM_ARMV6(%0, __per_cpu_offset) " \n\t"
+ " b 1b \n\t"
+#else
+ "2: ldr %0, 3f \n\t"
+ " ldr %0, [%0] \n\t"
+ " b 1b \n\t"
+ "3: .long __per_cpu_offset \n\t"
+#endif
+ " .previous \n\t"
+ " .pushsection \".alt.smp.init\", \"a\" \n\t"
+ " .long 0b - . \n\t"
+ " b . + (2b - 0b) \n\t"
+ " .popsection \n\t"
+#endif
+ : "=r" (off)
+ : "Q" (*(const unsigned long *)current_stack_pointer));
return off;
}
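
The "Q" input operand is what allows the asm above to stay non-volatile: the
dummy load through current_stack_pointer gives the asm a memory dependency, so
any compiler barrier (a "memory" clobber) forces the offset to be re-read,
while straight-line code may still cache it. A reduced v7-only sketch of the
same pattern (no SMP-on-UP fixup):

    static __always_inline unsigned long my_cpu_offset_sketch(void)
    {
            unsigned long off;

            /* non-volatile on purpose: CSE-able, but refetched across
             * barrier() thanks to the fake stack read below */
            asm("mrc p15, 0, %0, c13, c0, 4"
                : "=r" (off)
                : "Q" (*(const unsigned long *)current_stack_pointer));
            return off;
    }
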
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index f16cbbd5cda4..7c1c90d9f582 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -25,11 +25,6 @@ struct seq_file;
extern void show_ipi_list(struct seq_file *, int);
/*
- * Called from assembly code, this handles an IPI.
- */
-asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
-
-/*
* Called from C code, this handles an IPI.
*/
void handle_IPI(int ipinr, struct pt_regs *regs);
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 8f54f9ad8a9b..3e78f921b8b2 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -14,6 +14,9 @@ struct stackframe {
unsigned long sp;
unsigned long lr;
unsigned long pc;
+
+ /* address of the LR value on the stack */
+ unsigned long *lr_addr;
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
struct task_struct *tsk;
@@ -36,5 +39,7 @@ void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
extern int unwind_frame(struct stackframe *frame);
extern void walk_stackframe(struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
+extern void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top);
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index 61e4a3c4ca6e..9372348516ce 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,7 @@
#define __ASM_ARM_SWITCH_TO_H
#include <linux/thread_info.h>
+#include <asm/smp_plat.h>
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
@@ -26,7 +27,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
- if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) \
+ if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
__this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 164e15f26485..aecc403b2880 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,6 +25,14 @@
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP (THREAD_SIZE - 8)
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
#ifndef __ASSEMBLY__
struct task_struct;
@@ -54,9 +62,6 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
-#ifndef CONFIG_THREAD_INFO_IN_TASK
- struct task_struct *task; /* main task structure */
-#endif
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
struct cpu_context_save cpu_context; /* cpu context */
@@ -72,39 +77,15 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
- INIT_THREAD_INFO_TASK(tsk) \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-#define INIT_THREAD_INFO_TASK(tsk)
-
static inline struct task_struct *thread_task(struct thread_info* ti)
{
return (struct task_struct *)ti;
}
-#else
-#define INIT_THREAD_INFO_TASK(tsk) .task = &(tsk),
-
-static inline struct task_struct *thread_task(struct thread_info* ti)
-{
- return ti->task;
-}
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-#endif
-
#define thread_saved_pc(tsk) \
((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk) \
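
THREAD_ALIGN doubles the stack alignment only under CONFIG_VMAP_STACK. A
plausible reading, stated here as an assumption since the actual test lives in
the entry code rather than in this header: with a THREAD_SIZE stack placed in
a 2*THREAD_SIZE-aligned window, a single bit of SP tells a live stack apart
from one that has run into the guard area:

    /* Hedged sketch: if the stack occupies the low half of its
     * 2*THREAD_SIZE-aligned window, an in-range SP has the THREAD_SIZE
     * bit clear; running past the base sets it. Illustrative only. */
    static inline int sp_overflowed_sketch(unsigned long sp)
    {
            return (sp & THREAD_SIZE) != 0;
    }
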
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index c3296499176c..3dcd0f71a0da 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -18,21 +18,32 @@
.endm
.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
- ldr \tmp1, =elf_hwcap
- ldr \tmp1, [\tmp1, #0]
+#ifdef CONFIG_SMP
+ALT_SMP(nop)
+ALT_UP_B(.L0_\@)
+ .subsection 1
+#endif
+.L0_\@:
+ ldr_va \tmp1, elf_hwcap
mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available?
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
- mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
- mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
- mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
- strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
+ beq .L2_\@
+ mcr p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+#ifdef CONFIG_SMP
+ b .L1_\@
+ .previous
+#endif
+.L1_\@: switch_tls_v6k \base, \tp, \tpuser, \tmp1, \tmp2
+.L2_\@:
.endm
.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
mov \tmp1, #0xffff0fff
str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
.endm
+#else
+#include <asm/smp_plat.h>
#endif
#ifdef CONFIG_TLS_REG_EMUL
@@ -43,7 +54,7 @@
#elif defined(CONFIG_CPU_V6)
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
-#define defer_tls_reg_update 0
+#define defer_tls_reg_update is_smp()
#define switch_tls switch_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
@@ -81,11 +92,11 @@ static inline void set_tls(unsigned long val)
*/
barrier();
- if (!tls_emu && !defer_tls_reg_update) {
- if (has_tls_reg) {
+ if (!tls_emu) {
+ if (has_tls_reg && !defer_tls_reg_update) {
asm("mcr p15, 0, %0, c13, c0, 3"
: : "r" (val));
- } else {
+ } else if (!has_tls_reg) {
#ifdef CONFIG_KUSER_HELPERS
/*
* User space must never try to access this
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 2cb00d15831b..4512f7e1918f 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -13,6 +13,7 @@
#define V7M_SCB_ICSR_PENDSVSET (1 << 28)
#define V7M_SCB_ICSR_PENDSVCLR (1 << 27)
#define V7M_SCB_ICSR_RETTOBASE (1 << 11)
+#define V7M_SCB_ICSR_VECTACTIVE 0x000001ff
#define V7M_SCB_VTOR 0x08
@@ -38,7 +39,7 @@
#define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16)
#define V7M_xPSR_FRAMEPTRALIGN 0x00000200
-#define V7M_xPSR_EXCEPTIONNO 0x000001ff
+#define V7M_xPSR_EXCEPTIONNO V7M_SCB_ICSR_VECTACTIVE
/*
* When branching to an address that has bits [31:28] == 0xf an exception return