From 85751e9e5b1480fc675106aeaf94fadb8028469b Mon Sep 17 00:00:00 2001
From: Vincenzo Frascino
Date: Wed, 10 Jul 2019 15:01:19 +0100
Subject: arm64: vdso: Fix population of AT_SYSINFO_EHDR for compat vdso

Prior to the introduction of Unified vDSO support and compat layer for
vDSO on arm64, AT_SYSINFO_EHDR was not defined for compat tasks.

In the current implementation, AT_SYSINFO_EHDR is defined even if the
compat vdso layer is not built, which has been shown to break Android
applications using bionic:

| 01-01 01:22:14.097   755   755 F libc    : Fatal signal 11 (SIGSEGV),
| code 1 (SEGV_MAPERR), fault addr 0x3cf2c96c in tid 755 (cameraserver),
| pid 755 (cameraserver)
| 01-01 01:22:14.112   759   759 F libc    : Fatal signal 11 (SIGSEGV),
| code 1 (SEGV_MAPERR), fault addr 0x3cf2c96c in tid 759
| (android.hardwar), pid 759 (android.hardwar)
| 01-01 01:22:14.120   756   756 F libc    : Fatal signal 11 (SIGSEGV)
| code 1 (SEGV_MAPERR), fault addr 0x3cf2c96c in tid 756 (drmserver),
| pid 756 (drmserver)

Restore the old behaviour by making sure that AT_SYSINFO_EHDR for
compat tasks is defined only when CONFIG_COMPAT_VDSO is enabled.

Reported-by: John Stultz
Tested-by: John Stultz
Signed-off-by: Vincenzo Frascino
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/elf.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3c7037c6ba9b..b618017205a3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 ({									\
	set_thread_flag(TIF_32BIT);					\
 })
-#ifdef CONFIG_GENERIC_COMPAT_VDSO
+#ifdef CONFIG_COMPAT_VDSO
 #define COMPAT_ARCH_DLINFO						\
 do {									\
	/*								\
-- cgit v1.2.3

From 2e2f3c9b864d9f21fb82aa8da5ac9adc1e020f60 Mon Sep 17 00:00:00 2001
From: Naohiro Aota
Date: Fri, 12 Jul 2019 19:15:56 +0900
Subject: arm64: vdso: fix flip/flop vdso build bug

Running "make" on an already compiled kernel tree will rebuild the
kernel even without any modifications:

$ make ARCH=arm64 CROSS_COMPILE=/usr/bin/aarch64-unknown-linux-gnu-
arch/arm64/Makefile:58: CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built
  CALL    scripts/checksyscalls.sh
  CALL    scripts/atomic/check-atomics.sh
  VDSOCHK arch/arm64/kernel/vdso/vdso.so.dbg
  VDSOSYM include/generated/vdso-offsets.h
  CHK     include/generated/compile.h
  CC      arch/arm64/kernel/signal.o
  CC      arch/arm64/kernel/vdso.o
  CC      arch/arm64/kernel/signal32.o
  LD      arch/arm64/kernel/vdso/vdso.so.dbg
  OBJCOPY arch/arm64/kernel/vdso/vdso.so
  AS      arch/arm64/kernel/vdso/vdso.o
  AR      arch/arm64/kernel/vdso/built-in.a
  AR      arch/arm64/kernel/built-in.a
  GEN     .version
  CHK     include/generated/compile.h
  UPD     include/generated/compile.h
  CC      init/version.o
  AR      init/built-in.a
  LD      vmlinux.o

This is the same bug fixed in commit 92a4728608a8 ("x86/boot: Fix
if_changed build flip/flop bug"). We cannot use two "if_changed" in one
target. Fix this build bug by merging two commands into one function.
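For illustration, the failure mode and the shape of the fix look roughly
like this in a stripped-down Makefile (the rule and command names here
are invented for the example, not the actual kernel rules):

  # Broken: each if_changed compares against, and then overwrites, the
  # same saved command file for the target, so whichever command ran
  # last "wins" and the other always sees a stale saved command line on
  # the next build, forcing an endless rebuild cycle.
  $(obj)/out.so: $(obj)/in.lds $(obj-y) FORCE
	$(call if_changed,ld)
	$(call if_changed,check)

  # Fixed: merge the two steps into a single command, so that one saved
  # command line covers the whole recipe.
  quiet_cmd_ld_and_check = LD      $@
        cmd_ld_and_check = $(cmd_ld); $(cmd_check)

  $(obj)/out.so: $(obj)/in.lds $(obj-y) FORCE
	$(call if_changed,ld_and_check)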
Fixes: a7f71a2c8903 ("arm64: compat: Add vDSO")
Fixes: 28b1a824a4f4 ("arm64: vdso: Substitute gettimeofday() with C implementation")
Reviewed-by: Masahiro Yamada
Reviewed-by: Vincenzo Frascino
Tested-by: Vincenzo Frascino
Co-developed-by: Vincenzo Frascino
Signed-off-by: Vincenzo Frascino
Signed-off-by: Naohiro Aota
[will: merged in compat fix from Vincenzo and made rule names consistent]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/vdso/Makefile   | 6 ++++--
 arch/arm64/kernel/vdso32/Makefile | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 4ab863045188..75d25679d879 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so

 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
-	$(call if_changed,ld)
-	$(call if_changed,vdso_check)
+	$(call if_changed,vdsold_and_vdso_check)

 # Strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -77,6 +76,9 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 quiet_cmd_vdsocc = VDSOCC  $@
       cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<

+quiet_cmd_vdsold_and_vdso_check = LD      $@
+      cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
+
 # Install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@

diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 60a4c6239712..8dfa45bc3c9f 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE

 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
-	$(call if_changed,vdsold)
-	$(call if_changed,vdso_check)
+	$(call if_changed,vdsold_and_vdso_check)

 # Compilation rules for the vDSO sources
 $(c-obj-vdso): %.o: %.c FORCE
@@ -156,6 +155,9 @@ $(asm-obj-vdso): %.o: %.S FORCE
 	$(call if_changed_dep,vdsoas)

 # Actual build commands
+quiet_cmd_vdsold_and_vdso_check = LD      $@
+      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
 quiet_cmd_vdsold = VDSOL   $@
       cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-- cgit v1.2.3

From a88754b23114cfb61efed56ed448c05e4cb7e20d Mon Sep 17 00:00:00 2001
From: Vincenzo Frascino
Date: Fri, 19 Jul 2019 11:10:18 +0100
Subject: arm64: vdso: Cleanup Makefiles

The recent changes to the vdso library for arm64 and the introduction
of the compat vdso library have generated some misalignment in the
Makefiles.

Clean up the Makefiles for the vdso and vdso32 libraries by:

* Removing unused rules.
* Unifying the displayed compilation messages.
* Simplifying the generic library inclusion path for arm64 vdso.
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Vincenzo Frascino
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/vdso/Makefile   |  9 +++------
 arch/arm64/kernel/vdso32/Makefile | 10 +++++-----
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 75d25679d879..dd2514bb1511 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
 OBJECT_FILES_NON_STANDARD := y
 KCOV_INSTRUMENT := n

-ifeq ($(c-gettimeofday-y),)
 CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
-else
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y)
+
+ifneq ($(c-gettimeofday-y),)
+  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
 endif

 # Clang versions less than 8 do not support -mcmodel=tiny
@@ -73,9 +73,6 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 	$(call if_changed,vdsosym)

 # Actual build commands
-quiet_cmd_vdsocc = VDSOCC  $@
-      cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<
-
 quiet_cmd_vdsold_and_vdso_check = LD      $@
       cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)

diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 8dfa45bc3c9f..1fba0776ed40 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -155,17 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
 	$(call if_changed_dep,vdsoas)

 # Actual build commands
-quiet_cmd_vdsold_and_vdso_check = LD      $@
+quiet_cmd_vdsold_and_vdso_check = LD32    $@
       cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
-quiet_cmd_vdsold = VDSOL   $@
+quiet_cmd_vdsold = LD32    $@
       cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-quiet_cmd_vdsocc = VDSOC   $@
+quiet_cmd_vdsocc = CC32    $@
       cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
-quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@
+quiet_cmd_vdsocc_gettimeofday = CC32    $@
       cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
-quiet_cmd_vdsoas = VDSOA   $@
+quiet_cmd_vdsoas = AS32    $@
       cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<

 quiet_cmd_vdsomunge = MUNGE   $@
-- cgit v1.2.3

From 8caa6e2be72313c170f2b30e8475323526dd7ed1 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 2 Jul 2019 14:07:27 +0100
Subject: arm64: stacktrace: Constify stacktrace.h functions

on_accessible_stack() and on_task_stack() shouldn't (and don't) modify
their task argument, so it can be const.

This patch adds the appropriate modifiers. Whitespace violations in the
parameter lists are fixed at the same time.

No functional change.
Acked-by: Catalin Marinas
Signed-off-by: Dave Martin
[Mark: fixup const location, whitespace]
Signed-off-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/stacktrace.h | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index df45af931459..1e0c5a7cdce5 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -64,8 +64,9 @@ static inline bool on_irq_stack(unsigned long sp,
 	return true;
 }

-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
-				 struct stack_info *info)
+static inline bool on_task_stack(const struct task_struct *tsk,
+				 unsigned long sp,
+				 struct stack_info *info)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
@@ -112,9 +113,9 @@ static inline bool on_overflow_stack(unsigned long sp,
  * We can only safely access per-cpu stacks from current in a non-preemptible
  * context.
  */
-static inline bool on_accessible_stack(struct task_struct *tsk,
-				       unsigned long sp,
-				       struct stack_info *info)
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+				       unsigned long sp,
+				       struct stack_info *info)
 {
 	if (on_task_stack(tsk, sp, info))
 		return true;
-- cgit v1.2.3

From f3dcbe67ed424f1cf92065f9ad0cc647f2b44eac Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 2 Jul 2019 14:07:28 +0100
Subject: arm64: stacktrace: Factor out backtrace initialisation

Some common code is required by each stacktrace user to initialise
struct stackframe before the first call to unwind_frame().

In preparation for adding to the common code, this patch factors it
out into a separate function start_backtrace(), and modifies the
stacktrace callers appropriately.

No functional change.
Signed-off-by: Dave Martin
[Mark: drop tsk argument, update more callsites]
Signed-off-by: Mark Rutland
Reviewed-by: James Morse
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/stacktrace.h | 10 ++++++++++
 arch/arm64/kernel/perf_callchain.c  |  7 +------
 arch/arm64/kernel/process.c         |  7 ++-----
 arch/arm64/kernel/return_address.c  |  9 +++------
 arch/arm64/kernel/stacktrace.c      | 19 ++++++-------------
 arch/arm64/kernel/time.c            |  7 ++-----
 arch/arm64/kernel/traps.c           | 13 ++++++-------
 7 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 1e0c5a7cdce5..7fa0dfedb8e9 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -131,4 +131,14 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
 	return false;
 }

+static inline void start_backtrace(struct stackframe *frame,
+				   unsigned long fp, unsigned long pc)
+{
+	frame->fp = fp;
+	frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame->graph = 0;
+#endif
+}
+
 #endif	/* __ASM_STACKTRACE_H */

diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 9d63514b9836..b0e03e052dd1 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 		return;
 	}

-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame, regs->regs[29], regs->pc);
 	walk_stackframe(current, &frame, callchain_trace, entry);
 }

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6a869d9f304f..8d836d0abc96 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -498,11 +498,8 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!stack_page)
 		return 0;

-	frame.fp = thread_saved_fp(p);
-	frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
+	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
 	do {
 		if (unwind_frame(p, &frame))
 			goto out;

diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index b21cba90f82d..c4ae647d2306 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -38,12 +38,9 @@ void *return_address(unsigned int level)
 	data.level = level + 2;
 	data.addr = NULL;

-	frame.fp = (unsigned long)__builtin_frame_address(0);
-	frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame,
+			(unsigned long)__builtin_frame_address(0),
+			(unsigned long)return_address);
 	walk_stackframe(current, &frame, save_return_addr, &data);

 	if (!data.level)

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 62d395151abe..017972c2de90 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -122,12 +122,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	data.skip = trace->skip;
 	data.no_sched_functions = 0;

-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame, regs->regs[29], regs->pc);
 	walk_stackframe(current, &frame, save_trace, &data);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +141,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 	data.no_sched_functions = nosched;

 	if (tsk != current) {
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
+		start_backtrace(&frame, thread_saved_fp(tsk),
+				thread_saved_pc(tsk));
 	} else {
 		/* We don't want this function nor the caller */
 		data.skip += 2;
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)__save_stack_trace;
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)__save_stack_trace);
 	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif

 	walk_stackframe(tsk, &frame, save_trace, &data);

diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 9f25aedeac9d..0b2946414dc9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 	if (!in_lock_functions(regs->pc))
 		return regs->pc;

-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
+	start_backtrace(&frame, regs->regs[29], regs->pc);
+
 	do {
 		int ret = unwind_frame(NULL, &frame);
 		if (ret < 0)

diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8c03456dade6..d3313797cca9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		return;

 	if (tsk == current) {
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)dump_backtrace;
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)dump_backtrace);
 	} else {
 		/*
 		 * task blocked in __switch_to
 		 */
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
+		start_backtrace(&frame,
+				thread_saved_fp(tsk),
+				thread_saved_pc(tsk));
 	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif

 	printk("Call trace:\n");
 	do {
-- cgit v1.2.3

From 592700f094be229b5c9cc1192d5cea46eb4c7afc Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 2 Jul 2019 14:07:29 +0100
Subject: arm64: stacktrace: Better handle corrupted stacks

The arm64 stacktrace code is careful to only dereference frame records
in valid stack ranges, ensuring that a corrupted frame record won't
result in a faulting access.

However, it's still possible for corrupt frame records to result in
infinite loops in the stacktrace code, which is also undesirable.

This patch ensures that we complete a stacktrace in finite time, by
keeping track of which stacks we have already completed unwinding, and
verifying that if the next frame record is on the same stack, it is at
a higher address.

As this has turned out to be particularly subtle, comments are added to
explain the procedure.
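In outline, the per-step check looks like the following sketch. This is
a simplified illustration with a hypothetical unwind_state structure
mirroring the new stackframe fields, not the kernel code itself; see
the patch below for the real implementation:

	struct unwind_state {
		unsigned long prev_fp;
		int prev_type;
		DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	};

	static bool next_record_is_valid(struct unwind_state *state,
					 unsigned long fp, int stack_type)
	{
		/* A stack we have finished unwinding is never re-entered. */
		if (test_bit(stack_type, state->stacks_done))
			return false;

		if (stack_type == state->prev_type) {
			/* Same stack: frame records must strictly ascend. */
			if (fp <= state->prev_fp)
				return false;
		} else {
			/* Stack transition: the previous stack is now done. */
			set_bit(state->prev_type, state->stacks_done);
		}

		return true;
	}

Since each stack type can be visited at most once and the fp strictly
increases within a stack, the walk is guaranteed to terminate.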
Signed-off-by: Mark Rutland
Reviewed-by: James Morse
Tested-by: James Morse
Acked-by: Dave Martin
Acked-by: Catalin Marinas
Cc: Tengfei Fan
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/stacktrace.h | 57 +++++++++++++++++++++++++++++++------
 arch/arm64/kernel/stacktrace.c      | 40 +++++++++++++++++++++++++-
 2 files changed, 88 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 7fa0dfedb8e9..4d9b1f48dc39 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,19 +8,12 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/types.h>

 #include <asm/memory.h>
 #include <asm/ptrace.h>
 #include <asm/sdei.h>

-struct stackframe {
-	unsigned long fp;
-	unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	int graph;
-#endif
-};
-
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
@@ -28,6 +21,7 @@
 	STACK_TYPE_OVERFLOW,
 	STACK_TYPE_SDEI_NORMAL,
 	STACK_TYPE_SDEI_CRITICAL,
+	__NR_STACK_TYPES
 };

 struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
 	enum stack_type type;
 };

+/*
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
+ *
+ * @fp:          The fp value in the frame record (or the real fp)
+ * @pc:          The pc value in the frame record (or the real lr)
+ *
+ * @stacks_done: Stacks which have been entirely unwound, for which it is no
+ *               longer valid to unwind to.
+ *
+ * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
+ *               of 0. This is used to ensure that within a stack, each
+ *               subsequent frame record is at an increasing address.
+ * @prev_type:   The type of stack this frame record was on, or a synthetic
+ *               value of STACK_TYPE_UNKNOWN. This is used to detect a
+ *               transition from one stack to another.
+ *
+ * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
+ *               replacement lr value in the ftrace graph stack.
+ */
+struct stackframe {
+	unsigned long fp;
+	unsigned long pc;
+	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+	unsigned long prev_fp;
+	enum stack_type prev_type;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph;
+#endif
+};
+
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
@@ -117,6 +142,9 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
 {
+	if (info)
+		info->type = STACK_TYPE_UNKNOWN;
+
 	if (on_task_stack(tsk, sp, info))
 		return true;
 	if (tsk != current || preemptible())
@@ -139,6 +167,19 @@ static inline void start_backtrace(struct stackframe *frame,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame->graph = 0;
 #endif
+
+	/*
+	 * Prime the first unwind.
+	 *
+	 * In unwind_frame() we'll check that the FP points to a valid stack,
+	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+	 * treated as a transition to whichever stack that happens to be. The
+	 * prev_fp value won't be used, but we set it to 0 such that it is
+	 * definitely not an accessible stack address.
+	 */
+	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+	frame->prev_fp = 0;
+	frame->prev_type = STACK_TYPE_UNKNOWN;
 }

 #endif	/* __ASM_STACKTRACE_H */

diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 017972c2de90..2b160ae594eb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -29,9 +29,18 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
+	struct stack_info info;

 	if (fp & 0xf)
 		return -EINVAL;
@@ -39,11 +48,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	if (!tsk)
 		tsk = current;

-	if (!on_accessible_stack(tsk, fp, NULL))
+	if (!on_accessible_stack(tsk, fp, &info))
 		return -EINVAL;

+	if (test_bit(info.type, frame->stacks_done))
+		return -EINVAL;
+
+	/*
+	 * As stacks grow downward, any valid record on the same stack must be
+	 * at a strictly higher address than the prior record.
+	 *
+	 * Stacks can nest in several valid orders, e.g.
+	 *
+	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+	 *
+	 * ... but the nesting itself is strict. Once we transition from one
+	 * stack to another, it's never valid to unwind back to that first
+	 * stack.
+	 */
+	if (info.type == frame->prev_type) {
+		if (fp <= frame->prev_fp)
+			return -EINVAL;
+	} else {
+		set_bit(frame->prev_type, frame->stacks_done);
+	}
+
+	/*
+	 * Record this frame record's values and location. The prev_fp and
+	 * prev_type are only meaningful to the next unwind_frame() invocation.
+	 */
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	frame->prev_fp = fp;
+	frame->prev_type = info.type;

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-- cgit v1.2.3

From d16af870a70e0e399126c6419f92e7badf5e465b Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 12 Jun 2019 17:00:33 +0100
Subject: arm64/sve: Factor out FPSIMD to SVE state conversion

Currently we convert from FPSIMD to SVE register state in memory in
two places.

To ease future maintenance, let's consolidate this in one place.

Reviewed-by: Julien Grall
Acked-by: Catalin Marinas
Signed-off-by: Dave Martin
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/fpsimd.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index eec4776ae5f0..386d848e6b42 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)

 #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)

+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+			    unsigned int vq)
+{
+	unsigned int i;
+	__uint128_t *p;
+
+	for (i = 0; i < 32; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
+}
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
-	__uint128_t *p;

 	if (!system_supports_sve())
 		return;

 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i) {
-		p = (__uint128_t *)ZREG(sst, vq, i);
-		*p = arm64_cpu_to_le128(fst->vregs[i]);
-	}
+	__fpsimd_to_sve(sst, fst, vq);
 }

 /*
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
-	__uint128_t *p;

 	if (!test_tsk_thread_flag(task, TIF_SVE))
 		return;

 	vq = sve_vq_from_vl(task->thread.sve_vl);
 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
-	for (i = 0; i < 32; ++i) {
-		p = (__uint128_t *)ZREG(sst, vq, i);
-		*p = arm64_cpu_to_le128(fst->vregs[i]);
-	}
+	__fpsimd_to_sve(sst, fst, vq);
 }

 int sve_set_vector_length(struct task_struct *task,
-- cgit v1.2.3

From ed2f3e9ff637c7c9f65b98468a084393683ff93b Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Wed, 12 Jun 2019 17:00:34 +0100
Subject: arm64/sve: Fix a couple of magic numbers for the Z-reg count

There are some hand-written instances of "32" to express the number of
SVE Z-registers.

Since this code was written, a #define has been added for this, so
convert trivial instances of this magic number as appropriate.

No functional change.

Reviewed-by: Julien Grall
Acked-by: Catalin Marinas
Signed-off-by: Dave Martin
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/fpsimd.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 386d848e6b42..37d3912cfe06 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -412,7 +412,7 @@ static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
 	unsigned int i;
 	__uint128_t *p;

-	for (i = 0; i < 32; ++i) {
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 		p = (__uint128_t *)ZREG(sst, vq, i);
 		*p = arm64_cpu_to_le128(fst->vregs[i]);
 	}
@@ -466,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
 		return;

 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i) {
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 		p = (__uint128_t const *)ZREG(sst, vq, i);
 		fst->vregs[i] = arm64_le128_to_cpu(*p);
 	}
-- cgit v1.2.3

From 5a9060e9437be47f92f85a2b5c7cd73314d080e8 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 2 Jul 2019 11:02:55 +0530
Subject: arm64: mm: Drop pte_huge()

This helper is required by the generic huge_pte_alloc(), which is only
available when the architecture subscribes to ARCH_WANT_GENERAL_HUGETLB.
arm64 implements its own huge_pte_alloc() and does not depend on the
generic definition. Drop this helper, which is redundant on arm64.

Cc: Mark Rutland
Cc: Steve Capper
Acked-by: Catalin Marinas
Signed-off-by: Anshuman Khandual
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/pgtable.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 87a4b2ddc1a1..3f5461f7b560 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 /*
  * Huge pte definitions.
  */
-#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
 #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

 /*
-- cgit v1.2.3

From 0aafd138b322d0b1bec3f14cdef4be3374d9fc39 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 16 Jul 2019 18:43:08 +0100
Subject: MAINTAINERS: Update my email address to @kernel.org

I will soon lose access to my @arm.com email address, so let's update
the MAINTAINERS file to point to my @kernel.org address, as well as
.mailmap for good measure.

Note that my @arm.com address will still work, but someone else will be
reading whatever is sent there. Don't say you didn't know!

Signed-off-by: Marc Zyngier
Signed-off-by: Will Deacon
---
 .mailmap    | 1 +
 MAINTAINERS | 8 ++++----
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/.mailmap b/.mailmap
index 0fef932de3db..23cfed2e015c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -132,6 +132,7 @@ Linus Lüssing
 Li Yang
 Li Yang
 Maciej W. Rozycki
+Marc Zyngier
 Marcin Nowakowski
 Mark Brown
 Mark Yao

diff --git a/MAINTAINERS b/MAINTAINERS
index 783569e3c4b4..b3d1eaddc5cf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1194,7 +1194,7 @@ F:	include/uapi/linux/if_arcnet.h

 ARM ARCHITECTED TIMER DRIVER
 M:	Mark Rutland
-M:	Marc Zyngier
+M:	Marc Zyngier
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/include/asm/arch_timer.h
@@ -8490,7 +8490,7 @@ S:	Obsolete
 F:	include/uapi/linux/ipx.h

 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:	Marc Zyngier
+M:	Marc Zyngier
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	Documentation/IRQ-domain.txt
@@ -8508,7 +8508,7 @@ F:	kernel/irq/
 IRQCHIP DRIVERS
 M:	Thomas Gleixner
 M:	Jason Cooper
-M:	Marc Zyngier
+M:	Marc Zyngier
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -8828,7 +8828,7 @@ F:	arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c

 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
-M:	Marc Zyngier
+M:	Marc Zyngier
 R:	James Morse
 R:	Julien Thierry
 R:	Suzuki K Pouloze
-- cgit v1.2.3

From 01233d47836d96264f3d66eda4514739b0ce7d9d Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose
Date: Wed, 17 Jul 2019 13:33:30 +0100
Subject: MAINTAINERS: Fix spelling mistake in my name

Fix a typo in my name for the KVM-ARM reviewers entry.

Acked-by: Marc Zyngier
Signed-off-by: Suzuki K Poulose
Signed-off-by: Will Deacon
---
 MAINTAINERS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index b3d1eaddc5cf..ef195bda7c70 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8831,7 +8831,7 @@ KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:	Marc Zyngier
 R:	James Morse
 R:	Julien Thierry
-R:	Suzuki K Pouloze
+R:	Suzuki K Poulose
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
-- cgit v1.2.3

From 8d419adb0511c8f0a2324fefb27752051eacc995 Mon Sep 17 00:00:00 2001
From: Julien Thierry
Date: Wed, 17 Jul 2019 11:32:15 +0100
Subject: MAINTAINERS: Update my email address

My @arm.com address will stop working in a couple of weeks. Update
MAINTAINERS and .mailmap files with an address I'll have access to.
Signed-off-by: Julien Thierry
Signed-off-by: Will Deacon
---
 .mailmap    | 1 +
 MAINTAINERS | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 23cfed2e015c..1043793cd557 100644
--- a/.mailmap
+++ b/.mailmap
@@ -116,6 +116,7 @@ John Stultz
 Juha Yrjola
 Juha Yrjola
 Juha Yrjola
+Julien Thierry
 Kay Sievers
 Kenneth W Chen
 Konstantin Khlebnikov

diff --git a/MAINTAINERS b/MAINTAINERS
index ef195bda7c70..50cf99beb7af 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8830,7 +8830,7 @@ F:	arch/x86/kvm/svm.c
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:	Marc Zyngier
 R:	James Morse
-R:	Julien Thierry
+R:	Julien Thierry
 R:	Suzuki K Poulose
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
-- cgit v1.2.3

From 4574b0b9abc83a3522b2e91c43b9f0c479855d3a Mon Sep 17 00:00:00 2001
From: Jean-Philippe Brucker
Date: Mon, 22 Jul 2019 14:44:40 +0100
Subject: MAINTAINERS: Update my email address

Update MAINTAINERS and .mailmap with my @linaro.org address, since I
don't have access to my @arm.com address anymore.

Signed-off-by: Jean-Philippe Brucker
Signed-off-by: Will Deacon
---
 .mailmap    | 1 +
 MAINTAINERS | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 1043793cd557..acba1a6163f1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -98,6 +98,7 @@ Jason Gunthorpe
 Javi Merino
 Jean Tourrilhes
+
 Jeff Garzik
 Jeff Layton
 Jeff Layton

diff --git a/MAINTAINERS b/MAINTAINERS
index 50cf99beb7af..3d5577d5ca3d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17123,7 +17123,7 @@ F:	drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h

 VIRTIO IOMMU DRIVER
-M:	Jean-Philippe Brucker
+M:	Jean-Philippe Brucker
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/iommu/virtio-iommu.c
-- cgit v1.2.3

From cbdf8a189a66001c36007bf0f5c975d0376c5c3a Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 22 Jul 2019 14:53:09 +0100
Subject: arm64: Force SSBS on context switch

On a CPU that doesn't support SSBS, PSTATE[12] is RES0. In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.
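The shape of the fix is a hook on the context-switch path which
re-applies the bit to the incoming task's saved pt_regs. The sketch
below is simplified from the patch that follows (it omits the
kernel-thread special case handled in the real code):

	static void ssbs_thread_switch(struct task_struct *next)
	{
		struct pt_regs *regs = task_pt_regs(next);

		/* If the mitigation is enabled, SSBS must stay clear. */
		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE ||
		    test_tsk_thread_flag(next, TIF_SSBD))
			return;

		/* Otherwise, restore the bit that migration may have cleared. */
		if (compat_user_mode(regs))
			set_compat_ssbs_bit(regs);
		else if (user_mode(regs))
			set_ssbs_bit(regs);
	}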
Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
Signed-off-by: Marc Zyngier
[will: inverted logic and added comments]
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/processor.h | 14 ++++++++++++--
 arch/arm64/kernel/process.c        | 29 ++++++++++++++++++++++++++++-
 2 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }

+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;

 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);

 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif

 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);

 	regs->compat_sp = sp;
 }

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 8d836d0abc96..f674f28df663 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
			childregs->pstate |= PSR_UAO_BIT;

		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);

		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
 	}
 }

+/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
  */
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);

 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
-- cgit v1.2.3

From 40ca0ce56d4bb889dc43b455c55398468115569a Mon Sep 17 00:00:00 2001
From: James Morse
Date: Mon, 22 Jul 2019 16:11:48 +0100
Subject: arm64: entry: SP Alignment Fault doesn't write to FAR_EL1

Comparing the arm-arm's pseudocode for AArch64.PCAlignmentFault() with
AArch64.SPAlignmentFault() shows that SP faults don't copy the faulty
SP to FAR_EL1, but this is where we read from, and the address we
provide to user-space with the BUS_ADRALN signal.

For user-space this value will be UNKNOWN due to the previous ERET to
user-space. If the last value is preserved, on systems with KASLR or
KPTI this will be the user-space link-register left in FAR_EL1 by
tramp_exit().

Fix this to retrieve the original sp_el0 value, and pass this to
do_sp_pc_fault().

SP alignment faults from EL1 will cause us to take the fault again when
trying to store the pt_regs. This eventually takes us to the overflow
stack. Remove the ESR_ELx_EC_SP_ALIGN check as we will never make it
this far.

Fixes: 60ffc30d5652 ("arm64: Exception handling")
Signed-off-by: James Morse
[will: change label name and fleshed out comment]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/entry.S | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc4592da3e..320a30dbe35e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -586,10 +586,8 @@ el1_sync:
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
-	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
-	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el1_sp_pc
+	b.eq	el1_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
	bl	do_mem_abort

	kernel_exit 1
-el1_sp_pc:
+el1_pc:
	/*
-	 * Stack or PC alignment exception handling
+	 * PC alignment exception handling. We don't handle SP alignment faults,
+	 * since we will have hit a recursive exception when trying to push the
+	 * initial pt_regs.
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_sp
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
+el0_sp:
+	ldr	x26, [sp, #S_SP]
+	b	el0_sp_pc
+el0_pc:
+	mrs	x26, far_el1
 el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
-	mrs	x26, far_el1
	gic_prio_kentry_setup tmp=x0
	enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
-- cgit v1.2.3

From 5a46d3f71d5e5a9f82eabc682f996f1281705ac7 Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Mon, 22 Jul 2019 17:25:48 +0100
Subject: ACPI/IORT: Fix off-by-one check in iort_dev_find_its_id()

Static analysis identified that index comparison against ITS entries in
iort_dev_find_its_id() is off by one.

Update the comparison condition and clarify the resulting error
message.

Fixes: 4bf2efd26d76 ("ACPI: Add new IORT functions to support MSI domain handling")
Link: https://lore.kernel.org/linux-arm-kernel/20190613065410.GB16334@mwanda/
Reviewed-by: Hanjun Guo
Reported-by: Dan Carpenter
Signed-off-by: Lorenzo Pieralisi
Cc: Dan Carpenter
Cc: Will Deacon
Cc: Hanjun Guo
Cc: Sudeep Holla
Cc: Catalin Marinas
Cc: Robin Murphy
Signed-off-by: Will Deacon
---
 drivers/acpi/arm64/iort.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4551e33fa71..8569b79e8b58 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;

-	if (idx > its->its_count) {
-		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+	if (idx >= its->its_count) {
+		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}
-- cgit v1.2.3