author    Eric W. Biederman <ebiederm@xmission.com>  2021-10-08 12:12:56 -0500
committer Eric W. Biederman <ebiederm@xmission.com>  2021-10-08 12:12:56 -0500
commit    3f66f86bfed33dee2e9c1d0e14486915bb0750b0 (patch)
tree      55800c054b48245841afec2f62605d0ef8990dd6  /kernel/exit.c
parent    6880fa6c56601bb8ed59df6c30fd390cc5f6dd8f (diff)
parent    0258b5fd7c7124b87e185a1a9322d2c66b1876b7 (diff)
download  linux-3f66f86bfed33dee2e9c1d0e14486915bb0750b0.tar.bz2
per signal_struct coredumps
Current coredumps are mixed up with the exit code, the signal handling code and with the ptrace code in ways that make them much more complicated than necessary and difficult to follow.

This series of changes starts with ptrace_stop and cleans it up, making it easier to follow what is happening in ptrace_stop. Then it cleans up the exec interactions with coredumps, then the coredump interactions with exit, and then the coredump interactions with the signal handling code.

The first and last changes are bug fixes for minor bugs. I believe the fact that vfork followed by execve can kill the process that called vfork if exec fails is sufficient justification to change the userspace visible behavior.

In previous conversations it was suggested that some of these cleanups did not stand on their own. I think I have managed to organize things so that each of these patches stands on its own, which means that if for some reason the last change needs to be reverted we can still keep the gains from the other changes.

Eric W. Biederman (6):
      signal: Remove the bogus sigkill_pending in ptrace_stop
      ptrace: Remove the unnecessary arguments from arch_ptrace_stop
      exec: Check for a pending fatal signal instead of core_state
      exit: Factor coredump_exit_mm out of exit_mm
      coredump: Don't perform any cleanups before dumping core
      coredump: Limit coredumps to a single thread group

 arch/ia64/include/asm/ptrace.h  |  4 +-
 arch/sparc/include/asm/ptrace.h |  8 ++--
 fs/binfmt_elf.c                 |  4 +-
 fs/binfmt_elf_fdpic.c           |  2 +-
 fs/coredump.c                   | 88 ++++++-----------------------------------
 fs/exec.c                       | 14 +++----
 fs/proc/array.c                 |  6 +--
 include/linux/mm_types.h        | 13 ------
 include/linux/ptrace.h          | 22 +++++------
 include/linux/sched.h           |  1 +
 include/linux/sched/signal.h    | 13 ++++++
 kernel/exit.c                   | 76 +++++++++++++++++++----------------
 kernel/fork.c                   |  4 +-
 kernel/signal.c                 | 49 ++++------------------
 mm/debug.c                      |  4 +-
 mm/oom_kill.c                   |  6 +--
 16 files changed, 106 insertions(+), 208 deletions(-)

Link: https://lkml.kernel.org/r/87v92qx2c6.fsf@disp2133
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
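For context on the vfork/execve behavior mentioned above, here is a minimal userspace sketch of the pattern in question. It uses only ordinary POSIX calls; the binary path and variable names are illustrative and not taken from this series. The child borrows its parent's mm until it calls execve() or _exit(), which is why coredump state kept in the shared mm_struct could reach across the vfork boundary, and why this series moves that state to signal_struct.

/*
 * Illustrative only: a typical vfork()+execve() sequence.  Nothing here
 * is taken from the kernel series; it just shows the userspace pattern
 * whose exec-failure path the cover letter refers to.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *child_argv[] = { "/bin/true", NULL };
	char *child_envp[] = { NULL };
	pid_t pid = vfork();

	if (pid == 0) {
		/* Child: runs in the parent's address space until exec or exit. */
		execve(child_argv[0], child_argv, child_envp);
		/* exec failed; after vfork() only _exit() is safe here. */
		_exit(errno ? errno : 127);
	} else if (pid < 0) {
		perror("vfork");
		return 1;
	}

	/* Parent resumes only after the child has execed or exited. */
	int status;
	if (waitpid(pid, &status, 0) < 0) {
		perror("waitpid");
		return 1;
	}
	printf("child exited with status %d\n", WEXITSTATUS(status));
	return 0;
}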
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  76
1 file changed, 41 insertions(+), 35 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 91a43e57a32e..2b355e926c13 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -339,6 +339,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
}
}
+static void coredump_task_exit(struct task_struct *tsk)
+{
+ struct core_state *core_state;
+
+ /*
+ * Serialize with any possible pending coredump.
+ * We must hold siglock around checking core_state
+ * and setting PF_POSTCOREDUMP. The core-inducing thread
+ * will increment ->nr_threads for each thread in the
+ * group without PF_POSTCOREDUMP set.
+ */
+ spin_lock_irq(&tsk->sighand->siglock);
+ tsk->flags |= PF_POSTCOREDUMP;
+ core_state = tsk->signal->core_state;
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (core_state) {
+ struct core_thread self;
+
+ self.task = current;
+ if (self.task->flags & PF_SIGNALED)
+ self.next = xchg(&core_state->dumper.next, &self);
+ else
+ self.task = NULL;
+ /*
+ * Implies mb(), the result of xchg() must be visible
+ * to core_state->dumper.
+ */
+ if (atomic_dec_and_test(&core_state->nr_threads))
+ complete(&core_state->startup);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!self.task) /* see coredump_finish() */
+ break;
+ freezable_schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ }
+}
+
#ifdef CONFIG_MEMCG
/*
* A task is exiting. If it owned this mm, find a new owner for the mm.
@@ -434,47 +474,12 @@ assign_new_owner:
static void exit_mm(void)
{
struct mm_struct *mm = current->mm;
- struct core_state *core_state;
exit_mm_release(current, mm);
if (!mm)
return;
sync_mm_rss(mm);
- /*
- * Serialize with any possible pending coredump.
- * We must hold mmap_lock around checking core_state
- * and clearing tsk->mm. The core-inducing thread
- * will increment ->nr_threads for each thread in the
- * group with ->mm != NULL.
- */
mmap_read_lock(mm);
- core_state = mm->core_state;
- if (core_state) {
- struct core_thread self;
-
- mmap_read_unlock(mm);
-
- self.task = current;
- if (self.task->flags & PF_SIGNALED)
- self.next = xchg(&core_state->dumper.next, &self);
- else
- self.task = NULL;
- /*
- * Implies mb(), the result of xchg() must be visible
- * to core_state->dumper.
- */
- if (atomic_dec_and_test(&core_state->nr_threads))
- complete(&core_state->startup);
-
- for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (!self.task) /* see coredump_finish() */
- break;
- freezable_schedule();
- }
- __set_current_state(TASK_RUNNING);
- mmap_read_lock(mm);
- }
mmgrab(mm);
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
@@ -762,6 +767,7 @@ void __noreturn do_exit(long code)
profile_task_exit(tsk);
kcov_task_exit(tsk);
+ coredump_task_exit(tsk);
ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);