 fs/coredump.c   |  2 +-
 kernel/exit.c   |  6 ++++++
 kernel/signal.c | 18 ++++++++++--------
 3 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index ece7badf701b..46f8145b39e6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -368,7 +368,7 @@ static int zap_process(struct task_struct *start, int exit_code)
 
 	for_each_thread(start, t) {
 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
+		if (t != current && !(t->flags & (PF_POSTCOREDUMP | PF_IO_WORKER))) {
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
 			nr++;
diff --git a/kernel/exit.c b/kernel/exit.c
index 34b90e2e7cf7..fde57b9f4494 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -400,6 +400,12 @@ static void coredump_task_exit(struct task_struct *tsk)
 {
 	struct core_state *core_state;
 
+	/*
+	 * IO workers do not participate in dumping core
+	 */
+	if (tsk->flags & PF_IO_WORKER)
+		return;
+
 	/*
 	 * Serialize with any possible pending coredump.
 	 * We must hold siglock around checking core_state
diff --git a/kernel/signal.c b/kernel/signal.c
index 8f6330f0e9ca..e0acb11d3a1d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2845,6 +2845,16 @@ bool get_signal(struct ksignal *ksig)
 		 */
 		current->flags |= PF_SIGNALED;
 
+		/*
+		 * PF_IO_WORKER threads will catch and exit on fatal signals
+		 * themselves and do not participate in core dumping.
+		 *
+		 * They have cleanup that must be performed, so we cannot
+		 * call do_exit() on their behalf.
+		 */
+		if (current->flags & PF_IO_WORKER)
+			goto out;
+
 		if (sig_kernel_coredump(signr)) {
 			if (print_fatal_signals)
 				print_fatal_signal(ksig->info.si_signo);
@@ -2860,14 +2870,6 @@ bool get_signal(struct ksignal *ksig)
 			do_coredump(&ksig->info);
 		}
 
-		/*
-		 * PF_IO_WORKER threads will catch and exit on fatal signals
-		 * themselves. They have cleanup that must be performed, so
-		 * we cannot call do_exit() on their behalf.
-		 */
-		if (current->flags & PF_IO_WORKER)
-			goto out;
-
 		/*
 		 * Death signals, no core dump.
 		 */
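
The behavioral change above boils down to one predicate: while zapping threads for a coredump, zap_process() now skips io_uring workers (PF_IO_WORKER) in addition to threads already past the dump (PF_POSTCOREDUMP), and the exit/signal paths bail out for such workers before any coredump handling. The toy program below is a minimal, self-contained sketch of that flag test only; struct toy_task and the flag values are made-up stand-ins, not the kernel's struct task_struct or its real PF_* definitions.

	/*
	 * Toy illustration of the check added to zap_process(): a thread is
	 * only sent SIGKILL if it is neither the dumping thread nor marked
	 * PF_POSTCOREDUMP / PF_IO_WORKER.  Flag values are placeholders.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define PF_POSTCOREDUMP 0x00000001	/* placeholder, not the kernel value */
	#define PF_IO_WORKER    0x00000002	/* placeholder, not the kernel value */

	struct toy_task {
		const char  *comm;
		unsigned int flags;
	};

	/* Mirrors the condition in the fs/coredump.c hunk. */
	static bool should_zap(const struct toy_task *t, const struct toy_task *current)
	{
		return t != current && !(t->flags & (PF_POSTCOREDUMP | PF_IO_WORKER));
	}

	int main(void)
	{
		struct toy_task current = { "dumper",    0 };
		struct toy_task worker  = { "iou-wrk-1", PF_IO_WORKER };
		struct toy_task other   = { "sibling",   0 };

		printf("%s: %s\n", worker.comm, should_zap(&worker, &current) ? "SIGKILL" : "skip");
		printf("%s: %s\n", other.comm,  should_zap(&other,  &current) ? "SIGKILL" : "skip");
		return 0;
	}

Run against the two sample tasks, the io_uring worker is skipped while the ordinary sibling thread still has SIGKILL queued, which is exactly the distinction the fs/coredump.c hunk introduces; the workers instead notice the fatal signal themselves and perform their own cleanup, as the comments added in kernel/signal.c describe.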