Message-ID: <87h6dph8wh.fsf_-_@email.froward.int.ebiederm.org>
Date: Tue, 18 Jun 2024 23:08:14 -0500
From: "Eric W. Biederman" <ebiederm@...ssion.com>
To: Oleg Nesterov <oleg@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, Tejun Heo <tj@...nel.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 07/17] signal: Always set JOBCTL_WILL_EXIT for exiting tasks
This is a semantic change. Previously JOBCTL_WILL_EXIT replaced the
use of sigaddset(&t->pending.signal, SIGKILL) to mark tasks for
termination but was otherwise treated identically. Now, once set,
JOBCTL_WILL_EXIT remains set (except possibly for the thread
performing a coredump).
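
For illustration only, a minimal sketch of the before/after marking.
The helper name mark_task_will_exit() is hypothetical and the siglock
handling is elided; it is not part of this patch:

	/* Purely illustrative sketch; caller holds sighand->siglock. */
	static void mark_task_will_exit(struct task_struct *t)
	{
		/* Old scheme: queue SIGKILL to force the task to exit. */
		/* sigaddset(&t->pending.signal, SIGKILL); */

		/*
		 * New scheme: record the decision in jobctl and, with
		 * this patch, never clear it again.
		 */
		t->jobctl |= JOBCTL_WILL_EXIT;
		signal_wake_up(t, 1);
	}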
Keeping JOBCTL_WILL_EXIT set makes it possible to detect and skip
unnecessary work when the decision has been made for a task to exit.
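
As a sketch of the kind of check this enables (hypothetical caller,
not taken from the series); the task_exit_pending() test used below
in ptrace_stop is one real consumer:

	/* In some slow path: skip optional work for a task that will exit. */
	if (current->jobctl & JOBCTL_WILL_EXIT)
		return;		/* the exit decision is final */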
Getting there involves not clearing JOBCTL_WILL_EXIT when
short-circuited signals are being processed, and setting
JOBCTL_WILL_EXIT on all tasks when SIGNAL_GROUP_EXIT is set.
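
Roughly, "set JOBCTL_WILL_EXIT on all tasks" amounts to the sketch
below. In the actual patch the sibling threads are presumably already
marked by zap_other_threads() (per the earlier patches), and the
explicit assignments added below cover the calling thread; the
function name mark_group_will_exit() is made up for illustration:

	static void mark_group_will_exit(struct task_struct *p)
	{
		struct task_struct *t;

		lockdep_assert_held(&p->sighand->siglock);
		for_each_thread(p, t)
			t->jobctl |= JOBCTL_WILL_EXIT;
	}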
Never clearing JOBCTL_WILL_EXIT requires updating ptrace_stop so that
it continues to stop in PTRACE_EVENT_EXIT.
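
For context, the existing ptrace event encoding that the new check in
ptrace_stop relies on (my reading; nothing here changes it):

	/*
	 * ptrace_event(PTRACE_EVENT_EXIT, code) reports the stop with
	 *	exit_code = (PTRACE_EVENT_EXIT << 8) | SIGTRAP;
	 * so (exit_code >> 8) == PTRACE_EVENT_EXIT picks out the exit
	 * notification and lets it stop even though the task is already
	 * marked to exit.
	 */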
Set JOBCTL_WILL_EXIT in coredump_finish so that it winds up set on
the coredumping thread. It was initially set in zap_process when all
of the tasks of the process were asked to exit, and then cleared in
zap_threads so that the coredump thread could still be killed.
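
Put together, my reading of the lifecycle for the dumping thread is:

	/*
	 * zap_process():      every thread gets JOBCTL_WILL_EXIT
	 * zap_threads():      the flag is cleared on the dumping thread
	 *                     so the dump itself can still be killed
	 * coredump_finish():  current->jobctl |= JOBCTL_WILL_EXIT  (this patch)
	 */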
Signed-off-by: "Eric W. Biederman" <ebiederm@...ssion.com>
---
fs/coredump.c | 1 +
kernel/exit.c | 2 ++
kernel/signal.c | 20 ++++++++++++--------
3 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/fs/coredump.c b/fs/coredump.c
index bec9e290802a..c8b057724bf6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -443,6 +443,7 @@ static void coredump_finish(bool core_dumped)
current->signal->group_exit_code |= 0x80;
next = current->signal->core_state->dumper.next;
current->signal->core_state = NULL;
+ current->jobctl |= JOBCTL_WILL_EXIT;
spin_unlock_irq(&current->sighand->siglock);
while ((curr = next) != NULL) {
diff --git a/kernel/exit.c b/kernel/exit.c
index 08de33740b9c..0059c60946a3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -801,6 +801,7 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
struct signal_struct *signal = tsk->signal;
spin_lock_irq(&sighand->siglock);
+ tsk->jobctl |= JOBCTL_WILL_EXIT;
signal->quick_threads--;
if ((signal->quick_threads == 0) &&
!(signal->flags & SIGNAL_GROUP_EXIT)) {
@@ -1012,6 +1013,7 @@ do_group_exit(int exit_code)
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
+ current->jobctl |= JOBCTL_WILL_EXIT;
}
spin_unlock_irq(&sighand->siglock);
}
diff --git a/kernel/signal.c b/kernel/signal.c
index ea7753b31be7..d169b47775b0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2253,15 +2253,19 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
spin_lock_irq(&current->sighand->siglock);
}
- /*
- * After this point ptrace_signal_wake_up or signal_wake_up
- * will clear TASK_TRACED if ptrace_unlink happens or a fatal
- * signal comes in. Handle previous ptrace_unlinks and fatal
- * signals here to prevent ptrace_stop sleeping in schedule.
- */
- if (!current->ptrace || __fatal_signal_pending(current))
+ /* Do not stop if ptrace_unlink has happened. */
+ if (!current->ptrace)
+ return exit_code;
+
+ /* Do not stop in a killed task except for PTRACE_EVENT_EXIT */
+ if (task_exit_pending(current) &&
+ ((exit_code >> 8) != PTRACE_EVENT_EXIT))
return exit_code;
+ /*
+ * After this point ptrace_unlink or a fatal signal will clear
+ * TASK_TRACED preventing ptrace_stop from sleeping.
+ */
set_special_state(TASK_TRACED);
current->jobctl |= JOBCTL_TRACED;
@@ -2746,7 +2750,6 @@ bool get_signal(struct ksignal *ksig)
if ((signal->flags & SIGNAL_GROUP_EXIT) ||
signal->group_exec_task) {
signr = SIGKILL;
- current->jobctl &= ~JOBCTL_WILL_EXIT;
trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
&sighand->action[SIGKILL-1]);
recalc_sigpending();
@@ -2889,6 +2892,7 @@ bool get_signal(struct ksignal *ksig)
signal->group_exit_code = exit_code;
signal->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
+ current->jobctl |= JOBCTL_WILL_EXIT;
}
fatal:
spin_unlock_irq(&sighand->siglock);
--
2.41.0