Message-ID: <87plsdh8xt.fsf_-_@email.froward.int.ebiederm.org>
Date: Tue, 18 Jun 2024 23:07:26 -0500
From: "Eric W. Biederman" <ebiederm@...ssion.com>
To: Oleg Nesterov <oleg@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, Tejun Heo <tj@...nel.org>,
linux-kernel@...r.kernel.org
Subject: [PATCH 06/17] signal: Add JOBCTL_WILL_EXIT to mark exiting tasks

Mark tasks that need to exit with JOBCTL_WILL_EXIT instead of reusing
the per thread SIGKILL.

A jobctl flag was chosen for this purpose because jobctl changes are
protected by siglock, and updates are already careful not to change or
clear other bits in jobctl.  Protection by a lock when changing the
value is necessary because JOBCTL_WILL_EXIT will not be limited to
being set by the current task.  That task->jobctl is protected by
siglock is also convenient, as siglock is already held everywhere I
want to set or clear JOBCTL_WILL_EXIT.
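
For illustration only (not part of this patch), the set side always
follows the pattern below; mark_task_will_exit is a hypothetical name
for the two lines the patch open-codes at each call site:

/*
 * Hypothetical helper, shown only to illustrate the locking rule:
 * JOBCTL_WILL_EXIT is set only with siglock held, and the target
 * task is then woken so it sees the pending exit.
 */
static void mark_task_will_exit(struct task_struct *t)
{
	lockdep_assert_held(&t->sighand->siglock);

	t->jobctl |= JOBCTL_WILL_EXIT;
	signal_wake_up(t, 1);
}

The clear side in zap_threads() and get_signal() follows the same
rule, with siglock held.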

Instead of having __fatal_signal_pending test JOBCTL_WILL_EXIT
directly, add a more accurately named helper, task_exit_pending, that
tests JOBCTL_WILL_EXIT, and have __fatal_signal_pending call it.
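
Existing callers need no changes.  A typical killable wait
(illustrative only; wait_for_thing() is a hypothetical caller, not
something in the tree) keeps using fatal_signal_pending(), which now
resolves through task_exit_pending() instead of testing the per
thread SIGKILL bit:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* Illustrative caller only, not part of this patch. */
static int wait_for_thing(struct completion *done)
{
	while (!try_wait_for_completion(done)) {
		if (fatal_signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible(HZ / 10);
	}
	return 0;
}
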
Signed-off-by: "Eric W. Biederman" <ebiederm@...ssion.com>
---
 fs/coredump.c                |  4 ++--
 include/linux/sched/jobctl.h |  2 ++
 include/linux/sched/signal.h |  7 ++++++-
 kernel/signal.c              | 11 ++++++-----
 4 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/fs/coredump.c b/fs/coredump.c
index be0405346882..bec9e290802a 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -373,7 +373,7 @@ static int zap_process(struct task_struct *start, int exit_code)
for_each_thread(start, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
if (!(t->flags & PF_POSTCOREDUMP)) {
- sigaddset(&t->pending.signal, SIGKILL);
+ t->jobctl |= JOBCTL_WILL_EXIT;
signal_wake_up(t, 1);
}
nr += (t != current) && !(t->flags & PF_POSTCOREDUMP);
@@ -396,7 +396,7 @@ static int zap_threads(struct task_struct *tsk,
/* Allow SIGKILL, see prepare_signal() */
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
- sigdelset(&tsk->pending.signal, SIGKILL);
+ tsk->jobctl &= ~JOBCTL_WILL_EXIT;
tsk->flags |= PF_DUMPCORE;
}
spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index 68876d0a7ef9..2e840f2db746 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -23,6 +23,7 @@ struct task_struct;
#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
+#define JOBCTL_WILL_EXIT_BIT 31 /* task will exit */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -36,6 +37,7 @@ struct task_struct;
#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
+#define JOBCTL_WILL_EXIT (1UL << JOBCTL_WILL_EXIT_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0a0e23c45406..33bf363a3354 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -393,9 +393,14 @@ static inline int signal_pending(struct task_struct *p)
return task_sigpending(p);
}
+static inline bool task_exit_pending(struct task_struct *p)
+{
+ return unlikely(READ_ONCE(p->jobctl) & JOBCTL_WILL_EXIT);
+}
+
static inline int __fatal_signal_pending(struct task_struct *p)
{
- return unlikely(sigismember(&p->pending.signal, SIGKILL));
+ return task_exit_pending(p);
}
static inline int fatal_signal_pending(struct task_struct *p)
diff --git a/kernel/signal.c b/kernel/signal.c
index 269ec88f650d..ea7753b31be7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -156,7 +156,8 @@ static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
static bool recalc_sigpending_tsk(struct task_struct *t)
{
- if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
+ if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE |
+ JOBCTL_WILL_EXIT)) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked) ||
cgroup_task_frozen(t)) {
@@ -910,7 +911,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
if (signal->core_state && (sig == SIGKILL)) {
struct task_struct *dumper =
signal->core_state->dumper.task;
- sigaddset(&dumper->pending.signal, SIGKILL);
+ dumper->jobctl |= JOBCTL_WILL_EXIT;
signal_wake_up(dumper, 1);
}
/*
@@ -1054,7 +1055,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
signal->group_stop_count = 0;
__for_each_thread(signal, t) {
task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
- sigaddset(&t->pending.signal, SIGKILL);
+ t->jobctl |= JOBCTL_WILL_EXIT;
signal_wake_up(t, 1);
}
return;
@@ -1383,7 +1384,7 @@ int zap_other_threads(struct task_struct *p)
/* Don't bother with already dead threads */
if (t->exit_state)
continue;
- sigaddset(&t->pending.signal, SIGKILL);
+ t->jobctl |= JOBCTL_WILL_EXIT;
signal_wake_up(t, 1);
}
@@ -2745,7 +2746,7 @@ bool get_signal(struct ksignal *ksig)
if ((signal->flags & SIGNAL_GROUP_EXIT) ||
signal->group_exec_task) {
signr = SIGKILL;
- sigdelset(&current->pending.signal, SIGKILL);
+ current->jobctl &= ~JOBCTL_WILL_EXIT;
trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
&sighand->action[SIGKILL-1]);
recalc_sigpending();
--
2.41.0