Message-ID: <87tthpfu8v.fsf_-_@email.froward.int.ebiederm.org>
Date: Tue, 18 Jun 2024 23:10:08 -0500
From: "Eric W. Biederman" <ebiederm@...ssion.com>
To: Oleg Nesterov <oleg@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,  Tejun Heo <tj@...nel.org>,
  linux-kernel@...r.kernel.org
Subject: [PATCH 11/17] signal: Make individual tasks exiting a first-class
 concept


Add a helper, schedule_task_exit_locked, that is equivalent to
asynchronously calling exit(2), except that it does not take an exit
code.
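
For example, a call site that already holds siglock (as
synchronize_group_exit in the diff below does) schedules an exit with:

	/* Sketch of a call site; the helper may target any task. */
	spin_lock_irq(&task->sighand->siglock);
	schedule_task_exit_locked(task);
	spin_unlock_irq(&task->sighand->siglock);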

This is a generalization of what happens in do_exit, de_thread,
zap_process, prepare_signal, complete_signal, and zap_other_threads
when individual tasks are asked to shut down.
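
Each of those sites open-codes the same steps, which the helper now
centralizes (shown here with explanatory comments added; the caller
must hold sighand->siglock):

	void schedule_task_exit_locked(struct task_struct *task)
	{
		/* Skip tasks that already have an exit pending. */
		if (!(task->jobctl & JOBCTL_WILL_EXIT)) {
			/* A pending exit supersedes stop and trap work. */
			task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
			task->jobctl |= JOBCTL_WILL_EXIT;
			/* Wake the task, even if stopped, so it sees the exit. */
			signal_wake_up(task, true);
		}
	}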

While schedule_task_exit_locked is a generalization of what happens
in prepare_signal, I do not change prepare_signal to use it when
delivering SIGKILL to a coredumping process. This keeps all of the
special handling of signal delivery to a coredumping process confined
to prepare_signal and the coredump code itself.

Signed-off-by: "Eric W. Biederman" <ebiederm@...ssion.com>
---
 fs/coredump.c                |  6 +-----
 include/linux/sched/signal.h |  2 ++
 kernel/exit.c                |  5 +----
 kernel/signal.c              | 25 +++++++++++--------------
 4 files changed, 15 insertions(+), 23 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index bcef41ec69a9..dce91e5c121a 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -371,11 +371,7 @@ static int zap_process(struct task_struct *start, int exit_code)
 	start->signal->group_stop_count = 0;
 
 	for_each_thread(start, t) {
-		if (!(t->jobctl & JOBCTL_WILL_EXIT)) {
-			task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-			t->jobctl |= JOBCTL_WILL_EXIT;
-			signal_wake_up(t, 1);
-		}
+		schedule_task_exit_locked(t);
 		nr += (t != current) && !(t->flags & PF_POSTCOREDUMP);
 	}
 
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 33bf363a3354..48b67162b38c 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -462,6 +462,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 	signal_wake_up_state(t, state);
 }
 
+void schedule_task_exit_locked(struct task_struct *task);
+
 void task_join_group_stop(struct task_struct *task);
 
 #ifdef TIF_RESTORE_SIGMASK
diff --git a/kernel/exit.c b/kernel/exit.c
index 73eb3afbf083..902af3beb7cc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -801,10 +801,7 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 	struct signal_struct *signal = tsk->signal;
 
 	spin_lock_irq(&sighand->siglock);
-	if (!(tsk->jobctl & JOBCTL_WILL_EXIT)) {
-		task_clear_jobctl_pending(tsk, JOBCTL_PENDING_MASK);
-		tsk->jobctl |= JOBCTL_WILL_EXIT;
-	}
+	schedule_task_exit_locked(tsk);
 	signal->quick_threads--;
 	if ((signal->quick_threads == 0) &&
 	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
diff --git a/kernel/signal.c b/kernel/signal.c
index 341717c6cc97..ac4ac3aeda0a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1056,11 +1056,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 			signal->group_exit_code = sig;
 			signal->group_stop_count = 0;
 			__for_each_thread(signal, t) {
-				if (!(t->jobctl & JOBCTL_WILL_EXIT)) {
-					task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-					t->jobctl |= JOBCTL_WILL_EXIT;
-					signal_wake_up(t, 1);
-				}
+				schedule_task_exit_locked(t);
 			}
 			return;
 		}
@@ -1371,6 +1367,15 @@ int force_sig_info(struct kernel_siginfo *info)
 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
 }
 
+void schedule_task_exit_locked(struct task_struct *task)
+{
+	if (!(task->jobctl & JOBCTL_WILL_EXIT)) {
+		task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
+		task->jobctl |= JOBCTL_WILL_EXIT;
+		signal_wake_up(task, true);
+	}
+}
+
 /*
  * Nuke all other threads in the group.
  */
@@ -1383,16 +1388,8 @@ int zap_other_threads(struct task_struct *p)
 
 	for_other_threads(p, t) {
 		count++;
-
-		/* Only bother with threads that might be alive */
-		if (t->jobctl & JOBCTL_WILL_EXIT)
-			continue;
-
-		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-		t->jobctl |= JOBCTL_WILL_EXIT;
-		signal_wake_up(t, 1);
+		schedule_task_exit_locked(t);
 	}
-
 	return count;
 }
 
-- 
2.41.0

