lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <87cyodfu62.fsf_-_@email.froward.int.ebiederm.org>
Date: Tue, 18 Jun 2024 23:11:49 -0500
From: "Eric W. Biederman" <ebiederm@...ssion.com>
To: Oleg Nesterov <oleg@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,  Tejun Heo <tj@...nel.org>,
  linux-kernel@...r.kernel.org
Subject: [PATCH 14/17] signal: Factor out schedule_group_exit_locked


Essentially the same code is present in zap_process, complete_signal,
do_group_exit and get_signal.  Place that code in a function and call
it schedule_group_exit_locked.  Call schedule_group_exit_locked instead of
repeating the sequence inline 4 different times.

Signed-off-by: "Eric W. Biederman" <ebiederm@...ssion.com>
---
 fs/coredump.c                |  8 ++------
 include/linux/sched/signal.h |  1 +
 kernel/exit.c                | 11 ++--------
 kernel/signal.c              | 40 ++++++++++++++++++------------------
 4 files changed, 25 insertions(+), 35 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index dce91e5c121a..aba72f1d170a 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -366,14 +366,10 @@ static int zap_process(struct task_struct *start, int exit_code)
 	struct task_struct *t;
 	int nr = 0;
 
-	start->signal->flags = SIGNAL_GROUP_EXIT;
-	start->signal->group_exit_code = exit_code;
-	start->signal->group_stop_count = 0;
+	schedule_group_exit_locked(start->signal, exit_code);
 
-	for_each_thread(start, t) {
-		schedule_task_exit_locked(t);
+	for_each_thread(start, t)
 		nr += (t != current) && !(t->flags & PF_POSTCOREDUMP);
-	}
 
 	return nr;
 }
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 250bd537aa6a..54b2b924aaea 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -462,6 +462,7 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 }
 
 void schedule_task_exit_locked(struct task_struct *task);
+void schedule_group_exit_locked(struct signal_struct *signal, int exit_code);
 
 void task_join_group_stop(struct task_struct *task);
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 471af82376e5..35452e822cc9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1009,15 +1009,8 @@ do_group_exit(int exit_code)
 		if (sig->flags & SIGNAL_GROUP_EXIT)
 			/* Another thread got here before we took the lock.  */
 			exit_code = sig->group_exit_code;
-		else {
-			struct task_struct *t;
-
-			sig->group_exit_code = exit_code;
-			sig->flags = SIGNAL_GROUP_EXIT;
-			sig->group_stop_count = 0;
-			__for_each_thread(sig, t)
-				schedule_task_exit_locked(t);
-		}
+		else
+			schedule_group_exit_locked(sig, exit_code);
 		spin_unlock_irq(&sighand->siglock);
 	}
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 8ae6d6550e82..fe1d46b00e9f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -999,6 +999,23 @@ static inline bool wants_signal(int sig, struct task_struct *p)
 	return task_curr(p) || !task_sigpending(p);
 }
 
+void schedule_group_exit_locked(struct signal_struct *signal, int exit_code)
+{
+	/*
+	 * Start a group exit and wake everybody up.
+	 * This way we don't have other threads
+	 * running and doing things after a slower
+	 * thread has the fatal signal pending.
+	 */
+	struct task_struct *t;
+
+	signal->flags = SIGNAL_GROUP_EXIT;
+	signal->group_exit_code = exit_code;
+	signal->group_stop_count = 0;
+	__for_each_thread(signal, t)
+		schedule_task_exit_locked(t);
+}
+
 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 {
 	struct signal_struct *signal = p->signal;
@@ -1046,18 +1063,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 		 * This signal will be fatal to the whole group.
 		 */
 		if (!sig_kernel_coredump(sig)) {
-			/*
-			 * Start a group exit and wake everybody up.
-			 * This way we don't have other threads
-			 * running and doing things after a slower
-			 * thread has the fatal signal pending.
-			 */
-			signal->flags = SIGNAL_GROUP_EXIT;
-			signal->group_exit_code = sig;
-			signal->group_stop_count = 0;
-			__for_each_thread(signal, t) {
-				schedule_task_exit_locked(t);
-			}
+			schedule_group_exit_locked(signal, sig);
 			return;
 		}
 	}
@@ -2727,7 +2733,6 @@ bool get_signal(struct ksignal *ksig)
 	for (;;) {
 		bool group_exit_needed = false;
 		struct k_sigaction *ka;
-		struct task_struct *t;
 		enum pid_type type;
 		int exit_code;
 
@@ -2872,13 +2877,8 @@ bool get_signal(struct ksignal *ksig)
 		exit_code = signr;
 		if (sig_kernel_coredump(signr))
 			group_exit_needed = true;
-		else {
-			signal->group_exit_code = exit_code;
-			signal->flags = SIGNAL_GROUP_EXIT;
-			signal->group_stop_count = 0;
-			__for_each_thread(signal, t)
-				schedule_task_exit_locked(t);
-		}
+		else
+			schedule_group_exit_locked(signal, exit_code);
 	fatal:
 		spin_unlock_irq(&sighand->siglock);
 		if (unlikely(cgroup_task_frozen(current)))
-- 
2.41.0


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ