Date:   Mon,  3 Jan 2022 15:33:11 -0600
From:   "Eric W. Biederman" <ebiederm@...ssion.com>
To:     linux-kernel@...r.kernel.org
Cc:     linux-arch@...r.kernel.org,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Oleg Nesterov <oleg@...hat.com>,
        Al Viro <viro@...IV.linux.org.uk>,
        Kees Cook <keescook@...omium.org>, linux-api@...r.kernel.org,
        "Eric W. Biederman" <ebiederm@...ssion.com>
Subject: [PATCH 16/17] signal: Record the exit_code when an exit is scheduled

With ptrace_stop no longer using task->exit_code, it is safe
to set task->exit_code when an exit is scheduled.

Use the bit JOBCTL_WILL_EXIT to detect when an exit is first scheduled
and only set exit_code the first time.  Only use the code provided
to do_exit if the task has not yet been scheduled to exit.

In get_signal and do_group_exit, when JOBCTL_WILL_EXIT is set read the
recorded exit_code from current->exit_code, instead of assuming
exit_code will always be 0.

Signed-off-by: "Eric W. Biederman" <ebiederm@...ssion.com>
---
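For reviewers: a minimal userspace sketch of the "record the exit code
once" behavior this patch introduces.  The fake_task struct, fake_do_exit()
and the JOBCTL_WILL_EXIT value below are simplified stand-ins for
illustration only; they are not the kernel's data structures or the exact
code in the diff.

/* Sketch: the first scheduled exit records the code; later attempts,
 * including do_exit() itself, see the already-recorded value. */
#include <pthread.h>
#include <stdio.h>

#define JOBCTL_WILL_EXIT (1UL << 0)     /* placeholder bit for the sketch */

struct fake_task {
        pthread_mutex_t lock;           /* stands in for sighand->siglock */
        unsigned long jobctl;
        int exit_code;
};

/* First caller wins: later callers leave exit_code untouched. */
static void sketch_schedule_task_exit_locked(struct fake_task *t, int exit_code)
{
        if (!(t->jobctl & JOBCTL_WILL_EXIT)) {
                t->jobctl |= JOBCTL_WILL_EXIT;
                t->exit_code = exit_code;
                /* the real helper also clears pending jobctl and wakes the task */
        }
}

/* Mirrors the new do_exit() ordering: record under the lock, then read
 * back the authoritative code before continuing with the exit path. */
static int fake_do_exit(struct fake_task *t, int code)
{
        pthread_mutex_lock(&t->lock);
        sketch_schedule_task_exit_locked(t, code);
        code = t->exit_code;            /* an earlier scheduled exit wins */
        pthread_mutex_unlock(&t->lock);
        return code;
}

int main(void)
{
        struct fake_task t = { .lock = PTHREAD_MUTEX_INITIALIZER };

        /* e.g. a fatal signal scheduled this task's exit first */
        pthread_mutex_lock(&t.lock);
        sketch_schedule_task_exit_locked(&t, 9);
        pthread_mutex_unlock(&t.lock);

        /* do_exit() later runs with a different code, but the recorded
         * one sticks. */
        printf("exit code: %d\n", fake_do_exit(&t, 0));   /* prints 9 */
        return 0;
}
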
 fs/coredump.c                |  2 +-
 fs/exec.c                    |  2 +-
 include/linux/sched/signal.h |  2 +-
 kernel/exit.c                | 12 ++++++++----
 kernel/signal.c              |  7 ++++---
 5 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index 4e82ee51633d..c54b502bf648 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -357,7 +357,7 @@ static int zap_process(struct task_struct *start, int exit_code)
 	start->signal->group_stop_count = 0;
 
 	for_each_thread(start, t) {
-		schedule_task_exit_locked(t);
+		schedule_task_exit_locked(t, exit_code);
 		if (t != current && !(t->flags & PF_POSTCOREDUMP))
 			nr++;
 	}
diff --git a/fs/exec.c b/fs/exec.c
index b9f646fddc51..3203605e54cb 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1062,7 +1062,7 @@ static int de_thread(struct task_struct *tsk)
 		if (t == tsk)
 			continue;
 		sig->notify_count++;
-		schedule_task_exit_locked(t);
+		schedule_task_exit_locked(t, 0);
 	}
 
 	while (sig->notify_count) {
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 989bb665f107..e8034ecaee84 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -426,7 +426,7 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 }
 
-void schedule_task_exit_locked(struct task_struct *task);
+void schedule_task_exit_locked(struct task_struct *task, int exit_code);
 
 void task_join_group_stop(struct task_struct *task);
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 7a7a0cbac28e..e95500e2d27c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -735,6 +735,11 @@ void __noreturn do_exit(long code)
 	struct task_struct *tsk = current;
 	int group_dead;
 
+	spin_lock_irq(&tsk->sighand->siglock);
+	schedule_task_exit_locked(tsk, code);
+	code = tsk->exit_code;
+	spin_unlock_irq(&tsk->sighand->siglock);
+
 	WARN_ON(blk_needs_flush_plug(tsk));
 
 	kcov_task_exit(tsk);
@@ -773,7 +778,6 @@ void __noreturn do_exit(long code)
 		tty_audit_exit();
 	audit_free(tsk);
 
-	tsk->exit_code = code;
 	taskstats_exit(tsk, group_dead);
 
 	exit_mm();
@@ -907,7 +911,7 @@ do_group_exit(int exit_code)
 	if (sig->flags & SIGNAL_GROUP_EXIT)
 		exit_code = sig->group_exit_code;
 	else if (current->jobctl & JOBCTL_WILL_EXIT)
-		exit_code = 0;
+		exit_code = current->exit_code;
 	else if (!thread_group_empty(current)) {
 		struct sighand_struct *const sighand = current->sighand;
 
@@ -916,7 +920,7 @@ do_group_exit(int exit_code)
 			/* Another thread got here before we took the lock.  */
 			exit_code = sig->group_exit_code;
 		else if (current->jobctl & JOBCTL_WILL_EXIT)
-			exit_code = 0;
+			exit_code = current->exit_code;
 		else {
 			struct task_struct *t;
 
@@ -926,7 +930,7 @@ do_group_exit(int exit_code)
 			__for_each_thread(sig, t) {
 				if (t == current)
 					continue;
-				schedule_task_exit_locked(t);
+				schedule_task_exit_locked(t, exit_code);
 			}
 		}
 		spin_unlock_irq(&sighand->siglock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 6179e34ce666..e8fac8a3c935 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1057,7 +1057,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 			signal->group_stop_count = 0;
 			t = p;
 			do {
-				schedule_task_exit_locked(t);
+				schedule_task_exit_locked(t, sig);
 			} while_each_thread(p, t);
 			return;
 		}
@@ -1362,11 +1362,12 @@ int force_sig_info(struct kernel_siginfo *info)
 	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
 }
 
-void schedule_task_exit_locked(struct task_struct *task)
+void schedule_task_exit_locked(struct task_struct *task, int exit_code)
 {
 	if (!(task->jobctl & JOBCTL_WILL_EXIT)) {
 		task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
 		task->jobctl |= JOBCTL_WILL_EXIT;
+		task->exit_code = exit_code;
 		signal_wake_up(task, 1);
 	}
 }
@@ -2703,7 +2704,7 @@ bool get_signal(struct ksignal *ksig)
 			if (signal->flags & SIGNAL_GROUP_EXIT)
 				exit_code = signal->group_exit_code;
 			else {
-				exit_code = 0;
+				exit_code = current->exit_code;
 				group_exit = false;
 			}
 			goto fatal;
-- 
2.29.2
