Date:   Wed, 16 Nov 2016 12:57:59 +0000
From:   Jamie Iles <jamie.iles@...cle.com>
To:     linux-kernel@...r.kernel.org, oleg@...hat.com
Cc:     Jamie Iles <jamie.iles@...cle.com>,
        Alexander Viro <viro@...iv.linux.org.uk>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH] signal: protect SIGNAL_UNKILLABLE from unintentional clearing.

Since 00cd5c37af (ptrace: permit ptracing of /sbin/init) we can now
trace init processes.  init is initially protected with
SIGNAL_UNKILLABLE, which causes fatal signals and SIGSTOP sent from
userspace to be ignored, but there are a number of paths during tracing
where SIGNAL_UNKILLABLE can be implicitly cleared.

This can result in init becoming stoppable/killable after tracing.  For
example, running:

  while true; do kill -STOP 1; done &
  strace -p 1

and then stopping strace and the kill loop will result in init being
left in state TASK_STOPPED.  Sending SIGCONT to init will resume it, but
init will now respond to future SIGSTOP signals rather than ignoring
them.

Instead of direct assignment to struct signal_struct::flags, provide
accessors that protect SIGNAL_UNKILLABLE.

Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Oleg Nesterov <oleg@...hat.com>
Signed-off-by: Jamie Iles <jamie.iles@...cle.com>
---

The task being left in state TASK_STOPPED could still be seen as slightly
unexpected; I suspect this is due to signals that were queued while the task
was traced but only delivered after detach.  Perhaps signals that would
normally be ignored because of SIGNAL_UNKILLABLE should be flushed on
PTRACE_DETACH?
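
For reference, a minimal sketch of the intended semantics of the new
helpers (illustration only, not part of the patch; it assumes the
signal_set_flags()/signal_clear_flags() and SIGNAL_PROTECTED_MASK
definitions added to include/linux/sched.h below, plus a hypothetical
'sig' pointer to a signal_struct):

  /* Start from a state where the protected bit is set. */
  sig->flags = SIGNAL_UNKILLABLE | SIGNAL_STOP_STOPPED;

  /* Replaces the unprotected bits but preserves SIGNAL_UNKILLABLE. */
  signal_set_flags(sig, SIGNAL_GROUP_EXIT);
  /* sig->flags == (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT) */

  /* Clearing a protected bit is silently ignored... */
  signal_clear_flags(sig, SIGNAL_UNKILLABLE);
  /* sig->flags == (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT) */

  /* ...while unprotected bits are cleared as before. */
  signal_clear_flags(sig, SIGNAL_GROUP_EXIT);
  /* sig->flags == SIGNAL_UNKILLABLE */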

 fs/coredump.c         |  4 ++--
 include/linux/sched.h | 22 +++++++++++++++++++++-
 kernel/exit.c         |  4 ++--
 kernel/signal.c       |  8 ++++----
 4 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/fs/coredump.c b/fs/coredump.c
index eb9c92c9b20f..290ba7e69b28 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -312,7 +312,7 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
 	int nr = 0;
 
 	/* ignore all signals except SIGKILL, see prepare_signal() */
-	start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
+	signal_set_flags(start->signal, SIGNAL_GROUP_COREDUMP | flags);
 	start->signal->group_exit_code = exit_code;
 	start->signal->group_stop_count = 0;
 
@@ -451,7 +451,7 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
 	if (core_dumped && !__fatal_signal_pending(current))
 		current->signal->group_exit_code |= 0x80;
 	current->signal->group_exit_task = NULL;
-	current->signal->flags = SIGNAL_GROUP_EXIT;
+	signal_set_flags(current->signal, SIGNAL_GROUP_EXIT);
 	spin_unlock_irq(&current->sighand->siglock);
 
 	next = mm->core_state->dumper.next;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 348f51b0ec92..26e5f10b338c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -696,7 +696,11 @@ struct signal_struct {
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
-	unsigned int		flags; /* see SIGNAL_* flags below */
+	/*
+	 * see SIGNAL_* flags below.  Set and clear flags with
+	 * signal_set_flags()/signal_clear_flags().
+	 */
+	unsigned int		flags;
 
 	/*
 	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
@@ -829,6 +833,22 @@ struct signal_struct {
 #define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
+/*
+ * Protected flags that should not normally be set/cleared.
+ */
+#define SIGNAL_PROTECTED_MASK	SIGNAL_UNKILLABLE
+
+static inline void signal_set_flags(struct signal_struct *sig,
+				    unsigned int flags)
+{
+	sig->flags = (sig->flags & SIGNAL_PROTECTED_MASK) | flags;
+}
+
+static inline void signal_clear_flags(struct signal_struct *sig,
+				      unsigned int flags)
+{
+	sig->flags &= (~flags | SIGNAL_PROTECTED_MASK);
+}
 
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45ebbe3..89a7f8860efa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -922,7 +922,7 @@ do_group_exit(int exit_code)
 			exit_code = sig->group_exit_code;
 		else {
 			sig->group_exit_code = exit_code;
-			sig->flags = SIGNAL_GROUP_EXIT;
+			signal_set_flags(sig, SIGNAL_GROUP_EXIT);
 			zap_other_threads(current);
 		}
 		spin_unlock_irq(&sighand->siglock);
@@ -1315,7 +1315,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 		return 0;
 	}
 	if (!unlikely(wo->wo_flags & WNOWAIT))
-		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
+		signal_clear_flags(p->signal, SIGNAL_STOP_CONTINUED);
 	uid = from_kuid_munged(current_user_ns(), task_uid(p));
 	spin_unlock_irq(&p->sighand->siglock);
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 75761acc77cf..0394e1205d1e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
 	 * fresh group stop.  Read comment in do_signal_stop() for details.
 	 */
 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-		sig->flags = SIGNAL_STOP_STOPPED;
+		signal_set_flags(sig, SIGNAL_STOP_STOPPED);
 		return true;
 	}
 	return false;
@@ -837,7 +837,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 			 * notify its parent. See get_signal_to_deliver().
 			 */
-			signal->flags = why | SIGNAL_STOP_CONTINUED;
+			signal_set_flags(signal, why | SIGNAL_STOP_CONTINUED);
 			signal->group_stop_count = 0;
 			signal->group_exit_code = 0;
 		}
@@ -922,7 +922,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
 			 * running and doing things after a slower
 			 * thread has the fatal signal pending.
 			 */
-			signal->flags = SIGNAL_GROUP_EXIT;
+			signal_set_flags(signal, SIGNAL_GROUP_EXIT);
 			signal->group_exit_code = sig;
 			signal->group_stop_count = 0;
 			t = p;
@@ -2161,7 +2161,7 @@ int get_signal(struct ksignal *ksig)
 		else
 			why = CLD_STOPPED;
 
-		signal->flags &= ~SIGNAL_CLD_MASK;
+		signal_clear_flags(signal, SIGNAL_CLD_MASK);
 
 		spin_unlock_irq(&sighand->siglock);
 
-- 
2.9.2
