Message-ID:
<GV2PPF74270EBEE3A1663E566B8F640EC59E430A@GV2PPF74270EBEE.EURP195.PROD.OUTLOOK.COM>
Date: Tue, 19 Aug 2025 20:53:44 +0200
From: Bernd Edlinger <bernd.edlinger@...mail.de>
To: Kees Cook <kees@...nel.org>
Cc: Alexander Viro <viro@...iv.linux.org.uk>,
Alexey Dobriyan <adobriyan@...il.com>, Oleg Nesterov <oleg@...hat.com>,
Andy Lutomirski <luto@...capital.net>, Will Drewry <wad@...omium.org>,
Christian Brauner <brauner@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>, Michal Hocko <mhocko@...e.com>,
Serge Hallyn <serge@...lyn.com>, James Morris
<jamorris@...ux.microsoft.com>, Randy Dunlap <rdunlap@...radead.org>,
Suren Baghdasaryan <surenb@...gle.com>, Yafang Shao <laoar.shao@...il.com>,
Helge Deller <deller@....de>, "Eric W. Biederman" <ebiederm@...ssion.com>,
Adrian Reber <areber@...hat.com>, Thomas Gleixner <tglx@...utronix.de>,
Jens Axboe <axboe@...nel.dk>, Alexei Starovoitov <ast@...nel.org>,
"linux-fsdevel@...r.kernel.org" <linux-fsdevel@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
linux-kselftest@...r.kernel.org, linux-mm@...ck.org,
tiozhang <tiozhang@...iglobal.com>, Luis Chamberlain <mcgrof@...nel.org>,
"Paulo Alcantara (SUSE)" <pc@...guebit.com>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Frederic Weisbecker <frederic@...nel.org>, YueHaibing
<yuehaibing@...wei.com>, Paul Moore <paul@...l-moore.com>,
Aleksa Sarai <cyphar@...har.com>, Stefan Roesch <shr@...kernel.io>,
Chao Yu <chao@...nel.org>, xu xin <xu.xin16@....com.cn>,
Jeff Layton <jlayton@...nel.org>, Jan Kara <jack@...e.cz>,
David Hildenbrand <david@...hat.com>, Dave Chinner <dchinner@...hat.com>,
Shuah Khan <shuah@...nel.org>, Elena Reshetova <elena.reshetova@...el.com>,
David Windsor <dwindsor@...il.com>, Mateusz Guzik <mjguzik@...il.com>,
Ard Biesheuvel <ardb@...nel.org>,
"Joel Fernandes (Google)" <joel@...lfernandes.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Hans Liljestrand <ishkamiel@...il.com>
Subject: Re: [PATCH v16] exec: Fix dead-lock in de_thread with ptrace_attach
Hi Kees,
thanks for your response.
On 8/19/25 06:36, Kees Cook wrote:
> On Mon, Aug 18, 2025 at 10:53:43PM +0200, Bernd Edlinger wrote:
>> This introduces signal->exec_bprm, which is used to
>> fix the case when at least one of the sibling threads
>> is traced, and therefore the trace process may dead-lock
>> in ptrace_attach, but de_thread will need to wait for the
>> tracer to continue execution.
>>
[...]
>>
>> -static int de_thread(struct task_struct *tsk)
>> +static int de_thread(struct task_struct *tsk, struct linux_binprm *bprm)
>> {
>> struct signal_struct *sig = tsk->signal;
>> struct sighand_struct *oldsighand = tsk->sighand;
>> spinlock_t *lock = &oldsighand->siglock;
>> + struct task_struct *t;
>> + bool unsafe_execve_in_progress = false;
>>
>> if (thread_group_empty(tsk))
>> goto no_thread_group;
>> @@ -932,6 +934,19 @@ static int de_thread(struct task_struct *tsk)
>> if (!thread_group_leader(tsk))
>> sig->notify_count--;
>>
>> + for_other_threads(tsk, t) {
>> + if (unlikely(t->ptrace)
>> + && (t != tsk->group_leader || !t->exit_state))
>> + unsafe_execve_in_progress = true;
>> + }
>> +
>> + if (unlikely(unsafe_execve_in_progress)) {
>> + spin_unlock_irq(lock);
>> + sig->exec_bprm = bprm;
>> + mutex_unlock(&sig->cred_guard_mutex);
>> + spin_lock_irq(lock);
>> + }
>> +
>
> cred_guard_mutex has a comment about it being deprecated and shouldn't
> be used in "new code"... Regardless, what cred_guard_mutex is trying to
> protect is changing to credentials.
>
> But what we want to stop is having new threads appear, which
> spin_lock_irq(lock) should stop, yes?
>
Yes, but after the fatal signal has been sent to all threads, releasing
sighand->siglock is okay, since no new threads can be cloned any more:
kernel/fork.c checks for pending fatal signals after acquiring the
siglock and before adding another thread to the list:
spin_lock(&current->sighand->siglock);
rv_task_fork(p);
rseq_fork(p, clone_flags);
/* Don't start children in a dying pid namespace */
if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
retval = -ENOMEM;
goto bad_fork_core_free;
}
/* Let kill terminate clone/fork in the middle */
if (fatal_signal_pending(current)) {
retval = -EINTR;
goto bad_fork_core_free;
}
/* No more failure paths after this point. */
>> while (sig->notify_count) {
>> __set_current_state(TASK_KILLABLE);
>> spin_unlock_irq(lock);
>> @@ -1021,6 +1036,11 @@ static int de_thread(struct task_struct *tsk)
>> release_task(leader);
>> }
>>
>> + if (unlikely(unsafe_execve_in_progress)) {
>> + mutex_lock(&sig->cred_guard_mutex);
>> + sig->exec_bprm = NULL;
>> + }
>> +
>> sig->group_exec_task = NULL;
>> sig->notify_count = 0;
>>
>> @@ -1032,6 +1052,11 @@ static int de_thread(struct task_struct *tsk)
>> return 0;
>>
>> killed:
>> + if (unlikely(unsafe_execve_in_progress)) {
>> + mutex_lock(&sig->cred_guard_mutex);
>> + sig->exec_bprm = NULL;
>> + }
>
> I think we need to document that cred_guard_mutex now protects
> sig->exec_bprm.
>
Maybe. I thought it would be clear from this description:
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -237,9 +237,27 @@ struct signal_struct {
struct mm_struct *oom_mm; /* recorded mm when the thread group got
* killed by the oom killer */
+ struct linux_binprm *exec_bprm; /* Used to check ptrace_may_access
+ * against new credentials while
+ * de_thread is waiting for other
+ * traced threads to terminate.
+ * Set while de_thread is executing.
+ * The cred_guard_mutex is released
+ * after de_thread() has called
+ * zap_other_threads(), therefore
+ * a fatal signal is guaranteed to be
+ * already pending in the unlikely
+ * event, that
+ * current->signal->exec_bprm happens
+ * to be non-zero after the
+ * cred_guard_mutex was acquired.
+ */
+
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace)
+ * Held while execve runs, except when
+ * a sibling thread is being traced.
* Deprecated do not use in new code.
* Use exec_update_lock instead.
*/
>> +
>> /* protects against exit_notify() and __exit_signal() */
>> read_lock(&tasklist_lock);
>> sig->group_exec_task = NULL;
>> @@ -1114,13 +1139,31 @@ int begin_new_exec(struct linux_binprm * bprm)
>> */
>> trace_sched_prepare_exec(current, bprm);
>>
>> + /* If the binary is not readable then enforce mm->dumpable=0 */
>> + would_dump(bprm, bprm->file);
>> + if (bprm->have_execfd)
>> + would_dump(bprm, bprm->executable);
>> +
>> + /*
>> + * Figure out dumpability. Note that this checking only of current
>> + * is wrong, but userspace depends on it. This should be testing
>> + * bprm->secureexec instead.
>> + */
>> + if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
>> + is_dumpability_changed(current_cred(), bprm->cred) ||
>> + !(uid_eq(current_euid(), current_uid()) &&
>> + gid_eq(current_egid(), current_gid())))
>> + set_dumpable(bprm->mm, suid_dumpable);
>> + else
>> + set_dumpable(bprm->mm, SUID_DUMP_USER);
>> +
>
> Why is this move needed? While it's writing to bprm, I see it's reading
> from "current". Is this safe to do before de_thread() has happened?
> Can't a sibling thread manipulate things here? What lock am I missing?
>
The credentials should be protected by the cred_guard_mutex here, and/or
by current->signal->exec_bprm in case there are ptraced threads.
I need this earlier because the outcome of ptrace_may_access for the
new credentials depends on the dumpability.
So when de_thread has to release the mutex, a ptrace_attach is possible
without deadlock, but the tracer's access rights must be sufficient to
access both the current and the new credentials: the tracer will either
receive the stop event immediately after the execve succeeds, with the
new credentials in effect, or, if the execve fails, the stop event will
see the old process with the old credentials dying from a fatal signal.
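For reference, the dependency on dumpability comes from the mm check at
the end of __ptrace_may_access() in kernel/ptrace.c, which is roughly
(quoted from memory, not part of this patch):

	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

so whatever set_dumpable() decided on bprm->mm before de_thread()
determines whether the tracer's PTRACE_MODE_ATTACH_REALCREDS check
against the new credentials can succeed.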
>> /*
>> * Ensure all future errors are fatal.
>> */
>> bprm->point_of_no_return = true;
>>
>> /* Make this the only thread in the thread group */
>> - retval = de_thread(me);
>> + retval = de_thread(me, bprm);
>> if (retval)
>> goto out;
>> /* see the comment in check_unsafe_exec() */
>> @@ -1144,11 +1187,6 @@ int begin_new_exec(struct linux_binprm * bprm)
>> if (retval)
>> goto out;
>>
>> - /* If the binary is not readable then enforce mm->dumpable=0 */
>> - would_dump(bprm, bprm->file);
>> - if (bprm->have_execfd)
>> - would_dump(bprm, bprm->executable);
>> -
>> /*
>> * Release all of the old mmap stuff
>> */
>> @@ -1210,18 +1248,6 @@ int begin_new_exec(struct linux_binprm * bprm)
>>
>> me->sas_ss_sp = me->sas_ss_size = 0;
>>
>> - /*
>> - * Figure out dumpability. Note that this checking only of current
>> - * is wrong, but userspace depends on it. This should be testing
>> - * bprm->secureexec instead.
>> - */
>> - if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
>> - !(uid_eq(current_euid(), current_uid()) &&
>> - gid_eq(current_egid(), current_gid())))
>> - set_dumpable(current->mm, suid_dumpable);
>> - else
>> - set_dumpable(current->mm, SUID_DUMP_USER);
>> -
>> perf_event_exec();
>>
>> /*
>> @@ -1361,6 +1387,11 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
>> if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
>> return -ERESTARTNOINTR;
>>
>> + if (unlikely(current->signal->exec_bprm)) {
>> + mutex_unlock(&current->signal->cred_guard_mutex);
>> + return -ERESTARTNOINTR;
>> + }
>> +
>> bprm->cred = prepare_exec_creds();
>> if (likely(bprm->cred))
>> return 0;
[...]
>> diff --git a/kernel/cred.c b/kernel/cred.c
>> index 9676965c0981..0b2822c762df 100644
>> --- a/kernel/cred.c
>> +++ b/kernel/cred.c
>> @@ -375,6 +375,30 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
>> return false;
>> }
>>
>> +/**
>> + * is_dumpability_changed - Will changing creds affect dumpability?
>> + * @old: The old credentials.
>> + * @new: The new credentials.
>> + *
>> + * If the @new credentials have no elevated privileges compared to the
>> + * @old credentials, the task may remain dumpable. Otherwise we have
>> + * to mark the task as undumpable to avoid information leaks from higher
>> + * to lower privilege domains.
>> + *
>> + * Return: True if the task will become undumpable.
>> + */
>> +bool is_dumpability_changed(const struct cred *old, const struct cred *new)
>
> This should be "static", I think?
>
No, because I also want to use this function in the code moved into
begin_new_exec, to take the expected outcome of commit_creds into account
before de_thread, so that the result of ptrace_may_access is the same as
if the new credentials had already been committed.
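(The prototype then also needs to be visible to fs/exec.c, i.e. a
declaration roughly like

	bool is_dumpability_changed(const struct cred *old, const struct cred *new);

in include/linux/cred.h or wherever the other cred helpers are declared;
that part is not in the hunks quoted here.)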
>> +{
>> + if (!uid_eq(old->euid, new->euid) ||
>> + !gid_eq(old->egid, new->egid) ||
>> + !uid_eq(old->fsuid, new->fsuid) ||
>> + !gid_eq(old->fsgid, new->fsgid) ||
>> + !cred_cap_issubset(old, new))
>> + return true;
>> +
>> + return false;
>> +}
>> +
>> /**
>> * commit_creds - Install new credentials upon the current task
>> * @new: The credentials to be assigned
>> @@ -403,11 +427,7 @@ int commit_creds(struct cred *new)
>> get_cred(new); /* we will require a ref for the subj creds too */
>>
>> /* dumpability changes */
>> - if (!uid_eq(old->euid, new->euid) ||
>> - !gid_eq(old->egid, new->egid) ||
>> - !uid_eq(old->fsuid, new->fsuid) ||
>> - !gid_eq(old->fsgid, new->fsgid) ||
>> - !cred_cap_issubset(old, new)) {
>> + if (is_dumpability_changed(old, new)) {
>> if (task->mm)
>> set_dumpable(task->mm, suid_dumpable);
>> task->pdeath_signal = 0;
>> diff --git a/kernel/ptrace.c b/kernel/ptrace.c
>> index 75a84efad40f..deacdf133f8b 100644
>> --- a/kernel/ptrace.c
>> +++ b/kernel/ptrace.c
>> @@ -20,6 +20,7 @@
>> #include <linux/pagemap.h>
>> #include <linux/ptrace.h>
>> #include <linux/security.h>
>> +#include <linux/binfmts.h>
>> #include <linux/signal.h>
>> #include <linux/uio.h>
>> #include <linux/audit.h>
>> @@ -453,6 +454,27 @@ static int ptrace_attach(struct task_struct *task, long request,
>> return retval;
>> }
>>
>> + if (unlikely(task->in_execve)) {
>
> Urgh, we're trying to get rid of this bit too.
> https://lore.kernel.org/all/72da7003-a115-4162-b235-53cd3da8a90e@I-love.SAKURA.ne.jp/
>
> Can we find a better indicator?
>
Hmm, yes, I agree.
I am starting to think that task->signal->exec_bprm would indeed be a better indicator:
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -454,7 +454,7 @@ static int ptrace_attach(struct task_struct *task, long request,
return retval;
}
- if (unlikely(task->in_execve)) {
+ if (unlikely(task->signal->exec_bprm)) {
retval = down_write_killable(&task->signal->exec_update_lock);
if (retval)
return retval;
So I will add this to the next patch version.
>> + retval = down_write_killable(&task->signal->exec_update_lock);
>> + if (retval)
>> + return retval;
>> +
>> + scoped_guard (task_lock, task) {
>> + struct linux_binprm *bprm = task->signal->exec_bprm;
>> + const struct cred __rcu *old_cred = task->real_cred;
>> + struct mm_struct *old_mm = task->mm;
>> + rcu_assign_pointer(task->real_cred, bprm->cred);
>> + task->mm = bprm->mm;
>> + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
>> + rcu_assign_pointer(task->real_cred, old_cred);
>> + task->mm = old_mm;
>> + }
>> +
>> + up_write(&task->signal->exec_update_lock);
>> + if (retval)
>> + return retval;
>> + }
>> +
>> scoped_guard (write_lock_irq, &tasklist_lock) {
>> if (unlikely(task->exit_state))
>> return -EPERM;
>> @@ -488,6 +510,14 @@ static int ptrace_traceme(void)
>> {
>> int ret = -EPERM;
>>
>> + if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
>> + return -ERESTARTNOINTR;
>> +
>> + if (unlikely(current->signal->exec_bprm)) {
>> + mutex_unlock(&current->signal->cred_guard_mutex);
>> + return -ERESTARTNOINTR;
>> + }
>
> I mention this hunk below...
>
>> +
>> write_lock_irq(&tasklist_lock);
>> /* Are we already being traced? */
>> if (!current->ptrace) {
>> @@ -503,6 +533,7 @@ static int ptrace_traceme(void)
>> }
>> }
>> write_unlock_irq(&tasklist_lock);
>> + mutex_unlock(&current->signal->cred_guard_mutex);
>>
>> return ret;
>> }
>> diff --git a/kernel/seccomp.c b/kernel/seccomp.c
>> index 41aa761c7738..d61fc275235a 100644
>> --- a/kernel/seccomp.c
>> +++ b/kernel/seccomp.c
>> @@ -1994,9 +1994,15 @@ static long seccomp_set_mode_filter(unsigned int flags,
>> * Make sure we cannot change seccomp or nnp state via TSYNC
>> * while another thread is in the middle of calling exec.
>> */
>> - if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
>> - mutex_lock_killable(&current->signal->cred_guard_mutex))
>> - goto out_put_fd;
>> + if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
>> + if (mutex_lock_killable(&current->signal->cred_guard_mutex))
>> + goto out_put_fd;
>> +
>> + if (unlikely(current->signal->exec_bprm)) {
>> + mutex_unlock(&current->signal->cred_guard_mutex);
>> + goto out_put_fd;
>> + }
>
> This updated test and the hunk noted above are _almost_ identical
> (interruptible vs killable). Could a helper with a descriptive name be
> used here instead? (And does the former hunk need interruptible, or
> could it use killable?) I'd just like to avoid having repeated dependent
> logic created ("we have to get the lock AND check for exec_bprm") when
> something better named than "lock_if_not_racing_exec(...)" could be
> used.
>
My problem here is that we have pre-existing code using
mutex_lock_killable(&current->signal->cred_guard_mutex)
and other pre-existing code using mutex_lock_interruptible. I would not
want to change that, since there are probably good reasons why one
ignores interrupts while the other does not.
So currently I do not think that a helper function covering the two
locking variants would really make the code easier to understand...
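Just to illustrate, a helper along the lines you suggest might look
something like this (name invented here, not part of the patch):

	/*
	 * Sketch only: take cred_guard_mutex, but back off if another
	 * thread is already past the point of no return in execve.
	 */
	static inline int lock_cred_guard_killable(struct signal_struct *sig)
	{
		if (mutex_lock_killable(&sig->cred_guard_mutex))
			return -EINTR;
		if (unlikely(sig->exec_bprm)) {
			mutex_unlock(&sig->cred_guard_mutex);
			return -ERESTARTNOINTR;
		}
		return 0;
	}

but the interruptible caller would still need its own variant, so I am
not sure it would make the code easier to follow.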
>> + }
>>
>> spin_lock_irq(&current->sighand->siglock);
>>
>> diff --git a/tools/testing/selftests/ptrace/vmaccess.c b/tools/testing/selftests/ptrace/vmaccess.c
>> index 4db327b44586..5d4a65eb5a8d 100644
>> --- a/tools/testing/selftests/ptrace/vmaccess.c
>> +++ b/tools/testing/selftests/ptrace/vmaccess.c
>> @@ -14,6 +14,7 @@
>> #include <signal.h>
>> #include <unistd.h>
>> #include <sys/ptrace.h>
>> +#include <sys/syscall.h>
>>
>> static void *thread(void *arg)
>> {
>> @@ -23,7 +24,7 @@ static void *thread(void *arg)
>>
>> TEST(vmaccess)
>> {
>> - int f, pid = fork();
>> + int s, f, pid = fork();
>> char mm[64];
>>
>> if (!pid) {
>> @@ -31,19 +32,42 @@ TEST(vmaccess)
>>
>> pthread_create(&pt, NULL, thread, NULL);
>> pthread_join(pt, NULL);
>> - execlp("true", "true", NULL);
>> + execlp("false", "false", NULL);
>> + return;
>> }
>>
>> sleep(1);
>> sprintf(mm, "/proc/%d/mem", pid);
>> + /* deadlock did happen here */
>> f = open(mm, O_RDONLY);
>> ASSERT_GE(f, 0);
>> close(f);
>> - f = kill(pid, SIGCONT);
>> - ASSERT_EQ(f, 0);
>> + f = waitpid(-1, &s, WNOHANG);
>> + ASSERT_NE(f, -1);
>> + ASSERT_NE(f, 0);
>> + ASSERT_NE(f, pid);
>> + ASSERT_EQ(WIFEXITED(s), 1);
>> + ASSERT_EQ(WEXITSTATUS(s), 0);
>> + f = waitpid(-1, &s, 0);
>> + ASSERT_EQ(f, pid);
>> + ASSERT_EQ(WIFEXITED(s), 1);
>> + ASSERT_EQ(WEXITSTATUS(s), 1);
>> + f = waitpid(-1, NULL, 0);
>> + ASSERT_EQ(f, -1);
>> + ASSERT_EQ(errno, ECHILD);
>> }
>>
>> -TEST(attach)
>> +/*
>> + * Same test as previous, except that
>> + * we try to ptrace the group leader,
>> + * which is about to call execve,
>> + * when the other thread is already ptraced.
>> + * This exercises the code in de_thread
>> + * where it is waiting inside the
>> + * while (sig->notify_count) {
>> + * loop.
>> + */
>> +TEST(attach1)
>> {
>> int s, k, pid = fork();
>>
>> @@ -52,19 +76,76 @@ TEST(attach)
>>
>> pthread_create(&pt, NULL, thread, NULL);
>> pthread_join(pt, NULL);
>> - execlp("sleep", "sleep", "2", NULL);
>> + execlp("false", "false", NULL);
>> + return;
>> }
>>
>> sleep(1);
>> + /* deadlock may happen here */
>> k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
>> - ASSERT_EQ(errno, EAGAIN);
>> - ASSERT_EQ(k, -1);
>> + ASSERT_EQ(k, 0);
>> k = waitpid(-1, &s, WNOHANG);
>> ASSERT_NE(k, -1);
>> ASSERT_NE(k, 0);
>> ASSERT_NE(k, pid);
>> ASSERT_EQ(WIFEXITED(s), 1);
>> ASSERT_EQ(WEXITSTATUS(s), 0);
>> + k = waitpid(-1, &s, 0);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFSTOPPED(s), 1);
>> + ASSERT_EQ(WSTOPSIG(s), SIGTRAP);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + k = ptrace(PTRACE_CONT, pid, 0L, 0L);
>> + ASSERT_EQ(k, 0);
>> + k = waitpid(-1, &s, 0);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFSTOPPED(s), 1);
>> + ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + k = ptrace(PTRACE_CONT, pid, 0L, 0L);
>> + ASSERT_EQ(k, 0);
>> + k = waitpid(-1, &s, 0);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFEXITED(s), 1);
>> + ASSERT_EQ(WEXITSTATUS(s), 1);
>> + k = waitpid(-1, NULL, 0);
>> + ASSERT_EQ(k, -1);
>> + ASSERT_EQ(errno, ECHILD);
>> +}
>> +
>> +/*
>> + * Same test as previous, except that
>> + * the group leader is ptraced first,
>> + * but this time with PTRACE_O_TRACEEXIT,
>> + * and the thread that does execve is
>> + * not yet ptraced. This exercises the
>> + * code block in de_thread where the
>> + * if (!thread_group_leader(tsk)) {
>> + * is executed and enters a wait state.
>> + */
>> +static long thread2_tid;
>> +static void *thread2(void *arg)
>> +{
>> + thread2_tid = syscall(__NR_gettid);
>> + sleep(2);
>> + execlp("false", "false", NULL);
>> + return NULL;
>> +}
>> +
>> +TEST(attach2)
>> +{
>> + int s, k, pid = fork();
>> +
>> + if (!pid) {
>> + pthread_t pt;
>> +
>> + pthread_create(&pt, NULL, thread2, NULL);
>> + pthread_join(pt, NULL);
>> + return;
>> + }
>> +
>> sleep(1);
>> k = ptrace(PTRACE_ATTACH, pid, 0L, 0L);
>> ASSERT_EQ(k, 0);
>> @@ -72,12 +153,46 @@ TEST(attach)
>> ASSERT_EQ(k, pid);
>> ASSERT_EQ(WIFSTOPPED(s), 1);
>> ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
>> - k = ptrace(PTRACE_DETACH, pid, 0L, 0L);
>> + k = ptrace(PTRACE_SETOPTIONS, pid, 0L, PTRACE_O_TRACEEXIT);
>> + ASSERT_EQ(k, 0);
>> + thread2_tid = ptrace(PTRACE_PEEKDATA, pid, &thread2_tid, 0L);
>> + ASSERT_NE(thread2_tid, -1);
>> + ASSERT_NE(thread2_tid, 0);
>> + ASSERT_NE(thread2_tid, pid);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + sleep(2);
>> + /* deadlock may happen here */
>> + k = ptrace(PTRACE_ATTACH, thread2_tid, 0L, 0L);
>> + ASSERT_EQ(k, 0);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFSTOPPED(s), 1);
>> + ASSERT_EQ(WSTOPSIG(s), SIGTRAP);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + k = ptrace(PTRACE_CONT, pid, 0L, 0L);
>> + ASSERT_EQ(k, 0);
>> + k = waitpid(-1, &s, 0);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFSTOPPED(s), 1);
>> + ASSERT_EQ(WSTOPSIG(s), SIGTRAP);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + k = ptrace(PTRACE_CONT, pid, 0L, 0L);
>> + ASSERT_EQ(k, 0);
>> + k = waitpid(-1, &s, 0);
>> + ASSERT_EQ(k, pid);
>> + ASSERT_EQ(WIFSTOPPED(s), 1);
>> + ASSERT_EQ(WSTOPSIG(s), SIGSTOP);
>> + k = waitpid(-1, &s, WNOHANG);
>> + ASSERT_EQ(k, 0);
>> + k = ptrace(PTRACE_CONT, pid, 0L, 0L);
>> ASSERT_EQ(k, 0);
>> k = waitpid(-1, &s, 0);
>> ASSERT_EQ(k, pid);
>> ASSERT_EQ(WIFEXITED(s), 1);
>> - ASSERT_EQ(WEXITSTATUS(s), 0);
>> + ASSERT_EQ(WEXITSTATUS(s), 1);
>> k = waitpid(-1, NULL, 0);
>> ASSERT_EQ(k, -1);
>> ASSERT_EQ(errno, ECHILD);
>
> Thank you for adding tests! This will be a nice deadlock to get fixed.
>
> -Kees
>
Thank you for your review. If you would like me to add some more comments
and have suggestions on what to write and where, I would be happy to add
that in the next patch version.
Thanks
Bernd.