 fs/exec.c       | 22 ++++++++++++++++++----
 kernel/signal.c |  2 +-
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 06b4c550af5d..e847c0417e34 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1122,11 +1122,25 @@ static int de_thread(struct task_struct *tsk)
 	}
 
 	sig->group_exit_task = tsk;
-	sig->notify_count = zap_other_threads(tsk);
-	if (!thread_group_leader(tsk))
-		sig->notify_count--;
 
-	while (sig->notify_count) {
+	/*
+	 * Zap and wait for other threads to go away.
+	 *
+	 * Note that 'notify_count' is not stable, because
+	 * it also gets modified by zombie threads that
+	 * zap_other_threads() does not count, but we're
+	 * guaranteed to under-count, and at worst that will
+	 * cause us to wake up early and go through the
+	 * loop a few times.
+	 */
+	for (;;) {
+		sig->notify_count = zap_other_threads(tsk);
+		if (!thread_group_leader(tsk))
+			sig->notify_count--;
+		if (!sig->notify_count)
+			break;
+
+		/* sig->notify_count going down to zero will wake us up */
 		__set_current_state(TASK_KILLABLE);
 		spin_unlock_irq(lock);
 		schedule();
diff --git a/kernel/signal.c b/kernel/signal.c
index e58a6c619824..98e5523f792c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1353,11 +1353,11 @@ int zap_other_threads(struct task_struct *p)
 
 	while_each_thread(p, t) {
 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-		count++;
 
 		/* Don't bother with already dead threads */
 		if (t->exit_state)
 			continue;
+		count++;
 		sigaddset(&t->pending.signal, SIGKILL);
 		signal_wake_up(t, 1);
 	}
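
Purely as an illustration, and not part of the patch or of the kernel code: below is a minimal userspace C sketch of the wait pattern the new de_thread() loop adopts, written against pthreads with made-up names (alive[], count_alive(), worker()). The point it tries to show is the one the new comment makes: re-derive the count under the lock on every pass, so a racy or under-counted value can only cost an extra trip through the loop, never a missed wakeup.

/*
 * Userspace sketch only (not kernel code): wait for a set of workers by
 * recomputing the count of live ones each time around, instead of
 * computing it once and trusting later decrements.  All names are
 * invented for the example.
 */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int alive[NWORKERS];		/* 1 while worker i is still running */

/* Rough analogue of zap_other_threads(): count how many we still wait for. */
static int count_alive(void)
{
	int i, n = 0;

	for (i = 0; i < NWORKERS; i++)
		n += alive[i];
	return n;
}

static void *worker(void *arg)
{
	int i = *(int *)arg;

	pthread_mutex_lock(&lock);
	alive[i] = 0;			/* "exit": drop out of the count */
	pthread_cond_signal(&cond);	/* analogue of waking the waiter */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int idx[NWORKERS], i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < NWORKERS; i++)
		alive[i] = 1;
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NWORKERS; i++) {
		idx[i] = i;
		pthread_create(&tid[i], NULL, worker, &idx[i]);
	}

	/*
	 * The pattern from the patch: recount under the lock on every
	 * iteration, break only when the count really is zero, and
	 * otherwise go back to sleep.  A stale or low count just means
	 * one more pass through the loop.
	 */
	pthread_mutex_lock(&lock);
	for (;;) {
		int n = count_alive();

		if (!n)
			break;
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	printf("all workers gone\n");
	return 0;
}

Build with something like "cc -pthread sketch.c" (file name hypothetical). The design choice mirrors the new comment in the patch: under-counting is harmless because it can only make the waiter wake early and take another pass, while recomputing the condition before every sleep keeps the loop from hanging on a stale value.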