Message-ID: <20240609142408.GA11174@redhat.com>
Date: Sun, 9 Jun 2024 16:24:08 +0200
From: Oleg Nesterov <oleg@...hat.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
Tejun Heo <tj@...nel.org>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 1/1] exit: kill signal_struct->quick_threads

do_exit() can simply decrement signal_struct->live earlier and call
synchronize_group_exit() only if group_dead is true. Also, this way
synchronize_group_exit() can avoid spin_lock_irq(siglock) altogether
when SIGNAL_GROUP_EXIT is already set.

The only "nontrivial" user of signal->live is css_task_iter_advance(),
but it should not be affected. With or without this change, the task
in question is the exiting/exited leader which has already passed
cgroup_exit(); atomic_read(signal->live) can race with the sub-threads,
but this is fine.

Signed-off-by: Oleg Nesterov <oleg@...hat.com>
---
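
Not part of the changelog, just for reviewers' convenience: below is a
sketch of how synchronize_group_exit() and the start of do_exit() read
with this patch applied, reconstructed from the hunks below (surrounding
lines and whitespace are approximate):

static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	/* lockless fast path: SIGNAL_GROUP_EXIT was already set */
	if (READ_ONCE(signal->flags) & SIGNAL_GROUP_EXIT)
		return;

	spin_lock_irq(&sighand->siglock);
	if (!(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	/* only the last exiting thread (group_dead) needs to set the flag */
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead)
		synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);
	...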
 include/linux/sched/signal.h |  1 -
 kernel/exit.c                | 12 +++++++-----
 kernel/fork.c                |  2 --
 3 files changed, 7 insertions(+), 8 deletions(-)
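
Also for context (again not part of the changelog): the
css_task_iter_advance() use of signal->live mentioned above is the
"dying leaders w/o live member threads" test in kernel/cgroup/cgroup.c.
Paraphrased and simplified, not quoted verbatim (the in-tree condition
has additional qualifiers):

	task = list_entry(it->task_pos, struct task_struct, cg_list);

	if (it->flags & CSS_TASK_ITER_PROCS) {
		/* if PROCS, skip over tasks which aren't group leaders */
		if (!thread_group_leader(task))
			goto repeat;

		/* and dying leaders w/o live member threads */
		if (!atomic_read(&task->signal->live))
			goto repeat;
	}
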
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0a0e23c45406..f98aae40d7e6 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -95,7 +95,6 @@ struct signal_struct {
 	refcount_t		sigcnt;
 	atomic_t		live;
 	int			nr_threads;
-	int			quick_threads;
 	struct list_head	thread_head;
 
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
diff --git a/kernel/exit.c b/kernel/exit.c
index f95a2c1338a8..11bac37e2bc6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -800,10 +800,11 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 	struct sighand_struct *sighand = tsk->sighand;
 	struct signal_struct *signal = tsk->signal;
 
+	if (READ_ONCE(signal->flags) & SIGNAL_GROUP_EXIT)
+		return;
+
 	spin_lock_irq(&sighand->siglock);
-	signal->quick_threads--;
-	if ((signal->quick_threads == 0) &&
-	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
+	if (!(signal->flags & SIGNAL_GROUP_EXIT)) {
 		signal->flags = SIGNAL_GROUP_EXIT;
 		signal->group_exit_code = code;
 		signal->group_stop_count = 0;
@@ -818,7 +819,9 @@ void __noreturn do_exit(long code)
 
 	WARN_ON(irqs_disabled());
 
-	synchronize_group_exit(tsk, code);
+	group_dead = atomic_dec_and_test(&tsk->signal->live);
+	if (group_dead)
+		synchronize_group_exit(tsk, code);
 
 	WARN_ON(tsk->plug);
 
@@ -833,7 +836,6 @@ void __noreturn do_exit(long code)
 	exit_signals(tsk);  /* sets PF_EXITING */
 
 	acct_update_integrals(tsk);
-	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
 		/*
 		 * If the last thread of global init has exited, panic
diff --git a/kernel/fork.c b/kernel/fork.c
index 99076dbe27d8..4c361d2bdc12 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1861,7 +1861,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 		return -ENOMEM;
 
 	sig->nr_threads = 1;
-	sig->quick_threads = 1;
 	atomic_set(&sig->live, 1);
 	refcount_set(&sig->sigcnt, 1);
 
@@ -2590,7 +2589,6 @@ __latent_entropy struct task_struct *copy_process(
 			__this_cpu_inc(process_counts);
 		} else {
 			current->signal->nr_threads++;
-			current->signal->quick_threads++;
 			atomic_inc(&current->signal->live);
 			refcount_inc(&current->signal->sigcnt);
 			task_join_group_stop(p);
--
2.25.1.362.g51ebf55