[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20231023153405.GA4639@redhat.com>
Date: Mon, 23 Oct 2023 17:34:05 +0200
From: Oleg Nesterov <oleg@...hat.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Eric W. Biederman" <ebiederm@...ssion.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/2] do_io_accounting: use sig->stats_lock
Use sig->stats_lock rather than lock_task_sighand(); sig->stats_lock was
specifically designed for this type of use.
This way the "if (whole)" branch runs lockless in the likely case.
Signed-off-by: Oleg Nesterov <oleg@...hat.com>
---
fs/proc/base.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0a39412332e2..ad4afa73b25b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2977,7 +2977,6 @@ static const struct file_operations proc_coredump_filter_operations = {
static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
{
struct task_io_accounting acct;
- unsigned long flags;
int result;
result = down_read_killable(&task->signal->exec_update_lock);
@@ -2989,15 +2988,24 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
goto out_unlock;
}
- if (whole && lock_task_sighand(task, &flags)) {
+ if (whole) {
struct signal_struct *sig = task->signal;
struct task_struct *t;
+ unsigned int seq = 1;
+ unsigned long flags;
+
+ rcu_read_lock();
+ do {
+ seq++; /* 2 on the 1st/lockless path, otherwise odd */
+ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
- acct = sig->ioac;
- __for_each_thread(sig, t)
- task_io_accounting_add(&acct, &t->ioac);
+ acct = sig->ioac;
+ __for_each_thread(sig, t)
+ task_io_accounting_add(&acct, &t->ioac);
- unlock_task_sighand(task, &flags);
+ } while (need_seqretry(&sig->stats_lock, seq));
+ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+ rcu_read_unlock();
} else {
acct = task->ioac;
}
--
2.25.1.362.g51ebf55
Powered by blists - more mailing lists