Message-ID: <20231024132414.GA23757@redhat.com>
Date: Tue, 24 Oct 2023 15:24:14 +0200
From: Oleg Nesterov <oleg@...hat.com>
To:	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>
Cc:	Alexey Gladkov <legion@...nel.org>,
	"Ahmed S. Darwish" <darwi@...utronix.de>,
	Boqun Feng <boqun.feng@...il.com>,
	Jonathan Corbet <corbet@....net>,
	Waiman Long <longman@...hat.com>,
	Will Deacon <will@...nel.org>, linux-kernel@...r.kernel.org,
	linux-doc@...r.kernel.org
Subject: Re: [RFC PATCH 2/2] seqlock: introduce need_seqretry_xxx()

Or perhaps even something like

static inline int xxx(seqlock_t *lock, int *seq, int lockless)
{
	if (lockless) {
		/* first pass: start a lockless read section */
		*seq = read_seqbegin(lock);
		return 1;
	}
	if (*seq & 1) {
		/* odd seq: the locked second pass has completed */
		read_sequnlock_excl(lock);
		return 0;
	}
	if (read_seqretry(lock, *seq)) {
		/* the lockless pass raced with a writer, retry under lock */
		read_seqlock_excl(lock);
		*seq = 1;
		return 1;
	}
	return 0;
}
#define __XXX(lock, seq, lockless) \
	for (int lockless = 1, seq; xxx(lock, &seq, lockless); lockless = 0)

#define XXX(lock) \
	__XXX(lock, __UNIQUE_ID(seq), __UNIQUE_ID(lockless))

? This way one can do

seqlock_t sl;

void func(void)
{
	XXX(&sl) {
		... read-side critical section ...
	}
}

using the single XXX() helper, with no need to declare/initialize seq and no
need to call need_seqretry/done_seqretry.
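
For comparison, a minimal sketch of the open-coded pattern that XXX() would
replace, using the existing read_seqbegin_or_lock/need_seqretry/done_seqretry
API (sl and the loop body here are just placeholders):

	int seq = 0;
	do {
		read_seqbegin_or_lock(&sl, &seq);
		... read-side critical section ...
	} while (need_seqretry(&sl, seq));
	done_seqretry(&sl, seq);
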
What do you think?
Oleg.
On 10/24, Oleg Nesterov wrote:
>
> Not for inclusion, just for discussion...
>
> Modulo naming, do you think the new need_seqretry_xxx() makes sense?
>
> Simpler to use and less error prone. thread_group_cputime() is changed
> as an example.
> ---
>  include/linux/seqlock.h | 10 ++++++++++
>  kernel/sched/cputime.c  |  9 +++------
>  2 files changed, 13 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
> index d0050c889f26..9b3bc4ce3332 100644
> --- a/include/linux/seqlock.h
> +++ b/include/linux/seqlock.h
> @@ -1165,6 +1165,16 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
>  	return !(seq & 1) && read_seqretry(lock, seq);
>  }
>
> +static inline int need_seqretry_xxx(seqlock_t *lock, int *seq)
> +{
> +	int ret = !(*seq & 1) && read_seqretry(lock, *seq);
> +
> +	if (ret)
> +		++*seq; /* make this counter odd */
> +
> +	return ret;
> +}
> +
>  /**
>   * done_seqretry() - end seqlock_t "locking or lockless" reader section
>   * @lock: Pointer to seqlock_t
> diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> index af7952f12e6c..45704a84baec 100644
> --- a/kernel/sched/cputime.c
> +++ b/kernel/sched/cputime.c
> @@ -314,7 +314,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
>  	struct signal_struct *sig = tsk->signal;
>  	u64 utime, stime;
>  	struct task_struct *t;
> -	unsigned int seq, nextseq;
> +	unsigned int seq;
>  	unsigned long flags;
>
>  	/*
> @@ -330,9 +330,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
>
>  	rcu_read_lock();
>  	/* Attempt a lockless read on the first round. */
> -	nextseq = 0;
> +	seq = 0;
>  	do {
> -		seq = nextseq;
>  		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
>  		times->utime = sig->utime;
>  		times->stime = sig->stime;
> @@ -344,9 +343,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
>  			times->stime += stime;
>  			times->sum_exec_runtime += read_sum_exec_runtime(t);
>  		}
> -		/* If lockless access failed, take the lock. */
> -		nextseq = 1;
> -	} while (need_seqretry(&sig->stats_lock, seq));
> +	} while (need_seqretry_xxx(&sig->stats_lock, &seq));
>  	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
>  	rcu_read_unlock();
>  }
> --
> 2.25.1.362.g51ebf55
>