Message-ID: <20251007142113.GA17118@redhat.com>
Date: Tue, 7 Oct 2025 16:21:13 +0200
From: Oleg Nesterov <oleg@...hat.com>
To: Alexander Viro <viro@...iv.linux.org.uk>,
Boqun Feng <boqun.feng@...il.com>,
David Howells <dhowells@...hat.com>, Ingo Molnar <mingo@...hat.com>,
Li RongQing <lirongqing@...du.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Peter Zijlstra <peterz@...radead.org>,
Waiman Long <longman@...hat.com>, Will Deacon <will@...nel.org>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 1/4] seqlock: introduce scoped_seqlock_read() and
	scoped_seqlock_read_irqsave()

The read_seqbegin/need_seqretry/done_seqretry API is cumbersome and
error-prone. With the new helpers, "typical" code like

	int seq, nextseq;
	unsigned long flags;
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&seqlock, &seq);
		// read-side critical section
		nextseq = 1;
	} while (need_seqretry(&seqlock, seq));
	done_seqretry_irqrestore(&seqlock, seq, flags);

can be rewritten as

	scoped_seqlock_read_irqsave (&seqlock) {
		// read-side critical section
	}
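
The non-irqsave flavour follows the same pattern, it just doesn't disable
interrupts on the locking pass. A minimal sketch of a hypothetical reader
(the dev_lock/dev_major/dev_minor names and get_dev_ids() below are
illustrative only, not part of this patch):

	static DEFINE_SEQLOCK(dev_lock);
	static unsigned int dev_major, dev_minor;

	static void get_dev_ids(unsigned int *major, unsigned int *minor)
	{
		scoped_seqlock_read (&dev_lock) {
			*major = dev_major;
			*minor = dev_minor;
		}
	}
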
Signed-off-by: Oleg Nesterov <oleg@...hat.com>
---
 include/linux/seqlock.h | 61 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48eab7a2a..9012702fd0a8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1209,4 +1209,65 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
 }
+
+/* internal helper for scoped_seqlock_read/scoped_seqlock_read_irqsave */
+static inline int
+scoped_seqlock_read_retry(seqlock_t *lock, int *seq, unsigned long *flags)
+{
+	int retry = 0;
+
+	if (*seq & 1) {
+		if (flags)
+			read_sequnlock_excl_irqrestore(lock, *flags);
+		else
+			read_sequnlock_excl(lock);
+	} else if (read_seqretry(lock, *seq)) {
+		retry = *seq = 1;
+		if (flags)
+			read_seqlock_excl_irqsave(lock, *flags);
+		else
+			read_seqlock_excl(lock);
+	}
+
+	return retry;
+}
+
+#define __scoped_seqlock_read(lock, lockless, seq)			\
+	for (int lockless = 1, seq = read_seqbegin(lock);		\
+	     lockless || scoped_seqlock_read_retry(lock, &seq, NULL);	\
+	     lockless = 0)
+
+/**
+ * scoped_seqlock_read(lock) - execute the read side critical section
+ *			       without manual sequence counter handling
+ *			       or calls to other helpers
+ * @lock: pointer to the seqlock_t protecting the data
+ *
+ * Example:
+ *
+ *	scoped_seqlock_read(&lock) {
+ *		// read-side critical section
+ *	}
+ *
+ * Starts with a lockless pass first. If it fails, restarts the critical
+ * section with the lock held.
+ *
+ * The critical section must not contain control flow that escapes the loop.
+ */
+#define scoped_seqlock_read(lock)	\
+	__scoped_seqlock_read(lock, __UNIQUE_ID(lockless), __UNIQUE_ID(seq))
+
+#define __scoped_seqlock_read_irqsave(lock, s)						\
+	for (struct { int lockless, seq; ulong flags; } s = { 1, read_seqbegin(lock) };	\
+	     s.lockless || scoped_seqlock_read_retry(lock, &s.seq, &s.flags);		\
+	     s.lockless = 0)
+
+/**
+ * scoped_seqlock_read_irqsave(lock) - same as scoped_seqlock_read() but
+ *				       disables irqs on a locking pass
+ * @lock: pointer to the seqlock_t protecting the data
+ */
+#define scoped_seqlock_read_irqsave(lock)	\
+	__scoped_seqlock_read_irqsave(lock, __UNIQUE_ID(s))
+
 #endif /* __LINUX_SEQLOCK_H */
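
For the record, here is roughly what a scoped_seqlock_read() section
executes once the macro is expanded (a hand-expanded sketch with the
__UNIQUE_ID() names spelled out for readability, not the literal
preprocessor output):

	for (int lockless = 1, seq = read_seqbegin(&lock);	/* pass 1: lockless, seq is even */
	     lockless || scoped_seqlock_read_retry(&lock, &seq, NULL);
	     lockless = 0) {
		// read-side critical section
	}

The first iteration always runs locklessly. If read_seqretry() then
detects a concurrent writer, scoped_seqlock_read_retry() sets seq to 1
(odd), takes the lock, and the body runs exactly once more; on the next
evaluation the odd seq makes the helper drop the lock and the loop exits.
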
--
2.25.1.362.g51ebf55