Message-ID: <176104292373.2601451.8226215165137128444.tip-bot2@tip-bot2>
Date: Tue, 21 Oct 2025 10:35:23 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Oleg Nesterov <oleg@...hat.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: locking/core] seqlock: Introduce scoped_seqlock_read()

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     cc39f3872c0865bef992b713338df369554fa9e0
Gitweb:        https://git.kernel.org/tip/cc39f3872c0865bef992b713338df369554fa9e0
Author:        Peter Zijlstra <peterz@...radead.org>
AuthorDate:    Thu, 09 Oct 2025 22:11:54 +02:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Tue, 21 Oct 2025 12:31:57 +02:00

seqlock: Introduce scoped_seqlock_read()
The read_seqbegin/need_seqretry/done_seqretry API is cumbersome and
error prone. With the new helper the "typical" code like

        int seq, nextseq;
        unsigned long flags;

        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&seqlock, &seq);
                // read-side critical section
                nextseq = 1;
        } while (need_seqretry(&seqlock, seq));
        done_seqretry_irqrestore(&seqlock, seq, flags);

can be rewritten as

        scoped_seqlock_read (&seqlock, ss_lock_irqsave) {
                // read-side critical section
        }
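
For example, a reader that needs a consistent snapshot of two fields
(the struct, its members and the function are illustrative only; the
helper and the ss_* states are what this patch adds):

        struct foo {
                seqlock_t seqlock;
                u64 a, b;
        };

        static u64 foo_sum(struct foo *f)
        {
                u64 sum;

                /* Lockless pass first; retried with f->seqlock held. */
                scoped_seqlock_read (&f->seqlock, ss_lock) {
                        sum = f->a + f->b;
                }

                return sum;
        }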
Original idea by Oleg Nesterov; with contributions from Linus.

Originally-by: Oleg Nesterov <oleg@...hat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 include/linux/seqlock.h | 111 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)
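
A note on the ss_lockless mode: with that target the section never takes
lock->lock; __scoped_seqlock_next() in the diff below simply calls
read_seqbegin() again, so the body is retried until it observes a stable
sequence count. A minimal sketch (the seqlock and the variables here are
illustrative, not from this patch):

        unsigned long total;

        scoped_seqlock_read (&stats_seqlock, ss_lockless) {
                total = stats_a + stats_b;
        }
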
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48ea..b7bcc41 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -1209,4 +1209,115 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
         if (seq & 1)
                 read_sequnlock_excl_irqrestore(lock, flags);
 }
+
+enum ss_state {
+        ss_done = 0,
+        ss_lock,
+        ss_lock_irqsave,
+        ss_lockless,
+};
+
+struct ss_tmp {
+        enum ss_state state;
+        unsigned long data;
+        spinlock_t *lock;
+        spinlock_t *lock_irqsave;
+};
+
+static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+        if (sst->lock)
+                spin_unlock(sst->lock);
+        if (sst->lock_irqsave)
+                spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+        switch (sst->state) {
+        case ss_done:
+                __scoped_seqlock_bug();
+                return;
+
+        case ss_lock:
+        case ss_lock_irqsave:
+                sst->state = ss_done;
+                return;
+
+        case ss_lockless:
+                if (!read_seqretry(lock, sst->data)) {
+                        sst->state = ss_done;
+                        return;
+                }
+                break;
+        }
+
+        switch (target) {
+        case ss_done:
+                __scoped_seqlock_invalid_target();
+                return;
+
+        case ss_lock:
+                sst->lock = &lock->lock;
+                spin_lock(sst->lock);
+                sst->state = ss_lock;
+                return;
+
+        case ss_lock_irqsave:
+                sst->lock_irqsave = &lock->lock;
+                spin_lock_irqsave(sst->lock_irqsave, sst->data);
+                sst->state = ss_lock_irqsave;
+                return;
+
+        case ss_lockless:
+                sst->data = read_seqbegin(lock);
+                return;
+        }
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s)                        \
+        for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =         \
+             { .state = ss_lockless, .data = read_seqbegin(_seqlock) };     \
+             _s.state != ss_done;                                           \
+             __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ *                                        section without manual sequence
+ *                                        counter handling or calls to other
+ *                                        helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ *            the type of critical read section
+ *
+ * Example:
+ *
+ *        scoped_seqlock_read (&lock, ss_lock) {
+ *                // read-side critical section
+ *        }
+ *
+ * Starts with a lockless pass. If it fails, the critical section is
+ * restarted with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target) \
+        __scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
 #endif /* __LINUX_SEQLOCK_H */
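
For reference, scoped_seqlock_read (&sl, ss_lock) expands to roughly the
following (hand-expanded sketch; the real macro names the iterator with
__UNIQUE_ID(), shortened here to "ss"):

        for (struct ss_tmp ss __cleanup(__scoped_seqlock_cleanup) =
             { .state = ss_lockless, .data = read_seqbegin(&sl) };
             ss.state != ss_done;
             __scoped_seqlock_next(&ss, &sl, ss_lock)) {
                // read-side critical section
        }

The first pass runs lockless. If read_seqretry() detects a concurrent
writer, __scoped_seqlock_next() takes sl.lock and runs the body once more
under the lock; the __cleanup() handler releases the lock when ss goes out
of scope.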