Message-Id: <20250625031101.12555-8-boqun.feng@gmail.com>
Date: Tue, 24 Jun 2025 20:11:00 -0700
From: Boqun Feng <boqun.feng@...il.com>
To: linux-kernel@...r.kernel.org,
rcu@...r.kernel.org,
lkmm@...ts.linux.dev
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Will Deacon <will@...nel.org>,
Boqun Feng <boqun.feng@...il.com>,
Waiman Long <longman@...hat.com>,
Davidlohr Bueso <dave@...olabs.net>,
"Paul E. McKenney" <paulmck@...nel.org>,
Josh Triplett <josh@...htriplett.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joelagnelf@...dia.com>,
Uladzislau Rezki <urezki@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang@...ux.dev>,
Breno Leitao <leitao@...ian.org>,
aeh@...a.com,
netdev@...r.kernel.org,
edumazet@...gle.com,
jhs@...atatu.com,
kernel-team@...a.com,
Erik Lundgren <elundgren@...a.com>
Subject: [PATCH 7/8] rcuscale: Add tests for simple hazard pointers

Add two rcu_scale_ops covering simple hazard pointers (shazptr): one with
evenly distributed readers (each reader uses its own per-CPU key) and one
with all readers using the WILDCARD key. Together these show the best-case
and worst-case scenarios for the synchronization time of simple hazard
pointers.
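
As with the existing flavors, the new ops are selected by name via the
rcuscale scale_type module parameter, e.g. rcuscale.scale_type=shazptr or
rcuscale.scale_type=shazptr_wildcard.
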
Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
kernel/rcu/rcuscale.c | 52 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index 45413a73d61e..357431bf802b 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -32,6 +32,7 @@
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
+#include <linux/shazptr.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
@@ -429,6 +430,54 @@ static struct rcu_scale_ops tasks_tracing_ops = {
#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
+static int shazptr_scale_read_lock(void)
+{
+ long cpu = raw_smp_processor_id();
+
+ /* Use cpu + 1 as the key to avoid a NULL key on CPU 0. */
+ guard(shazptr)((void *)(cpu + 1));
+
+ return 0;
+}
+
+static int shazptr_scale_wc_read_lock(void)
+{
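+ /* The wildcard key blocks synchronize_shazptr() for every key. */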
+ guard(shazptr)(SHAZPTR_WILDCARD);
+
+ return 0;
+}
+
+static void shazptr_scale_read_unlock(int idx)
+{
+ /* Do nothing; this is fine since readers do back-to-back lock+unlock. */
+}
+
+static void shazptr_scale_sync(void)
+{
+ long cpu = raw_smp_processor_id();
+
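+ /* Wait for readers that acquired this CPU's key (cpu + 1). */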
+ synchronize_shazptr((void *)(cpu + 1));
+}
+
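+/* Readers use per-CPU keys: the best case for synchronization time. */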
+static struct rcu_scale_ops shazptr_ops = {
+ .ptype = RCU_FLAVOR,
+ .readlock = shazptr_scale_read_lock,
+ .readunlock = shazptr_scale_read_unlock,
+ .sync = shazptr_scale_sync,
+ .exp_sync = shazptr_scale_sync,
+ .name = "shazptr"
+};
+
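+/* All readers use SHAZPTR_WILDCARD: the worst case for synchronization time. */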
+static struct rcu_scale_ops shazptr_wc_ops = {
+ .ptype = RCU_FLAVOR,
+ .readlock = shazptr_scale_wc_read_lock,
+ .readunlock = shazptr_scale_read_unlock,
+ .sync = shazptr_scale_sync,
+ .exp_sync = shazptr_scale_sync,
+ .name = "shazptr_wildcard"
+};
+
static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
if (!cur_ops->gp_diff)
@@ -1090,7 +1139,8 @@ rcu_scale_init(void)
long i;
long j;
static struct rcu_scale_ops *scale_ops[] = {
- &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
+ &rcu_ops, &srcu_ops, &srcud_ops, &shazptr_ops, &shazptr_wc_ops,
+ TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
};
if (!torture_init_begin(scale_type, verbose))
--
2.39.5 (Apple Git-154)