Message-ID: <20250414060055.341516-8-boqun.feng@gmail.com>
Date: Sun, 13 Apr 2025 23:00:54 -0700
From: Boqun Feng <boqun.feng@...il.com>
To: Breno Leitao <leitao@...ian.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>
Cc: aeh@...a.com,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org,
edumazet@...gle.com,
jhs@...atatu.com,
kernel-team@...a.com,
Erik Lundgren <elundgren@...a.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Uladzislau Rezki <urezki@...il.com>,
rcu@...r.kernel.org,
Boqun Feng <boqun.feng@...il.com>
Subject: [RFC PATCH 7/8] rcuscale: Add tests for simple hazard pointers
Add two rcu_scale_ops to include tests for simple hazard pointers
(shazptr): one with evenly distributed readers (each reader uses its
own per-CPU key) and the other with all WILDCARD readers. These cover
the best-case and worst-case scenarios for the synchronization time of
simple hazard pointers, since a per-CPU key only blocks the matching
synchronize_shazptr() while a WILDCARD reader blocks them all.
Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
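A sketch of how the new ops can be selected at run time, via the
scale_type parameter handled in rcu_scale_init() below (the
nreaders/nwriters values here are illustrative):

  modprobe rcuscale scale_type=shazptr nreaders=16 nwriters=2
  modprobe rcuscale scale_type=shazptr_wildcard nreaders=16 nwriters=2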
kernel/rcu/rcuscale.c | 53 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index d9bff4b1928b..cab42bcc1d26 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -32,6 +32,7 @@
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
+#include <linux/shazptr.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
@@ -429,6 +430,55 @@ static struct rcu_scale_ops tasks_tracing_ops = {
#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
+static int shazptr_scale_read_lock(void)
+{
+ long cpu = raw_smp_processor_id();
+
+ /* Use cpu + 1 as the key; 0 would be NULL, which is not a valid key. */
+ guard(shazptr)((void *)(cpu + 1));
+
+ return 0;
+}
+
+static int shazptr_scale_wc_read_lock(void)
+{
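+ /* Wildcard readers block every synchronize_shazptr(): the worst case. */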
+ guard(shazptr)(SHAZPTR_WILDCARD);
+
+ return 0;
+}
+
+static void shazptr_scale_read_unlock(int idx)
+{
+ /* Do nothing; it's OK since readers do back-to-back lock+unlock. */
+}
+
+static void shazptr_scale_sync(void)
+{
+ long cpu = raw_smp_processor_id();
+
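+ /* Wait until no reader holds cpu + 1 (or the wildcard) as its key. */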
+ synchronize_shazptr((void *)(cpu + 1));
+}
+
+static struct rcu_scale_ops shazptr_ops = {
+ .ptype = RCU_FLAVOR,
+ .readlock = shazptr_scale_read_lock,
+ .readunlock = shazptr_scale_read_unlock,
+ .sync = shazptr_scale_sync,
+ .exp_sync = shazptr_scale_sync,
+ .name = "shazptr"
+};
+
+static struct rcu_scale_ops shazptr_wc_ops = {
+ .ptype = RCU_FLAVOR,
+ .readlock = shazptr_scale_wc_read_lock,
+ .readunlock = shazptr_scale_read_unlock,
+ .sync = shazptr_scale_sync,
+ .exp_sync = shazptr_scale_sync,
+ .name = "shazptr_wildcard"
+};
+
static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
if (!cur_ops->gp_diff)
@@ -1090,7 +1140,8 @@
long i;
long j;
static struct rcu_scale_ops *scale_ops[] = {
- &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
+ &rcu_ops, &srcu_ops, &srcud_ops, &shazptr_ops, &shazptr_wc_ops,
+ TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
};
if (!torture_init_begin(scale_type, verbose))
--
2.47.1