Message-ID: <dd9b0591-aea2-4a50-bf4e-276224f15f68@paulmck-laptop>
Date: Thu, 10 Jul 2025 18:03:16 -0700
From: "Paul E. McKenney" <paulmck@...nel.org>
To: Boqun Feng <boqun.feng@...il.com>
Cc: Breno Leitao <leitao@...ian.org>, Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>, aeh@...a.com,
linux-kernel@...r.kernel.org, netdev@...r.kernel.org,
edumazet@...gle.com, jhs@...atatu.com, kernel-team@...a.com,
Erik Lundgren <elundgren@...a.com>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Uladzislau Rezki <urezki@...il.com>, rcu@...r.kernel.org
Subject: Re: [RFC PATCH 7/8] rcuscale: Add tests for simple hazard pointers
On Sun, Apr 13, 2025 at 11:00:54PM -0700, Boqun Feng wrote:
> Add two rcu_scale_ops to cover tests for simple hazard pointers
> (shazptr): one with evenly distributed readers, and the other with
> all WILDCARD readers. These should show the best-case and worst-case
> scenarios for the synchronization time of simple hazard pointers.
>
> Signed-off-by: Boqun Feng <boqun.feng@...il.com>
Cute trick using the CPU number plus one as a stand-in for a pointer. ;-)
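(Presumably the "+ 1" is there so that CPU 0 does not end up publishing a
NULL key, which would otherwise look like an empty hazard-pointer slot.)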
Reviewed-by: Paul E. McKenney <paulmck@...nel.org>
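
For anyone wanting to try these out, the new ops should be selectable in
the usual way through rcuscale's scale_type parameter, for example
(untested, and assuming rcuscale is built as a module):

	modprobe rcuscale scale_type=shazptr
	modprobe rcuscale scale_type=shazptr_wildcard

or via rcuscale.scale_type=shazptr_wildcard on the kernel command line
when rcuscale is built in.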
> ---
> kernel/rcu/rcuscale.c | 52 ++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 51 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
> index d9bff4b1928b..cab42bcc1d26 100644
> --- a/kernel/rcu/rcuscale.c
> +++ b/kernel/rcu/rcuscale.c
> @@ -32,6 +32,7 @@
> #include <linux/freezer.h>
> #include <linux/cpu.h>
> #include <linux/delay.h>
> +#include <linux/shazptr.h>
> #include <linux/stat.h>
> #include <linux/srcu.h>
> #include <linux/slab.h>
> @@ -429,6 +430,54 @@ static struct rcu_scale_ops tasks_tracing_ops = {
>
> #endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU
>
> +static int shazptr_scale_read_lock(void)
> +{
> + long cpu = raw_smp_processor_id();
> +
> + /* Use cpu + 1 as the key */
> + guard(shazptr)((void *)(cpu + 1));
> +
> + return 0;
> +}
> +
> +static int shazptr_scale_wc_read_lock(void)
> +{
> + guard(shazptr)(SHAZPTR_WILDCARD);
> +
> + return 0;
> +}
> +
> +
> +static void shazptr_scale_read_unlock(int idx)
> +{
> + /* Do nothing, it's OK since readers are doing back-to-back lock+unlock */
> +}
> +
> +static void shazptr_scale_sync(void)
> +{
> + long cpu = raw_smp_processor_id();
> +
> + synchronize_shazptr((void *)(cpu + 1));
> +}
> +
> +static struct rcu_scale_ops shazptr_ops = {
> + .ptype = RCU_FLAVOR,
> + .readlock = shazptr_scale_read_lock,
> + .readunlock = shazptr_scale_read_unlock,
> + .sync = shazptr_scale_sync,
> + .exp_sync = shazptr_scale_sync,
> + .name = "shazptr"
> +};
> +
> +static struct rcu_scale_ops shazptr_wc_ops = {
> + .ptype = RCU_FLAVOR,
> + .readlock = shazptr_scale_wc_read_lock,
> + .readunlock = shazptr_scale_read_unlock,
> + .sync = shazptr_scale_sync,
> + .exp_sync = shazptr_scale_sync,
> + .name = "shazptr_wildcard"
> +};
> +
> static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
> {
> if (!cur_ops->gp_diff)
> @@ -1090,7 +1139,8 @@ rcu_scale_init(void)
> long i;
> long j;
> static struct rcu_scale_ops *scale_ops[] = {
> - &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
> + &rcu_ops, &srcu_ops, &srcud_ops, &shazptr_ops, &shazptr_wc_ops,
> + TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
> };
>
> if (!torture_init_begin(scale_type, verbose))
> --
> 2.47.1
>