Message-ID: <20240917143402.930114-4-boqun.feng@gmail.com>
Date: Tue, 17 Sep 2024 07:34:01 -0700
From: Boqun Feng <boqun.feng@...il.com>
To: linux-kernel@...r.kernel.org,
rcu@...r.kernel.org,
linux-mm@...ck.org,
lkmm@...r.kernel.org
Cc: "Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Uladzislau Rezki <urezki@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Kent Overstreet <kent.overstreet@...il.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
maged.michael@...il.com
Subject: [RFC PATCH 3/4] refscale: Add benchmarks for percpu_ref

Add benchmarks for percpu_ref to compare its reader-side performance
with that of the other refcounting mechanisms covered by refscale. The
reference is initialized in atomic mode and then switched to percpu
mode, so the benchmark exercises the per-CPU fast path of
percpu_ref_get() and percpu_ref_put().

Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
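For reference, once this is applied the new ops can be selected through
refscale's existing scale_type module parameter. A minimal invocation
sketch (loops and nreaders are refscale's existing parameters; the
values below are arbitrary, just for illustration):

	modprobe refscale scale_type=percpu_ref loops=10000 nreaders=4
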
 kernel/rcu/refscale.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 55 insertions(+), 1 deletion(-)

diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 7e76ae5159e6..97b73c980c5d 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -393,6 +393,60 @@ static struct ref_scale_ops hazptr_ops = {
 	.name = "hazptr"
 };
 
+// Definitions for percpu_ref
+static struct percpu_ref percpu_ref;
+
+// Dummy release function: the scale test never drops the initial reference.
+static void percpu_ref_dummy(struct percpu_ref *ref) {}
+
+static bool percpu_ref_scale_init(void)
+{
+	int ret;
+
+	ret = percpu_ref_init(&percpu_ref, percpu_ref_dummy, PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
+	if (ret)
+		return false;
+
+	// Switch to percpu mode so that readers run on the per-CPU fast path.
+	percpu_ref_switch_to_percpu(&percpu_ref);
+
+	return true;
+}
+
+static void percpu_ref_scale_cleanup(void)
+{
+	percpu_ref_exit(&percpu_ref);
+}
+
+static void percpu_ref_section(const int nloops)
+{
+	int i;
+
+	for (i = nloops; i >= 0; i--) {
+		percpu_ref_get(&percpu_ref);
+		percpu_ref_put(&percpu_ref);
+	}
+}
+
+static void percpu_ref_delay_section(const int nloops, const int udl, const int ndl)
+{
+	int i;
+
+	for (i = nloops; i >= 0; i--) {
+		percpu_ref_get(&percpu_ref);
+		un_delay(udl, ndl);
+		percpu_ref_put(&percpu_ref);
+	}
+}
+
+static struct ref_scale_ops percpu_ref_ops = {
+	.init		= percpu_ref_scale_init,
+	.cleanup	= percpu_ref_scale_cleanup,
+	.readsection	= percpu_ref_section,
+	.delaysection	= percpu_ref_delay_section,
+	.name		= "percpu_ref"
+};
+
 // Definitions for rwlock
 static rwlock_t test_rwlock;
 
@@ -1158,7 +1212,7 @@ ref_scale_init(void)
 	static struct ref_scale_ops *scale_ops[] = {
 		&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
 		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
-		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops, &hazptr_ops,
+		&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops, &hazptr_ops, &percpu_ref_ops,
 	};
 
 	if (!torture_init_begin(scale_type, verbose))
--
2.45.2