Message-Id: <20221130181325.1012760-6-paulmck@kernel.org>
Date: Wed, 30 Nov 2022 10:13:15 -0800
From: "Paul E. McKenney" <paulmck@...nel.org>
To: rcu@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, kernel-team@...a.com,
rostedt@...dmis.org, Vineeth Pillai <vineeth@...byteword.org>,
Joel Fernandes <joel@...lfernandes.org>,
"Paul E . McKenney" <paulmck@...nel.org>
Subject: [PATCH rcu 06/16] rcu: Shrinker for lazy rcu
From: Vineeth Pillai <vineeth@...byteword.org>
The shrinker is used to speed up the freeing of memory potentially held
by RCU lazy callbacks. RCU kernel module test cases show this to be
effective. The test is introduced in a later patch.
Signed-off-by: Vineeth Pillai <vineeth@...byteword.org>
Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
---
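For reference, the shrinker pattern this patch follows is the usual two-hook
one: ->count_objects() reports an estimate of how much could be reclaimed
(or SHRINK_EMPTY when there is nothing), and ->scan_objects() does the actual
flushing and returns how much it reclaimed (or SHRINK_STOP). The following is
a minimal, hypothetical sketch of that pattern as a standalone module, not
part of this patch: the names (demo_lazy_len, "demo-lazy") are made up, and a
single atomic counter stands in for the per-CPU ->lazy_len counts flushed in
the code below.

/*
 * Minimal sketch of the count_objects()/scan_objects() shrinker pattern.
 * Hypothetical example only; demo_lazy_len stands in for ->lazy_len.
 */
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t demo_lazy_len = ATOMIC_LONG_INIT(0);

static unsigned long
demo_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long count = atomic_long_read(&demo_lazy_len);

        /* Report how many objects could be freed, if any. */
        return count ? count : SHRINK_EMPTY;
}

static unsigned long
demo_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        /* Flush everything that was lazily held and report the count. */
        unsigned long count = atomic_long_xchg(&demo_lazy_len, 0);

        return count ? count : SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
        .count_objects = demo_shrink_count,
        .scan_objects  = demo_shrink_scan,
        .batch         = 0,
        .seeks         = DEFAULT_SEEKS,
};

static int __init demo_shrinker_init(void)
{
        /* The second argument names the shrinker, as in the patch below. */
        return register_shrinker(&demo_shrinker, "demo-lazy");
}

static void __exit demo_shrinker_exit(void)
{
        unregister_shrinker(&demo_shrinker);
}

module_init(demo_shrinker_init);
module_exit(demo_shrinker_exit);
MODULE_LICENSE("GPL");

As in the patch, returning SHRINK_STOP from the scan callback when nothing
was reclaimed tells the shrinker core to stop scanning this shrinker for the
current reclaim pass.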
kernel/rcu/tree_nocb.h | 52 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 213daf81c057f..9e1c8caec5ceb 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1312,6 +1312,55 @@ int rcu_nocb_cpu_offload(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
+static unsigned long
+lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ int cpu;
+ unsigned long count = 0;
+
+ /* Snapshot count of all CPUs */
+ for_each_possible_cpu(cpu) {
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+ count += READ_ONCE(rdp->lazy_len);
+ }
+
+ return count ? count : SHRINK_EMPTY;
+}
+
+static unsigned long
+lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ int cpu;
+ unsigned long flags;
+ unsigned long count = 0;
+
+ /* Snapshot count of all CPUs */
+ for_each_possible_cpu(cpu) {
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ int _count = READ_ONCE(rdp->lazy_len);
+
+ if (_count == 0)
+ continue;
+ rcu_nocb_lock_irqsave(rdp, flags);
+ WRITE_ONCE(rdp->lazy_len, 0);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ wake_nocb_gp(rdp, false);
+ sc->nr_to_scan -= _count;
+ count += _count;
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ return count ? count : SHRINK_STOP;
+}
+
+static struct shrinker lazy_rcu_shrinker = {
+ .count_objects = lazy_rcu_shrink_count,
+ .scan_objects = lazy_rcu_shrink_scan,
+ .batch = 0,
+ .seeks = DEFAULT_SEEKS,
+};
+
void __init rcu_init_nohz(void)
{
int cpu;
@@ -1342,6 +1391,9 @@ void __init rcu_init_nohz(void)
if (!rcu_state.nocb_is_setup)
return;
+ if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy"))
+ pr_err("Failed to register lazy_rcu shrinker!\n");
+
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
cpumask_and(rcu_nocb_mask, cpu_possible_mask,
--
2.31.1.189.g2e36527f23