Message-Id: <20220809034517.3867176-3-joel@joelfernandes.org>
Date: Tue, 9 Aug 2022 03:45:13 +0000
From: "Joel Fernandes (Google)" <joel@...lfernandes.org>
To: linux-kernel@...r.kernel.org
Cc: Vineeth Pillai <vineeth@...byteword.org>,
Joel Fernandes <joel@...lfernandes.org>,
rushikesh.s.kadam@...el.com, urezki@...il.com,
neeraj.iitr10@...il.com, frederic@...nel.org, paulmck@...nel.org,
rostedt@...dmis.org, rcu@...r.kernel.org
Subject: [PATCH v3 resend 2/6] rcu: shrinker for lazy rcu
From: Vineeth Pillai <vineeth@...byteword.org>
The shrinker is used to speed up the freeing of memory potentially held
by lazy RCU callbacks. RCU kernel module test cases show this to be
effective. A test is introduced in a later patch in this series.
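The count/scan callbacks below rely on the lazy-length helpers added
earlier in this series. As a rough sketch (assuming struct rcu_cblist
carries a lazy_len counter, as introduced in the preceding patch), those
helpers could look like:

	/* Number of lazy callbacks queued on this bypass cblist. */
	static long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp)
	{
		return READ_ONCE(rclp->lazy_len);
	}

	/* Mark all queued bypass callbacks as no longer lazy. */
	static void rcu_cblist_reset_lazy_len(struct rcu_cblist *rclp)
	{
		WRITE_ONCE(rclp->lazy_len, 0);
	}

With these in place, the shrinker's scan callback only needs to zero the
per-CPU lazy count and wake the corresponding nocb GP kthread, so that
the queued callbacks are treated as non-lazy and invoked without the
usual lazy delay.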
Signed-off-by: Vineeth Pillai <vineeth@...byteword.org>
Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>
---
kernel/rcu/tree_nocb.h | 52 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 61b1409bd4d6..f8d7255f4f0a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1249,6 +1249,55 @@ int rcu_nocb_cpu_offload(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
+static unsigned long
+lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ int cpu;
+ unsigned long count = 0;
+
+ /* Snapshot count of all CPUs */
+ for_each_possible_cpu(cpu) {
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+ count += rcu_cblist_n_lazy_cbs(&rdp->nocb_bypass);
+ }
+
+ return count ? count : SHRINK_EMPTY;
+}
+
+static unsigned long
+lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ int cpu;
+ unsigned long flags;
+ unsigned long count = 0;
+
+ /* Snapshot count of all CPUs */
+ for_each_possible_cpu(cpu) {
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+ int _count = rcu_cblist_n_lazy_cbs(&rdp->nocb_bypass);
+
+ if (_count == 0)
+ continue;
+ rcu_nocb_lock_irqsave(rdp, flags);
+ rcu_cblist_reset_lazy_len(&rdp->nocb_bypass);
+ rcu_nocb_unlock_irqrestore(rdp, flags);
+ wake_nocb_gp(rdp, false);
+ sc->nr_to_scan -= _count;
+ count += _count;
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ return count ? count : SHRINK_STOP;
+}
+
+static struct shrinker lazy_rcu_shrinker = {
+ .count_objects = lazy_rcu_shrink_count,
+ .scan_objects = lazy_rcu_shrink_scan,
+ .batch = 0,
+ .seeks = DEFAULT_SEEKS,
+};
+
void __init rcu_init_nohz(void)
{
int cpu;
@@ -1286,6 +1335,9 @@ void __init rcu_init_nohz(void)
if (!rcu_state.nocb_is_setup)
return;
+ if (register_shrinker(&lazy_rcu_shrinker))
+ pr_err("Failed to register lazy_rcu shrinker!\n");
+
#if defined(CONFIG_NO_HZ_FULL)
if (tick_nohz_full_running)
cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
--
2.37.1.559.g78731f0fdb-goog