Date:   Wed, 6 May 2020 17:42:40 -0700
From:   "Paul E. McKenney" <paulmck@...nel.org>
To:     rcu@...r.kernel.org
Cc:     linux-kernel@...r.kernel.org, kernel-team@...com, mingo@...nel.org,
        jiangshanlai@...il.com, dipankar@...ibm.com,
        akpm@...ux-foundation.org, mathieu.desnoyers@...icios.com,
        josh@...htriplett.org, tglx@...utronix.de, peterz@...radead.org,
        rostedt@...dmis.org, dhowells@...hat.com, edumazet@...gle.com,
        fweisbec@...il.com, oleg@...hat.com, joel@...lfernandes.org,
        viro@...iv.linux.org.uk, hannes@...xchg.org
Subject: [PATCH RFC tip/core/rcu] Add shrinker to shift to fast/inefficient
 GP mode

This commit adds a shrinker to inform RCU when memory is scarce.
RCU responds by shifting into the same fast and inefficient mode that is
used in the presence of excessive numbers of RCU callbacks.  RCU remains
in this state for one-tenth of a second, though this time window can be
extended by another call to the shrinker.
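
For illustration only (this is not part of the patch), the deadline
handling can be modeled in plain userspace C: each scan (re)arms a
100-millisecond window, and back-to-back scans coalesce into one
continuous overload period.  The field names mirror the patch; the
function names and clock plumbing below are illustrative, not kernel
code, and no memory ordering is modeled.

/*
 * Userspace sketch of the overload window: scan() plays the role of
 * rcu_oom_scan() and maybe_end() the role of the check added to
 * force_qs_rnp().  Plain variables stand in for the rcu_state fields.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool oomovld;			/* models rcu_state.oomovld */
static struct timespec oomovldend;	/* models rcu_state.oomovldend */

static struct timespec now_plus_ms(long ms)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	t.tv_nsec += ms * 1000000L;
	t.tv_sec += t.tv_nsec / 1000000000L;
	t.tv_nsec %= 1000000000L;
	return t;
}

static bool now_after(struct timespec end)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	return t.tv_sec > end.tv_sec ||
	       (t.tv_sec == end.tv_sec && t.tv_nsec > end.tv_nsec);
}

static void scan(void)		/* (re)arm a 100 ms window */
{
	oomovldend = now_plus_ms(100);
	oomovld = true;
}

static void maybe_end(void)	/* drop out once the window expires */
{
	if (oomovld && now_after(oomovldend)) {
		oomovld = false;
		printf("overload window closed\n");
	}
}

int main(void)
{
	scan();		/* memory pressure: open the window */
	scan();		/* a second call extends it, as in the patch */
	while (oomovld)
		maybe_end();
	return 0;
}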

If it proves feasible, a later commit might add a function call directly
indicating the end of the period of scarce memory.

Suggested-by: Al Viro <viro@...iv.linux.org.uk>
Signed-off-by: Paul E. McKenney <paulmck@...nel.org>
Cc: Johannes Weiner <hannes@...xchg.org>

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b0fe32f..76d148d 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2368,8 +2368,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
-	rcu_state.cbovld = rcu_state.cbovldnext;
+	// Load .oomovld before .oomovldend, pairing with .oomovld set.
+	rcu_state.cbovld = smp_load_acquire(&rcu_state.oomovld) || // ^^^
+			   rcu_state.cbovldnext;
 	rcu_state.cbovldnext = false;
+	if (READ_ONCE(rcu_state.oomovld) &&
+	    time_after(jiffies, READ_ONCE(rcu_state.oomovldend))) {
+		WRITE_ONCE(rcu_state.oomovld, false);
+		pr_info("%s: Ending OOM-mode grace periods.\n", __func__);
+	}
 	rcu_for_each_leaf_node(rnp) {
 		cond_resched_tasks_rcu_qs();
 		mask = 0;
@@ -2697,6 +2704,35 @@ static void check_cb_ovld(struct rcu_data *rdp)
 	raw_spin_unlock_rcu_node(rnp);
 }
 
+/* Return a rough count of the RCU callbacks outstanding. */
+static unsigned long rcu_oom_count(struct shrinker *unused1,
+				   struct shrink_control *unused2)
+{
+	int cpu;
+	unsigned long ncbs = 0;
+
+	for_each_possible_cpu(cpu)
+		ncbs += rcu_get_n_cbs_cpu(cpu);
+	return ncbs;
+}
+
+/* Start up an interval of fast high-overhead grace periods. */
+static unsigned long rcu_oom_scan(struct shrinker *unused1,
+				  struct shrink_control *unused2)
+{
+	pr_info("%s: Starting OOM-mode grace periods.\n", __func__);
+	WRITE_ONCE(rcu_state.oomovldend, jiffies + HZ / 10);
+	smp_store_release(&rcu_state.oomovld, true); // After .oomovldend
+	rcu_force_quiescent_state();  // Kick grace period
+	return 0;  // We haven't actually reclaimed anything yet.
+}
+
+static struct shrinker rcu_shrinker = {
+	.count_objects = rcu_oom_count,
+	.scan_objects = rcu_oom_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
 /* Helper function for call_rcu() and friends.  */
 static void
 __call_rcu(struct rcu_head *head, rcu_callback_t func)
@@ -4146,6 +4182,7 @@ void __init rcu_init(void)
 		qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
 	else
 		qovld_calc = qovld;
+	WARN_ON(register_shrinker(&rcu_shrinker));
 }
 
 #include "tree_stall.h"
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 2d7fcb9..c4d8e96 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -326,6 +326,8 @@ struct rcu_state {
 	int ncpus_snap;				/* # CPUs seen last time. */
 	u8 cbovld;				/* Callback overload now? */
 	u8 cbovldnext;				/* ^        ^  next time? */
+	u8 oomovld;				/* OOM overload? */
+	unsigned long oomovldend;		/* OOM ovld end, jiffies. */
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */

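A note on the ordering comments above: smp_store_release() in
rcu_oom_scan() publishes .oomovld only after .oomovldend has been
written, and the smp_load_acquire() in force_qs_rnp() guarantees that
any reader observing .oomovld set also observes the matching deadline,
so the flag cannot be cleared against a stale .oomovldend.  The same
publication pattern in standalone C11 atomics, for illustration only
(the writer/reader names and the pthread harness are assumptions of
this sketch, not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static unsigned long oomovldend;	/* plain store, ordered by the release */
static atomic_bool oomovld;

static void *writer(void *arg)		/* models rcu_oom_scan() */
{
	oomovldend = 12345;		/* deadline written first... */
	atomic_store_explicit(&oomovld, true,
			      memory_order_release);	/* ...flag second */
	return NULL;
}

static void *reader(void *arg)		/* models the check in force_qs_rnp() */
{
	if (atomic_load_explicit(&oomovld, memory_order_acquire))
		(void)oomovldend;	/* guaranteed to see 12345 here */
	return NULL;
}

int main(void)		/* build with: cc -pthread sketch.c */
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}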