Date:   Fri, 9 Aug 2019 22:42:32 -0400
From:   Joel Fernandes <joel@...lfernandes.org>
To:     "Paul E. McKenney" <paulmck@...ux.ibm.com>
Cc:     linux-kernel@...r.kernel.org, Rao Shoaib <rao.shoaib@...cle.com>,
        max.byungchul.park@...il.com, byungchul.park@....com,
        kernel-team@...roid.com, kernel-team@....com,
        Davidlohr Bueso <dave@...olabs.net>,
        Josh Triplett <josh@...htriplett.org>,
        Lai Jiangshan <jiangshanlai@...il.com>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        rcu@...r.kernel.org, Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [PATCH RFC v1 1/2] rcu/tree: Add basic support for kfree_rcu
 batching

On Wed, Aug 07, 2019 at 10:52:15AM -0700, Paul E. McKenney wrote:
[snip] 
> > > > @@ -3459,6 +3645,8 @@ void __init rcu_init(void)
> > > >  {
> > > >  	int cpu;
> > > >  
> > > > +	kfree_rcu_batch_init();
> > > 
> > > What happens if someone does a kfree_rcu() before this point?  It looks
> > > like it should work, but have you tested it?
> > > 
> > > >  	rcu_early_boot_tests();
> > > 
> > > For example, by testing it in rcu_early_boot_tests() and moving the
> > > call to kfree_rcu_batch_init() here.
> > 
> > I have not tried to do the kfree_rcu() this early. I will try it out.
> 
> Yeah, well, call_rcu() this early came as a surprise to me back in the
> day, so...  ;-)

I actually did get surprised as well!

It appears the timers are not fully initialized that early, so the really
early kfree_rcu() call from rcu_init() does cause a splat about an
uninitialized timer spinlock, since the batching path arms a delayed work
and hence a timer (even though later kfree_rcu()s and the system keep
working fine all the way into the torture tests).

I think to resolve this, we can just not do batching until early_initcall
time, at which point an initialization function switches batching on. From
that point on it is safe.
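
In other words, it boils down to a readiness flag that is flipped only
after the per-CPU locks and the monitor delayed work have been set up.
Roughly like this (a condensed sketch with made-up names -- batching_ready,
queue_kfree, batching_switch_on; the actual diff follows below):

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <linux/init.h>		/* early_initcall() */
#include <linux/rcupdate.h>	/* struct rcu_head, rcu_callback_t */

/* Stays 0 until the batching machinery is usable. */
static int batching_ready;

void queue_kfree(struct rcu_head *head, rcu_callback_t func)
{
	/*
	 * Too early: timers and per-CPU state are not ready yet, so fall
	 * back to the non-batched path (added in the diff below).
	 */
	if (!READ_ONCE(batching_ready))
		return kfree_call_rcu_nobatch(head, func);

	/* ... otherwise queue onto the per-CPU batch as in the diff ... */
}

static int __init batching_switch_on(void)
{
	/* Initialize per-CPU locks and delayed work here, then flip the flag. */
	WRITE_ONCE(batching_ready, 1);
	return 0;
}
early_initcall(batching_switch_on);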

Below is the diff on top of this patch. I think this should be good, but
let me know if anything looks odd to you. I tested it and it works.

Have a great weekend! Thanks,
-Joel

---8<-----------------------

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a09ef81a1a4f..358f5c065fa4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2634,6 +2634,7 @@ struct kfree_rcu_cpu {
 };
 
 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
+int kfree_rcu_batching_ready;
 
 /*
  * This function is invoked in workqueue context after a grace period.
@@ -2742,6 +2743,17 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
+/*
+ * This version of kfree_call_rcu does not do batching of kfree_rcu() requests.
+ * Used only by rcuperf torture test for comparison with kfree_rcu_batch()
+ * or during really early init.
+ */
+void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func)
+{
+	__call_rcu(head, func, -1, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu_nobatch);
+
 /*
  * Queue a request for lazy invocation of kfree() after a grace period.
  *
@@ -2764,6 +2775,9 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
 	bool monitor_todo;
+
+	if (!READ_ONCE(kfree_rcu_batching_ready))
+		return kfree_call_rcu_nobatch(head, func);
 
 	local_irq_save(flags);
 	krcp = this_cpu_ptr(&krc);
@@ -2794,16 +2809,6 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 }
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
-/*
- * This version of kfree_call_rcu does not do batching of kfree_rcu() requests.
- * Used only by rcuperf torture test for comparison with kfree_rcu_batch().
- */
-void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func)
-{
-	__call_rcu(head, func, -1, 1);
-}
-EXPORT_SYMBOL_GPL(kfree_call_rcu_nobatch);
-
 /*
  * During early boot, any blocking grace-period wait automatically
  * implies a grace period.  Later on, this is never the case for PREEMPT.
@@ -3650,17 +3655,6 @@ static void __init rcu_dump_rcu_node_tree(void)
 	pr_cont("\n");
 }
 
-void kfree_rcu_batch_init(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
-		spin_lock_init(&krcp->lock);
-		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
-	}
-}
-
 struct workqueue_struct *rcu_gp_wq;
 struct workqueue_struct *rcu_par_gp_wq;
 
@@ -3668,8 +3662,6 @@ void __init rcu_init(void)
 {
 	int cpu;
 
-	kfree_rcu_batch_init();
-
 	rcu_early_boot_tests();
 
 	rcu_bootup_announce();
@@ -3700,6 +3692,21 @@ void __init rcu_init(void)
 	srcu_init();
 }
 
+static int __init kfree_rcu_batch_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
+		spin_lock_init(&krcp->lock);
+		INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
+	}
+
+	WRITE_ONCE(kfree_rcu_batching_ready, 1);
+	return 0;
+}
+early_initcall(kfree_rcu_batch_init);
+
 #include "tree_stall.h"
 #include "tree_exp.h"
 #include "tree_plugin.h"
