Message-ID: <20190321120656.GA61489@google.com>
Date:   Thu, 21 Mar 2019 08:06:56 -0400
From:   Joel Fernandes <joel@...lfernandes.org>
To:     Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc:     linux-kernel@...r.kernel.org,
        Josh Triplett <josh@...htriplett.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Lai Jiangshan <jiangshanlai@...il.com>, tglx@...utronix.de,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        Mike Galbraith <bitbucket@...ine.de>, rcu@...r.kernel.org
Subject: Re: [PATCH] rcu: Allow to eliminate softirq processing from rcutree

On Wed, Mar 20, 2019 at 12:28:35PM +0100, Sebastian Andrzej Siewior wrote:
> On 2019-03-19 20:26:13 [-0400], Joel Fernandes wrote:
> > > @@ -2769,19 +2782,121 @@ static void invoke_rcu_callbacks(struct rcu_data *rdp)
> > >  {
> > >  	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
> > >  		return;
> > > -	if (likely(!rcu_state.boost)) {
> > > -		rcu_do_batch(rdp);
> > > -		return;
> > > -	}
> > > -	invoke_rcu_callbacks_kthread();
> > > +	rcu_do_batch(rdp);
> > 
> > Looks like a nice change, but one question...
> > 
> > Consider the case where rcunosoftirq boot option is not passed.
> > 
> > Before, if RCU_BOOST=y, then callbacks would be invoked in rcuc threads if
> > possible, by those threads being woken up from within the softirq context
> > (in invoke_rcu_callbacks).
> > 
> > Now, if RCU_BOOST=y, then callbacks would only be invoked in softirq context
> > and not in the threads at all: rcu_softirq_enabled = true, so this path
> > executes:
> >   rcu_read_unlock_special() ->
> >         raise_softirq_irqoff() ->
> >                 rcu_process_callbacks_si() ->
> >                         rcu_process_callbacks() ->
> >                                 invoke_rcu_callbacks() ->
> >                                         rcu_do_batch()
> > 
> > This seems like a behavioral change to me. It makes the callbacks always
> > execute from softirq context, and never from the threads, when boosting is
> > configured. IMO at the very least, such a behavioral change should be
> > documented in the changelog.
> > 
> > One way to fix this, I think, would be: if boosting is enabled, set
> > rcu_softirq_enabled to false by default so that the callbacks are still
> > executed in the rcuc threads.
> > 
> > Did I miss something? Sorry if I did, thanks!
> 
> So with all the swaps and reordering we have been talking about, this is the change:
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 0a719f726e149..82810483bfc6c 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2306,20 +2306,6 @@ static void rcu_core_si(struct softirq_action *h)
>  	rcu_core();
>  }
>  
> -/*
> - * Schedule RCU callback invocation.  If the running implementation of RCU
> - * does not support RCU priority boosting, just do a direct call, otherwise
> - * wake up the per-CPU kernel kthread.  Note that because we are running
> - * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
> - * cannot disappear out from under us.
> - */
> -static void invoke_rcu_callbacks(struct rcu_data *rdp)
> -{
> -	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
> -		return;
> -	rcu_do_batch(rdp);
> -}
> -
>  static void rcu_wake_cond(struct task_struct *t, int status)
>  {
>  	/*
> @@ -2330,6 +2316,19 @@ static void rcu_wake_cond(struct task_struct *t, int status)
>  		wake_up_process(t);
>  }
>  
> +static void invoke_rcu_core_kthread(void)
> +{
> +	struct task_struct *t;
> +	unsigned long flags;
> +
> +	local_irq_save(flags);
> +	__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
> +	t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
> +	if (t != NULL && t != current)
> +		rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
> +	local_irq_restore(flags);
> +}
> +
>  static bool rcu_softirq_enabled = true;
>  
>  static int __init rcunosoftirq_setup(char *str)
> @@ -2339,26 +2338,33 @@ static int __init rcunosoftirq_setup(char *str)
>  }
>  __setup("rcunosoftirq", rcunosoftirq_setup);
>  
> +/*
> + * Schedule RCU callback invocation.  If the running implementation of RCU
> + * does not support RCU priority boosting, just do a direct call, otherwise
> + * wake up the per-CPU kernel kthread.  Note that because we are running
> + * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
> + * cannot disappear out from under us.
> + */
> +static void invoke_rcu_callbacks(struct rcu_data *rdp)
> +{
> +	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
> +		return;
> +	if (rcu_state.boost || rcu_softirq_enabled)
> +		invoke_rcu_core_kthread();

Shouldn't it be this instead?
	if (rcu_state.boost || !rcu_softirq_enabled)
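
For clarity, here is how I would expect the fixed function to read (just a
sketch; the early return after waking the kthread is my assumption, since
otherwise rcu_do_batch() would still run inline in softirq context):

	static void invoke_rcu_callbacks(struct rcu_data *rdp)
	{
		/* Too early in boot for callback invocation to be safe. */
		if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
			return;
		/*
		 * Boosting is configured, or softirq processing was
		 * disabled on the command line: defer the callbacks to
		 * the per-CPU rcuc kthread.
		 */
		if (rcu_state.boost || !rcu_softirq_enabled) {
			invoke_rcu_core_kthread();
			return;
		}
		/* Otherwise invoke the callbacks right here in softirq. */
		rcu_do_batch(rdp);
	}

That way RCU_BOOST=y keeps processing callbacks in the rcuc threads even
without the rcunosoftirq option.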

Also, the rcu/dev branch has the following hunk, where we unconditionally
invoke rcu_do_batch() even when boosting, which would still have the issue I
pointed out. I would suggest that Sebastian post the latest v4 or v5 with all
the diffs squashed, and then we do another round of review on the latest
patch, thanks!

@@ -2306,18 +2320,110 @@ static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
 	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
 		return;
-	if (likely(!rcu_state.boost)) {
-		rcu_do_batch(rdp);
-		return;
-	}
-	invoke_rcu_callbacks_kthread();
+	rcu_do_batch(rdp);
+}
+
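
For the record, the default-flip fix I suggested above could be as small as
this (sketch only; IS_ENABLED() expands to a constant expression, so it is
usable in a static initializer):

	/*
	 * When boosting is configured, start out with softirq processing
	 * disabled so that callbacks keep running in the rcuc threads;
	 * the rcunosoftirq boot option then simply has no further effect.
	 */
	static bool rcu_softirq_enabled = !IS_ENABLED(CONFIG_RCU_BOOST);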

thanks,

 - Joel


> +	rcu_do_batch(rdp);
> +}
> +
>  /*
>   * Wake up this CPU's rcuc kthread to do RCU core processing.
>   */
>  static void invoke_rcu_core(void)
>  {
> -	unsigned long flags;
> -	struct task_struct *t;
> -
>  	if (!cpu_online(smp_processor_id()))
>  		return;
> -	if (rcu_softirq_enabled) {
> +	if (rcu_softirq_enabled)
>  		raise_softirq(RCU_SOFTIRQ);
> -	} else {
> -		local_irq_save(flags);
> -		__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
> -		t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
> -		if (t != NULL && t != current)
> -			rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
> -		local_irq_restore(flags);
> -	}
> +	else
> +		invoke_rcu_core_kthread();
>  }
>  
>  static void rcu_cpu_kthread_park(unsigned int cpu)
> @@ -2426,7 +2432,8 @@ static int __init rcu_spawn_core_kthreads(void)
>  		per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
>  	if (!IS_ENABLED(CONFIG_RCU_BOOST) && !rcu_softirq_enabled)
>  		return 0;
> -	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__);
> +	WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
> +		  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
>  	return 0;
>  }
>  early_initcall(rcu_spawn_core_kthreads);
> -- 
> 2.20.1
> 
> >  - Joel
> 
> Sebastian
