Message-ID: <20200526224908.GC16672@google.com>
Date:   Tue, 26 May 2020 18:49:08 -0400
From:   Joel Fernandes <joel@...lfernandes.org>
To:     Frederic Weisbecker <frederic@...nel.org>
Cc:     "Paul E . McKenney" <paulmck@...nel.org>,
        LKML <linux-kernel@...r.kernel.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Lai Jiangshan <jiangshanlai@...il.com>,
        Josh Triplett <josh@...htriplett.org>
Subject: Re: [PATCH 08/10] rcu: Allow to deactivate nocb on a CPU

On Tue, May 26, 2020 at 05:20:17PM -0400, Joel Fernandes wrote:
 
> > The switch happens on the target with IRQs disabled and rdp->nocb_lock
> > held to avoid races between local callbacks handling and kthread
> > offloaded callbacks handling.
> > nocb_cb kthread is first parked to avoid any future race with
> > concurrent rcu_do_batch() executions. Then the cblist is set to offloaded
> > so that the nocb_gp kthread ignores this rdp.
> 
> nit: you mean the cblist is set to non-offloaded mode, right?
> 
> Also, could you clarify the rcu_barrier bits in the changelog? I know
> there's some issue if the cblist has both offloaded and non-offloaded
> callbacks, but it would be good to spell that out here, IMHO.

And for archival purposes: rcu_barrier needs to be excluded here because,
for a brief period during the mode switch, the callback kthread is parked,
and it may have been in the middle of executing a batch of callbacks when it
was asked to park.

Meanwhile, more interrupts arrive and more callbacks are queued, and those
now execute in softirq context. This breaks the callback ordering that
rcu_barrier depends on.
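
To make the ordering problem concrete, here is a purely illustrative
userspace sketch (not kernel code; all names are made up) of two executors
splitting one callback list: the "barrier" entry that rcu_barrier queued at
the tail gets invoked while earlier callbacks are still pending:

	#include <stdio.h>

	struct cb { const char *name; };

	int main(void)
	{
		/* Enqueue order; "barrier" was queued last by rcu_barrier(). */
		struct cb list[] = { {"cb0"}, {"cb1"}, {"cb2"}, {"barrier"} };

		/* The nocb kthread was parked mid-batch, still holding cb0
		 * and cb1 for invocation. */
		printf("kthread (parked) still owns: %s %s\n",
		       list[0].name, list[1].name);

		/* Softirq now handles the list and invokes the remaining
		 * entries, including "barrier", before cb0/cb1 have run. */
		for (int i = 2; i < 4; i++)
			printf("softirq invokes: %s\n", list[i].name);

		return 0;
	}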

thanks,

 - Joel


> 
> [...]
> > diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c
> > index d8ea2bef5574..4bed48da7702 100644
> > --- a/kernel/rcu/rcu_segcblist.c
> > +++ b/kernel/rcu/rcu_segcblist.c
> > @@ -171,9 +171,9 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
> >   * Mark the specified rcu_segcblist structure as offloaded.  This
> >   * structure must be empty.
> >   */
> > -void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
> > +void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
> >  {
> > -	rsclp->offloaded = 1;
> > +	rsclp->offloaded = offload;
> >  }
> >  #endif
> >  
> > diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h
> > index 4c1503a82492..8f7c6c34cb1b 100644
> > --- a/kernel/rcu/rcu_segcblist.h
> > +++ b/kernel/rcu/rcu_segcblist.h
> > @@ -83,7 +83,7 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
> >  void rcu_segcblist_init(struct rcu_segcblist *rsclp);
> >  void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
> >  #ifdef CONFIG_RCU_NOCB_CPU
> > -void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
> > +void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
> >  #endif
> >  bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
> >  bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
> > diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
> > index f19e81e0c691..c74a4df8d5f2 100644
> > --- a/kernel/rcu/tree_plugin.h
> > +++ b/kernel/rcu/tree_plugin.h
> > @@ -1943,6 +1943,10 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
> >  	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
> >  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
> >  		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
> > +		if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
> > +			raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
> > +			continue;
> > +		}
> >  		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
> >  		if (bypass_ncbs &&
> >  		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
> > @@ -2176,6 +2180,50 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
> >  		do_nocb_deferred_wakeup_common(rdp);
> >  }
> >  
> > +static void __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
> > +{
> > +	unsigned long flags;
> > +	struct rcu_node *rnp = rdp->mynode;
> > +
> > +	printk("De-offloading %d\n", rdp->cpu);
> 
> nit: s/printk/pr_debug/ ?
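> 
> Something like this, perhaps (untested):
> 
> 	pr_debug("De-offloading %d\n", rdp->cpu);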
> 
> thanks,
> 
>  - Joel
> 
> > +	kthread_park(rdp->nocb_cb_kthread);
> > +
> > +	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
> > +	rcu_nocb_flush_bypass(rdp, NULL, jiffies);
> > +	raw_spin_lock_rcu_node(rnp);
> > +	rcu_segcblist_offload(&rdp->cblist, false);
> > +	raw_spin_unlock_rcu_node(rnp);
> > +	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
> > +}
> > +
> > +static long rcu_nocb_rdp_deoffload(void *arg)
> > +{
> > +	struct rcu_data *rdp = arg;
> > +
> > +	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
> > +	__rcu_nocb_rdp_deoffload(rdp);
> > +
> > +	return 0;
> > +}
> > +
> > +void rcu_nocb_cpu_deoffload(int cpu)
> > +{
> > +	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
> > +
> > +	mutex_lock(&rcu_state.barrier_mutex);
> > +	cpus_read_lock();
> > +	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
> > +		if (cpu_online(cpu)) {
> > +			work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
> > +		} else {
> > +			__rcu_nocb_rdp_deoffload(rdp);
> > +		}
> > +		cpumask_clear_cpu(cpu, rcu_nocb_mask);
> > +	}
> > +	cpus_read_unlock();
> > +	mutex_unlock(&rcu_state.barrier_mutex);
> > +}
> > +
> >  void __init rcu_init_nohz(void)
> >  {
> >  	int cpu;
> > @@ -2218,7 +2266,7 @@ void __init rcu_init_nohz(void)
> >  		rdp = per_cpu_ptr(&rcu_data, cpu);
> >  		if (rcu_segcblist_empty(&rdp->cblist))
> >  			rcu_segcblist_init(&rdp->cblist);
> > -		rcu_segcblist_offload(&rdp->cblist);
> > +		rcu_segcblist_offload(&rdp->cblist, true);
> >  	}
> >  	rcu_organize_nocb_kthreads();
> >  }
> > -- 
> > 2.25.0
> > 
