Date:   Fri, 2 Mar 2018 16:54:41 -0800
From:   "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
To:     Byungchul Park <byungchul.park@....com>
Cc:     jiangshanlai@...il.com, josh@...htriplett.org, rostedt@...dmis.org,
        mathieu.desnoyers@...icios.com, linux-kernel@...r.kernel.org,
        kernel-team@....com
Subject: Re: [PATCH] rcu: Remove deprecated RCU debugfs tracing code

On Fri, Mar 02, 2018 at 04:39:12PM +0900, Byungchul Park wrote:
> Commit ae91aa0adb14 ("rcu: Remove debugfs tracing") removed the RCU
> debugfs tracing code, but left behind code that is now unused.
> 
> Because exp_workdone{0,1,2,3} have not been used since that commit,
> remove the code dealing with those variables.
> 
> Signed-off-by: Byungchul Park <byungchul.park@....com>

Good catch, queued for testing and review, thank you!

							Thanx, Paul

> ---
>  kernel/rcu/tree.h     |  4 ----
>  kernel/rcu/tree_exp.h | 13 +++++--------
>  2 files changed, 5 insertions(+), 12 deletions(-)
> 
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index 9dd0ea7..cfeb351 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -234,10 +234,6 @@ struct rcu_data {
>  #ifdef CONFIG_RCU_FAST_NO_HZ
>  	struct rcu_head oom_head;
>  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
> -	atomic_long_t exp_workdone0;	/* # done by workqueue. */
> -	atomic_long_t exp_workdone1;	/* # done by others #1. */
> -	atomic_long_t exp_workdone2;	/* # done by others #2. */
> -	atomic_long_t exp_workdone3;	/* # done by others #3. */
>  	int exp_dynticks_snap;		/* Double-check need for IPI. */
> 
>  	/* 6) Callback offloading. */
> diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
> index 73e1d3d..f8e4571 100644
> --- a/kernel/rcu/tree_exp.h
> +++ b/kernel/rcu/tree_exp.h
> @@ -248,14 +248,12 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
>  }
> 
>  /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
> -static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
> -			       unsigned long s)
> +static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
>  {
>  	if (rcu_exp_gp_seq_done(rsp, s)) {
>  		trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
>  		/* Ensure test happens before caller kfree(). */
>  		smp_mb__before_atomic(); /* ^^^ */
> -		atomic_long_inc(stat);
>  		return true;
>  	}
>  	return false;
> @@ -289,7 +287,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
>  	 * promoting locality and is not strictly needed for correctness.
>  	 */
>  	for (; rnp != NULL; rnp = rnp->parent) {
> -		if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
> +		if (sync_exp_work_done(rsp, s))
>  			return true;
> 
>  		/* Work not done, either wait here or go up. */
> @@ -302,8 +300,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
>  						  rnp->grplo, rnp->grphi,
>  						  TPS("wait"));
>  			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
> -				   sync_exp_work_done(rsp,
> -						      &rdp->exp_workdone2, s));
> +				   sync_exp_work_done(rsp, s));
>  			return true;
>  		}
>  		rnp->exp_seq_rq = s; /* Followers can wait on us. */
> @@ -313,7 +310,7 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
>  	}
>  	mutex_lock(&rsp->exp_mutex);
>  fastpath:
> -	if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
> +	if (sync_exp_work_done(rsp, s)) {
>  		mutex_unlock(&rsp->exp_mutex);
>  		return true;
>  	}
> @@ -661,7 +658,7 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
>  	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
>  	rnp = rcu_get_root(rsp);
>  	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
> -		   sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
> +		   sync_exp_work_done(rsp, s));
>  	smp_mb(); /* Workqueue actions happen before return. */
> 
>  	/* Let the next expedited grace period start. */
> -- 
> 1.9.1
> 
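For reference, here is roughly what the simplified helper looks like once the
patch is applied; this is only a paraphrase of the diff above, not the exact
contents of the tree:

	/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
	static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
	{
		if (rcu_exp_gp_seq_done(rsp, s)) {
			trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
			/* Ensure test happens before caller kfree(). */
			smp_mb__before_atomic(); /* ^^^ */
			return true;
		}
		return false;
	}

With the stat argument gone, callers such as exp_funnel_lock() and
_synchronize_rcu_expedited() simply pass the grace-period sequence number s.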
