Message-ID: <20091015110404.GC3127@wotan.suse.de>
Date:	Thu, 15 Oct 2009 13:04:04 +0200
From:	Nick Piggin <npiggin@...e.de>
To:	"Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>
Cc:	linux-kernel@...r.kernel.org, mingo@...e.hu, laijs@...fujitsu.com,
	dipankar@...ibm.com, akpm@...ux-foundation.org,
	mathieu.desnoyers@...ymtl.ca, josh@...htriplett.org,
	dvhltc@...ibm.com, niv@...ibm.com, tglx@...utronix.de,
	peterz@...radead.org, rostedt@...dmis.org, Valdis.Kletnieks@...edu,
	dhowells@...hat.com, jens.axboe@...cle.com
Subject: Re: [PATCH tip/core/rcu 2/6] rcu: prevent RCU IPI storms in presence of high call_rcu() load

With this applied on top of my vfs-scale patches, a 64-way 32-node ia64
system runs the parallel open/close microbenchmark ~30 times faster and
is scaling linearly now. Single-thread performance is not noticeably
changed. Thanks very much, Paul.
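
For reference, the benchmark is essentially N threads each doing
open/close on a private file in a tight loop, which (with the
vfs-scale patches) generates exactly the heavy call_rcu() load this
patch is about.  A minimal sketch of the idea -- thread count,
iteration count, and paths here are illustrative, not the exact
harness:

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NTHREADS	64	/* one thread per CPU on the test box */
#define NITERS		1000000	/* open/close pairs per thread */

static void *worker(void *arg)
{
	char path[64];
	long i;

	/* Each thread uses its own file, so the threads contend on
	 * VFS-wide structures rather than on a single inode. */
	snprintf(path, sizeof(path), "/tmp/oc-bench.%ld", (long)arg);
	for (i = 0; i < NITERS; i++) {
		int fd = open(path, O_CREAT | O_RDWR, 0600);

		if (fd < 0) {
			perror("open");
			exit(1);
		}
		close(fd);
	}
	unlink(path);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long t;

	for (t = 0; t < NTHREADS; t++)
		pthread_create(&tid[t], NULL, worker, (void *)t);
	for (t = 0; t < NTHREADS; t++)
		pthread_join(tid[t], NULL);
	return 0;
}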

On Wed, Oct 14, 2009 at 10:15:55AM -0700, Paul E. McKenney wrote:
> From: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> 
> As the number of callbacks on a given CPU rises, invoke
> force_quiescent_state() only every qhimark number of callbacks
> (defaults to 10,000), and even then only if no other CPU has invoked
> force_quiescent_state() in the meantime.
> 
> Reported-by: Nick Piggin <npiggin@...e.de>
> Signed-off-by: Paul E. McKenney <paulmck@...ux.vnet.ibm.com>
> ---
>  kernel/rcutree.c |   29 ++++++++++++++++++++++++-----
>  kernel/rcutree.h |    4 ++++
>  2 files changed, 28 insertions(+), 5 deletions(-)
> 
> diff --git a/kernel/rcutree.c b/kernel/rcutree.c
> index 705f02a..ddbf111 100644
> --- a/kernel/rcutree.c
> +++ b/kernel/rcutree.c
> @@ -958,7 +958,7 @@ static void rcu_offline_cpu(int cpu)
>   * Invoke any RCU callbacks that have made it to the end of their grace
>   * period.  Throttle as specified by rdp->blimit.
>   */
> -static void rcu_do_batch(struct rcu_data *rdp)
> +static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
>  {
>  	unsigned long flags;
>  	struct rcu_head *next, *list, **tail;
> @@ -1011,6 +1011,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
>  	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
>  		rdp->blimit = blimit;
>  
> +	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
> +	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
> +		rdp->qlen_last_fqs_check = 0;
> +		rdp->n_force_qs_snap = rsp->n_force_qs;
> +	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
> +		rdp->qlen_last_fqs_check = rdp->qlen;
> +
>  	local_irq_restore(flags);
>  
>  	/* Re-raise the RCU softirq if there are callbacks remaining. */
> @@ -1224,7 +1231,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
>  	}
>  
>  	/* If there are callbacks ready, invoke them. */
> -	rcu_do_batch(rdp);
> +	rcu_do_batch(rsp, rdp);
>  }
>  
>  /*
> @@ -1288,10 +1295,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
>  		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
>  	}
>  
> -	/* Force the grace period if too many callbacks or too long waiting. */
> -	if (unlikely(++rdp->qlen > qhimark)) {
> +	/*
> +	 * Force the grace period if too many callbacks or too long waiting.
> +	 * Enforce hysteresis, and don't invoke force_quiescent_state()
> +	 * if some other CPU has recently done so.  Also, don't bother
> +	 * invoking force_quiescent_state() if the newly enqueued callback
> +	 * is the only one waiting for a grace period to complete.
> +	 */
> +	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
>  		rdp->blimit = LONG_MAX;
> -		force_quiescent_state(rsp, 0);
> +		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
> +		    *rdp->nxttail[RCU_DONE_TAIL] != head)
> +			force_quiescent_state(rsp, 0);
> +		rdp->n_force_qs_snap = rsp->n_force_qs;
> +		rdp->qlen_last_fqs_check = rdp->qlen;
>  	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
>  		force_quiescent_state(rsp, 1);
>  	local_irq_restore(flags);
> @@ -1523,6 +1540,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
>  	rdp->beenonline = 1;	 /* We have now been online. */
>  	rdp->preemptable = preemptable;
>  	rdp->passed_quiesc_completed = lastcomp - 1;
> +	rdp->qlen_last_fqs_check = 0;
> +	rdp->n_force_qs_snap = rsp->n_force_qs;
>  	rdp->blimit = blimit;
>  	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
>  
> diff --git a/kernel/rcutree.h b/kernel/rcutree.h
> index b40ac57..599161f 100644
> --- a/kernel/rcutree.h
> +++ b/kernel/rcutree.h
> @@ -167,6 +167,10 @@ struct rcu_data {
>  	struct rcu_head *nxtlist;
>  	struct rcu_head **nxttail[RCU_NEXT_SIZE];
>  	long		qlen;		/* # of queued callbacks */
> +	long		qlen_last_fqs_check;
> +					/* qlen at last check for QS forcing */
> +	unsigned long	n_force_qs_snap;
> +					/* did other CPU force QS recently? */
>  	long		blimit;		/* Upper limit on a processed batch */
>  
>  #ifdef CONFIG_NO_HZ
> -- 
> 1.5.2.5
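
To see what the hysteresis buys in isolation, here is a small
user-space toy that mimics just the enqueue-side logic above (single
CPU, no grace periods completing; the blimit handling, the
lone-callback test, and the rcu_do_batch() rearm path are omitted,
and the names mirror the patch rather than any real API):

#include <stdio.h>

#define QHIMARK	10000	/* mirrors the kernel's default qhimark */

struct sim {
	long qlen;			/* # of queued callbacks */
	long qlen_last_fqs_check;	/* qlen at last forcing check */
	long n_force_qs;		/* global count of forced QS */
	long n_force_qs_snap;		/* this CPU's snapshot of same */
	long n_forced;			/* how often we "sent the IPIs" */
};

/* Enqueue one callback, applying the patch's hysteresis rule. */
static void enqueue(struct sim *s)
{
	if (++s->qlen > s->qlen_last_fqs_check + QHIMARK) {
		/* Only force if no one else has since our snapshot. */
		if (s->n_force_qs == s->n_force_qs_snap) {
			s->n_force_qs++;	/* force_quiescent_state() */
			s->n_forced++;
		}
		s->n_force_qs_snap = s->n_force_qs;
		s->qlen_last_fqs_check = s->qlen;
	}
}

int main(void)
{
	struct sim s = { 0 };
	long i;

	for (i = 0; i < 1000000; i++)
		enqueue(&s);
	printf("forced quiescent state %ld times\n", s.n_forced);
	return 0;
}

This prints 99 for a million enqueues with nothing draining; the old
"++rdp->qlen > qhimark" test would have called force_quiescent_state()
roughly 990,000 times over the same run, which is where the IPI storm
came from.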