lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 13 Dec 2022 12:24:43 -0800
From:   "Paul E. McKenney" <paulmck@...nel.org>
To:     "Joel Fernandes (Google)" <joel@...lfernandes.org>
Cc:     linux-kernel@...r.kernel.org, Davidlohr Bueso <dave@...olabs.net>,
        Josh Triplett <josh@...htriplett.org>, rcu@...r.kernel.org,
        connoro@...gle.com
Subject: Re: [PATCH v2 1/2] locktorture: Allow non-rtmutex lock types to be
 boosted

On Fri, Dec 09, 2022 at 02:23:04AM +0000, Joel Fernandes (Google) wrote:
> Currently, RT boosting is only done for rtmutex_lock; however, with proxy
> execution, mutex_lock also participates in priorities. To exercise the
> testing better, add RT boosting to the other lock testing types as well,
> using a new knob (rt_boost).
> 
> Tested with boot parameters:
> locktorture.torture_type=mutex_lock
> locktorture.onoff_interval=1
> locktorture.nwriters_stress=8
> locktorture.stutter=0
> locktorture.rt_boost=1
> locktorture.rt_boost_factor=1
> locktorture.nlocks=3
> 
> For the rtmutex test, rt_boost is always enabled even if disabling is
> requested.
> 
> Signed-off-by: Joel Fernandes (Google) <joel@...lfernandes.org>

Nice, thank you!

Except that "git am -3" doesn't know about the commit on which this
is based.  Could you please rebase onto the -rcu tree's "dev" branch?

Though there is nothing touching kernel/locking/locktorture.c in
-rcu at the moment, so I confess some curiosity as to exactly what
these patches are based on.  ;-)

							Thanx, Paul

> ---
>  kernel/locking/locktorture.c | 99 ++++++++++++++++++++----------------
>  1 file changed, 56 insertions(+), 43 deletions(-)
> 
> diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
> index bc3557677eed..8968fd65a90d 100644
> --- a/kernel/locking/locktorture.c
> +++ b/kernel/locking/locktorture.c
> @@ -46,6 +46,8 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
>  torture_param(int, stat_interval, 60,
>  	     "Number of seconds between stats printk()s");
>  torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
> +torture_param(int, rt_boost, 2,
> +		"Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
>  torture_param(int, verbose, 1,
>  	     "Enable verbose debugging printk()s");
>  torture_param(int, nlocks, 1,
> @@ -129,15 +131,49 @@ static void torture_lock_busted_write_unlock(int tid __maybe_unused)
>  	  /* BUGGY, do not use in real life!!! */
>  }
>  
> -static void torture_boost_dummy(struct torture_random_state *trsp)
> +static void __torture_rt_boost(struct torture_random_state *trsp)
>  {
> -	/* Only rtmutexes care about priority */
> +	const unsigned int factor = 50000; /* yes, quite arbitrary */
> +
> +	if (!rt_task(current)) {
> +		/*
> +		 * Boost priority once every ~50k operations. When the
> +		 * task tries to take the lock, the rtmutex it will account
> +		 * for the new priority, and do any corresponding pi-dance.
> +		 */
> +		if (trsp && !(torture_random(trsp) %
> +			      (cxt.nrealwriters_stress * factor))) {
> +			sched_set_fifo(current);
> +		} else /* common case, do nothing */
> +			return;
> +	} else {
> +		/*
> +		 * The task will remain boosted for another ~500k operations,
> +		 * then restored back to its original prio, and so forth.
> +		 *
> +		 * When @trsp is nil, we want to force-reset the task for
> +		 * stopping the kthread.
> +		 */
> +		if (!trsp || !(torture_random(trsp) %
> +			       (cxt.nrealwriters_stress * factor * 2))) {
> +			sched_set_normal(current, 0);
> +		} else /* common case, do nothing */
> +			return;
> +	}
> +}
> +
> +static void torture_rt_boost(struct torture_random_state *trsp)
> +{
> +	if (rt_boost != 2)
> +		return;
> +
> +	__torture_rt_boost(trsp);
>  }
>  
>  static struct lock_torture_ops lock_busted_ops = {
>  	.writelock	= torture_lock_busted_write_lock,
>  	.write_delay	= torture_lock_busted_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_lock_busted_write_unlock,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -181,7 +217,7 @@ __releases(torture_spinlock)
>  static struct lock_torture_ops spin_lock_ops = {
>  	.writelock	= torture_spin_lock_write_lock,
>  	.write_delay	= torture_spin_lock_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_spin_lock_write_unlock,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -208,7 +244,7 @@ __releases(torture_spinlock)
>  static struct lock_torture_ops spin_lock_irq_ops = {
>  	.writelock	= torture_spin_lock_write_lock_irq,
>  	.write_delay	= torture_spin_lock_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_lock_spin_write_unlock_irq,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -277,7 +313,7 @@ __releases(torture_rwlock)
>  static struct lock_torture_ops rw_lock_ops = {
>  	.writelock	= torture_rwlock_write_lock,
>  	.write_delay	= torture_rwlock_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_rwlock_write_unlock,
>  	.readlock       = torture_rwlock_read_lock,
>  	.read_delay     = torture_rwlock_read_delay,
> @@ -320,7 +356,7 @@ __releases(torture_rwlock)
>  static struct lock_torture_ops rw_lock_irq_ops = {
>  	.writelock	= torture_rwlock_write_lock_irq,
>  	.write_delay	= torture_rwlock_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_rwlock_write_unlock_irq,
>  	.readlock       = torture_rwlock_read_lock_irq,
>  	.read_delay     = torture_rwlock_read_delay,
> @@ -362,7 +398,7 @@ __releases(torture_mutex)
>  static struct lock_torture_ops mutex_lock_ops = {
>  	.writelock	= torture_mutex_lock,
>  	.write_delay	= torture_mutex_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_mutex_unlock,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -460,7 +496,7 @@ static struct lock_torture_ops ww_mutex_lock_ops = {
>  	.exit		= torture_ww_mutex_exit,
>  	.writelock	= torture_ww_mutex_lock,
>  	.write_delay	= torture_mutex_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_ww_mutex_unlock,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -478,37 +514,6 @@ __acquires(torture_rtmutex)
>  	return 0;
>  }
>  
> -static void torture_rtmutex_boost(struct torture_random_state *trsp)
> -{
> -	const unsigned int factor = 50000; /* yes, quite arbitrary */
> -
> -	if (!rt_task(current)) {
> -		/*
> -		 * Boost priority once every ~50k operations. When the
> -		 * task tries to take the lock, the rtmutex it will account
> -		 * for the new priority, and do any corresponding pi-dance.
> -		 */
> -		if (trsp && !(torture_random(trsp) %
> -			      (cxt.nrealwriters_stress * factor))) {
> -			sched_set_fifo(current);
> -		} else /* common case, do nothing */
> -			return;
> -	} else {
> -		/*
> -		 * The task will remain boosted for another ~500k operations,
> -		 * then restored back to its original prio, and so forth.
> -		 *
> -		 * When @trsp is nil, we want to force-reset the task for
> -		 * stopping the kthread.
> -		 */
> -		if (!trsp || !(torture_random(trsp) %
> -			       (cxt.nrealwriters_stress * factor * 2))) {
> -			sched_set_normal(current, 0);
> -		} else /* common case, do nothing */
> -			return;
> -	}
> -}
> -
>  static void torture_rtmutex_delay(struct torture_random_state *trsp)
>  {
>  	const unsigned long shortdelay_us = 2;
> @@ -534,10 +539,18 @@ __releases(torture_rtmutex)
>  	rt_mutex_unlock(&torture_rtmutex);
>  }
>  
> +static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
> +{
> +	if (!rt_boost)
> +		return;
> +
> +	__torture_rt_boost(trsp);
> +}
> +
>  static struct lock_torture_ops rtmutex_lock_ops = {
>  	.writelock	= torture_rtmutex_lock,
>  	.write_delay	= torture_rtmutex_delay,
> -	.task_boost     = torture_rtmutex_boost,
> +	.task_boost     = torture_rt_boost_rtmutex,
>  	.writeunlock	= torture_rtmutex_unlock,
>  	.readlock       = NULL,
>  	.read_delay     = NULL,
> @@ -604,7 +617,7 @@ __releases(torture_rwsem)
>  static struct lock_torture_ops rwsem_lock_ops = {
>  	.writelock	= torture_rwsem_down_write,
>  	.write_delay	= torture_rwsem_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_rwsem_up_write,
>  	.readlock       = torture_rwsem_down_read,
>  	.read_delay     = torture_rwsem_read_delay,
> @@ -656,7 +669,7 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = {
>  	.exit		= torture_percpu_rwsem_exit,
>  	.writelock	= torture_percpu_rwsem_down_write,
>  	.write_delay	= torture_rwsem_write_delay,
> -	.task_boost     = torture_boost_dummy,
> +	.task_boost     = torture_rt_boost,
>  	.writeunlock	= torture_percpu_rwsem_up_write,
>  	.readlock       = torture_percpu_rwsem_down_read,
>  	.read_delay     = torture_rwsem_read_delay,
> -- 
> 2.39.0.rc1.256.g54fd8350bd-goog
> 

Powered by blists - more mailing lists