Message-ID: <YjN7wPMEBVIuOiGN@ip-172-31-19-208.ap-northeast-1.compute.internal>
Date:   Thu, 17 Mar 2022 18:19:44 +0000
From:   Hyeonggon Yoo <42.hyeyoo@...il.com>
To:     Namhyung Kim <namhyung@...nel.org>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>, Will Deacon <will@...nel.org>,
        Waiman Long <longman@...hat.com>,
        Boqun Feng <boqun.feng@...il.com>,
        LKML <linux-kernel@...r.kernel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Steven Rostedt <rostedt@...dmis.org>,
        Byungchul Park <byungchul.park@....com>,
        "Paul E. McKenney" <paulmck@...nel.org>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Arnd Bergmann <arnd@...db.de>,
        Radoslaw Burny <rburny@...gle.com>, linux-arch@...r.kernel.org,
        bpf@...r.kernel.org
Subject: Re: [PATCH 2/2] locking: Apply contention tracepoints in the slow
 path

On Wed, Mar 16, 2022 at 03:45:48PM -0700, Namhyung Kim wrote:
> Adding the lock contention tracepoints in various lock function slow
> paths.  Note that each arch can define spinlock differently, so I
> added it only to the generic qspinlock for now.
> 
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> ---
>  kernel/locking/mutex.c        |  3 +++
>  kernel/locking/percpu-rwsem.c |  3 +++
>  kernel/locking/qrwlock.c      |  9 +++++++++
>  kernel/locking/qspinlock.c    |  5 +++++
>  kernel/locking/rtmutex.c      | 11 +++++++++++
>  kernel/locking/rwbase_rt.c    |  3 +++
>  kernel/locking/rwsem.c        |  9 +++++++++
>  kernel/locking/semaphore.c    | 14 +++++++++++++-
>  8 files changed, 56 insertions(+), 1 deletion(-)
>

[...]

> diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
> index 9ee381e4d2a4..e3c19668dfee 100644
> --- a/kernel/locking/semaphore.c
> +++ b/kernel/locking/semaphore.c
> @@ -32,6 +32,7 @@
>  #include <linux/semaphore.h>
>  #include <linux/spinlock.h>
>  #include <linux/ftrace.h>
> +#include <trace/events/lock.h>
>  
>  static noinline void __down(struct semaphore *sem);
>  static noinline int __down_interruptible(struct semaphore *sem);
> @@ -209,6 +210,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
>  								long timeout)
>  {
>  	struct semaphore_waiter waiter;
> +	bool tracing = false;
>  
>  	list_add_tail(&waiter.list, &sem->wait_list);
>  	waiter.task = current;
> @@ -220,18 +222,28 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
>  		if (unlikely(timeout <= 0))
>  			goto timed_out;
>  		__set_current_state(state);
> +		if (!tracing) {
> +			trace_contention_begin(sem, 0);
> +			tracing = true;
> +		}
>  		raw_spin_unlock_irq(&sem->lock);
>  		timeout = schedule_timeout(timeout);
>  		raw_spin_lock_irq(&sem->lock);
> -		if (waiter.up)
> +		if (waiter.up) {
> +			trace_contention_end(sem, 0);
>  			return 0;
> +		}
>  	}
>  
>   timed_out:
> +	if (tracing)
> +		trace_contention_end(sem, -ETIME);
>  	list_del(&waiter.list);
>  	return -ETIME;
>  
>   interrupted:
> +	if (tracing)
> +		trace_contention_end(sem, -EINTR);
>  	list_del(&waiter.list);
>  	return -EINTR;
>  }

Why not simply remove the tracing variable and call trace_contention_begin()
earlier, as in rwsem? We can just ignore it when ret != 0.
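
Something like this, maybe (untested, just to illustrate the idea; the loop
body is reconstructed from the hunk above and the current semaphore.c):

static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	/* Fire the begin tracepoint once, before entering the wait loop. */
	trace_contention_begin(sem, 0);

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up) {
			trace_contention_end(sem, 0);
			return 0;
		}
	}

 timed_out:
	/* End tracepoint carries the error; consumers can ignore ret != 0. */
	trace_contention_end(sem, -ETIME);
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	trace_contention_end(sem, -EINTR);
	list_del(&waiter.list);
	return -EINTR;
}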

-- 
Thank you, You are awesome!
Hyeonggon :-)
