Date:   Fri, 6 Dec 2019 17:00:24 -0500
From:   Waiman Long <longman@...hat.com>
To:     Alex Kogan <alex.kogan@...cle.com>, linux@...linux.org.uk,
        peterz@...radead.org, mingo@...hat.com, will.deacon@....com,
        arnd@...db.de, linux-arch@...r.kernel.org,
        linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
        tglx@...utronix.de, bp@...en8.de, hpa@...or.com, x86@...nel.org,
        guohanjun@...wei.com, jglauber@...vell.com
Cc:     steven.sistare@...cle.com, daniel.m.jordan@...cle.com,
        dave.dice@...cle.com, rahul.x.yadav@...cle.com
Subject: Re: [PATCH v7 5/5] locking/qspinlock: Introduce the shuffle reduction
 optimization into CNA

On 11/25/19 4:07 PM, Alex Kogan wrote:
> @@ -234,12 +263,13 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
>  	struct cna_node *cn = (struct cna_node *)node;
>  
>  	/*
> -	 * setting @pre_scan_result to 1 indicates that no post-scan
> +	 * setting @pre_scan_result to 1 or 2 indicates that no post-scan
>  	 * should be made in cna_pass_lock()
>  	 */
>  	cn->pre_scan_result =
> -		cn->intra_count == intra_node_handoff_threshold ?
> -			1 : cna_scan_main_queue(node, node);
> +		(node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) ?
> +			1 : cn->intra_count == intra_node_handoff_threshold ?
> +			2 : cna_scan_main_queue(node, node);
>  
>  	return 0;
>  }
> @@ -253,12 +283,15 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
>  
>  	u32 scan = cn->pre_scan_result;
>  
> +	if (scan == 1)
> +		goto pass_lock;
> +
>  	/*
>  	 * check if a successor from the same numa node has not been found in
>  	 * pre-scan, and if so, try to find it in post-scan starting from the
>  	 * node where pre-scan stopped (stored in @pre_scan_result)
>  	 */
> -	if (scan > 1)
> +	if (scan > 2)
>  		scan = cna_scan_main_queue(node, decode_tail(scan));
>  
>  	if (!scan) { /* if found a successor from the same numa node */
> @@ -281,5 +314,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
>  		tail_2nd->next = next;
>  	}
>  
> +pass_lock:
>  	arch_mcs_pass_lock(&next_holder->locked, val);
>  }

I think you might have mishandled the proper accounting of intra_count.
How about something like:

diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h
index f1eef6bece7b..03f8fdec2b80 100644
--- a/kernel/locking/qspinlock_cna.h
+++ b/kernel/locking/qspinlock_cna.h
@@ -268,7 +268,7 @@ __always_inline u32 cna_pre_scan(struct qspinlock *lock,
          */
         cn->pre_scan_result =
                 (node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) ?
-                        1 : cn->intra_count == intra_node_handoff_threshold ?
+                        1 : cn->intra_count >= intra_node_handoff_threshold ?
                         2 : cna_scan_main_queue(node, node);
 
         return 0;
@@ -283,9 +283,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
 
         u32 scan = cn->pre_scan_result;
 
-        if (scan == 1)
-                goto pass_lock;
-
         /*
          * check if a successor from the same numa node has not been found in
          * pre-scan, and if so, try to find it in post-scan starting from the
@@ -294,7 +291,13 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
         if (scan > 2)
                 scan = cna_scan_main_queue(node, decode_tail(scan));
 
-        if (!scan) { /* if found a successor from the same numa node */
+        if (scan <= 1) { /* if found a successor from the same numa node */
+                /* inc @intra_count if the secondary queue is not empty */
+                ((struct cna_node *)next_holder)->intra_count =
+                        cn->intra_count + (node->locked > 1);
+                if (scan == 1)
+                        goto pass_lock;
+
                 next_holder = node->next;
                 /*
                  * we unlock successor by passing a non-zero value,
@@ -302,9 +305,6 @@ static inline void cna_pass_lock(struct mcs_spinlock *node,
                  * if we acquired the MCS lock when its queue was empty
                  */
                 val = node->locked ? node->locked : 1;
-                /* inc @intra_count if the secondary queue is not empty */
-                ((struct cna_node *)next_holder)->intra_count =
-                        cn->intra_count + (node->locked > 1);
         } else if (node->locked > 1) {    /* if secondary queue is not empty */
                 /* next holder will be the first node in the secondary queue */
                 tail_2nd = decode_tail(node->locked);

The meaning of the scan values:

0  - pass the lock to the next cna node, which is on the same numa node.
     An additional cna node may or may not be added to the secondary queue.

1  - pass the lock to the next cna node, which may not be on the same numa
     node. No change to the secondary queue.

2  - the intra-node handoff threshold has been reached; unconditionally
     merge back the cna nodes in the secondary queue, if available.

>2 - no cna node on the same numa node was found; unconditionally merge
     back the cna nodes in the secondary queue, if available.

The code will be easier to read if symbolic names are used instead of just
the raw numbers.
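A minimal sketch of what such names might look like (the identifiers and the
exact mapping below are only illustrative, not taken from the patch series):

/* Illustrative names only -- not part of the CNA patches. */
#define PASS_LOCK_LOCAL   0  /* same-numa-node successor found; may shuffle */
#define PASS_LOCK_AS_IS   1  /* skip shuffling; secondary queue untouched */
#define PASS_LOCK_FLUSH   2  /* handoff threshold reached; merge secondary queue back */
/* values > 2 encode the tail of the node where the pre-scan stopped */

cna_pre_scan() and cna_pass_lock() would then compare against these names
rather than the bare constants 1 and 2.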

Cheers,
Longman

