Message-ID: <20230119230320.GB2948950@paulmck-ThinkPad-P17-Gen-1>
Date:   Thu, 19 Jan 2023 15:03:20 -0800
From:   "Paul E. McKenney" <paulmck@...nel.org>
To:     Alan Stern <stern@...land.harvard.edu>
Cc:     Jonas Oberhauser <jonas.oberhauser@...weicloud.com>,
        Andrea Parri <parri.andrea@...il.com>,
        Jonas Oberhauser <jonas.oberhauser@...wei.com>,
        Peter Zijlstra <peterz@...radead.org>, will <will@...nel.org>,
        "boqun.feng" <boqun.feng@...il.com>, npiggin <npiggin@...il.com>,
        dhowells <dhowells@...hat.com>,
        "j.alglave" <j.alglave@....ac.uk>,
        "luc.maranget" <luc.maranget@...ia.fr>, akiyks <akiyks@...il.com>,
        dlustig <dlustig@...dia.com>, joel <joel@...lfernandes.org>,
        urezki <urezki@...il.com>,
        quic_neeraju <quic_neeraju@...cinc.com>,
        frederic <frederic@...nel.org>,
        Kernel development list <linux-kernel@...r.kernel.org>
Subject: Re: Internal vs. external barriers (was: Re: Interesting LKMM litmus
 test)

On Thu, Jan 19, 2023 at 05:04:49PM -0500, Alan Stern wrote:
> On Thu, Jan 19, 2023 at 01:53:04PM -0800, Paul E. McKenney wrote:
> > On Thu, Jan 19, 2023 at 02:51:53PM -0500, Alan Stern wrote:
> > > Index: usb-devel/tools/memory-model/linux-kernel.bell
> > > ===================================================================
> > > --- usb-devel.orig/tools/memory-model/linux-kernel.bell
> > > +++ usb-devel/tools/memory-model/linux-kernel.bell
> > > @@ -53,38 +53,30 @@ let rcu-rscs = let rec
> > >  	in matched
> > >  
> > >  (* Validate nesting *)
> > > -flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
> > > -flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
> > > +flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-lock
> > > +flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-unlock
> > 
> > This renaming makes sense to me.
> 
> But I'll put it in a separate patch, since it's not related to the main 
> purpose of this change.

Even better!
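
As an aside for anyone reading the archive, what these flags catch is a
lock with no matching unlock. A made-up sketch that would now be
reported as unbalanced-rcu-lock rather than the old catch-all
unbalanced-rcu-locking:

C C-unbalanced-rcu-lock

(*
 * Hypothetical sketch, not from the patch: the rcu_read_lock()
 * below has no matching rcu_read_unlock(), so it is absent from
 * domain(rcu-rscs) and trips the unbalanced-rcu-lock flag.
 *)

{}

P0(int *x)
{
	rcu_read_lock();
	WRITE_ONCE(*x, 1);
}

exists (x=1)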

> > >  (* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
> > > -let srcu-rscs = let rec
> > > -	    unmatched-locks = Srcu-lock \ domain(matched)
> > > -	and unmatched-unlocks = Srcu-unlock \ range(matched)
> > > -	and unmatched = unmatched-locks | unmatched-unlocks
> > > -	and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
> > > -	and unmatched-locks-to-unlocks =
> > > -		([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
> > > -	and matched = matched | (unmatched-locks-to-unlocks \
> > > -		(unmatched-po ; unmatched-po))
> > > -	in matched
> > > +let srcu-rscs = ([Srcu-lock] ; (data | rf)+ ; [Srcu-unlock]) & loc
> > 
> > The point of the "+" instead of the "*" is to avoid LKMM being confused by
> > an srcu_read_lock() immediately preceding an unrelated srcu_read_unlock(),
> > right?  Or am I missing something more subtle?
> 
> No, and it's not to avoid confusion.  It merely indicates that there has 
> to be at least one instance of data or rf here; we will never have a 
> case where the lock and the unlock are the same event.

Got it, thank you!
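
For the archive, the usual same-CPU case is the one-step instance of
that chain: the index returned by srcu_read_lock() reaches the matching
srcu_read_unlock() through a single data link (illustrative sketch, not
from the patch):

C C-srcu-simple-match

(*
 * Hypothetical sketch: the index in r0 flows from the
 * srcu_read_lock() load to the srcu_read_unlock() store as a
 * data dependency, the minimal instance of (data | rf)+.
 *)

{}

P0(struct srcu_struct *s, int *x)
{
	int r0;
	int r1;

	r0 = srcu_read_lock(s);
	r1 = READ_ONCE(*x);
	srcu_read_unlock(s, r0);
}

exists (0:r1=0)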

> > >  (* Validate nesting *)
> > > -flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
> > > -flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
> > > +flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-lock
> > > +flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-unlock
> > > +flag ~empty (srcu-rscs^-1 ; srcu-rscs) \ id as multiple-srcu-matches
> > >  
> > >  (* Check for use of synchronize_srcu() inside an RCU critical section *)
> > >  flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
> > >  
> > >  (* Validate SRCU dynamic match *)
> > > -flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
> > > +flag ~empty different-values(srcu-rscs) as bad-srcu-value-match
> > >  
> > >  (* Compute marked and plain memory accesses *)
> > >  let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
> > > -		LKR | LKW | UL | LF | RL | RU
> > > + 		LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
> > >  let Plain = M \ Marked
> > >  
> > >  (* Redefine dependencies to include those carried through plain accesses *)
> > > -let carry-dep = (data ; rfi)*
> > > +let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
> > 
> > The "[~ Srcu-unlock]" matches the store that bridges the data and rfi",
> > correct?
> 
> Right.
> 
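
For the archive, the bridging pattern in a made-up snippet: a data
dependency into an intermediate store, read back on the same CPU (rfi),
carries forward to later accesses. The new [~ Srcu-unlock] filter
simply keeps srcu_read_unlock()'s store out of that bridging position
now that it is modeled as a store:

C C-carry-dep-bridge

(*
 * Hypothetical sketch: the plain store to *y bridges data and
 * rfi, so the dependency from the load of *x carries through
 * to the WRITE_ONCE() to *z.
 *)

{}

P0(int *x, int *y, int *z)
{
	int r1;
	int r2;

	r1 = READ_ONCE(*x);
	*y = r1;
	r2 = *y;
	WRITE_ONCE(*z, r2);
}

exists (0:r2=0)
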
> > >  let addr = carry-dep ; addr
> > >  let ctrl = carry-dep ; ctrl
> > >  let data = carry-dep ; data
> > > Index: usb-devel/tools/memory-model/linux-kernel.def
> > > ===================================================================
> > > --- usb-devel.orig/tools/memory-model/linux-kernel.def
> > > +++ usb-devel/tools/memory-model/linux-kernel.def
> > > @@ -49,8 +49,10 @@ synchronize_rcu() { __fence{sync-rcu}; }
> > >  synchronize_rcu_expedited() { __fence{sync-rcu}; }
> > >  
> > >  // SRCU
> > > -srcu_read_lock(X)  __srcu{srcu-lock}(X)
> > > -srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
> > > +srcu_read_lock(X) __load{srcu-lock}(*X)
> > > +srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
> > > +srcu_down_read(X) __load{srcu-lock}(*X)
> > > +srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
> > 
> > And here srcu_down_read() and srcu_up_read() are synonyms for
> > srcu_read_lock() and srcu_read_unlock(), respectively, which I believe
> > should suffice.
> > 
> > >  synchronize_srcu(X)  { __srcu{sync-srcu}(X); }
> > >  synchronize_srcu_expedited(X)  { __srcu{sync-srcu}(X); }
> > 
> > So this looks quite reasonable to me.
> 
> Okay, good.  In theory we could check for read_lock and read_unlock on 
> different CPUs, but I don't think it's worth the trouble.

Given that lockdep already complains about that sort of thing in the
Linux kernel, agreed, it is not worth much trouble at all.
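
For completeness, here is the cross-CPU handoff that the rf link in
(data | rf)+ is there to match, again as a made-up sketch:

C C-srcu-down-up

(*
 * Hypothetical sketch: P0 enters the critical section with
 * srcu_down_read() and publishes the index; P1 reads it and
 * exits with srcu_up_read().  The lock and unlock are linked
 * by data ; rf ; data, satisfying (data | rf)+.
 *)

{}

P0(struct srcu_struct *s, int *idx)
{
	int r0;

	r0 = srcu_down_read(s);
	WRITE_ONCE(*idx, r0);
}

P1(struct srcu_struct *s, int *idx)
{
	int r1;

	r1 = READ_ONCE(*idx);
	srcu_up_read(s, r1);
}

exists (1:r1=0)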

							Thanx, Paul
