Message-ID: <20190416144231.GQ14281@hirez.programming.kicks-ass.net>
Date: Tue, 16 Apr 2019 16:42:31 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Waiman Long <longman@...hat.com>
Cc: Ingo Molnar <mingo@...hat.com>, Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org, x86@...nel.org,
Davidlohr Bueso <dave@...olabs.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
huang ying <huang.ying.caritas@...il.com>
Subject: Re: [PATCH v4 04/16] locking/rwsem: Implement a new locking scheme
On Tue, Apr 16, 2019 at 04:18:20PM +0200, Peter Zijlstra wrote:
> On Tue, Apr 16, 2019 at 09:32:38AM -0400, Waiman Long wrote:
> > On 04/16/2019 09:22 AM, Peter Zijlstra wrote:
> > > On Sat, Apr 13, 2019 at 01:22:47PM -0400, Waiman Long wrote:
> > >> +#define RWSEM_COUNT_LOCKED(c) ((c) & RWSEM_LOCK_MASK)
> > > The above doesn't seem to make it more readable or shorter.
> >
> > Fair enough. I can remove that macro.
>
> I did the same for the HANDOFF patch but seem to have misplaced the
> delta and have already refreshed the patch.
Had to redo it, so here goes:
---
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -102,9 +102,6 @@
 #define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
 				 RWSEM_FLAG_HANDOFF)
-#define RWSEM_COUNT_HANDOFF(c)	((c) & RWSEM_FLAG_HANDOFF)
-#define RWSEM_COUNT_LOCKED_OR_HANDOFF(c)	\
-	((c) & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))
 
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
  * store tearing can't happen as optimistic spinners may read and use
@@ -365,7 +362,7 @@ static void __rwsem_mark_wake(struct rw_
 	/*
 	 * Clear the handoff flag
 	 */
-	if (woken && RWSEM_COUNT_HANDOFF(atomic_long_read(&sem->count)))
+	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
 		adjustment -= RWSEM_FLAG_HANDOFF;
 
 	if (adjustment)
@@ -387,7 +384,7 @@ static inline bool rwsem_try_write_lock(
 
 retry:
 	if (count & RWSEM_LOCK_MASK) {
-		if (RWSEM_COUNT_HANDOFF(count) || (wstate != WRITER_HANDOFF))
+		if ((count & RWSEM_FLAG_HANDOFF) || (wstate != WRITER_HANDOFF))
 			return false;
 		/*
 		 * The lock may become free just before setting handoff bit.
@@ -398,7 +395,7 @@ static inline bool rwsem_try_write_lock(
 		goto retry;
 	}
 
-	if ((wstate == WRITER_NOT_FIRST) && RWSEM_COUNT_HANDOFF(count))
+	if ((wstate == WRITER_NOT_FIRST) && (count & RWSEM_FLAG_HANDOFF))
 		return false;
 
 	new = (count & ~RWSEM_FLAG_HANDOFF) + RWSEM_WRITER_LOCKED -
@@ -409,7 +406,7 @@ static inline bool rwsem_try_write_lock(
 		return true;
 	}
 
-	if (unlikely((wstate == WRITER_HANDOFF) && !RWSEM_COUNT_HANDOFF(count)))
+	if (unlikely((wstate == WRITER_HANDOFF) && !(count & RWSEM_FLAG_HANDOFF)))
 		goto retry;
 
 	return false;
@@ -704,7 +701,7 @@ __rwsem_down_write_failed_common(struct
 		    rwsem_waiter_is_first(sem, &waiter))
			wstate = WRITER_FIRST;
 
-		if (!RWSEM_COUNT_LOCKED(count))
+		if (!(count & RWSEM_LOCK_MASK))
			break;
 
		/*
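
For reference, the delta above is purely mechanical: each removed wrapper was a
one-line macro around a bitmask test, so it is replaced by the test it expands
to. A minimal standalone sketch of the equivalence, with a made-up bit value
standing in for the real RWSEM_FLAG_HANDOFF layout in kernel/locking/rwsem.c:

/* sketch.c - not kernel code; the flag bit below is a placeholder */
#include <assert.h>
#include <stdio.h>

#define RWSEM_FLAG_HANDOFF	(1UL << 2)	/* placeholder bit position */

/* old style: wrapper macro around a bitmask test */
#define RWSEM_COUNT_HANDOFF(c)	((c) & RWSEM_FLAG_HANDOFF)

int main(void)
{
	unsigned long count = RWSEM_FLAG_HANDOFF | 1UL;

	/* new style: open-coded test; identical result, no extra macro */
	assert(RWSEM_COUNT_HANDOFF(count) == (count & RWSEM_FLAG_HANDOFF));
	printf("handoff bit set: %d\n", !!(count & RWSEM_FLAG_HANDOFF));
	return 0;
}

Either form compiles to the same check; the point of the cleanup, as noted
earlier in the thread, is simply that the open-coded version is no less
readable or shorter than the macro.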