Message-ID: <20190416160113.GM12232@hirez.programming.kicks-ass.net>
Date: Tue, 16 Apr 2019 18:01:13 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Waiman Long <longman@...hat.com>
Cc: Ingo Molnar <mingo@...hat.com>, Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>,
linux-kernel@...r.kernel.org, x86@...nel.org,
Davidlohr Bueso <dave@...olabs.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
huang ying <huang.ying.caritas@...il.com>
Subject: Re: [PATCH v4 06/16] locking/rwsem: Code cleanup after files merging

More cleanups: rename the slow paths to rwsem_down_{read,write}_slow() and
pass the sleep state in directly instead of keeping the
*_failed{,_killable}() wrappers, and convert the write-side
cmpxchg_acquire() fast paths to try_cmpxchg_acquire().
---
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -303,7 +303,7 @@ static void __rwsem_mark_wake(struct rw_
list_del(&waiter->list);
/*
* Ensure calling get_task_struct() before setting the reader
- * waiter to nil such that rwsem_down_read_failed() cannot
+ * waiter to nil such that rwsem_down_read_slow() cannot
* race with do_exit() by always holding a reference count
* to the task to wakeup.
*/
@@ -500,7 +500,7 @@ static bool rwsem_optimistic_spin(struct
* Wait for the read lock to be granted
*/
static inline struct rw_semaphore __sched *
-__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+rwsem_down_read_slow(struct rw_semaphore *sem, int state)
{
long count, adjustment = -RWSEM_READER_BIAS;
struct rwsem_waiter waiter;
@@ -572,23 +572,11 @@ __rwsem_down_read_failed_common(struct r
return ERR_PTR(-EINTR);
}

-static inline struct rw_semaphore * __sched
-rwsem_down_read_failed(struct rw_semaphore *sem)
-{
- return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-static inline struct rw_semaphore * __sched
-rwsem_down_read_failed_killable(struct rw_semaphore *sem)
-{
- return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
-}
-
/*
* Wait until we successfully acquire the write lock
*/
static inline struct rw_semaphore *
-__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
+rwsem_down_write_slow(struct rw_semaphore *sem, int state)
{
long count;
bool waiting = true; /* any queued threads before us */
@@ -689,18 +677,6 @@ __rwsem_down_write_failed_common(struct
return ERR_PTR(-EINTR);
}

-static inline struct rw_semaphore * __sched
-rwsem_down_write_failed(struct rw_semaphore *sem)
-{
- return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
-}
-
-static inline struct rw_semaphore * __sched
-rwsem_down_write_failed_killable(struct rw_semaphore *sem)
-{
- return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
-}
-
/*
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
@@ -749,7 +725,7 @@ inline void __down_read(struct rw_semaph
{
if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
&sem->count) & RWSEM_READ_FAILED_MASK)) {
- rwsem_down_read_failed(sem);
+ rwsem_down_read_slow(sem, TASK_UNINTERRUPTIBLE);
DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
RWSEM_READER_OWNED), sem);
} else {
@@ -761,7 +737,7 @@ static inline int __down_read_killable(s
{
if (unlikely(atomic_long_fetch_add_acquire(RWSEM_READER_BIAS,
&sem->count) & RWSEM_READ_FAILED_MASK)) {
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+ if (IS_ERR(rwsem_down_read_slow(sem, TASK_KILLABLE)))
return -EINTR;
DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
RWSEM_READER_OWNED), sem);
@@ -794,34 +770,38 @@ static inline int __down_read_trylock(st
*/
static inline void __down_write(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
- RWSEM_WRITER_LOCKED)))
- rwsem_down_write_failed(sem);
+ long tmp = RWSEM_UNLOCKED_VALUE;
+
+	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+						      RWSEM_WRITER_LOCKED)))
+ rwsem_down_write_slow(sem, TASK_UNINTERRUPTIBLE);
rwsem_set_owner(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_cmpxchg_acquire(&sem->count, 0,
- RWSEM_WRITER_LOCKED)))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+ long tmp = RWSEM_UNLOCKED_VALUE;
+
+	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+						      RWSEM_WRITER_LOCKED))) {
+ if (IS_ERR(rwsem_down_write_slow(sem, TASK_KILLABLE)))
return -EINTR;
+ }
rwsem_set_owner(sem);
return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
- long tmp;
+	long tmp = RWSEM_UNLOCKED_VALUE;

lockevent_inc(rwsem_wtrylock);
- tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_WRITER_LOCKED);
- if (tmp == RWSEM_UNLOCKED_VALUE) {
- rwsem_set_owner(sem);
- return true;
- }
- return false;
+ if (!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+ RWSEM_WRITER_LOCKED))
+ return false;
+
+ rwsem_set_owner(sem);
+ return true;
}

/*
@@ -831,12 +811,11 @@ inline void __up_read(struct rw_semaphor
{
	long tmp;

- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
- sem);
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED), sem);
rwsem_clear_reader_owned(sem);
tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
- if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS))
- == RWSEM_FLAG_WAITERS))
+ if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
+ RWSEM_FLAG_WAITERS))
rwsem_wake(sem);
}

@@ -848,7 +827,7 @@ static inline void __up_write(struct rw_
DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
rwsem_clear_owner(sem);
if (unlikely(atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED,
- &sem->count) & RWSEM_FLAG_WAITERS))
+ &sem->count) & RWSEM_FLAG_WAITERS))
rwsem_wake(sem);
}
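
A note on the try_cmpxchg() conversion above: unlike cmpxchg(), which
returns the old value, try_cmpxchg() returns a boolean and, on failure,
writes the value it observed back into its "expected" argument. That is
why tmp is initialized to RWSEM_UNLOCKED_VALUE before each call and why
the fast paths branch to the slow path on a false return; on x86 the
compiler can also use the ZF set by CMPXCHG directly instead of
re-comparing the returned value. A minimal userspace sketch of the same
calling convention, using C11 atomics rather than the kernel's
atomic_long_*() API (the RWSEM_WRITER_LOCKED value here is illustrative,
not the kernel's real count layout):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0L
#define RWSEM_WRITER_LOCKED	1L	/* illustrative bit only */

static atomic_long count = RWSEM_UNLOCKED_VALUE;

/* Same shape as __down_write_trylock(): succeed only from the unlocked state. */
static bool down_write_trylock(void)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	/*
	 * Like atomic_long_try_cmpxchg_acquire(): if count == tmp, install
	 * RWSEM_WRITER_LOCKED and return true; otherwise return false and
	 * copy the value actually seen back into tmp.
	 */
	return atomic_compare_exchange_strong_explicit(&count, &tmp,
						       RWSEM_WRITER_LOCKED,
						       memory_order_acquire,
						       memory_order_relaxed);
}

int main(void)
{
	printf("first  trylock: %d\n", down_write_trylock());	/* 1: acquired */
	printf("second trylock: %d\n", down_write_trylock());	/* 0: already held */
	return 0;
}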