Signed-off-by: Peter Zijlstra (Intel)
---
 include/linux/percpu-rwsem.h  | 17 +++++++++++++++++
 kernel/locking/percpu-rwsem.c | 12 ++++++++++++
 2 files changed, 29 insertions(+)

--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -17,6 +17,7 @@ struct percpu_rw_semaphore {
 };
 
 extern void __percpu_down_read(struct percpu_rw_semaphore *);
+extern bool __percpu_down_read_trylock(struct percpu_rw_semaphore *);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
@@ -45,6 +46,22 @@ static inline void percpu_down_read(stru
 	 */
 }
 
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+	bool ret = true;
+
+	preempt_disable();
+	__this_cpu_inc(*sem->refcount);
+	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+		ret = __percpu_down_read_trylock(sem);
+	preempt_enable();
+
+	if (ret)
+		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+
 static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
 	/*
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -80,6 +80,18 @@ void __percpu_down_read(struct percpu_rw
 	preempt_disable();
 }
 
+bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+	smp_mb(); /* A matches D */
+
+	if (likely(smp_load_acquire(&sem->state) != readers_block))
+		return true;
+
+	__percpu_up_read(sem);
+
+	return false;
+}
+
 void __percpu_up_read(struct percpu_rw_semaphore *sem)
 {
 	smp_mb(); /* B matches C */
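
For illustration only, a minimal sketch of how a caller that must not sleep might use the new primitive; the my_sem and my_nonblocking_op names are hypothetical and not part of the patch. On the fast path (rcu_sync idle, no writer around) the trylock is just a preempt-disabled per-cpu increment; it only drops into __percpu_down_read_trylock() when a writer is active or pending.

/*
 * Hypothetical usage sketch (not part of the patch): take the read
 * side opportunistically and bail out with -EAGAIN while a writer
 * holds, or is acquiring, the lock.
 */
static struct percpu_rw_semaphore my_sem;	/* percpu_init_rwsem(&my_sem) at init time */

static int my_nonblocking_op(void)
{
	if (!percpu_down_read_trylock(&my_sem))
		return -EAGAIN;

	/* read-side critical section; writers are excluded here */

	percpu_up_read(&my_sem);
	return 0;
}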