Message-Id: <1550017627-14504-6-git-send-email-longman@redhat.com>
Date: Tue, 12 Feb 2019 19:27:02 -0500
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will.deacon@....com>,
Thomas Gleixner <tglx@...utronix.de>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Arnd Bergmann <arnd@...db.de>, Borislav Petkov <bp@...en8.de>,
"H. Peter Anvin" <hpa@...or.com>,
Davidlohr Bueso <dave@...olabs.net>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Waiman Long <longman@...hat.com>
Subject: [PATCH 05/10] locking/rwsem: Add debug check for __down_read*()

When rwsem_down_read_failed*() returns, the read lock has already been
acquired indirectly, on the waiter's behalf, by another task. Debug
checks are therefore added to __down_read() and __down_read_killable()
to verify that the rwsem is really reader-owned in that path.

The other debug checks in kernel/locking/rwsem.c, except the one in
up_read_non_owner(), are also moved over to rwsem-xadd.h.
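
For illustration, the new check only tests a low bit of the owner
field. Below is a minimal userspace sketch of the idea, not the actual
kernel code; the struct layout and the RWSEM_READER_OWNED value are
simplified assumptions (in the kernel, ->owner holds a task_struct
pointer whose low bits are reused as flags):

	#include <stdio.h>

	#define RWSEM_READER_OWNED	(1UL << 0)	/* low bit of ->owner */

	struct rw_semaphore {
		unsigned long owner;	/* writer task pointer, or reader marker */
	};

	/* Simplified stand-in for rwsem_set_reader_owned(). */
	static void rwsem_set_reader_owned(struct rw_semaphore *sem)
	{
		sem->owner |= RWSEM_READER_OWNED;
	}

	/* Open-coded form of the DEBUG_RWSEMS_WARN_ON() check this patch adds. */
	static void warn_if_not_reader_owned(struct rw_semaphore *sem)
	{
		if (!(sem->owner & RWSEM_READER_OWNED))
			fprintf(stderr, "rwsem %p not reader-owned\n", (void *)sem);
	}

	int main(void)
	{
		struct rw_semaphore sem = { .owner = 0 };

		warn_if_not_reader_owned(&sem);	/* fires: owner bit clear */
		rwsem_set_reader_owned(&sem);
		warn_if_not_reader_owned(&sem);	/* silent: reader-owned */
		return 0;
	}
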
Signed-off-by: Waiman Long <longman@...hat.com>
---
kernel/locking/rwsem-xadd.h | 12 ++++++++++--
kernel/locking/rwsem.c | 3 ---
2 files changed, 10 insertions(+), 5 deletions(-)
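
Usage note (not part of the patch): with CONFIG_DEBUG_RWSEMS=y, the
check now sitting in __up_read() should still catch an unbalanced
unlock such as the hypothetical misuse below, because after
down_write() the owner field holds the writer's task pointer and the
RWSEM_READER_OWNED bit is clear:

	down_write(&sem);
	up_read(&sem);	/* warns: sem is writer-owned, not reader-owned */
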
diff --git a/kernel/locking/rwsem-xadd.h b/kernel/locking/rwsem-xadd.h
index fac363b..12cbb80 100644
--- a/kernel/locking/rwsem-xadd.h
+++ b/kernel/locking/rwsem-xadd.h
@@ -165,10 +165,13 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
*/
static inline void __down_read(struct rw_semaphore *sem)
{
- if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+ if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
rwsem_down_read_failed(sem);
- else
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+ RWSEM_READER_OWNED));
+ } else {
rwsem_set_reader_owned(sem);
+ }
}
static inline int __down_read_killable(struct rw_semaphore *sem)
@@ -176,6 +179,8 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
if (IS_ERR(rwsem_down_read_failed_killable(sem)))
return -EINTR;
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
+ RWSEM_READER_OWNED));
} else {
rwsem_set_reader_owned(sem);
}
@@ -243,6 +248,7 @@ static inline void __up_read(struct rw_semaphore *sem)
{
long tmp;
+ DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
rwsem_clear_reader_owned(sem);
tmp = atomic_long_dec_return_release(&sem->count);
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
@@ -254,6 +260,7 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current);
rwsem_clear_owner(sem);
if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
&sem->count) < 0))
@@ -274,6 +281,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
* read-locked region is ok to be re-ordered into the
* write side. As such, rely on RELEASE semantics.
*/
+ DEBUG_RWSEMS_WARN_ON(sem->owner != current);
tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
rwsem_set_reader_owned(sem);
if (tmp < 0)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index b3b4582..598fc7c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -114,7 +114,6 @@ int down_write_trylock(struct rw_semaphore *sem)
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED));
__up_read(sem);
}
@@ -127,7 +126,6 @@ void up_read(struct rw_semaphore *sem)
void up_write(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(sem->owner != current);
__up_write(sem);
}
@@ -140,7 +138,6 @@ void up_write(struct rw_semaphore *sem)
void downgrade_write(struct rw_semaphore *sem)
{
lock_downgrade(&sem->dep_map, _RET_IP_);
- DEBUG_RWSEMS_WARN_ON(sem->owner != current);
__downgrade_write(sem);
}
--
1.8.3.1