Message-ID: <ZPiiDj1T3lGp2w2c@casper.infradead.org>
Date: Wed, 6 Sep 2023 17:00:14 +0100
From: Matthew Wilcox <willy@...radead.org>
To: "Darrick J. Wong" <djwong@...nel.org>
Cc: Bernd Schubert <bernd.schubert@...tmail.fm>,
Mateusz Guzik <mjguzik@...il.com>, brauner@...nel.org,
viro@...iv.linux.org.uk, linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org
Subject: Re: [RFC PATCH] vfs: add inode lockdep assertions

On Wed, Sep 06, 2023 at 08:29:48AM -0700, Darrick J. Wong wrote:
> Or hoist the XFS mrlock, because it actually /does/ know if the rwsem is
> held in shared or exclusive mode.

... or to put it another way, if we had rwsem_is_write_locked(),
we could get rid of mrlock?
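
The !PREEMPT_RT variant below just tests bit 0 of sem->count, i.e. it
assumes the writer bit stays where kernel/locking/rwsem.c defines it
today:

	/* kernel/locking/rwsem.c (quoted for context, not part of the patch): */
	#define RWSEM_WRITER_LOCKED	(1UL << 0)
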
diff --git a/fs/xfs/mrlock.h b/fs/xfs/mrlock.h
index 79155eec341b..5530f03aaed1 100644
--- a/fs/xfs/mrlock.h
+++ b/fs/xfs/mrlock.h
@@ -10,18 +10,10 @@
typedef struct {
struct rw_semaphore mr_lock;
-#if defined(DEBUG) || defined(XFS_WARN)
- int mr_writer;
-#endif
} mrlock_t;
-#if defined(DEBUG) || defined(XFS_WARN)
-#define mrinit(mrp, name) \
- do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
-#else
#define mrinit(mrp, name) \
do { init_rwsem(&(mrp)->mr_lock); } while (0)
-#endif
#define mrlock_init(mrp, t,n,s) mrinit(mrp, n)
#define mrfree(mrp) do { } while (0)
@@ -34,9 +26,6 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass)
static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
{
down_write_nested(&mrp->mr_lock, subclass);
-#if defined(DEBUG) || defined(XFS_WARN)
- mrp->mr_writer = 1;
-#endif
}
static inline int mrtryaccess(mrlock_t *mrp)
@@ -48,17 +37,11 @@ static inline int mrtryupdate(mrlock_t *mrp)
{
if (!down_write_trylock(&mrp->mr_lock))
return 0;
-#if defined(DEBUG) || defined(XFS_WARN)
- mrp->mr_writer = 1;
-#endif
return 1;
}
static inline void mrunlock_excl(mrlock_t *mrp)
{
-#if defined(DEBUG) || defined(XFS_WARN)
- mrp->mr_writer = 0;
-#endif
up_write(&mrp->mr_lock);
}
@@ -69,9 +52,6 @@ static inline void mrunlock_shared(mrlock_t *mrp)
static inline void mrdemote(mrlock_t *mrp)
{
-#if defined(DEBUG) || defined(XFS_WARN)
- mrp->mr_writer = 0;
-#endif
downgrade_write(&mrp->mr_lock);
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 9e62cc500140..b99c3bd78c5e 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -361,7 +361,7 @@ xfs_isilocked(
{
if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
if (!(lock_flags & XFS_ILOCK_SHARED))
- return !!ip->i_lock.mr_writer;
+ return rwsem_is_write_locked(&ip->i_lock.mr_lock);
return rwsem_is_locked(&ip->i_lock.mr_lock);
}
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index e05e167dbd16..277b8c96bbf9 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -69,7 +69,7 @@ static inline void mmap_assert_locked(struct mm_struct *mm)
static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
lockdep_assert_held_write(&mm->mmap_lock);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+ VM_BUG_ON_MM(!rwsem_is_write_locked(&mm->mmap_lock), mm);
}
#ifdef CONFIG_PER_VMA_LOCK
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
index 1d264dd08625..3c25b14edc05 100644
--- a/include/linux/rwbase_rt.h
+++ b/include/linux/rwbase_rt.h
@@ -31,6 +31,11 @@ static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
return atomic_read(&rwb->readers) != READER_BIAS;
}
+static __always_inline bool rw_base_is_write_locked(struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) == WRITER_BIAS;
+}
+
static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
{
return atomic_read(&rwb->readers) > 0;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 1dd530ce8b45..241a12c6019e 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -72,6 +72,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
return atomic_long_read(&sem->count) != 0;
}
+static inline int rwsem_is_write_locked(struct rw_semaphore *sem)
+{
+ return atomic_long_read(&sem->count) & 1;
+}
+
#define RWSEM_UNLOCKED_VALUE 0L
#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
@@ -157,6 +162,11 @@ static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
return rw_base_is_locked(&sem->rwbase);
}
+static __always_inline int rwsem_is_write_locked(struct rw_semaphore *sem)
+{
+ return rw_base_is_write_locked(&sem->rwbase);
+}
+
static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
return rw_base_is_contended(&sem->rwbase);
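
With that in place, asserting exclusive hold on i_rwsem from the VFS
side could be as simple as something like this (untested sketch, not
part of the patch above):

	/* Illustrative only: complain if the inode rwsem is not write-locked. */
	WARN_ON_ONCE(!rwsem_is_write_locked(&inode->i_rwsem));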