Message-Id: <20220607165008.798822478@linuxfoundation.org>
Date: Tue, 7 Jun 2022 18:55:23 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Bob Peterson <rpeterso@...hat.com>,
Andreas Gruenbacher <agruenba@...hat.com>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.18 205/879] gfs2: use i_lock spin_lock for inode qadata

From: Bob Peterson <rpeterso@...hat.com>

[ Upstream commit 5fcff61eea9efd1f4b60e89d2d686b5feaea100f ]

Before this patch, functions gfs2_qa_get and _put used the i_rw_mutex to
prevent simultaneous access to the inode's i_qadata. But i_rw_mutex is
now used for many other things, including iomap_begin and end, which
causes a conflict according to lockdep. We cannot just remove the lock,
since simultaneous opens (gfs2_open -> gfs2_open_common -> gfs2_qa_get)
can then stomp on each other's values for i_qadata.

This patch solves the conflict by using the i_lock spin_lock in the inode
to prevent simultaneous access.
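
As context for the hunk below: kmem_cache_zalloc(..., GFP_NOFS) may sleep,
so the allocation cannot be done while holding the spinlock. The patch
therefore drops i_lock, allocates, re-takes the lock, and rechecks
i_qadata, freeing the new allocation if a concurrent opener installed one
first. A minimal userspace sketch of that same drop / allocate / relock /
recheck pattern (illustrative names only, e.g. qadata, qa_get and qa_put;
this is not the gfs2 code) could look like:

#include <pthread.h>
#include <stdlib.h>

/* Illustrative stand-in for gfs2's per-inode quota data. */
struct qadata {
	int qa_ref;
};

static pthread_spinlock_t lock;		/* stands in for inode->i_lock */
static struct qadata *qadata;		/* stands in for ip->i_qadata */

static int qa_get(void)
{
	pthread_spin_lock(&lock);
	if (qadata == NULL) {
		struct qadata *tmp;

		/* The allocation may block, so drop the spinlock first. */
		pthread_spin_unlock(&lock);
		tmp = calloc(1, sizeof(*tmp));
		if (!tmp)
			return -1;

		/*
		 * Re-take the lock and recheck: another thread may have
		 * installed its own allocation while the lock was dropped.
		 */
		pthread_spin_lock(&lock);
		if (qadata == NULL)
			qadata = tmp;
		else
			free(tmp);
	}
	qadata->qa_ref++;
	pthread_spin_unlock(&lock);
	return 0;
}

static void qa_put(void)
{
	pthread_spin_lock(&lock);
	if (qadata && --qadata->qa_ref == 0) {
		free(qadata);
		qadata = NULL;
	}
	pthread_spin_unlock(&lock);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	if (qa_get() == 0)
		qa_put();
	pthread_spin_destroy(&lock);
	return 0;
}

Build with -lpthread; the free-on-lost-race shape above is the same one the
+ lines below implement with kmem_cache_zalloc() and inode->i_lock.
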
Signed-off-by: Bob Peterson <rpeterso@...hat.com>
Signed-off-by: Andreas Gruenbacher <agruenba@...hat.com>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
fs/gfs2/quota.c | 32 ++++++++++++++++++++------------
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index be0997e24d60..dc77080a82bb 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -531,34 +531,42 @@ static void qdsb_put(struct gfs2_quota_data *qd)
  */
 int gfs2_qa_get(struct gfs2_inode *ip)
 {
-	int error = 0;
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct inode *inode = &ip->i_inode;
 
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 
-	down_write(&ip->i_rw_mutex);
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata == NULL) {
-		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
-		if (!ip->i_qadata) {
-			error = -ENOMEM;
-			goto out;
-		}
+		struct gfs2_qadata *tmp;
+
+		spin_unlock(&inode->i_lock);
+		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
+		if (!tmp)
+			return -ENOMEM;
+
+		spin_lock(&inode->i_lock);
+		if (ip->i_qadata == NULL)
+			ip->i_qadata = tmp;
+		else
+			kmem_cache_free(gfs2_qadata_cachep, tmp);
 	}
 	ip->i_qadata->qa_ref++;
-out:
-	up_write(&ip->i_rw_mutex);
-	return error;
+	spin_unlock(&inode->i_lock);
+	return 0;
 }
 
 void gfs2_qa_put(struct gfs2_inode *ip)
 {
-	down_write(&ip->i_rw_mutex);
+	struct inode *inode = &ip->i_inode;
+
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 		ip->i_qadata = NULL;
 	}
-	up_write(&ip->i_rw_mutex);
+	spin_unlock(&inode->i_lock);
 }
 
 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
--
2.35.1