Message-ID: <2470e028-9b05-2013-7198-1fdad071d999@I-love.SAKURA.ne.jp>
Date: Sun, 4 Sep 2022 15:09:28 +0900
From: Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>
To: Eric Van Hensbergen <ericvh@...il.com>,
Latchesar Ionkov <lucho@...kov.net>,
Dominique Martinet <asmadeus@...ewreck.org>,
Christian Schoenebeck <linux_oss@...debyte.com>
Cc: syzbot <syzbot+2f20b523930c32c160cc@...kaller.appspotmail.com>,
v9fs-developer@...ts.sourceforge.net,
syzkaller-bugs@...glegroups.com, netdev@...r.kernel.org
Subject: [PATCH] net/9p: use a dedicated spinlock for modifying IDR

syzbot is reporting an inconsistent lock state in p9_req_put() [1], for
p9_tag_remove(), reached from p9_req_put() in IRQ context, takes
"struct p9_client"->lock with spin_lock_irqsave() while other paths
outside of IRQ context take the same lock with plain spin_lock().

Converting spin_lock() to spin_lock_irqsave() on this lock would
needlessly disable IRQs for an infrequent event, and p9_tag_remove()
needs IRQs disabled only while modifying the IDR (the RCU read lock is
sufficient for reading the IDR). Therefore, introduce a spinlock
dedicated to serializing idr_alloc()/idr_alloc_u32()/idr_remove()
calls. Since this spinlock is always taken as the innermost lock,
adding it cannot introduce a circular locking dependency.

Link: https://syzkaller.appspot.com/bug?extid=2f20b523930c32c160cc [1]
Reported-by: syzbot <syzbot+2f20b523930c32c160cc@...kaller.appspotmail.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>
---
This patch is untested, because no reproducer is available.
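
For reviewers, here is a minimal self-contained sketch (not part of this
patch; all names below are made up) of the pattern that lockdep reports
as an inconsistent lock state: the same spinlock is taken without
disabling IRQs in process context and again from hardirq context, so an
interrupt that arrives while the process-context holder owns the lock
can self-deadlock on that CPU.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(example_lock);

static void example_process_path(void)
{
	spin_lock(&example_lock);	/* IRQs remain enabled here... */
	/* ...so example_irq_handler() can interrupt this critical section. */
	spin_unlock(&example_lock);
}

static irqreturn_t example_irq_handler(int irq, void *data)
{
	unsigned long flags;

	/*
	 * If this runs on the CPU that is currently inside
	 * example_process_path(), spin_lock_irqsave() below spins on a
	 * lock that can never be released: the deadlock that lockdep
	 * warns about as "inconsistent lock state".
	 */
	spin_lock_irqsave(&example_lock, flags);
	spin_unlock_irqrestore(&example_lock, flags);
	return IRQ_HANDLED;
}
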
net/9p/client.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
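
Also for reference, the read side does not need the new lock: idr_find()
may be called under rcu_read_lock(), so only the idr_alloc() /
idr_alloc_u32() / idr_remove() callers have to serialize on p9_idr_lock.
A simplified sketch of such a lookup (illustration only, using a made-up
helper name; the in-tree lookup is p9_tag_lookup() in net/9p/client.c,
and the refcount_t field name is assumed to match struct p9_req_t):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <net/9p/client.h>

static struct p9_req_t *example_tag_lookup(struct p9_client *c, u16 tag)
{
	struct p9_req_t *req;

	rcu_read_lock();
	req = idr_find(&c->reqs, tag);	/* safe against concurrent idr_remove() */
	if (req && !refcount_inc_not_zero(&req->refcount))
		req = NULL;		/* already being freed; treat as not found */
	rcu_read_unlock();

	/*
	 * The in-tree code additionally re-checks that the tag still
	 * matches before returning; the caller drops the reference with
	 * p9_req_put().
	 */
	return req;
}
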
diff --git a/net/9p/client.c b/net/9p/client.c
index 0a6110e15d0f..20f0a2d8dd38 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -28,6 +28,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/9p.h>
+static DEFINE_SPINLOCK(p9_idr_lock);
+
#define DEFAULT_MSIZE (128 * 1024)
/* Client Option Parsing (code inspired by NFS code)
@@ -283,14 +285,14 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
INIT_LIST_HEAD(&req->req_list);
idr_preload(GFP_NOFS);
- spin_lock_irq(&c->lock);
+ spin_lock_irq(&p9_idr_lock);
if (type == P9_TVERSION)
tag = idr_alloc(&c->reqs, req, P9_NOTAG, P9_NOTAG + 1,
GFP_NOWAIT);
else
tag = idr_alloc(&c->reqs, req, 0, P9_NOTAG, GFP_NOWAIT);
req->tc.tag = tag;
- spin_unlock_irq(&c->lock);
+ spin_unlock_irq(&p9_idr_lock);
idr_preload_end();
if (tag < 0)
goto free;
@@ -364,9 +366,9 @@ static void p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
u16 tag = r->tc.tag;
p9_debug(P9_DEBUG_MUX, "freeing clnt %p req %p tag: %d\n", c, r, tag);
- spin_lock_irqsave(&c->lock, flags);
+ spin_lock_irqsave(&p9_idr_lock, flags);
idr_remove(&c->reqs, tag);
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&p9_idr_lock, flags);
}
int p9_req_put(struct p9_client *c, struct p9_req_t *r)
@@ -813,10 +815,10 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
refcount_set(&fid->count, 1);
idr_preload(GFP_KERNEL);
- spin_lock_irq(&clnt->lock);
+ spin_lock_irq(&p9_idr_lock);
ret = idr_alloc_u32(&clnt->fids, fid, &fid->fid, P9_NOFID - 1,
GFP_NOWAIT);
- spin_unlock_irq(&clnt->lock);
+ spin_unlock_irq(&p9_idr_lock);
idr_preload_end();
if (!ret) {
trace_9p_fid_ref(fid, P9_FID_REF_CREATE);
@@ -835,9 +837,9 @@ static void p9_fid_destroy(struct p9_fid *fid)
p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
trace_9p_fid_ref(fid, P9_FID_REF_DESTROY);
clnt = fid->clnt;
- spin_lock_irqsave(&clnt->lock, flags);
+ spin_lock_irqsave(&p9_idr_lock, flags);
idr_remove(&clnt->fids, fid->fid);
- spin_unlock_irqrestore(&clnt->lock, flags);
+ spin_unlock_irqrestore(&p9_idr_lock, flags);
kfree(fid->rdir);
kfree(fid);
}
--
2.34.1