Message-ID: <4e1e28ef5ea5d7783185eb43f7bdc02aaf569e16.1762194056.git.pav@iki.fi>
Date: Mon, 3 Nov 2025 20:29:49 +0200
From: Pauli Virtanen <pav@....fi>
To: linux-bluetooth@...r.kernel.org
Cc: Pauli Virtanen <pav@....fi>,
	marcel@...tmann.org,
	johan.hedberg@...il.com,
	luiz.dentz@...il.com,
	linux-kernel@...r.kernel.org
Subject: [PATCH v2 4/5] Bluetooth: 6lowpan: Don't hold spin lock over sleeping functions

disconnect_all_peers() calls a sleeping function (l2cap_chan_close())
under a spinlock. Holding the lock doesn't actually do any good -- we
work on a local copy of the list, and the lock doesn't protect against
peer->chan having already been freed.

Fix this by taking refcounts on peer->chan instead. Also clean up the
code and old comments a bit.

Take devices_lock instead of RCU, because the kfree_rcu();
l2cap_chan_put(); construct in chan_close_cb() does not guarantee that
peer->chan is still valid inside an RCU read-side section.

Also take l2cap_chan_lock(), which is required for l2cap_chan_close().
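
Roughly, the resulting disconnect_all_peers() has the shape below (a
condensed view of the diff that follows, with a couple of explanatory
comments added; this is a sketch, not the verbatim patched code):

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;
	int nchans;

	/* Reset the per-peer "closing" marker; nothing sleeps here. */
	rcu_read_lock();
	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list)
		list_for_each_entry_rcu(peer, &entry->peers, list)
			clear_bit(LOWPAN_PEER_CLOSING, peer->flags);
	rcu_read_unlock();

	do {
		struct l2cap_chan *chans[32];
		int i;

		nchans = 0;

		/* Collect channel references under the spinlock... */
		spin_lock(&devices_lock);
		list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
			list_for_each_entry_rcu(peer, &entry->peers, list) {
				if (test_and_set_bit(LOWPAN_PEER_CLOSING,
						     peer->flags))
					continue;
				l2cap_chan_hold(peer->chan);
				chans[nchans++] = peer->chan;
				if (nchans >= ARRAY_SIZE(chans))
					goto done;
			}
		}
done:
		spin_unlock(&devices_lock);

		/* ...then do the sleeping close with only chan->lock held. */
		for (i = 0; i < nchans; ++i) {
			l2cap_chan_lock(chans[i]);
			l2cap_chan_close(chans[i], ENOENT);
			l2cap_chan_unlock(chans[i]);
			l2cap_chan_put(chans[i]);
		}
	} while (nchans);
}
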
Log: (bluez 6lowpan-tester Client Connect - Disable)
------
BUG: sleeping function called from invalid context at kernel/locking/mutex.c:575
...
<TASK>
...
l2cap_send_disconn_req (net/bluetooth/l2cap_core.c:938 net/bluetooth/l2cap_core.c:1495)
...
? __pfx_l2cap_chan_close (net/bluetooth/l2cap_core.c:809)
do_enable_set (net/bluetooth/6lowpan.c:1048 net/bluetooth/6lowpan.c:1068)
------

Fixes: 90305829635d ("Bluetooth: 6lowpan: Converting rwlocks to use RCU")
Signed-off-by: Pauli Virtanen <pav@....fi>
---
Notes:
    v2:
    - Take devices_lock instead of RCU and use l2cap_chan_hold(). The
      unless_zero() variant is not correct here, as the l2cap_chan behind
      peer->chan is freed without waiting for an RCU grace period when
      its refcount hits 0.
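
To make the lifetime argument concrete, a hedged illustration of the two
approaches (code fragments only, not verbatim kernel code; it assumes
that the peer-removal path doing the final l2cap_chan_put() itself runs
under devices_lock, which is what makes the lock sufficient here):

	/* RCU read side: the peer entry may still be reachable while its
	 * channel is already gone, because the final l2cap_chan_put()
	 * frees the l2cap_chan immediately rather than after a grace
	 * period, so even an unless_zero()-style hold would inspect
	 * freed memory.
	 */
	rcu_read_lock();
	chan = peer->chan;		/* may already be dangling */
	rcu_read_unlock();

	/* devices_lock: excludes the removal path, so the peer's own
	 * reference still pins the channel and a plain hold is safe.
	 */
	spin_lock(&devices_lock);
	l2cap_chan_hold(peer->chan);	/* refcount known to be non-zero */
	spin_unlock(&devices_lock);
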
 net/bluetooth/6lowpan.c | 74 +++++++++++++++++++++++++----------------
 1 file changed, 46 insertions(+), 28 deletions(-)

diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 0d8c2e2e9a6c..588d7e94e606 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -53,6 +53,11 @@ static bool enable_6lowpan;
 static struct l2cap_chan *listen_chan;
 static DEFINE_MUTEX(set_lock);
 
+enum {
+	LOWPAN_PEER_CLOSING,
+	LOWPAN_PEER_MAXBITS
+};
+
 struct lowpan_peer {
 	struct list_head list;
 	struct rcu_head rcu;
@@ -61,6 +66,8 @@ struct lowpan_peer {
 	/* peer addresses in various formats */
 	unsigned char lladdr[ETH_ALEN];
 	struct in6_addr peer_addr;
+
+	DECLARE_BITMAP(flags, LOWPAN_PEER_MAXBITS);
 };
 
 struct lowpan_btle_dev {
@@ -1014,41 +1021,52 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
 static void disconnect_all_peers(void)
 {
 	struct lowpan_btle_dev *entry;
-	struct lowpan_peer *peer, *tmp_peer, *new_peer;
-	struct list_head peers;
+	struct lowpan_peer *peer;
+	int nchans;
 
-	INIT_LIST_HEAD(&peers);
-
-	/* We make a separate list of peers as the close_cb() will
-	 * modify the device peers list so it is better not to mess
-	 * with the same list at the same time.
+	/* l2cap_chan_close() cannot be called from RCU, and lock ordering
+	 * chan->lock > devices_lock prevents taking write side lock, so copy
+	 * then close.
 	 */
 
 	rcu_read_lock();
-
-	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
-		list_for_each_entry_rcu(peer, &entry->peers, list) {
-			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
-			if (!new_peer)
-				break;
-
-			new_peer->chan = peer->chan;
-			INIT_LIST_HEAD(&new_peer->list);
-
-			list_add(&new_peer->list, &peers);
-		}
-	}
-
+	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list)
+		list_for_each_entry_rcu(peer, &entry->peers, list)
+			clear_bit(LOWPAN_PEER_CLOSING, peer->flags);
 	rcu_read_unlock();
 
-	spin_lock(&devices_lock);
-	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
-		l2cap_chan_close(peer->chan, ENOENT);
+	do {
+		struct l2cap_chan *chans[32];
+		int i;
 
-		list_del_rcu(&peer->list);
-		kfree_rcu(peer, rcu);
-	}
-	spin_unlock(&devices_lock);
+		nchans = 0;
+
+		spin_lock(&devices_lock);
+
+		list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
+			list_for_each_entry_rcu(peer, &entry->peers, list) {
+				if (test_and_set_bit(LOWPAN_PEER_CLOSING,
+						     peer->flags))
+					continue;
+
+				l2cap_chan_hold(peer->chan);
+				chans[nchans++] = peer->chan;
+
+				if (nchans >= ARRAY_SIZE(chans))
+					goto done;
+			}
+		}
+
+done:
+		spin_unlock(&devices_lock);
+
+		for (i = 0; i < nchans; ++i) {
+			l2cap_chan_lock(chans[i]);
+			l2cap_chan_close(chans[i], ENOENT);
+			l2cap_chan_unlock(chans[i]);
+			l2cap_chan_put(chans[i]);
+		}
+	} while (nchans);
 }
 
 struct set_enable {
--
2.51.1