Message-ID: <8736a4ce03f143b7a63cb99ab425e5403eafa9e4.1761998763.git.pav@iki.fi>
Date: Sat, 1 Nov 2025 14:09:13 +0200
From: Pauli Virtanen <pav@....fi>
To: linux-bluetooth@...r.kernel.org
Cc: Pauli Virtanen <pav@....fi>,
marcel@...tmann.org,
johan.hedberg@...il.com,
luiz.dentz@...il.com,
jukka.rissanen@...ux.intel.com,
linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] Bluetooth: 6lowpan: Don't hold spin lock over sleeping functions

disconnect_all_peers() calls a sleeping function (l2cap_chan_close())
under a spinlock. Holding the lock doesn't actually do any good: we work
on a local copy of the list, and the lock doesn't protect against
peer->chan having already been freed.
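
For reference, the removed code (see the hunk below) boils down to the
following, with the sleeping l2cap_chan_close() called while the
devices_lock spinlock is held:

    spin_lock(&devices_lock);
    list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
            l2cap_chan_close(peer->chan, ENOENT);   /* may sleep */
            list_del_rcu(&peer->list);
            kfree_rcu(peer, rcu);
    }
    spin_unlock(&devices_lock);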
Fix by taking refcounts of peer->chan instead. Clean up the code and
old comments a bit.

Also take l2cap_chan_lock(), which is required for l2cap_chan_close().
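
With this change, each channel collected during the RCU walk is closed
outside the RCU read side and without devices_lock, roughly:

    chan = l2cap_chan_hold_unless_zero(peer->chan);
    if (chan) {
            l2cap_chan_lock(chan);
            l2cap_chan_close(chan, ENOENT);
            l2cap_chan_unlock(chan);
            l2cap_chan_put(chan);
    }

(simplified excerpt of the loop in the diff; the LOWPAN_PEER_CLOSING
flag and the batching of channels into chans[] are omitted here)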
Log: (bluez 6lowpan-tester Client Connect - Disable)
------
BUG: sleeping function called from invalid context at kernel/locking/mutex.c:575
...
<TASK>
...
l2cap_send_disconn_req (net/bluetooth/l2cap_core.c:938 net/bluetooth/l2cap_core.c:1495)
...
? __pfx_l2cap_chan_close (net/bluetooth/l2cap_core.c:809)
do_enable_set (net/bluetooth/6lowpan.c:1048 net/bluetooth/6lowpan.c:1068)
------
Fixes: 90305829635d ("Bluetooth: 6lowpan: Converting rwlocks to use RCU")
Signed-off-by: Pauli Virtanen <pav@....fi>
---
net/bluetooth/6lowpan.c | 78 ++++++++++++++++++++++++++---------------
1 file changed, 50 insertions(+), 28 deletions(-)
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 0d8c2e2e9a6c..f64bc4dc2b54 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -53,6 +53,11 @@ static bool enable_6lowpan;
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);
+enum {
+ LOWPAN_PEER_CLOSING,
+ LOWPAN_PEER_MAXBITS
+};
+
struct lowpan_peer {
struct list_head list;
struct rcu_head rcu;
@@ -61,6 +66,8 @@ struct lowpan_peer {
/* peer addresses in various formats */
unsigned char lladdr[ETH_ALEN];
struct in6_addr peer_addr;
+
+ DECLARE_BITMAP(flags, LOWPAN_PEER_MAXBITS);
};
struct lowpan_btle_dev {
@@ -1014,41 +1021,56 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
static void disconnect_all_peers(void)
{
struct lowpan_btle_dev *entry;
- struct lowpan_peer *peer, *tmp_peer, *new_peer;
- struct list_head peers;
+ struct lowpan_peer *peer;
+ int nchans;
- INIT_LIST_HEAD(&peers);
-
- /* We make a separate list of peers as the close_cb() will
- * modify the device peers list so it is better not to mess
- * with the same list at the same time.
+ /* l2cap_chan_close() cannot be called from RCU, and lock ordering
+ * chan->lock > devices_lock prevents taking write side lock, so copy
+ * then close.
*/
rcu_read_lock();
-
- list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
- list_for_each_entry_rcu(peer, &entry->peers, list) {
- new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
- if (!new_peer)
- break;
-
- new_peer->chan = peer->chan;
- INIT_LIST_HEAD(&new_peer->list);
-
- list_add(&new_peer->list, &peers);
- }
- }
-
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list)
+ list_for_each_entry_rcu(peer, &entry->peers, list)
+ clear_bit(LOWPAN_PEER_CLOSING, peer->flags);
rcu_read_unlock();
- spin_lock(&devices_lock);
- list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
- l2cap_chan_close(peer->chan, ENOENT);
+ do {
+ struct l2cap_chan *chans[64];
+ int i;
- list_del_rcu(&peer->list);
- kfree_rcu(peer, rcu);
- }
- spin_unlock(&devices_lock);
+ nchans = 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
+ list_for_each_entry_rcu(peer, &entry->peers, list) {
+ struct l2cap_chan *chan;
+
+ if (test_and_set_bit(LOWPAN_PEER_CLOSING,
+ peer->flags))
+ continue;
+
+ chan = l2cap_chan_hold_unless_zero(peer->chan);
+ if (!chan)
+ continue;
+
+ chans[nchans++] = chan;
+ if (nchans >= ARRAY_SIZE(chans))
+ goto done;
+ }
+ }
+
+done:
+ rcu_read_unlock();
+
+ for (i = 0; i < nchans; ++i) {
+ l2cap_chan_lock(chans[i]);
+ l2cap_chan_close(chans[i], ENOENT);
+ l2cap_chan_unlock(chans[i]);
+ l2cap_chan_put(chans[i]);
+ }
+ } while (nchans);
}
struct set_enable {
--
2.51.1