lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [day] [month] [year] [list]
Message-ID: <20250221222146.GA1896@templeofstupid.com>
Date: Fri, 21 Feb 2025 14:21:46 -0800
From: Krister Johansen <kjlx@...pleofstupid.com>
To: Matthieu Baerts <matttbe@...nel.org>,
	Mat Martineau <martineau@...nel.org>
Cc: Geliang Tang <geliang@...nel.org>,
	"David S. Miller" <davem@...emloft.net>,
	Eric Dumazet <edumazet@...gle.com>,
	Jakub Kicinski <kuba@...nel.org>, Paolo Abeni <pabeni@...hat.com>,
	Simon Horman <horms@...nel.org>, netdev@...r.kernel.org,
	mptcp@...ts.linux.dev
Subject: [PATCH mptcp] mptcp: fix 'scheduling while atomic' in
 mptcp_pm_nl_append_new_local_addr

If multiple connection requests attempt to create an implicit mptcp
endpoint in parallel, more than one caller may end up in
mptcp_pm_nl_append_new_local_addr because none found the address in
local_addr_list during their call to mptcp_pm_nl_get_local_id.  In this
case, the concurrent new_local_addr calls may delete the address entry
created by the previous caller.  These deletes use synchronize_rcu, but
this is not permitted in some of the contexts where this function may be
called.  During packet recv, the caller may be in a rcu read critical
section and have preemption disabled.

An example stack:

   BUG: scheduling while atomic: swapper/2/0/0x00000302

   Call Trace:
   <IRQ>
   dump_stack_lvl+0x76/0xa0
   dump_stack+0x10/0x20
   __schedule_bug+0x64/0x80
   schedule_debug.constprop.0+0xdb/0x130
   __schedule+0x69/0x6a0
   schedule+0x33/0x110
   schedule_timeout+0x157/0x170
   wait_for_completion+0x88/0x150
   __wait_rcu_gp+0x150/0x160
   synchronize_rcu+0x12d/0x140
   mptcp_pm_nl_append_new_local_addr+0x1bd/0x280
   mptcp_pm_nl_get_local_id+0x121/0x160
   mptcp_pm_get_local_id+0x9d/0xe0
   subflow_check_req+0x1a8/0x460
   subflow_v4_route_req+0xb5/0x110
   tcp_conn_request+0x3a4/0xd00
   subflow_v4_conn_request+0x42/0xa0
   tcp_rcv_state_process+0x1e3/0x7e0
   tcp_v4_do_rcv+0xd3/0x2a0
   tcp_v4_rcv+0xbb8/0xbf0
   ip_protocol_deliver_rcu+0x3c/0x210
   ip_local_deliver_finish+0x77/0xa0
   ip_local_deliver+0x6e/0x120
   ip_sublist_rcv_finish+0x6f/0x80
   ip_sublist_rcv+0x178/0x230
   ip_list_rcv+0x102/0x140
   __netif_receive_skb_list_core+0x22d/0x250
   netif_receive_skb_list_internal+0x1a3/0x2d0
   napi_complete_done+0x74/0x1c0
   igb_poll+0x6c/0xe0 [igb]
   __napi_poll+0x30/0x200
   net_rx_action+0x181/0x2e0
   handle_softirqs+0xd8/0x340
   __irq_exit_rcu+0xd9/0x100
   irq_exit_rcu+0xe/0x20
   common_interrupt+0xa4/0xb0
   </IRQ>

This problem seems particularly prevalent if the user advertises an
endpoint that has a different external vs internal address.  In the case
where the external address is advertised and multiple connections
already exist, multiple subflow SYNs arrive in parallel which tends to
trigger the race during creation of the first local_addr_list entries
which have the internal address instead.

Fix this problem by switching mptcp_pm_nl_append_new_local_addr to use
call_rcu.  As part of plumbing this up, make
__mptcp_pm_release_addr_entry take an rcu_head which is used by all
callers regardless of cleanup method.

Cc: stable@...r.kernel.org
Fixes: d045b9eb95a9 ("mptcp: introduce implicit endpoints")
Signed-off-by: Krister Johansen <kjlx@...pleofstupid.com>
---
 net/mptcp/pm_netlink.c | 19 ++++++++++++-------
 net/mptcp/protocol.h   |  1 +
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index c0e47f4f7b1a..4115b83cc2c3 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -967,9 +967,15 @@ static bool address_use_port(struct mptcp_pm_addr_entry *entry)
 		MPTCP_PM_ADDR_FLAG_SIGNAL;
 }
 
-/* caller must ensure the RCU grace period is already elapsed */
-static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
+/*
+ * Caller must ensure the RCU grace period is already elapsed or call this
+ * via a RCU callback.
+ */
+static void __mptcp_pm_release_addr_entry(struct rcu_head *head)
 {
+	struct mptcp_pm_addr_entry *entry;
+
+	entry = container_of(head, struct mptcp_pm_addr_entry, rcu_head);
 	if (entry->lsk)
 		sock_release(entry->lsk);
 	kfree(entry);
@@ -1064,8 +1070,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
 
 	/* just replaced an existing entry, free it */
 	if (del_entry) {
-		synchronize_rcu();
-		__mptcp_pm_release_addr_entry(del_entry);
+		call_rcu(&del_entry->rcu_head, __mptcp_pm_release_addr_entry);
 	}
 	return ret;
 }
@@ -1443,7 +1448,7 @@ int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 
 out_free:
-	__mptcp_pm_release_addr_entry(entry);
+	__mptcp_pm_release_addr_entry(&entry->rcu_head);
 	return ret;
 }
 
@@ -1623,7 +1628,7 @@ int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info)
 
 	mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
 	synchronize_rcu();
-	__mptcp_pm_release_addr_entry(entry);
+	__mptcp_pm_release_addr_entry(&entry->rcu_head);
 
 	return ret;
 }
@@ -1689,7 +1694,7 @@ static void __flush_addrs(struct list_head *list)
 		cur = list_entry(list->next,
 				 struct mptcp_pm_addr_entry, list);
 		list_del_rcu(&cur->list);
-		__mptcp_pm_release_addr_entry(cur);
+		__mptcp_pm_release_addr_entry(&cur->rcu_head);
 	}
 }
 
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index ad21925af061..29c4ee64cd0b 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -250,6 +250,7 @@ struct mptcp_pm_addr_entry {
 	u8			flags;
 	int			ifindex;
 	struct socket		*lsk;
+	struct rcu_head		rcu_head;
 };
 
 struct mptcp_data_frag {
-- 
2.25.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ