Message-Id: <20230227-upstream-net-20230227-mptcp-fixes-v1-2-070e30ae4a8e@tessares.net>
Date: Mon, 27 Feb 2023 18:29:25 +0100
From: Matthieu Baerts <matthieu.baerts@...sares.net>
To: mptcp@...ts.linux.dev, "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Menglong Dong <imagedong@...cent.com>,
Mengen Sun <mengensun@...cent.com>,
Shuah Khan <shuah@...nel.org>, Florian Westphal <fw@...len.de>,
Jiang Biao <benbjiang@...cent.com>
Cc: netdev@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-kselftest@...r.kernel.org,
Matthieu Baerts <matthieu.baerts@...sares.net>,
stable@...r.kernel.org, Christoph Paasch <cpaasch@...le.com>
Subject: [PATCH net 2/7] mptcp: refactor passive socket initialization
From: Paolo Abeni <pabeni@...hat.com>
After commit 30e51b923e43 ("mptcp: fix unreleased socket in accept queue"),
unaccepted msk sockets go through a complete shutdown, so we no longer need
to delay inserting the first subflow into the subflow list.
The reference counting deserves some extra care, as __mptcp_close() is
unaware of the request socket's linkage to the first subflow.
Please note that this is more of a refactoring than a fix, but this
modification is needed by other corrections in the following commits. A
Fixes tag has therefore been added here to help the stable team.
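For context, here is a condensed sketch (not the full hunk) of the sequence
that the diff below adds to subflow_syn_recv_sock(), showing where the first
subflow is now linked and where the extra reference is taken:

    /* sketch only, condensed from the hunk below; the msk is not yet
     * exposed to user-space, so this cannot race with mptcp_close()
     */
    owner = mptcp_sk(new_msk);
    WRITE_ONCE(owner->first, child);          /* record the first subflow */
    mptcp_propagate_sndbuf(ctx->conn, child);
    mptcp_rcv_space_init(owner, child);
    list_add(&ctx->node, &owner->conn_list);  /* link it at creation time */
    sock_hold(child);                         /* msk keeps a ref on its first subflow */

The sock_hold() paired with the conn_list insertion is the "extra care"
mentioned above: the msk now holds a reference on its first subflow from
creation time, instead of acquiring it later in mptcp_stream_accept() as the
removed hunk in protocol.c did.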
Fixes: 30e51b923e43 ("mptcp: fix unreleased socket in accept queue")
Cc: stable@...r.kernel.org # v6.0+
Signed-off-by: Paolo Abeni <pabeni@...hat.com>
Reviewed-by: Matthieu Baerts <matthieu.baerts@...sares.net>
Tested-by: Christoph Paasch <cpaasch@...le.com>
Signed-off-by: Matthieu Baerts <matthieu.baerts@...sares.net>
---
net/mptcp/protocol.c | 17 -----------------
net/mptcp/subflow.c | 27 +++++++++++++++++++++------
2 files changed, 21 insertions(+), 23 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3ad9c46202fc..447641d34c2c 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
if (sk->sk_socket && !ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
- mptcp_propagate_sndbuf((struct sock *)msk, ssk);
mptcp_sockopt_sync_locked(msk, ssk);
return true;
}
@@ -3708,22 +3707,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
lock_sock(newsk);
- /* PM/worker can now acquire the first subflow socket
- * lock without racing with listener queue cleanup,
- * we can notify it, if needed.
- *
- * Even if remote has reset the initial subflow by now
- * the refcnt is still at least one.
- */
- subflow = mptcp_subflow_ctx(msk->first);
- list_add(&subflow->node, &msk->conn_list);
- sock_hold(msk->first);
- if (mptcp_is_fully_established(newsk))
- mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
-
- mptcp_rcv_space_init(msk, msk->first);
- mptcp_propagate_sndbuf(newsk, msk->first);
-
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 5070dc33675d..a631a5e6fc7b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -397,6 +397,12 @@ void mptcp_subflow_reset(struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
+ /* mptcp_mp_fail_no_response() can reach here on an already closed
+ * socket
+ */
+ if (ssk->sk_state == TCP_CLOSE)
+ return;
+
/* must hold: tcp_done() could drop last reference on parent */
sock_hold(sk);
@@ -750,6 +756,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_options_received mp_opt;
bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
+ struct mptcp_sock *owner;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
@@ -824,6 +831,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
ctx->setsockopt_seq = listener->setsockopt_seq;
if (ctx->mp_capable) {
+ owner = mptcp_sk(new_msk);
+
/* this can't race with mptcp_close(), as the msk is
* not yet exposted to user-space
*/
@@ -832,14 +841,14 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
/* record the newly created socket as the first msk
* subflow, but don't link it yet into conn_list
*/
- WRITE_ONCE(mptcp_sk(new_msk)->first, child);
+ WRITE_ONCE(owner->first, child);
/* new mpc subflow takes ownership of the newly
* created mptcp socket
*/
mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
- mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
- mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
+ mptcp_pm_new_connection(owner, child, 1);
+ mptcp_token_accept(subflow_req, owner);
ctx->conn = new_msk;
new_msk = NULL;
@@ -847,15 +856,21 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
* uses the correct data
*/
mptcp_copy_inaddrs(ctx->conn, child);
+ mptcp_propagate_sndbuf(ctx->conn, child);
+
+ mptcp_rcv_space_init(owner, child);
+ list_add(&ctx->node, &owner->conn_list);
+ sock_hold(child);
/* with OoO packets we can reach here without ingress
* mpc option
*/
- if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
+ if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
mptcp_subflow_fully_established(ctx, &mp_opt);
+ mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
+ ctx->pm_notified = 1;
+ }
} else if (ctx->mp_join) {
- struct mptcp_sock *owner;
-
owner = subflow_req->msk;
if (!owner) {
subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
--
2.38.1