[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20191002233655.24323-36-mathew.j.martineau@linux.intel.com>
Date: Wed, 2 Oct 2019 16:36:45 -0700
From: Mat Martineau <mathew.j.martineau@...ux.intel.com>
To: netdev@...r.kernel.org, edumazet@...gle.com
Cc: Paolo Abeni <pabeni@...hat.com>, cpaasch@...le.com, fw@...len.de,
peter.krystad@...ux.intel.com, dcaratti@...hat.com,
matthieu.baerts@...sares.net
Subject: [RFC PATCH v2 35/45] mptcp: update per unacked sequence on pkt reception
From: Paolo Abeni <pabeni@...hat.com>
So that we keep the per-connection unacked sequence number consistent;
since we update per-msk data, use an atomic64 cmpxchg() to protect
against concurrent updates from multiple subflows.
Initialize snd_una at connect()/accept() time.
Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
net/mptcp/options.c | 45 ++++++++++++++++++++++++++++++++++++++------
net/mptcp/protocol.c | 2 ++
net/mptcp/protocol.h | 1 +
3 files changed, 42 insertions(+), 6 deletions(-)
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index ce298ecc64f5..2427fff98091 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -540,6 +540,39 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
return false;
}
+static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+{
+ u32 old_ack32, cur_ack32;
+
+ if (use_64bit)
+ return cur_ack;
+
+ old_ack32 = (u32)old_ack;
+ cur_ack32 = (u32)cur_ack;
+ cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
+ if (unlikely(before(cur_ack32, old_ack32)))
+ return cur_ack + (1LL << 32);
+ return cur_ack;
+}
+
+void update_una(struct mptcp_sock *msk, struct mptcp_options_received *mp_opt)
+{
+ u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
+
+ /* avoid ack expansion on update conflict, to reduce the risk of
+ * wrongly expanding to a future ack sequence number, which is way
+ * more dangerous than missing an ack
+ */
+ new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
+ while (after64(new_snd_una, old_snd_una)) {
+ snd_una = old_snd_una;
+ old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
+ new_snd_una);
+ if (old_snd_una == snd_una)
+ break;
+ }
+}
+
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
struct tcp_options_received *opt_rx)
{
@@ -563,6 +596,12 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
if (!mp_opt->dss)
return;
+ /* we can't wait for recvmsg() to update the ack_seq, otherwise
+ * monodirectional flows will be stuck
+ */
+ if (msk && mp_opt->use_ack)
+ update_una(msk, mp_opt);
+
mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
if (!mpext)
return;
@@ -579,12 +618,6 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
mpext->use_checksum = mp_opt->use_checksum;
}
- if (mp_opt->use_ack) {
- mpext->data_ack = mp_opt->data_ack;
- mpext->use_ack = 1;
- mpext->ack64 = mp_opt->ack64;
- }
-
mpext->data_fin = mp_opt->data_fin;
if (msk)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c8ee20963887..2c64435aedd8 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -688,6 +688,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
mptcp_crypto_key_sha1(msk->remote_key, NULL, &ack_seq);
msk->write_seq = subflow->idsn + 1;
+ atomic64_set(&msk->snd_una, msk->write_seq);
ack_seq++;
msk->ack_seq = ack_seq;
subflow->map_seq = ack_seq;
@@ -822,6 +823,7 @@ void mptcp_finish_connect(struct sock *sk, int mp_capable)
mptcp_crypto_key_sha1(msk->remote_key, NULL, &ack_seq);
msk->write_seq = subflow->idsn + 1;
+ atomic64_set(&msk->snd_una, msk->write_seq);
ack_seq++;
msk->ack_seq = ack_seq;
subflow->map_seq = ack_seq;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 56df4f46f313..45646e38aa4c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -119,6 +119,7 @@ struct mptcp_sock {
u64 remote_key;
u64 write_seq;
u64 ack_seq;
+ atomic64_t snd_una;
u32 token;
unsigned long flags;
u16 dport;
--
2.23.0
Powered by blists - more mailing lists