Message-Id: <20230531134020.3383253-20-sashal@kernel.org>
Date: Wed, 31 May 2023 09:40:02 -0400
From: Sasha Levin <sashal@...nel.org>
To: linux-kernel@...r.kernel.org,
stable@...r.kernel.org
Cc: Jakub Kicinski <kuba@...nel.org>,
Shai Amiram <samiram@...dia.com>,
Simon Horman <simon.horman@...igine.com>,
"David S . Miller" <davem@...emloft.net>,
Sasha Levin <sashal@...nel.org>,
borisp@...dia.com,
john.fastabend@...il.com,
edumazet@...gle.com,
pabeni@...hat.com,
asml.silence@...il.com,
richardbgobert@...il.com,
imagedong@...cent.com,
netdev@...r.kernel.org
Subject: [PATCH AUTOSEL 6.3 20/37] tls: rx: strp: force mixed decrypted records into copy mode
From: Jakub Kicinski <kuba@...nel.org>
[ Upstream commit 14c4be92ebb3e36e392aa9dd8f314038a9f96f3c ]
If a record is partially decrypted we'll have to CoW it anyway,
so go into copy mode and allocate a writable skb right away.
This will make the subsequent fix simpler because we won't have
to teach tls_strp_msg_make_copy() how to copy skbs while
preserving the decrypt status.
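(Not part of the patch: the paragraph above boils down to "walk the
anchor's frag_list and fall back to copy mode as soon as two skbs
disagree on their decrypted bit". Below is a minimal, self-contained
userspace sketch of that comparison logic; struct fake_skb,
fake_cmp_decrypted() and queue_ok() are illustrative stand-ins, not
kernel code.)

/* Illustrative model only -- not kernel code.  A simplified skb with
 * just the fields the new check cares about: TCP sequence number,
 * payload length, and the "decrypted" flag set by TLS device offload.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	unsigned int seq;		/* TCP sequence of the first byte */
	unsigned int len;		/* payload length */
	bool decrypted;			/* already decrypted by the NIC? */
	struct fake_skb *next;		/* next skb in the queue */
};

/* Mirrors the idea of skb_cmp_decrypted(): non-zero iff the two skbs
 * disagree on their decrypted status.
 */
static int fake_cmp_decrypted(const struct fake_skb *a,
			      const struct fake_skb *b)
{
	return b->decrypted - a->decrypted;
}

/* Walks the queue the way tls_strp_check_queue_ok() does: the record
 * spanning the first "len" bytes must be contiguous in sequence space
 * and uniformly (un)decrypted, otherwise the caller has to fall back
 * to copy mode (tls_strp_read_copy() in the real code).
 */
static bool queue_ok(const struct fake_skb *first, unsigned int len)
{
	const struct fake_skb *skb = first;
	unsigned int seq = first->seq;

	while (skb->len < len) {
		seq += skb->len;
		len -= skb->len;
		skb = skb->next;

		if (!skb || skb->seq != seq)
			return false;	/* gap or duplicate data */
		if (fake_cmp_decrypted(first, skb))
			return false;	/* mixed decrypted status */
	}
	return true;
}

int main(void)
{
	/* Two skbs carrying one 300-byte record: only the first part was
	 * decrypted by the device, so the queue must be rejected.
	 */
	struct fake_skb b = { .seq = 1100, .len = 200, .decrypted = false };
	struct fake_skb a = { .seq = 1000, .len = 100, .decrypted = true,
			      .next = &b };

	printf("queue ok: %s\n", queue_ok(&a, 300) ? "yes" : "no");
	return 0;
}

Running the sketch prints "queue ok: no" for the mixed queue above,
i.e. the case the real parser now forces into copy mode.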
Tested-by: Shai Amiram <samiram@...dia.com>
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
Reviewed-by: Simon Horman <simon.horman@...igine.com>
Signed-off-by: David S. Miller <davem@...emloft.net>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 include/linux/skbuff.h | 10 ++++++++++
 net/tls/tls_strp.c     | 16 +++++++++++-----
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbcaac8b69665..4a882f9ba1f1f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1577,6 +1577,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 	to->l4_hash = from->l4_hash;
 };
 
+static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
+				    const struct sk_buff *skb2)
+{
+#ifdef CONFIG_TLS_DEVICE
+	return skb2->decrypted - skb1->decrypted;
+#else
+	return 0;
+#endif
+}
+
 static inline void skb_copy_decrypted(struct sk_buff *to,
 				      const struct sk_buff *from)
 {
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 955ac3e0bf4d3..445543d92ac5c 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -315,15 +315,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
 	return 0;
 }
 
-static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
 {
 	unsigned int len = strp->stm.offset + strp->stm.full_len;
-	struct sk_buff *skb;
+	struct sk_buff *first, *skb;
 	u32 seq;
 
-	skb = skb_shinfo(strp->anchor)->frag_list;
-	seq = TCP_SKB_CB(skb)->seq;
+	first = skb_shinfo(strp->anchor)->frag_list;
+	skb = first;
+	seq = TCP_SKB_CB(first)->seq;
+
+	/* Make sure there's no duplicate data in the queue,
+	 * and the decrypted status matches.
+	 */
 	while (skb->len < len) {
 		seq += skb->len;
 		len -= skb->len;
@@ -331,6 +335,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp)
 
 		if (TCP_SKB_CB(skb)->seq != seq)
 			return false;
+		if (skb_cmp_decrypted(first, skb))
+			return false;
 	}
 
 	return true;
@@ -411,7 +417,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
 		return tls_strp_read_copy(strp, true);
 	}
 
-	if (!tls_strp_check_no_dup(strp))
+	if (!tls_strp_check_queue_ok(strp))
 		return tls_strp_read_copy(strp, false);
 
 	strp->msg_ready = 1;
--
2.39.2