Message-Id: <20201123121838.099930457@linuxfoundation.org>
Date: Mon, 23 Nov 2020 13:20:02 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Tariq Toukan <tariqt@...dia.com>,
Boris Pismenny <borisp@...dia.com>,
Jakub Kicinski <kuba@...nel.org>
Subject: [PATCH 5.9 052/252] net/tls: Fix wrong record sn in async mode of device resync
From: Tariq Toukan <tariqt@...dia.com>
[ Upstream commit 138559b9f99d3b6b1d5e75c78facc067a23871c6 ]
In async_resync mode, we log the TCP seq of records until the async request
is completed. Later, in case one of the logged seqs matches the resync
request, we return it, together with its record serial number. Before this
fix, we mistakenly returned the serial number of the current record
instead.
Fixes: ed9b7646b06a ("net/tls: Add asynchronous resync")
Signed-off-by: Tariq Toukan <tariqt@...dia.com>
Reviewed-by: Boris Pismenny <borisp@...dia.com>
Link: https://lore.kernel.org/r/20201115131448.2702-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@...nel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
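Not part of the patch: a rough user-space sketch of the big-endian record-sn
arithmetic that the new tls_bigint_subtract() helper performs. The 8-byte size
mirrors TLS_MAX_REC_SEQ_SIZE; the function and macro names outside the kernel
are illustrative only.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REC_SEQ_SIZE 8	/* stands in for TLS_MAX_REC_SEQ_SIZE */

/* The record sequence number is kept as an 8-byte big-endian value,
 * so stepping it back means: load, convert to host order, subtract,
 * convert back, store.
 */
static void rec_sn_subtract(unsigned char *seq, int n)
{
	uint64_t rcd_sn;

	memcpy(&rcd_sn, seq, REC_SEQ_SIZE);
	rcd_sn = htobe64(be64toh(rcd_sn) - n);
	memcpy(seq, &rcd_sn, REC_SEQ_SIZE);
}

int main(void)
{
	/* current record sn = 7; the matched log entry was 3 records
	 * ago, so the sn handed to the device should be 4
	 */
	unsigned char seq[REC_SEQ_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 7 };
	uint64_t out;

	rec_sn_subtract(seq, 3);
	memcpy(&out, seq, REC_SEQ_SIZE);
	printf("adjusted record sn: %llu\n", (unsigned long long)be64toh(out));
	return 0;
}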
include/net/tls.h | 16 +++++++++++++++-
net/tls/tls_device.c | 37 +++++++++++++++++++++++++++----------
2 files changed, 42 insertions(+), 11 deletions(-)
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -300,7 +300,8 @@ enum tls_offload_sync_type {
#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
struct tls_offload_resync_async {
atomic64_t req;
- u32 loglen;
+ u16 loglen;
+ u16 rcd_delta;
u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
@@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(
return (i == -1);
}
+static inline void tls_bigint_subtract(unsigned char *seq, int n)
+{
+ u64 rcd_sn;
+ __be64 *p;
+
+ BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);
+
+ p = (__be64 *)seq;
+ rcd_sn = be64_to_cpu(*p);
+ *p = cpu_to_be64(rcd_sn - n);
+}
+
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_star
atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
rx_ctx->resync_async->loglen = 0;
+ rx_ctx->resync_async->rcd_delta = 0;
}
static inline void
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -694,36 +694,51 @@ static void tls_device_resync_rx(struct
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
- s64 resync_req, u32 *seq)
+ s64 resync_req, u32 *seq, u16 *rcd_delta)
{
u32 is_async = resync_req & RESYNC_REQ_ASYNC;
u32 req_seq = resync_req >> 32;
u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+ u16 i;
+
+ *rcd_delta = 0;
if (is_async) {
+ /* shouldn't get to wraparound:
+ * too long in async stage, something bad happened
+ */
+ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+ return false;
+
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request
*/
- if (between(*seq, req_seq, req_end) &&
+ if (before(*seq, req_seq))
+ return false;
+ if (!after(*seq, req_end) &&
resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
resync_async->log[resync_async->loglen++] = *seq;
+ resync_async->rcd_delta++;
+
return false;
}
/* synchronous stage: check against the logged entries and
* proceed to check the next entries if no match was found
*/
- while (resync_async->loglen) {
- if (req_seq == resync_async->log[resync_async->loglen - 1] &&
- atomic64_try_cmpxchg(&resync_async->req,
- &resync_req, 0)) {
- resync_async->loglen = 0;
+ for (i = 0; i < resync_async->loglen; i++)
+ if (req_seq == resync_async->log[i] &&
+ atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
+ *rcd_delta = resync_async->rcd_delta - i;
*seq = req_seq;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
return true;
}
- resync_async->loglen--;
- }
+
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
if (req_seq == *seq &&
atomic64_try_cmpxchg(&resync_async->req,
@@ -741,6 +756,7 @@ void tls_device_rx_resync_new_rec(struct
u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
s64 resync_req;
+ u16 rcd_delta;
u32 req_seq;
if (tls_ctx->rx_conf != TLS_HW)
@@ -786,8 +802,9 @@ void tls_device_rx_resync_new_rec(struct
return;
if (!tls_device_rx_resync_async(rx_ctx->resync_async,
- resync_req, &seq))
+ resync_req, &seq, &rcd_delta))
return;
+ tls_bigint_subtract(rcd_sn, rcd_delta);
break;
}
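For readers not staring at the kernel sources, a stand-alone sketch of the
bookkeeping that the rewritten lookup above performs. The struct and function
names here are illustrative, and the atomic request word and TCP seq
windowing are deliberately left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOGMAX 13	/* mirrors TLS_DEVICE_RESYNC_ASYNC_LOGMAX */

/* Simplified model of struct tls_offload_resync_async */
struct resync_log {
	uint16_t loglen;
	uint16_t rcd_delta;
	uint32_t log[LOGMAX];
};

/* Async stage: remember the header seq of each record and count how
 * many records have gone by since the request was issued.
 */
static void log_record(struct resync_log *r, uint32_t hdr_seq)
{
	if (r->loglen < LOGMAX)
		r->log[r->loglen++] = hdr_seq;
	r->rcd_delta++;
}

/* Sync stage: look for the requested seq among the logged ones,
 * oldest first.  A match at index i means the matched record is
 * (rcd_delta - i) records behind the one currently being parsed.
 */
static bool match_record(struct resync_log *r, uint32_t req_seq,
			 uint16_t *delta)
{
	uint16_t i;

	for (i = 0; i < r->loglen; i++) {
		if (r->log[i] == req_seq) {
			*delta = r->rcd_delta - i;
			r->loglen = 0;
			r->rcd_delta = 0;
			return true;
		}
	}
	r->loglen = 0;
	r->rcd_delta = 0;
	return false;
}

int main(void)
{
	struct resync_log r = { 0 };
	uint16_t delta;

	log_record(&r, 1000);	/* 1st record after the request */
	log_record(&r, 2000);	/* 2nd record after the request */
	log_record(&r, 3000);	/* 3rd record after the request */

	/* the device asks to resync at seq 2000: match at index 1,
	 * delta = 3 - 1 = 2, so the record sn is stepped back by two
	 */
	if (match_record(&r, 2000, &delta))
		printf("step record sn back by %u\n", delta);
	return 0;
}

Keeping a running rcd_delta count together with the index of the matched log
entry is what lets the caller step rcd_sn back to the record that actually
matched, instead of reporting the sn of the record currently in flight.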