Message-ID: <20251007004634.38716-2-wilfred.opensource@gmail.com>
Date: Tue, 7 Oct 2025 10:46:35 +1000
From: Wilfred Mallawa <wilfred.opensource@...il.com>
To: linux-nvme@...ts.infradead.org,
linux-kernel@...r.kernel.org,
netdev@...r.kernel.org
Cc: Keith Busch <kbusch@...nel.org>,
Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
John Fastabend <john.fastabend@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Sabrina Dubroca <sd@...asysnail.net>,
"David S . Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Simon Horman <horms@...nel.org>,
Hannes Reinecke <hare@...e.de>,
Wilfred Mallawa <wilfred.mallawa@....com>
Subject: [PATCH] nvme-tcp: handle TLS partially sent records in write_space()
From: Wilfred Mallawa <wilfred.mallawa@....com>
With TLS enabled, a record that has been encrypted and appended to the
TLS TX list may never be retried if the underlying TCP socket is busy,
for example when tcp_sendmsg_locked() returns EAGAIN. The NVMe TCP
driver is unaware of this, because the TLS layer has already
successfully generated the record. Normally the TLS write_space()
callback would ensure such records are retried, but in the NVMe TCP
host driver write_space() invokes nvme_tcp_write_space() instead, so a
partially sent record left on the TLS TX list is never retried and
eventually times out.
Address this by first exposing tls_is_partially_sent_record() publicly,
then using it in the NVMe TCP host driver to invoke the TLS
write_space() handler where appropriate.
Fixes: be8e82caa685 ("nvme-tcp: enable TLS handshake upcall")
Signed-off-by: Wilfred Mallawa <wilfred.mallawa@....com>
---
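Note for reviewers: calling queue->write_space(sk) reaches the TLS layer
because of how the socket callbacks are chained at setup time. The sketch
below is a simplified paraphrase of the two existing setup sites (not part
of this patch; based on net/tls/tls_main.c and drivers/nvme/host/tcp.c,
details may differ between kernel versions):

	/* TLS ULP TX setup: save the current write_space and install its own */
	ctx->sk_write_space = sk->sk_write_space;
	sk->sk_write_space = tls_write_space;

	/* nvme-tcp queue setup: save whatever callback is currently installed
	 * (tls_write_space when TLS is enabled) and layer nvme_tcp_write_space
	 * on top of it.
	 */
	queue->write_space = queue->sock->sk->sk_write_space;
	queue->sock->sk->sk_write_space = nvme_tcp_write_space;

With TLS enabled, queue->write_space therefore points at tls_write_space(),
which is what arranges for a partially sent record on the TLS TX list to be
pushed again once the TCP socket has space available.
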
 drivers/nvme/host/tcp.c | 8 ++++++++
 include/net/tls.h       | 5 +++++
 net/tls/tls.h           | 5 -----
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1413788ca7d5..e3d02c33243b 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1076,11 +1076,18 @@ static void nvme_tcp_data_ready(struct sock *sk)
 static void nvme_tcp_write_space(struct sock *sk)
 {
 	struct nvme_tcp_queue *queue;
+	struct tls_context *ctx = tls_get_ctx(sk);
 
 	read_lock_bh(&sk->sk_callback_lock);
 	queue = sk->sk_user_data;
+
 	if (likely(queue && sk_stream_is_writeable(sk))) {
 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+		/* Ensure pending TLS partial records are retried */
+		if (nvme_tcp_queue_tls(queue) &&
+		    tls_is_partially_sent_record(ctx))
+			queue->write_space(sk);
+
 		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
 	read_unlock_bh(&sk->sk_callback_lock);
@@ -1306,6 +1313,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 {
 	struct nvme_tcp_request *req;
+	struct tls_context *ctx = tls_get_ctx(queue->sock->sk);
 	unsigned int noreclaim_flag;
 	int ret = 1;
 
diff --git a/include/net/tls.h b/include/net/tls.h
index 857340338b69..9c61a2de44bf 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -373,6 +373,11 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk)
 	return (__force void *)icsk->icsk_ulp_data;
 }
 
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
+{
+	return !!ctx->partially_sent_record;
+}
+
 static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
 		const struct tls_context *tls_ctx)
 {
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 2f86baeb71fc..7839a2effe31 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -271,11 +271,6 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 			    int flags);
 void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
-	return !!ctx->partially_sent_record;
-}
-
 static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 {
 	return tls_ctx->pending_open_record_frags;
--
2.51.0