[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20250815050210.1518439-9-alistair.francis@wdc.com>
Date: Fri, 15 Aug 2025 15:02:10 +1000
From: alistair23@...il.com
To: chuck.lever@...cle.com,
hare@...nel.org,
kernel-tls-handshake@...ts.linux.dev,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-nvme@...ts.infradead.org,
linux-nfs@...r.kernel.org
Cc: kbusch@...nel.org,
axboe@...nel.dk,
hch@....de,
sagi@...mberg.me,
kch@...dia.com,
alistair23@...il.com,
Alistair Francis <alistair.francis@....com>
Subject: [PATCH 8/8] nvmet-tcp: Support KeyUpdate
From: Alistair Francis <alistair.francis@....com>
If the nvmet_tcp_try_recv() function returns -EKEYEXPIRED, or if we
receive a KeyUpdate handshake type, then the underlying TLS keys need
to be updated.
If the NVMe Host (TLS client) initiates a KeyUpdate this patch will
allow the NVMe layer to process the KeyUpdate request and forward the
request to userspace. Userspace must then update the key to keep the
connection alive.
This patch allows us to handle the NVMe host sending a KeyUpdate
request without aborting the connection. At this time we do not support
initiating a KeyUpdate from the target side.
Link: https://datatracker.ietf.org/doc/html/rfc8446#section-4.6.3
Signed-off-by: Alistair Francis <alistair.francis@....com>
---
drivers/nvme/target/tcp.c | 59 +++++++++++++++++++++++++++++++++++++--
1 file changed, 56 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 5eaab9c858be..1dc6fa28d08c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -175,6 +175,7 @@ struct nvmet_tcp_queue {
/* TLS state */
key_serial_t tls_pskid;
+ key_serial_t user_key_serial;
struct delayed_work tls_handshake_tmo_work;
unsigned long poll_end;
@@ -836,6 +837,11 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
return 1;
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue);
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w);
+#endif
+
static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
int budget, int *sends)
{
@@ -1114,7 +1120,7 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
struct msghdr *msg, char *cbuf)
{
struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
- u8 ctype, level, description;
+ u8 ctype, htype, level, description;
int ret = 0;
ctype = tls_get_record_type(queue->sock->sk, cmsg);
@@ -1135,6 +1141,29 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
ret = -EAGAIN;
}
break;
+ case TLS_RECORD_TYPE_HANDSHAKE:
+ htype = tls_get_handshake_type(queue->sock->sk, cmsg);
+
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (htype == TLS_HANDSHAKE_TYPE_KEY_UPDATE) {
+ tls_clear_err(queue->sock->sk);
+ handshake_req_cancel(queue->sock->sk);
+ handshake_sk_destruct_req(queue->sock->sk);
+ queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
+
+ /* Restore the default callbacks before starting upcall */
+ read_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = NULL;
+ queue->sock->sk->sk_data_ready = queue->data_ready;
+ read_unlock_bh(&queue->sock->sk->sk_callback_lock);
+
+ return nvmet_tcp_tls_handshake(queue, HANDSHAKE_KEY_UPDATE_TYPE_RECEIVED);
+ }
+#endif
+ pr_err("queue %d: TLS handshake %d unhandled\n",
+ queue->idx, htype);
+ ret = -EAGAIN;
+ break;
default:
/* discard this record type */
pr_err("queue %d: TLS record %d unhandled\n",
@@ -1344,7 +1373,29 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
if (unlikely(ret < 0)) {
- nvmet_tcp_socket_error(queue, ret);
+ if (ret == -EKEYEXPIRED &&
+ queue->state != NVMET_TCP_Q_DISCONNECTING &&
+ queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ tls_clear_err(queue->sock->sk);
+ handshake_req_cancel(queue->sock->sk);
+ handshake_sk_destruct_req(queue->sock->sk);
+ queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
+
+ /* Restore the default callbacks before starting upcall */
+ read_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = NULL;
+ queue->sock->sk->sk_data_ready = queue->data_ready;
+ read_unlock_bh(&queue->sock->sk->sk_callback_lock);
+
+ ret = nvmet_tcp_tls_handshake(queue,
+ HANDSHAKE_KEY_UPDATE_TYPE_RECEIVED);
+#else
+ nvmet_tcp_socket_error(queue, ret);
+#endif
+ } else {
+ nvmet_tcp_socket_error(queue, ret);
+ }
goto done;
} else if (ret == 0) {
break;
@@ -1798,6 +1849,7 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
}
if (!status) {
queue->tls_pskid = peerid;
+ queue->user_key_serial = user_key_serial;
queue->state = NVMET_TCP_Q_CONNECTING;
} else
queue->state = NVMET_TCP_Q_FAILED;
@@ -1843,7 +1895,7 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue,
int ret = -EOPNOTSUPP;
struct tls_handshake_args args;
- if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+ if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE && !keyupdate) {
pr_warn("cannot start TLS in state %d\n", queue->state);
return -EINVAL;
}
@@ -1856,6 +1908,7 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue,
args.ta_data = queue;
args.ta_keyring = key_serial(queue->port->nport->keyring);
args.ta_timeout_ms = tls_handshake_timeout * 1000;
+ args.user_key_serial = queue->user_key_serial;
ret = tls_server_hello_psk(&args, GFP_KERNEL, keyupdate);
if (ret) {
--
2.50.1
Powered by blists - more mailing lists