[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250905024659.811386-8-alistair.francis@wdc.com>
Date: Fri, 5 Sep 2025 12:46:59 +1000
From: alistair23@...il.com
To: chuck.lever@...cle.com,
hare@...nel.org,
kernel-tls-handshake@...ts.linux.dev,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org,
linux-nvme@...ts.infradead.org,
linux-nfs@...r.kernel.org
Cc: kbusch@...nel.org,
axboe@...nel.dk,
hch@....de,
sagi@...mberg.me,
kch@...dia.com,
alistair23@...il.com,
Alistair Francis <alistair.francis@....com>
Subject: [PATCH v2 7/7] nvmet-tcp: Support KeyUpdate
From: Alistair Francis <alistair.francis@....com>
If the nvmet_tcp_try_recv() function returns -EKEYEXPIRED, or if we
receive a KeyUpdate handshake type, then the underlying TLS keys need to
be updated.
If the NVMe Host (TLS client) initiates a KeyUpdate this patch will
allow the NVMe layer to process the KeyUpdate request and forward the
request to userspace. Userspace must then update the key to keep the
connection alive.
This patch allows us to handle the NVMe host sending a KeyUpdate
request without aborting the connection. At this time we don't support
initiating a KeyUpdate.
Link: https://datatracker.ietf.org/doc/html/rfc8446#section-4.6.3
Signed-off-by: Alistair Francis <alistair.francis@....com>
---
v2:
- Use a helper function for KeyUpdates
- Ensure keep alive timer is stopped
- Wait for TLS KeyUpdate to complete
drivers/nvme/target/tcp.c | 90 ++++++++++++++++++++++++++++++++++++---
1 file changed, 84 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index bee0355195f5..dd09940e9635 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -175,6 +175,7 @@ struct nvmet_tcp_queue {
/* TLS state */
key_serial_t tls_pskid;
+ key_serial_t user_session_id;
struct delayed_work tls_handshake_tmo_work;
unsigned long poll_end;
@@ -186,6 +187,8 @@ struct nvmet_tcp_queue {
struct sockaddr_storage sockaddr_peer;
struct work_struct release_work;
+ struct completion tls_complete;
+
int idx;
struct list_head queue_list;
@@ -836,6 +839,11 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
return 1;
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue);
+static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w);
+#endif
+
static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
int budget, int *sends)
{
@@ -844,6 +852,13 @@ static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
if (unlikely(ret < 0)) {
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (ret == -EKEYEXPIRED &&
+ queue->state != NVMET_TCP_Q_DISCONNECTING &&
+ queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+ goto done;
+ }
+#endif
nvmet_tcp_socket_error(queue, ret);
goto done;
} else if (ret == 0) {
@@ -1110,11 +1125,52 @@ static inline bool nvmet_tcp_pdu_valid(u8 type)
return false;
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int update_tls_keys(struct nvmet_tcp_queue *queue)
+{
+	int ret;
+
+	cancel_work(&queue->io_work);
+	handshake_req_cancel(queue->sock->sk);
+	handshake_sk_destruct_req(queue->sock->sk);
+	queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
+
+	/* Restore default callbacks before upcall; writers need the write lock */
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_data_ready = queue->data_ready;
+	queue->sock->sk->sk_state_change = queue->state_change;
+	queue->sock->sk->sk_write_space = queue->write_space;
+	queue->sock->sk->sk_user_data = NULL;
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+
+	nvmet_stop_keep_alive_timer(queue->nvme_sq.ctrl);
+
+	INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
+			  nvmet_tcp_tls_handshake_timeout);
+
+	ret = nvmet_tcp_tls_handshake(queue, HANDSHAKE_KEY_UPDATE_TYPE_RECEIVED);
+
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, 10 * HZ);
+
+	if (ret <= 0) {
+		tls_handshake_cancel(queue->sock->sk);
+		return ret;
+	}
+
+	queue->state = NVMET_TCP_Q_LIVE;
+
+	return ret;
+}
+#endif
+
static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
struct msghdr *msg, char *cbuf)
{
struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
- u8 ctype, level, description;
+ u8 ctype, htype, level, description;
int ret = 0;
ctype = tls_get_record_type(queue->sock->sk, cmsg);
@@ -1135,6 +1191,9 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
ret = -EAGAIN;
}
break;
+ case TLS_RECORD_TYPE_HANDSHAKE:
+ ret = -EAGAIN;
+ break;
default:
/* discard this record type */
pr_err("queue %d: TLS record %d unhandled\n",
@@ -1344,6 +1403,13 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
for (i = 0; i < budget; i++) {
ret = nvmet_tcp_try_recv_one(queue);
if (unlikely(ret < 0)) {
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (ret == -EKEYEXPIRED &&
+ queue->state != NVMET_TCP_Q_DISCONNECTING &&
+ queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+ goto done;
+ }
+#endif
nvmet_tcp_socket_error(queue, ret);
goto done;
} else if (ret == 0) {
@@ -1408,14 +1474,22 @@ static void nvmet_tcp_io_work(struct work_struct *w)
ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
if (ret > 0)
pending = true;
- else if (ret < 0)
- return;
+ else if (ret < 0) {
+ if (ret == -EKEYEXPIRED)
+ update_tls_keys(queue);
+ else
+ return;
+ }
ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
if (ret > 0)
pending = true;
- else if (ret < 0)
- return;
+ else if (ret < 0) {
+ if (ret == -EKEYEXPIRED)
+ update_tls_keys(queue);
+ else
+ return;
+ }
} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
@@ -1798,6 +1872,7 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
}
if (!status) {
queue->tls_pskid = peerid;
+ queue->user_session_id = user_session_id;
queue->state = NVMET_TCP_Q_CONNECTING;
} else
queue->state = NVMET_TCP_Q_FAILED;
@@ -1813,6 +1888,7 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
else
nvmet_tcp_set_queue_sock(queue);
kref_put(&queue->kref, nvmet_tcp_release_queue);
+ complete(&queue->tls_complete);
}
static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
@@ -1843,7 +1919,7 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue,
int ret = -EOPNOTSUPP;
struct tls_handshake_args args;
- if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
+ if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE && !keyupdate) {
pr_warn("cannot start TLS in state %d\n", queue->state);
return -EINVAL;
}
@@ -1856,7 +1932,9 @@ static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue,
args.ta_data = queue;
args.ta_keyring = key_serial(queue->port->nport->keyring);
args.ta_timeout_ms = tls_handshake_timeout * 1000;
+ args.user_session_id = queue->user_session_id;
+ init_completion(&queue->tls_complete);
ret = tls_server_hello_psk(&args, GFP_KERNEL, keyupdate);
if (ret) {
kref_put(&queue->kref, nvmet_tcp_release_queue);
--
2.50.1
Powered by blists - more mailing lists