Message-Id: <20231122224719.4042108-4-arnd@kernel.org>
Date: Wed, 22 Nov 2023 23:47:19 +0100
From: Arnd Bergmann <arnd@...nel.org>
To: linux-nvme@...ts.infradead.org
Cc: Arnd Bergmann <arnd@...db.de>, Keith Busch <kbusch@...nel.org>,
Jens Axboe <axboe@...nel.dk>, Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>,
Chaitanya Kulkarni <kch@...dia.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 3/3] nvme: tcp: fix compile-time checks for TLS mode
From: Arnd Bergmann <arnd@...db.de>
When CONFIG_NVME_KEYRING is enabled as a loadable module, but the TCP
host code is built-in, it fails to link:
arm-linux-gnueabi-ld: drivers/nvme/host/tcp.o: in function `nvme_tcp_setup_ctrl':
tcp.c:(.text+0x1940): undefined reference to `nvme_tls_psk_default'
The problem is that the compile-time conditionals are inconsistent here,
using a mix of #ifdef CONFIG_NVME_TCP_TLS, IS_ENABLED(CONFIG_NVME_TCP_TLS)
and IS_ENABLED(CONFIG_NVME_KEYRING) checks, with CONFIG_NVME_KEYRING
controlling whether the implementation is actually built.
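
For background (illustration only, with invented names, not part of the
change): IS_ENABLED(CONFIG_FOO) evaluates to 1 for both =y and =m, while
#ifdef CONFIG_FOO only matches =y, so an IS_ENABLED()-style check in
built-in code can keep a reference to a symbol that ends up in a module.
A minimal user-space sketch of the two guard styles:

#include <stdio.h>

/*
 * Stand-ins for the generated kconfig macros; CONFIG_FOO=m is simulated,
 * so the #ifdef-style guard is false while the IS_ENABLED()-style guard
 * is true.
 */
#define SIM_IS_ENABLED_FOO 1	/* 1 for =y or =m */
/* #define SIM_CONFIG_FOO */	/* defined only for =y */

int foo_impl(void)		/* would live in foo.ko when =m */
{
	return 1;
}

int main(void)
{
#ifdef SIM_CONFIG_FOO
	/* compiled only when built in; never references the module */
	printf("built-in path: %d\n", foo_impl());
#endif
	if (SIM_IS_ENABLED_FOO) {
		/*
		 * Compiled for =y and =m alike, so the reference to
		 * foo_impl() has to resolve at link time.
		 */
		printf("enabled path: %d\n", foo_impl());
	}
	return 0;
}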
Change it to use IS_ENABLED(CONFIG_NVME_TCP_TLS) checks consistently,
which should help readability and make it less error-prone. Combining
it with the check for the ctrl->opts->tls flag lets the compiler drop
all the TLS code in configurations without this feature, which also
helps runtime behavior in addition to avoiding the link failure.
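
Schematically, the combined check boils down to the following shape
(simplified stand-alone model with invented names, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

#define SIM_IS_ENABLED_TLS 0	/* feature compiled out in this build */

struct opts {
	bool tls;
};

static bool want_tls(const struct opts *opts)
{
	if (!SIM_IS_ENABLED_TLS)
		return false;	/* constant false: TLS paths in callers become dead code */

	return opts->tls;
}

int main(void)
{
	struct opts opts = { .tls = true };

	if (want_tls(&opts))
		printf("would start the TLS handshake\n");
	else
		printf("TLS path dropped or skipped\n");

	return 0;
}

With the option enabled, the same helper simply falls through to the
runtime flag.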
To make it possible for the compiler to build the dead code, both
the tls_handshake_timeout variable and the TLS-specific members
of nvme_tcp_queue need to be moved out of the #ifdef block as well,
but at least the former of these gets optimized out again.
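
To illustrate that last point (sketch only, invented names): anything named
inside the now always-compiled branch still needs a visible definition, but
an otherwise unused definition is discarded again:

#include <stdio.h>

#define SIM_IS_ENABLED_TLS 0	/* feature compiled out in this build */

/*
 * Must stay outside any #ifdef: the branch below is still parsed and
 * type-checked even though it is never taken, so everything it names
 * needs a definition.  Being otherwise unused, the variable is then
 * optimized out again.
 */
static int handshake_timeout = 10;

struct queue {
	int tls_err;		/* likewise kept unconditionally */
};

int main(void)
{
	struct queue q = { 0 };

	if (SIM_IS_ENABLED_TLS)
		printf("timeout=%d err=%d\n", handshake_timeout, q.tls_err);

	return 0;
}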
Signed-off-by: Arnd Bergmann <arnd@...db.de>
---
drivers/nvme/host/tcp.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 89661a9cf850..ee7aa8478cfb 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -36,11 +36,11 @@ static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
-#ifdef CONFIG_NVME_TCP_TLS
/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
+#ifdef CONFIG_NVME_TCP_TLS
module_param(tls_handshake_timeout, int, 0644);
MODULE_PARM_DESC(tls_handshake_timeout,
"nvme TLS handshake timeout in seconds (default 10)");
@@ -161,10 +161,8 @@ struct nvme_tcp_queue {
struct ahash_request *snd_hash;
__le32 exp_ddgst;
__le32 recv_ddgst;
-#ifdef CONFIG_NVME_TCP_TLS
struct completion tls_complete;
int tls_err;
-#endif
struct page_frag_cache pf_cache;
void (*state_change)(struct sock *);
@@ -207,6 +205,14 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl)
+{
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
+ return 0;
+
+ return ctrl->opts->tls;
+}
+
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
{
u32 queue_idx = nvme_tcp_queue_id(queue);
@@ -1412,7 +1418,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
memset(&msg, 0, sizeof(msg));
iov.iov_base = icresp;
iov.iov_len = sizeof(*icresp);
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
@@ -1424,7 +1430,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
ret = -ENOTCONN;
- if (queue->ctrl->ctrl.opts->tls) {
+ if (nvme_tcp_tls(&queue->ctrl->ctrl)) {
ctype = tls_get_record_type(queue->sock->sk,
(struct cmsghdr *)cbuf);
if (ctype != TLS_RECORD_TYPE_DATA) {
@@ -1548,7 +1554,6 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
-#ifdef CONFIG_NVME_TCP_TLS
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
{
struct nvme_tcp_queue *queue = data;
@@ -1625,14 +1630,6 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
}
return ret;
}
-#else
-static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
- struct nvme_tcp_queue *queue,
- key_serial_t pskid)
-{
- return -EPROTONOSUPPORT;
-}
-#endif
static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
key_serial_t pskid)
@@ -1759,7 +1756,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
}
/* If PSKs are configured try to start TLS */
- if (pskid) {
+ if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) {
ret = nvme_tcp_start_tls(nctrl, queue, pskid);
if (ret)
goto err_init_connect;
@@ -1916,7 +1913,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
int ret;
key_serial_t pskid = 0;
- if (ctrl->opts->tls) {
+ if (nvme_tcp_tls(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
else
@@ -1949,7 +1946,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (ctrl->opts->tls && !ctrl->tls_key) {
+ if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) {
dev_err(ctrl->device, "no PSK negotiated\n");
return -ENOKEY;
}
--
2.39.2