Message-ID: <20230607181920.2294972-6-dhowells@redhat.com>
Date: Wed, 7 Jun 2023 19:19:11 +0100
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org,
Linus Torvalds <torvalds@...ux-foundation.org>
Cc: David Howells <dhowells@...hat.com>,
Chuck Lever <chuck.lever@...cle.com>,
Boris Pismenny <borisp@...dia.com>,
John Fastabend <john.fastabend@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Jens Axboe <axboe@...nel.dk>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next v6 05/14] tls/sw: Use splice_eof() to flush
Allow splice to end a TLS record after a splice/sendfile has been ended
prematurely by an EOF condition (->splice_read() returned 0).  By that point,
splice has already called TLS's sendmsg() with MSG_MORE set, even though the
user didn't set MSG_MORE, so the record has been left open awaiting more data.
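[Editorial illustration, not part of this patch: roughly how the new op is
meant to be driven from the splice path.  The helper name below is made up;
SPLICE_F_MORE and the ->splice_eof() socket op signature are taken from this
series.]

	/*
	 * Sketch only: after ->splice_read() hits EOF, flush unless the
	 * user explicitly asked for more data with SPLICE_F_MORE.  The
	 * function name is hypothetical; the real wiring is done earlier
	 * in this series.
	 */
	static void splice_flush_on_eof(struct socket *sock,
					unsigned int splice_flags)
	{
		/* User asked for more data to follow: keep the record open. */
		if (splice_flags & SPLICE_F_MORE)
			return;

		/* Let the protocol close out a partially-filled record. */
		if (sock->ops->splice_eof)
			sock->ops->splice_eof(sock);
	}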
Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/
Signed-off-by: David Howells <dhowells@...hat.com>
Reviewed-by: Jakub Kicinski <kuba@...nel.org>
cc: Chuck Lever <chuck.lever@...cle.com>
cc: Boris Pismenny <borisp@...dia.com>
cc: John Fastabend <john.fastabend@...il.com>
cc: Eric Dumazet <edumazet@...gle.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: netdev@...r.kernel.org
---
net/tls/tls.h | 1 +
net/tls/tls_main.c | 2 ++
net/tls/tls_sw.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 77 insertions(+)
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 0672acab2773..4922668fefaa 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -97,6 +97,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+void tls_sw_splice_eof(struct socket *sock);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index e02a0d882ed3..82ec5c654f32 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -957,6 +957,7 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG]
ops[TLS_BASE][TLS_BASE] = *base;

ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof;
ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;

ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
@@ -1027,6 +1028,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],

prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
+ prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;
prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 38acc27a0dd0..a2fb0256ff1c 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1157,6 +1157,80 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
return copied > 0 ? copied : ret;
}

+/*
+ * Handle unexpected EOF during splice without SPLICE_F_MORE set.
+ */
+void tls_sw_splice_eof(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_rec *rec;
+ struct sk_msg *msg_pl;
+ ssize_t copied = 0;
+ bool retrying = false;
+ int ret = 0;
+ int pending;
+
+ if (!ctx->open_rec)
+ return;
+
+ mutex_lock(&tls_ctx->tx_lock);
+ lock_sock(sk);
+
+retry:
+ rec = ctx->open_rec;
+ if (!rec)
+ goto unlock;
+
+ msg_pl = &rec->msg_plaintext;
+
+ /* Check the BPF advisor and perform transmission. */
+ ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
+ &copied, 0);
+ switch (ret) {
+ case 0:
+ case -EAGAIN:
+ if (retrying)
+ goto unlock;
+ retrying = true;
+ goto retry;
+ case -EINPROGRESS:
+ break;
+ default:
+ goto unlock;
+ }
+
+ /* Wait for pending encryptions to get completed */
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ ctx->async_notify = true;
+
+ pending = atomic_read(&ctx->encrypt_pending);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+ if (pending)
+ crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+ else
+ reinit_completion(&ctx->async_wait.completion);
+
+ /* There can be no concurrent accesses, since we have no pending
+ * encrypt operations
+ */
+ WRITE_ONCE(ctx->async_notify, false);
+
+ if (ctx->async_wait.err)
+ goto unlock;
+
+ /* Transmit if any encryptions have completed */
+ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
+ cancel_delayed_work(&ctx->tx_work.work);
+ tls_tx_records(sk, 0);
+ }
+
+unlock:
+ release_sock(sk);
+ mutex_unlock(&tls_ctx->tx_lock);
+}
+
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags)
{