Message-Id: <20230515093345.396978-9-dhowells@redhat.com>
Date: Mon, 15 May 2023 10:33:37 +0100
From: David Howells <dhowells@...hat.com>
To: netdev@...r.kernel.org
Cc: David Howells <dhowells@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
David Ahern <dsahern@...nel.org>,
Matthew Wilcox <willy@...radead.org>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>,
Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Chuck Lever III <chuck.lever@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Boris Pismenny <borisp@...dia.com>,
John Fastabend <john.fastabend@...il.com>
Subject: [PATCH net-next v7 08/16] tls: Inline do_tcp_sendpages()

do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(),
so inline it, allowing do_tcp_sendpages() to be removed. This is part of
replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set.

Signed-off-by: David Howells <dhowells@...hat.com>
cc: Boris Pismenny <borisp@...dia.com>
cc: John Fastabend <john.fastabend@...il.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: netdev@...r.kernel.org
---
 include/net/tls.h  |  2 +-
 net/tls/tls_main.c | 24 +++++++++++++++---------
 2 files changed, 16 insertions(+), 10 deletions(-)
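For reference, the wrapper being open-coded here: after the earlier
MSG_SPLICE_PAGES conversion in this series, do_tcp_sendpages() amounts to
roughly the following (an illustrative sketch rather than the exact tree
contents):

	ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
				 size_t size, int flags)
	{
		struct bio_vec bvec;
		struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

		/* Describe the single page as a one-element bio_vec iterator. */
		bvec_set_page(&bvec, page, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		/* Hand it to the regular locked sendmsg path. */
		return tcp_sendmsg_locked(sk, &msg, size);
	}

Inlining that body into tls_push_sg() and renaming the in_tcp_sendpages
flag to splicing_pages gives the hunks below.
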
diff --git a/include/net/tls.h b/include/net/tls.h
index 6056ce5a2aa5..5791ca7a189c 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -258,7 +258,7 @@ struct tls_context {
 	struct scatterlist *partially_sent_record;
 	u16 partially_sent_offset;
 
-	bool in_tcp_sendpages;
+	bool splicing_pages;
 	bool pending_open_record_frags;
 
 	struct mutex tx_lock; /* protects partially_sent_* fields and
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index f2e7302a4d96..3d45fdb5c4e9 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -125,7 +125,10 @@ int tls_push_sg(struct sock *sk,
 		u16 first_offset,
 		int flags)
 {
-	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
+	struct bio_vec bvec;
+	struct msghdr msg = {
+		.msg_flags = MSG_SENDPAGE_NOTLAST | MSG_SPLICE_PAGES | flags,
+	};
 	int ret = 0;
 	struct page *p;
 	size_t size;
@@ -134,16 +137,19 @@ int tls_push_sg(struct sock *sk,
 	size = sg->length - offset;
 	offset += sg->offset;
 
-	ctx->in_tcp_sendpages = true;
+	ctx->splicing_pages = true;
 	while (1) {
 		if (sg_is_last(sg))
-			sendpage_flags = flags;
+			msg.msg_flags = flags;
 
 		/* is sending application-limited? */
 		tcp_rate_check_app_limited(sk);
 		p = sg_page(sg);
 retry:
-		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
+		bvec_set_page(&bvec, p, size, offset);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
+
+		ret = tcp_sendmsg_locked(sk, &msg, size);
 
 		if (ret != size) {
 			if (ret > 0) {
@@ -155,7 +161,7 @@ int tls_push_sg(struct sock *sk,
 			offset -= sg->offset;
 			ctx->partially_sent_offset = offset;
 			ctx->partially_sent_record = (void *)sg;
-			ctx->in_tcp_sendpages = false;
+			ctx->splicing_pages = false;
 			return ret;
 		}
 
@@ -169,7 +175,7 @@ int tls_push_sg(struct sock *sk,
 		size = sg->length;
 	}
 
-	ctx->in_tcp_sendpages = false;
+	ctx->splicing_pages = false;
 
 	return 0;
 }
@@ -247,11 +253,11 @@ static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
 
-	/* If in_tcp_sendpages call lower protocol write space handler
+	/* If splicing_pages call lower protocol write space handler
 	 * to ensure we wake up any waiting operations there. For example
-	 * if do_tcp_sendpages where to call sk_wait_event.
+	 * if splicing pages were to call sk_wait_event.
 	 */
-	if (ctx->in_tcp_sendpages) {
+	if (ctx->splicing_pages) {
 		ctx->sk_write_space(sk);
 		return;
 	}