Message-Id: <20230331160914.1608208-32-dhowells@redhat.com>
Date: Fri, 31 Mar 2023 17:08:50 +0100
From: David Howells <dhowells@...hat.com>
To: Matthew Wilcox <willy@...radead.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Chuck Lever III <chuck.lever@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
netdev@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Ayush Sawal <ayush.sawal@...lsio.com>
Subject: [PATCH v3 31/55] chelsio: Support MSG_SPLICE_PAGES
Make Chelsio's TLS offload sendmsg() support MSG_SPLICE_PAGES, splicing in
pages from the source iterator if possible and copying the data in
otherwise.
This allows ->sendpage() to be replaced by something that can handle
multiple multipage folios in a single transaction.
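
As a minimal sketch of the caller-side pattern this is working towards
(assuming the MSG_SPLICE_PAGES flag and bio_vec iterator infrastructure
introduced earlier in this series; page, offset, len and sock are placeholder
variables), a former ->sendpage() call becomes a sendmsg() on a BVEC iterator:

	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };
	ssize_t ret;

	/* Point the message iterator at the page fragment to transmit. */
	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

	/* chtls_sendmsg() splices the page into the skb if sendpage_ok()
	 * permits, or copies the data in otherwise.
	 */
	ret = sock_sendmsg(sock, &msg);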
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Ayush Sawal <ayush.sawal@...lsio.com>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: netdev@...r.kernel.org
---
.../chelsio/inline_crypto/chtls/chtls_io.c | 60 ++++++++++++++++++-
1 file changed, 59 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index ae6b17b96bf1..ca3daf5df95c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1092,7 +1092,65 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
if (copy > size)
copy = size;
- if (skb_tailroom(skb) > 0) {
+ if (msg->msg_flags & MSG_SPLICE_PAGES) {
+ struct page *page, **pages = &page;
+ ssize_t part;
+ size_t off, spliced = 0;
+ bool put = false;
+ int i;
+
+ do {
+ i = skb_shinfo(skb)->nr_frags;
+ part = iov_iter_extract_pages(&msg->msg_iter, &pages,
+ copy - spliced, 1, 0, &off);
+ if (part <= 0) {
+ err = part ?: -EIO;
+ goto do_fault;
+ }
+
+ if (!sendpage_ok(page)) {
+ const void *p = kmap_local_page(page);
+ void *q;
+
+ q = page_frag_memdup(NULL, p + off, part,
+ sk->sk_allocation, ULONG_MAX);
+ kunmap_local(p);
+ if (!q) {
+ iov_iter_revert(&msg->msg_iter, part);
+ return -ENOMEM;
+ }
+ page = virt_to_page(q);
+ off = offset_in_page(q);
+ put = true;
+ }
+
+ if (skb_can_coalesce(skb, i, page, off)) {
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], part);
+ spliced += part;
+ if (put)
+ put_page(page);
+ } else if (i < MAX_SKB_FRAGS) {
+ if (!put)
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, off, part);
+ spliced += part;
+ put = false;
+ } else {
+ if (put)
+ put_page(page);
+ if (!spliced)
+ goto new_buf;
+ break;
+ }
+ } while (spliced < copy);
+
+ copy = spliced;
+ skb->len += copy;
+ skb->data_len += copy;
+ skb->truesize += copy;
+ sk->sk_wmem_queued += copy;
+
+ } else if (skb_tailroom(skb) > 0) {
copy = min(copy, skb_tailroom(skb));
if (is_tls_tx(csk))
copy = min_t(int, copy, csk->tlshws.txleft);