Message-Id: <20230329141354.516864-27-dhowells@redhat.com>
Date: Wed, 29 Mar 2023 15:13:32 +0100
From: David Howells <dhowells@...hat.com>
To: Matthew Wilcox <willy@...radead.org>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: David Howells <dhowells@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
Christoph Hellwig <hch@...radead.org>,
Jens Axboe <axboe@...nel.dk>, Jeff Layton <jlayton@...nel.org>,
Christian Brauner <brauner@...nel.org>,
Chuck Lever III <chuck.lever@...cle.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
netdev@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Herbert Xu <herbert@...dor.apana.org.au>,
linux-crypto@...r.kernel.org
Subject: [RFC PATCH v2 26/48] crypto: af_alg/hash: Support MSG_SPLICE_PAGES
Make AF_ALG sendmsg() support MSG_SPLICE_PAGES in the hashing code. This
causes pages to be spliced from the source iterator if possible.

This allows ->sendpage() to be replaced by something that can handle
multiple multipage folios in a single transaction.
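
To illustrate (this is only a sketch, not code from this series; the
function name, "sock", the folios and the lengths stand in for whatever
state the caller already has), a kernel-side caller replacing its
->sendpage() usage can describe several folios with a BVEC iterator and
splice them across in one sendmsg() call:

	static int splice_folios_to_hash(struct socket *sock,
					 struct folio *folio0, size_t len0,
					 struct folio *folio1, size_t len1)
	{
		struct bio_vec bv[2];
		struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

		/* Each bio_vec may cover a whole (possibly multipage) folio. */
		bvec_set_folio(&bv[0], folio0, len0, 0);
		bvec_set_folio(&bv[1], folio1, len1, 0);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bv, 2, len0 + len1);

		/* hash_sendmsg() extracts these pages straight into a
		 * scatterlist rather than copying the data.
		 */
		return sock_sendmsg(sock, &msg);
	}

With MSG_SPLICE_PAGES the pages are referenced rather than copied; for
user-backed iterators the pages are pinned instead and released again by
af_alg_free_sg() (see the need_unpin handling below).
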
[!] Note that this makes use of netfs_extract_iter_to_sg() from netfslib.
This probably needs moving to core code somewhere.
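
For reference, the helper's calling convention (as it currently exists in
netfslib; nothing here is added by this patch, and the parameter names are
approximate) is along these lines:

	ssize_t netfs_extract_iter_to_sg(struct iov_iter *iter, size_t maxsize,
					 struct sg_table *sgtable,
					 unsigned int sg_max,
					 iov_iter_extraction_t extraction_flags);

It adds up to sg_max scatterlist entries covering at most maxsize bytes of
the iterator, pinning the pages if the iterator is user-backed (hence the
need_unpin/unpin_user_page() handling in af_alg_free_sg()), and returns
the number of bytes mapped or a negative error code.
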
Signed-off-by: David Howells <dhowells@...hat.com>
cc: Herbert Xu <herbert@...dor.apana.org.au>
cc: "David S. Miller" <davem@...emloft.net>
cc: Eric Dumazet <edumazet@...gle.com>
cc: Jakub Kicinski <kuba@...nel.org>
cc: Paolo Abeni <pabeni@...hat.com>
cc: Jens Axboe <axboe@...nel.dk>
cc: Matthew Wilcox <willy@...radead.org>
cc: linux-crypto@...r.kernel.org
cc: netdev@...r.kernel.org
---
crypto/af_alg.c | 11 +++--
crypto/algif_hash.c | 99 ++++++++++++++++++++++++++++-----------------
2 files changed, 70 insertions(+), 40 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7fe8c8db6bb5..686610a4986f 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -543,9 +543,14 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
 	int i;
 
-	if (sgl->need_unpin)
-		for (i = 0; i < sgl->sgt.nents; i++)
-			unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+	if (sgl->sgt.sgl) {
+		if (sgl->need_unpin)
+			for (i = 0; i < sgl->sgt.nents; i++)
+				unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+		if (sgl->sgt.sgl != sgl->sgl)
+			kvfree(sgl->sgt.sgl);
+		sgl->sgt.sgl = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index f051fa624bd7..b89c2c50cecc 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -64,77 +64,102 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
-	int limit = ALG_MAX_PAGES * PAGE_SIZE;
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
-	long copied = 0;
+	ssize_t copied = 0;
+	size_t len, max_pages = ALG_MAX_PAGES, npages;
+	bool continuing = ctx->more, need_init = false;
 	int err;
 
-	if (limit > sk->sk_sndbuf)
-		limit = sk->sk_sndbuf;
+	/* Don't limit to ALG_MAX_PAGES if the pages are all already pinned. */
+	if (!user_backed_iter(&msg->msg_iter))
+		max_pages = INT_MAX;
+	else
+		max_pages = min_t(size_t, max_pages,
+				  DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
 
 	lock_sock(sk);
-	if (!ctx->more) {
+	if (!continuing) {
 		if ((msg->msg_flags & MSG_MORE))
 			hash_free_result(sk, ctx);
-
-		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
-		if (err)
-			goto unlock;
+		need_init = true;
 	}
 
 	ctx->more = false;
 
 	while (msg_data_left(msg)) {
-		int len = msg_data_left(msg);
-
-		if (len > limit)
-			len = limit;
-
 		ctx->sgl.sgt.sgl = ctx->sgl.sgl;
 		ctx->sgl.sgt.nents = 0;
 		ctx->sgl.sgt.orig_nents = 0;
 
-		len = netfs_extract_iter_to_sg(&msg->msg_iter, len,
-					       &ctx->sgl.sgt, ALG_MAX_PAGES, 0);
-		if (len < 0) {
-			err = copied ? 0 : len;
-			goto unlock;
+		err = -EIO;
+		npages = iov_iter_npages(&msg->msg_iter, max_pages);
+		if (npages == 0)
+			goto unlock_free;
+
+		if (npages > ARRAY_SIZE(ctx->sgl.sgl)) {
+			err = -ENOMEM;
+			ctx->sgl.sgt.sgl =
+				kvmalloc(array_size(npages, sizeof(*ctx->sgl.sgt.sgl)),
+					 GFP_KERNEL);
+			if (!ctx->sgl.sgt.sgl)
+				goto unlock_free;
 		}
+		sg_init_table(ctx->sgl.sgl, npages);
 
 		ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
 
-		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, NULL, len);
+		err = netfs_extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
+					       &ctx->sgl.sgt, npages, 0);
+		if (err < 0)
+			goto unlock_free;
+		len = err;
+		sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
 
-		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
-				      &ctx->wait);
-		af_alg_free_sg(&ctx->sgl);
-		if (err) {
-			iov_iter_revert(&msg->msg_iter, len);
-			goto unlock;
+		if (!msg_data_left(msg)) {
+			err = hash_alloc_result(sk, ctx);
+			if (err)
+				goto unlock_free;
 		}
 
-		copied += len;
-	}
+		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, ctx->result, len);
 
-	err = 0;
+		if (!msg_data_left(msg) && !continuing && !(msg->msg_flags & MSG_MORE)) {
+			err = crypto_ahash_digest(&ctx->req);
+		} else {
+			if (need_init) {
+				err = crypto_wait_req(crypto_ahash_init(&ctx->req),
+						      &ctx->wait);
+				if (err)
+					goto unlock_free;
+				need_init = false;
+			}
+
+			if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
+				err = crypto_ahash_update(&ctx->req);
+			else
+				err = crypto_ahash_finup(&ctx->req);
+			continuing = true;
+		}
 
-	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more) {
-		err = hash_alloc_result(sk, ctx);
+		err = crypto_wait_req(err, &ctx->wait);
 		if (err)
-			goto unlock;
+			goto unlock_free;
 
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
-				      &ctx->wait);
+		copied += len;
+		af_alg_free_sg(&ctx->sgl);
 	}
 
+	ctx->more = msg->msg_flags & MSG_MORE;
+	err = 0;
 unlock:
 	release_sock(sk);
+	return copied ?: err;
 
-	return err ?: copied;
+unlock_free:
+	af_alg_free_sg(&ctx->sgl);
+	goto unlock;
 }
 
 static ssize_t hash_sendpage(struct socket *sock, struct page *page,