Message-ID: <20240814203850.2240469-24-dhowells@redhat.com>
Date: Wed, 14 Aug 2024 21:38:43 +0100
From: David Howells <dhowells@...hat.com>
To: Christian Brauner <christian@...uner.io>,
Steve French <smfrench@...il.com>,
Matthew Wilcox <willy@...radead.org>
Cc: David Howells <dhowells@...hat.com>,
Jeff Layton <jlayton@...nel.org>,
Gao Xiang <hsiangkao@...ux.alibaba.com>,
Dominique Martinet <asmadeus@...ewreck.org>,
Marc Dionne <marc.dionne@...istor.com>,
Paulo Alcantara <pc@...guebit.com>,
Shyam Prasad N <sprasad@...rosoft.com>,
Tom Talpey <tom@...pey.com>,
Eric Van Hensbergen <ericvh@...nel.org>,
Ilya Dryomov <idryomov@...il.com>,
netfs@...ts.linux.dev,
linux-afs@...ts.infradead.org,
linux-cifs@...r.kernel.org,
linux-nfs@...r.kernel.org,
ceph-devel@...r.kernel.org,
v9fs@...ts.linux.dev,
linux-erofs@...ts.ozlabs.org,
linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
netdev@...r.kernel.org,
linux-kernel@...r.kernel.org,
Steve French <sfrench@...ba.org>,
Enzo Matsumiya <ematsumiya@...e.de>
Subject: [PATCH v2 23/25] cifs: Use iterate_and_advance*() routines directly for hashing

Replace the bespoke cifs routines that hash the contents of ITER_BVEC and
ITER_KVEC iterators with iterate_and_advance_kernel() - a variant of
iterate_and_advance() that only supports kernel-internal ITER_* types and not
UBUF/IOVEC types.

The bespoke ITER_XARRAY handler is left in place because we don't really want
to be calling crypto_shash_update() under the RCU read lock for large amounts
of data; besides, ITER_XARRAY is going to be phased out.

Signed-off-by: David Howells <dhowells@...hat.com>
cc: Steve French <sfrench@...ba.org>
cc: Paulo Alcantara <pc@...guebit.com>
cc: Tom Talpey <tom@...pey.com>
cc: Enzo Matsumiya <ematsumiya@...e.de>
cc: linux-cifs@...r.kernel.org
---
fs/smb/client/cifsencrypt.c | 109 ++++++++----------------------------
include/linux/iov_iter.h | 47 ++++++++++++++++
2 files changed, 70 insertions(+), 86 deletions(-)
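
For reference, the step-callback contract that the new cifs_shash_step()
follows can be illustrated with a minimal, self-contained sketch; the
example_* names below are hypothetical and not part of this patch.  The
callback receives a mapped kernel address and length for each segment and
returns the number of bytes it left unprocessed (0 meaning the whole segment
was consumed and iteration should continue):

/* Hypothetical illustration only; needs <linux/uio.h> and <linux/iov_iter.h>. */
static size_t example_sum_step(void *iter_base, size_t progress, size_t len,
			       void *priv, void *priv2)
{
	u64 *sum = priv;
	const u8 *p = iter_base;
	size_t i;

	for (i = 0; i < len; i++)
		*sum += p[i];
	return 0;	/* whole segment consumed; keep iterating */
}

static u64 example_sum_buffer(void *buf, size_t size)
{
	struct kvec kv = { .iov_base = buf, .iov_len = size };
	struct iov_iter iter;
	u64 sum = 0;

	iov_iter_kvec(&iter, ITER_SOURCE, &kv, 1, size);
	iterate_and_advance_kernel(&iter, size, &sum, NULL, example_sum_step);
	return sum;
}
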
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 6322f0f68a17..991a1ab047e7 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -21,82 +21,10 @@
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/fips.h>
+#include <linux/iov_iter.h>
#include "../common/arc4.h"
#include <crypto/aead.h>
-/*
- * Hash data from a BVEC-type iterator.
- */
-static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- const struct bio_vec *bv = iter->bvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- void *p;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t off, len;
-
- len = bv[i].bv_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- off = bv[i].bv_offset + start;
-
- p = kmap_local_page(bv[i].bv_page);
- ret = crypto_shash_update(shash, p + off, len);
- kunmap_local(p);
- if (ret < 0)
- return ret;
-
- maxsize -= len;
- if (maxsize <= 0)
- break;
- start = 0;
- }
-
- return 0;
-}
-
-/*
- * Hash data from a KVEC-type iterator.
- */
-static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize,
- struct shash_desc *shash)
-{
- const struct kvec *kv = iter->kvec;
- unsigned long start = iter->iov_offset;
- unsigned int i;
- int ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- size_t len;
-
- len = kv[i].iov_len;
- if (start >= len) {
- start -= len;
- continue;
- }
-
- len = min_t(size_t, maxsize, len - start);
- ret = crypto_shash_update(shash, kv[i].iov_base + start, len);
- if (ret < 0)
- return ret;
- maxsize -= len;
-
- if (maxsize <= 0)
- break;
- start = 0;
- }
-
- return 0;
-}
-
/*
* Hash data from an XARRAY-type iterator.
*/
@@ -145,27 +73,36 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
return 0;
}
+static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
+{
+ struct shash_desc *shash = priv;
+ int ret, *pret = priv2;
+
+ ret = crypto_shash_update(shash, iter_base, len);
+ if (ret < 0) {
+ *pret = ret;
+ return len;
+ }
+ return 0;
+}
+
/*
* Pass the data from an iterator into a hash.
*/
static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
struct shash_desc *shash)
{
- if (maxsize == 0)
- return 0;
+ struct iov_iter tmp_iter = *iter;
+ int err = -EIO;
- switch (iov_iter_type(iter)) {
- case ITER_BVEC:
- return cifs_shash_bvec(iter, maxsize, shash);
- case ITER_KVEC:
- return cifs_shash_kvec(iter, maxsize, shash);
- case ITER_XARRAY:
+ if (iov_iter_type(iter) == ITER_XARRAY)
return cifs_shash_xarray(iter, maxsize, shash);
- default:
- pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter));
- WARN_ON_ONCE(1);
- return -EIO;
- }
+
+ if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
+ cifs_shash_step) != maxsize)
+ return err;
+ return 0;
}
int __cifs_calc_signature(struct smb_rqst *rqst,
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
index a223370a59a7..c4aa58032faf 100644
--- a/include/linux/iov_iter.h
+++ b/include/linux/iov_iter.h
@@ -328,4 +328,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
}
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Function to process each segment; given mapped kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A step function, @step, must be provided; since only kernel-internal
+ * iterators are handled, it is always given mapped kernel addresses rather
+ * than user addresses that might fault.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer).  The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and a return of @len means it was processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
#endif /* _LINUX_IOV_ITER_H */
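
The return value convention above also lets a step abort early and pass an
error back through @priv2, which is exactly how cifs_shash_step() and
cifs_shash_iter() use it: the step stashes the error and returns the untouched
length, and the caller treats a short count from iterate_and_advance_kernel()
as failure.  As a hypothetical sketch of the same pattern outside of cifs (the
example_* names are made up):

/* Hypothetical sketch only; mirrors the abort-on-error pattern above. */
static size_t example_check_step(void *iter_base, size_t progress, size_t len,
				 void *priv, void *priv2)
{
	int *perr = priv2;

	/* Reject any segment containing a NUL byte (arbitrary rule). */
	if (memchr(iter_base, 0, len)) {
		*perr = -EINVAL;
		return len;	/* nothing consumed: the walk stops here */
	}
	return 0;
}

static int example_check_iter(const struct iov_iter *iter, size_t size)
{
	struct iov_iter tmp = *iter;	/* copy; don't advance the caller's iterator */
	int err = -EIO;			/* default if the iterator is too short */

	if (iterate_and_advance_kernel(&tmp, size, NULL, &err,
				       example_check_step) != size)
		return err;
	return 0;
}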