Message-ID: <20251211011846.8179-8-ebiggers@kernel.org>
Date: Wed, 10 Dec 2025 17:18:39 -0800
From: Eric Biggers <ebiggers@...nel.org>
To: linux-crypto@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
Ard Biesheuvel <ardb@...nel.org>,
"Jason A . Donenfeld" <Jason@...c4.com>,
Herbert Xu <herbert@...dor.apana.org.au>,
linux-arm-kernel@...ts.infradead.org,
x86@...nel.org,
Eric Biggers <ebiggers@...nel.org>
Subject: [PATCH 07/12] crypto: adiantum - Use scatter_walk API instead of sg_miter

Make adiantum_hash_message() use the scatter_walk API instead of
sg_miter. scatter_walk is a bit simpler and also more efficient. For
example, unlike sg_miter, scatter_walk doesn't require that the number
of scatterlist entries be calculated up-front.
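
For reference, the conversion target is the following iteration pattern
(a minimal sketch, not code added by this patch: sg_xor_fold() is a
hypothetical helper, and <crypto/scatterwalk.h> is assumed to provide
the scatterwalk_*() declarations):

	/* Hypothetical example: XOR together 'len' bytes of a scatterlist. */
	static u8 sg_xor_fold(struct scatterlist *sgl, unsigned int len)
	{
		struct scatter_walk walk;
		u8 acc = 0;

		/* No sg_nents() pre-pass is needed. */
		scatterwalk_start(&walk, sgl);
		while (len) {
			/* Map the current entry; n <= len bytes are at walk.addr. */
			unsigned int n = scatterwalk_next(&walk, len);
			const u8 *p = walk.addr;
			unsigned int i;

			for (i = 0; i < n; i++)
				acc ^= p[i];
			/* Unmap the entry and advance the walk past the n bytes. */
			scatterwalk_done_src(&walk, n);
			len -= n;
		}
		return acc;
	}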

Signed-off-by: Eric Biggers <ebiggers@...nel.org>
---
 crypto/adiantum.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)
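
(An aside to aid review, not part of the patch itself: the single-page
fast paths below replace the "sg_nents() == 1" test with a direct check
on the first scatterlist entry.  The shape of the new check, where "sg"
stands for the first source or destination entry and the branch bodies
are placeholders, is roughly:

	if (sg->length >= req->cryptlen &&
	    sg->offset + req->cryptlen <= PAGE_SIZE) {
		/*
		 * First entry covers the whole request and stays within
		 * one page, so one kmap_local_page() call suffices.
		 */
	} else {
		/* General case: walk the scatterlist entry by entry. */
	}

Unlike the old test, this needs no up-front walk over the list, and it
also catches lists whose first entry alone covers the whole request.)
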
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index bbe519fbd739..519e95228ad8 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -367,30 +367,27 @@ static void nhpoly1305_final(struct nhpoly1305_ctx *ctx,
  * evaluated as a polynomial in GF(2^{130}-5), like in the Poly1305 MAC. Note
  * that the polynomial evaluation by itself would suffice to achieve the ε-∆U
  * property; NH is used for performance since it's much faster than Poly1305.
  */
 static void adiantum_hash_message(struct skcipher_request *req,
-				  struct scatterlist *sgl, unsigned int nents,
-				  le128 *out)
+				  struct scatterlist *sgl, le128 *out)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct sg_mapping_iter miter;
-	unsigned int i, n;
+	unsigned int len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	struct scatter_walk walk;
 
 	nhpoly1305_init(&rctx->u.hash_ctx);
 
-	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
-	for (i = 0; i < bulk_len; i += n) {
-		sg_miter_next(&miter);
-		n = min_t(unsigned int, miter.length, bulk_len - i);
-		nhpoly1305_update(&rctx->u.hash_ctx, tctx, miter.addr, n);
+	scatterwalk_start(&walk, sgl);
+	while (len) {
+		unsigned int n = scatterwalk_next(&walk, len);
+		nhpoly1305_update(&rctx->u.hash_ctx, tctx, walk.addr, n);
+		scatterwalk_done_src(&walk, n);
+		len -= n;
 	}
-	sg_miter_stop(&miter);
-
 	nhpoly1305_final(&rctx->u.hash_ctx, tctx, out);
 }
 
 /* Continue Adiantum encryption/decryption after the stream cipher step */
 static int adiantum_finish(struct skcipher_request *req)
@@ -398,11 +395,10 @@ static int adiantum_finish(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct scatterlist *dst = req->dst;
-	const unsigned int dst_nents = sg_nents(dst);
 	le128 digest;
 
 	/* If decrypting, decrypt C_M with the block cipher to get P_M */
 	if (!rctx->enc)
 		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
@@ -412,11 +408,12 @@ static int adiantum_finish(struct skcipher_request *req)
 	 * Second hash step
 	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
 	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
 	 */
 	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
+	if (dst->length >= req->cryptlen &&
+	    dst->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page destination */
 		struct page *page = sg_page(dst);
 		void *virt = kmap_local_page(page) + dst->offset;
 
 		nhpoly1305_init(&rctx->u.hash_ctx);
@@ -426,11 +423,11 @@ static int adiantum_finish(struct skcipher_request *req)
 		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
 		flush_dcache_page(page);
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any destination scatterlist */
-		adiantum_hash_message(req, dst, dst_nents, &digest);
+		adiantum_hash_message(req, dst, &digest);
 		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
 					 bulk_len, sizeof(le128), 1);
 	}
 	return 0;
@@ -451,11 +448,10 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
 	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
 	struct scatterlist *src = req->src;
-	const unsigned int src_nents = sg_nents(src);
 	unsigned int stream_len;
 	le128 digest;
 
 	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
 		return -EINVAL;
@@ -466,22 +462,23 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	 * First hash step
 	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
 	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
 	 */
 	adiantum_hash_header(req);
-	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
+	if (src->length >= req->cryptlen &&
+	    src->offset + req->cryptlen <= PAGE_SIZE) {
 		/* Fast path for single-page source */
 		void *virt = kmap_local_page(sg_page(src)) + src->offset;
 
 		nhpoly1305_init(&rctx->u.hash_ctx);
 		nhpoly1305_update(&rctx->u.hash_ctx, tctx, virt, bulk_len);
 		nhpoly1305_final(&rctx->u.hash_ctx, tctx, &digest);
 		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
 		kunmap_local(virt);
 	} else {
 		/* Slow path that works for any source scatterlist */
-		adiantum_hash_message(req, src, src_nents, &digest);
+		adiantum_hash_message(req, src, &digest);
 		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
 					 bulk_len, sizeof(le128), 0);
 	}
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
--
2.52.0