Message-Id: <20230928072508.218368-4-chang.seok.bae@intel.com>
Date: Thu, 28 Sep 2023 00:25:08 -0700
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: linux-kernel@...r.kernel.org, linux-crypto@...r.kernel.org
Cc: herbert@...dor.apana.org.au, davem@...emloft.net,
ebiggers@...nel.org, x86@...nel.org, chang.seok.bae@...el.com
Subject: [PATCH v2 3/3] crypto: x86/aesni - Perform address alignment early for XTS mode

Currently, each field in struct aesni_xts_ctx is aligned immediately
before every access. This alignment can instead be performed once,
ahead of time.
Introduce a helper function that converts a struct crypto_skcipher *tfm
into an aligned struct aesni_xts_ctx *ctx pointer. Call this helper at
the beginning of each XTS function, then remove the now-redundant
per-access alignment code.
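
For context, both aes_ctx() and the new helper lean on aes_align_addr(),
which already exists in aesni-intel_glue.c and only rounds the pointer
up when the crypto API's own alignment guarantee is insufficient. A
rough sketch of that existing helper (shown for illustration only, not
part of this diff; AESNI_ALIGN is 16 in this file):

static inline void *aes_align_addr(void *addr)
{
	/*
	 * If the crypto API already hands out sufficiently aligned
	 * context memory, use the pointer as is.
	 */
	if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
		return addr;
	/* Otherwise round it up to the next 16-byte boundary. */
	return PTR_ALIGN(addr, AESNI_ALIGN);
}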
Suggested-by: Eric Biggers <ebiggers@...nel.org>
Link: https://lore.kernel.org/all/ZFWQ4sZEVu%2FLHq+Q@gmail.com/
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
Cc: linux-crypto@...r.kernel.org
Cc: x86@...nel.org
Cc: linux-kernel@...r.kernel.org
---
V1->V2: drop the cast (Eric).
---
arch/x86/crypto/aesni-intel_glue.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 80e28a01aa3a..b1d90c25975a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -223,6 +223,11 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
return aes_align_addr(raw_ctx);
}
+static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+ return aes_align_addr(crypto_skcipher_ctx(tfm));
+}
+
static int aes_set_key_common(struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
@@ -875,7 +880,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -885,18 +890,18 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(aes_ctx(&ctx->crypt_ctx), key, keylen);
+ err = aes_set_key_common(&ctx->crypt_ctx, key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(aes_ctx(&ctx->tweak_ctx), key + keylen, keylen);
+ return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
struct skcipher_walk walk;
@@ -932,7 +937,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
/* calculate first value of T */
- aesni_enc(aes_ctx(&ctx->tweak_ctx), walk.iv, walk.iv);
+ aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
while (walk.nbytes > 0) {
int nbytes = walk.nbytes;
@@ -941,11 +946,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
nbytes &= ~(AES_BLOCK_SIZE - 1);
if (encrypt)
- aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
kernel_fpu_end();
@@ -973,11 +978,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
if (encrypt)
- aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
--
2.34.1