Message-Id: <20230603152227.12335-11-chang.seok.bae@intel.com>
Date: Sat, 3 Jun 2023 08:22:25 -0700
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: linux-kernel@...r.kernel.org, linux-crypto@...r.kernel.org,
dm-devel@...hat.com
Cc: ebiggers@...nel.org, elliott@....com, gmazyland@...il.com,
luto@...nel.org, dave.hansen@...ux.intel.com, tglx@...utronix.de,
bp@...en8.de, mingo@...nel.org, x86@...nel.org,
herbert@...dor.apana.org.au, ardb@...nel.org,
dan.j.williams@...el.com, bernie.keany@...el.com,
charishma1.gairuboyina@...el.com,
lalithambika.krishnakumar@...el.com, nhuck@...gle.com,
chang.seok.bae@...el.com, "David S. Miller" <davem@...emloft.net>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>
Subject: [PATCH v8 10/12] crypto: x86/aesni - Use the proper data type in struct aesni_xts_ctx

Every field in struct aesni_xts_ctx is a byte array sized to hold a
struct crypto_aes_ctx. Each field can therefore be redefined with that
struct type instead of the obscure byte array.

With typed fields, the address of struct aesni_xts_ctx can be aligned
once up front, rather than on every access to a field.

Thus, redefine struct aesni_xts_ctx and align its address at the start.
This entails factoring the common alignment code out into a helper.
That helper also turns out to simplify the runtime alignment in the
other modes, so use it there as well.
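
To illustrate the payoff, here is a minimal, self-contained userspace
sketch (not the kernel code): PTR_ALIGN, AESNI_ALIGN, and the struct
shapes mirror the kernel definitions, while struct aes_ctx and main()
are hypothetical stand-ins. Unlike the aes_align_addr() helper added by
this patch, the version below omits the crypto_tfm_ctx_alignment()
short-circuit and always rounds the address up.

/*
 * Minimal sketch: align the context base address once; every typed,
 * attribute-aligned field is then aligned with no per-access fixup.
 */
#include <stdint.h>
#include <stdio.h>

#define AESNI_ALIGN		16
#define AESNI_ALIGN_ATTR	__attribute__((aligned(AESNI_ALIGN)))
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

/* Hypothetical stand-in for struct crypto_aes_ctx. */
struct aes_ctx {
	uint32_t key_enc[60];
	uint32_t key_dec[60];
	uint32_t key_length;
};

/* The new shape of struct aesni_xts_ctx: typed, per-field aligned. */
struct xts_ctx {
	struct aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
	struct aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
};

/* Simplified counterpart of the patch's aes_align_addr() helper. */
static inline void *aes_align_addr(void *addr)
{
	return PTR_ALIGN(addr, AESNI_ALIGN);
}

int main(void)
{
	/* Simulate a context blob whose base may be under-aligned. */
	uint8_t storage[sizeof(struct xts_ctx) + AESNI_ALIGN];
	struct xts_ctx *ctx = aes_align_addr(storage + 1);

	/* Both fields come out aligned with no per-access fixup. */
	printf("tweak_ctx aligned: %d\n",
	       (uintptr_t)&ctx->tweak_ctx % AESNI_ALIGN == 0);
	printf("crypt_ctx aligned: %d\n",
	       (uintptr_t)&ctx->crypt_ctx % AESNI_ALIGN == 0);
	return 0;
}

With the old byte-array fields, every access had to go through
aes_ctx() to re-derive an aligned pointer; here the single PTR_ALIGN
at the base address is enough.
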
Suggested-by: Eric Biggers <ebiggers@...nel.org>
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
Cc: Herbert Xu <herbert@...dor.apana.org.au>
Cc: "David S. Miller" <davem@...emloft.net>
Cc: Eric Biggers <ebiggers@...nel.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Borislav Petkov <bp@...en8.de>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: x86@...nel.org
Cc: linux-crypto@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
Changes from v7:
* Massage the helper function to be usable for other alignment code
such as aesni_rfc4106_gcm_ctx_get() and generic_gcmaes_ctx_get().
(Eric Biggers)
Changes from v6:
* Add as a new patch. (Eric Biggers)
It was considered better to address this fix before the preparatory
AES-NI code rework.
---
arch/x86/crypto/aesni-intel_glue.c | 51 +++++++++++++++---------------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..589648142c17 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -61,8 +61,8 @@ struct generic_gcmaes_ctx {
};
struct aesni_xts_ctx {
- u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
- u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+ struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
+ struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
};
#define GCM_BLOCK_LEN 16
@@ -80,6 +80,13 @@ struct gcm_context_data {
u8 hash_keys[GCM_BLOCK_LEN * 16];
};
+static inline void *aes_align_addr(void *addr)
+{
+ if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
+ return addr;
+ return PTR_ALIGN(addr, AESNI_ALIGN);
+}
+
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
@@ -201,32 +208,24 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
- unsigned long align = AESNI_ALIGN;
-
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+ return (struct aesni_rfc4106_gcm_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
}
static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
- unsigned long align = AESNI_ALIGN;
-
- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return PTR_ALIGN(crypto_aead_ctx(tfm), align);
+ return (struct generic_gcmaes_ctx *)aes_align_addr(crypto_aead_ctx(tfm));
}
#endif
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
- unsigned long addr = (unsigned long)raw_ctx;
- unsigned long align = AESNI_ALIGN;
+ return (struct crypto_aes_ctx *)aes_align_addr(raw_ctx);
+}

- if (align <= crypto_tfm_ctx_alignment())
- align = 1;
- return (struct crypto_aes_ctx *)ALIGN(addr, align);
+static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+ return (struct aesni_xts_ctx *)aes_align_addr(crypto_skcipher_ctx(tfm));
}
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
@@ -883,7 +882,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int err;
err = xts_verify_key(tfm, key, keylen);
@@ -893,20 +892,20 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+ err = aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->crypt_ctx,
key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+ return aes_set_key_common(crypto_skcipher_tfm(tfm), &ctx->tweak_ctx,
key + keylen, keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
struct skcipher_walk walk;
@@ -942,7 +941,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
/* calculate first value of T */
- aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
+ aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
while (walk.nbytes > 0) {
int nbytes = walk.nbytes;
@@ -951,11 +950,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
nbytes &= ~(AES_BLOCK_SIZE - 1);
if (encrypt)
- aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
nbytes, walk.iv);
kernel_fpu_end();
@@ -983,11 +982,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
kernel_fpu_begin();
if (encrypt)
- aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_encrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
else
- aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+ aesni_xts_decrypt(&ctx->crypt_ctx,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv);
kernel_fpu_end();
--
2.17.1