Message-Id: <20190507161321.34611-3-keescook@chromium.org>
Date: Tue, 7 May 2019 09:13:16 -0700
From: Kees Cook <keescook@...omium.org>
To: Herbert Xu <herbert@...dor.apana.org.au>
Cc: Kees Cook <keescook@...omium.org>, Joao Moreira <jmoreira@...e.de>,
Eric Biggers <ebiggers@...gle.com>,
Ingo Molnar <mingo@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...en8.de>, x86@...nel.org,
linux-crypto@...r.kernel.org, linux-kernel@...r.kernel.org,
kernel-hardening@...ts.openwall.com
Subject: [PATCH v3 2/7] crypto: x86/crypto: Use new glue function macros
From: Joao Moreira <jmoreira@...e.de>
Convert from function prototype casts to function declaration macros
to avoid triggering Control-Flow Integrity checks during indirect
function calls.
Signed-off-by: Joao Moreira <jmoreira@...e.de>
Co-developed-by: Kees Cook <keescook@...omium.org>
Signed-off-by: Kees Cook <keescook@...omium.org>
---
arch/x86/crypto/serpent_avx2_glue.c | 65 ++++++++++-------------
arch/x86/crypto/serpent_avx_glue.c | 58 +++++++-------------
arch/x86/crypto/serpent_sse2_glue.c | 27 ++++++----
arch/x86/include/asm/crypto/serpent-avx.h | 28 +++++-----
4 files changed, 80 insertions(+), 98 deletions(-)
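For reference, here is a minimal sketch of what the declaration macros
introduced in patch 1/7 might expand to. The real definitions live in
arch/x86/include/asm/crypto/glue_helper.h and are not part of this
patch, so the exact body below is an assumption for illustration, not
the series' actual code:

/*
 * Hypothetical expansion, for illustration only: GLUE_CAST() declares
 * the asm routine with its native prototype and emits a small inline
 * wrapper whose prototype matches common_glue_func_t exactly, so the
 * indirect call made by the glue helper happens between matching
 * function types and passes CFI checking.
 */
#define GLUE_CAST(func, context)				\
asmlinkage void func(struct context *ctx, u8 *dst, const u8 *src); \
static inline void func ## _glue(void *ctx, u8 *dst, const u8 *src) \
{								\
	func((struct context *)ctx, dst, src);			\
}

The previous GLUE_FUNC_CAST() approach instead cast functions such as
serpent_ecb_enc_16way to the generic type in the call-table
initializers, so every indirect call through .fn_u went through a
mismatched prototype and would trip CFI.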
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 03347b16ac9d..36a0cd694792 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -24,18 +24,12 @@
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
/* 16-way AVX2 parallel cipher functions */
-asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
-
-asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
- le128 *iv);
-asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+SERPENT_GLUE(serpent_ecb_enc_16way);
+SERPENT_GLUE(serpent_ecb_dec_16way);
+SERPENT_GLUE_CBC(serpent_cbc_dec_16way);
+SERPENT_GLUE_CTR(serpent_ctr_16way);
+SERPENT_GLUE_XTS(serpent_xts_enc_16way);
+SERPENT_GLUE_XTS(serpent_xts_dec_16way);
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -49,13 +43,13 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) }
+ .fn_u = { .ecb = serpent_ecb_enc_16way_glue }
}, {
.num_blocks = 8,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_enc_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt_glue }
} }
};
@@ -65,13 +59,13 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) }
+ .fn_u = { .ctr = serpent_ctr_16way_glue }
}, {
.num_blocks = 8,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+ .fn_u = { .ctr = serpent_ctr_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+ .fn_u = { .ctr = __serpent_crypt_ctr }
} }
};
@@ -81,13 +75,13 @@ static const struct common_glue_ctx serpent_enc_xts = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) }
+ .fn_u = { .xts = serpent_xts_enc_16way_glue }
}, {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+ .fn_u = { .xts = serpent_xts_enc_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+ .fn_u = { .xts = serpent_xts_enc }
} }
};
@@ -97,13 +91,13 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) }
+ .fn_u = { .ecb = serpent_ecb_dec_16way_glue }
}, {
.num_blocks = 8,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_dec_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt_glue }
} }
};
@@ -113,13 +107,13 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) }
+ .fn_u = { .cbc = serpent_cbc_dec_16way_cbc_glue }
}, {
.num_blocks = 8,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+ .fn_u = { .cbc = serpent_cbc_dec_8way_avx_cbc_glue }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt_cbc_glue }
} }
};
@@ -129,13 +123,13 @@ static const struct common_glue_ctx serpent_dec_xts = {
.funcs = { {
.num_blocks = 16,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) }
+ .fn_u = { .xts = serpent_xts_dec_16way_glue }
}, {
.num_blocks = 8,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+ .fn_u = { .xts = serpent_xts_dec_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+ .fn_u = { .xts = serpent_xts_dec }
} }
};
@@ -151,8 +145,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
- req);
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt_glue, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -171,8 +164,8 @@ static int xts_encrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_enc_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ __serpent_encrypt_glue, &ctx->tweak_ctx,
+ &ctx->crypt_ctx);
}
static int xts_decrypt(struct skcipher_request *req)
@@ -181,8 +174,8 @@ static int xts_decrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_dec_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ __serpent_encrypt_glue, &ctx->tweak_ctx,
+ &ctx->crypt_ctx);
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index 458567ecf76c..897bb3f0116d 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -35,28 +35,11 @@
#include <asm/crypto/serpent-avx.h>
/* 8-way parallel cipher functions */
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
-
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
-
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
-
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
-
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
-
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
@@ -73,15 +56,13 @@ EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__serpent_encrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt_glue);
}
EXPORT_SYMBOL_GPL(serpent_xts_enc);
void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
- glue_xts_crypt_128bit_one(ctx, dst, src, iv,
- GLUE_FUNC_CAST(__serpent_decrypt));
+ glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt_glue);
}
EXPORT_SYMBOL_GPL(serpent_xts_dec);
@@ -117,10 +98,10 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_enc_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt_glue }
} }
};
@@ -130,10 +111,10 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+ .fn_u = { .ctr = serpent_ctr_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+ .fn_u = { .ctr = __serpent_crypt_ctr }
} }
};
@@ -143,10 +124,10 @@ static const struct common_glue_ctx serpent_enc_xts = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+ .fn_u = { .xts = serpent_xts_enc_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+ .fn_u = { .xts = serpent_xts_enc }
} }
};
@@ -156,10 +137,10 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+ .fn_u = { .ecb = serpent_ecb_dec_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt_glue }
} }
};
@@ -169,10 +150,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+ .fn_u = { .cbc = serpent_cbc_dec_8way_avx_cbc_glue }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt_cbc_glue }
} }
};
@@ -182,10 +163,10 @@ static const struct common_glue_ctx serpent_dec_xts = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+ .fn_u = { .xts = serpent_xts_dec_8way_avx_glue }
}, {
.num_blocks = 1,
- .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+ .fn_u = { .xts = serpent_xts_dec }
} }
};
@@ -201,8 +182,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
- req);
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt_glue, req);
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -221,8 +201,8 @@ static int xts_encrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_enc_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ __serpent_encrypt_glue, &ctx->tweak_ctx,
+ &ctx->crypt_ctx);
}
static int xts_decrypt(struct skcipher_request *req)
@@ -231,8 +211,8 @@ static int xts_decrypt(struct skcipher_request *req)
struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
return glue_xts_req_128bit(&serpent_dec_xts, req,
- XTS_TWEAK_CAST(__serpent_encrypt),
- &ctx->tweak_ctx, &ctx->crypt_ctx);
+ __serpent_encrypt_glue, &ctx->tweak_ctx,
+ &ctx->crypt_ctx);
}
static struct skcipher_alg serpent_algs[] = {
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 3dafe137596a..135f6b616bc6 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -40,6 +40,15 @@
#include <asm/crypto/serpent-sse2.h>
#include <asm/crypto/glue_helper.h>
+#define SERPENT_GLUE(func) GLUE_CAST(func, serpent_ctx)
+#define SERPENT_GLUE_CBC(func) GLUE_CAST_CBC(func, serpent_ctx)
+
+SERPENT_GLUE(__serpent_encrypt);
+SERPENT_GLUE(__serpent_decrypt);
+SERPENT_GLUE_CBC(__serpent_decrypt);
+SERPENT_GLUE(serpent_enc_blk_xway);
+SERPENT_GLUE(serpent_dec_blk_xway);
+
static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
@@ -94,10 +103,10 @@ static const struct common_glue_ctx serpent_enc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
+ .fn_u = { .ecb = serpent_enc_blk_xway_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+ .fn_u = { .ecb = __serpent_encrypt_glue }
} }
};
@@ -107,10 +116,10 @@ static const struct common_glue_ctx serpent_ctr = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
+ .fn_u = { .ctr = serpent_crypt_ctr_xway }
}, {
.num_blocks = 1,
- .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
+ .fn_u = { .ctr = serpent_crypt_ctr }
} }
};
@@ -120,10 +129,10 @@ static const struct common_glue_ctx serpent_dec = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
+ .fn_u = { .ecb = serpent_dec_blk_xway_glue }
}, {
.num_blocks = 1,
- .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .ecb = __serpent_decrypt_glue }
} }
};
@@ -133,10 +142,10 @@ static const struct common_glue_ctx serpent_dec_cbc = {
.funcs = { {
.num_blocks = SERPENT_PARALLEL_BLOCKS,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
+ .fn_u = { .cbc = serpent_decrypt_cbc_xway }
}, {
.num_blocks = 1,
- .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+ .fn_u = { .cbc = __serpent_decrypt_cbc_glue }
} }
};
@@ -152,7 +161,7 @@ static int ecb_decrypt(struct skcipher_request *req)
static int cbc_encrypt(struct skcipher_request *req)
{
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
+ return glue_cbc_encrypt_req_128bit(__serpent_encrypt_glue,
req);
}
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
index db7c9cc32234..c95059be3ae6 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
@@ -15,20 +15,20 @@ struct serpent_xts_ctx {
struct serpent_ctx crypt_ctx;
};
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src);
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
- const u8 *src, le128 *iv);
+#define SERPENT_GLUE(func) GLUE_CAST(func, serpent_ctx)
+#define SERPENT_GLUE_CBC(func) GLUE_CAST_CBC(func, serpent_ctx)
+#define SERPENT_GLUE_CTR(func) GLUE_CAST_CTR(func, serpent_ctx)
+#define SERPENT_GLUE_XTS(func) GLUE_CAST_XTS(func, serpent_ctx)
+
+SERPENT_GLUE(__serpent_encrypt);
+SERPENT_GLUE(__serpent_decrypt);
+SERPENT_GLUE_CBC(__serpent_decrypt);
+SERPENT_GLUE(serpent_ecb_enc_8way_avx);
+SERPENT_GLUE(serpent_ecb_dec_8way_avx);
+SERPENT_GLUE_CBC(serpent_cbc_dec_8way_avx);
+SERPENT_GLUE_CTR(serpent_ctr_8way_avx);
+SERPENT_GLUE_XTS(serpent_xts_enc_8way_avx);
+SERPENT_GLUE_XTS(serpent_xts_dec_8way_avx);
extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
--
2.17.1