Message-ID: <20221128122955.2f946b91@canb.auug.org.au>
Date:   Mon, 28 Nov 2022 12:29:55 +1100
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Herbert Xu <herbert@...dor.apana.org.au>
Cc:     Linux Crypto List <linux-crypto@...r.kernel.org>,
        Eric Biggers <ebiggers@...gle.com>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
        Linux Next Mailing List <linux-next@...r.kernel.org>
Subject: linux-next: manual merge of the tip tree with the crypto tree

Hi all,

Today's linux-next merge of the tip tree got conflicts in:

  arch/x86/crypto/sha1_ni_asm.S
  arch/x86/crypto/sha256-avx-asm.S
  arch/x86/crypto/sha256-avx2-asm.S
  arch/x86/crypto/sha256-ssse3-asm.S
  arch/x86/crypto/sha256_ni_asm.S
  arch/x86/crypto/sm3-avx-asm_64.S
  arch/x86/crypto/sm4-aesni-avx-asm_64.S
  arch/x86/crypto/sm4-aesni-avx2-asm_64.S

between commits:

  32f34bf7e44e ("crypto: x86/sha1 - fix possible crash with CFI enabled")
  19940ebbb59c ("crypto: x86/sha256 - fix possible crash with CFI enabled")
  8ba490d9f5a5 ("crypto: x86/sm3 - fix possible crash with CFI enabled")
  2d203c46a0fa ("crypto: x86/sm4 - fix crash with CFI enabled")

from the crypto tree and commits:

  c2a3ce6fdb12 ("crypto: x86/sha1: Remove custom alignments")
  3ba56d0b8711 ("crypto: x86/sha256: Remove custom alignments")
  2f93238b87dd ("crypto: x86/sm[34]: Remove redundant alignments")

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
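
For anyone unfamiliar with the CFI side of this: the crypto tree commits
annotate these asm routines with SYM_TYPED_FUNC_START because the
routines are called indirectly from C, and a kCFI-enabled build verifies
a type hash at every such indirect call.  A minimal sketch of the C-side
pattern involved, modelled on the sha1 glue code (the prototype, includes
and call path below are illustrative, not quoted from either tree):

#include <linux/linkage.h>
#include <crypto/sha1_base.h>
#include <asm/fpu/api.h>

asmlinkage void sha1_ni_transform(struct sha1_state *digest,
				  const u8 *data, int rounds);

static int sha1_ni_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	int ret;

	/*
	 * sha1_base_do_update() invokes sha1_ni_transform() through a
	 * sha1_block_fn pointer.  Clang's kCFI instruments that indirect
	 * call to compare a type hash stored just before the target's
	 * entry point.  SYM_TYPED_FUNC_START emits that hash for the asm
	 * routine; plain SYM_FUNC_START does not, so the check would trap.
	 */
	kernel_fpu_begin();
	ret = sha1_base_do_update(desc, data, len, sha1_ni_transform);
	kernel_fpu_end();
	return ret;
}

The tip tree side is independent of this: as I read those commits, the
SYM_FUNC_START family (including the typed variant) now applies the
default function alignment itself, which is why the hand-written .align
directives can simply be dropped in the merged result below.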

-- 
Cheers,
Stephen Rothwell

diff --cc arch/x86/crypto/sha1_ni_asm.S
index 3cae5a1bb3d6,cd943b2af2c4..000000000000
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@@ -93,8 -92,7 +93,7 @@@
   * numBlocks: Number of blocks to process
   */
  .text
- .align 32
 -SYM_FUNC_START(sha1_ni_transform)
 +SYM_TYPED_FUNC_START(sha1_ni_transform)
  	push		%rbp
  	mov		%rsp, %rbp
  	sub		$FRAME_SIZE, %rsp
diff --cc arch/x86/crypto/sha256-avx-asm.S
index 06ea30c20828,3649370690c5..000000000000
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@@ -347,8 -346,7 +347,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_avx)
 +SYM_TYPED_FUNC_START(sha256_transform_avx)
- .align 32
  	pushq   %rbx
  	pushq   %r12
  	pushq   %r13
diff --cc arch/x86/crypto/sha256-avx2-asm.S
index 2d2be531a11e,c4c1dc5ee078..000000000000
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@@ -524,8 -523,7 +524,7 @@@ STACK_SIZE	= _CTX      + _CTX_SIZ
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_rorx)
 +SYM_TYPED_FUNC_START(sha256_transform_rorx)
- .align 32
  	pushq	%rbx
  	pushq	%r12
  	pushq	%r13
diff --cc arch/x86/crypto/sha256-ssse3-asm.S
index 7db28839108d,96b7dcdeaebe..000000000000
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@@ -356,8 -355,7 +356,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_ssse3)
 +SYM_TYPED_FUNC_START(sha256_transform_ssse3)
- .align 32
  	pushq   %rbx
  	pushq   %r12
  	pushq   %r13
diff --cc arch/x86/crypto/sha256_ni_asm.S
index 47f93937f798,b3f1a1a12027..000000000000
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@@ -97,8 -96,7 +97,7 @@@
   */
  
  .text
- .align 32
 -SYM_FUNC_START(sha256_ni_transform)
 +SYM_TYPED_FUNC_START(sha256_ni_transform)
  
  	shl		$6, NUM_BLKS		/*  convert to bytes */
  	jz		.Ldone_hash
diff --cc arch/x86/crypto/sm3-avx-asm_64.S
index 8fc5ac681fd6,b28d804ee10d..000000000000
--- a/arch/x86/crypto/sm3-avx-asm_64.S
+++ b/arch/x86/crypto/sm3-avx-asm_64.S
@@@ -328,8 -327,7 +328,7 @@@
   * void sm3_transform_avx(struct sm3_state *state,
   *                        const u8 *data, int nblocks);
   */
- .align 16
 -SYM_FUNC_START(sm3_transform_avx)
 +SYM_TYPED_FUNC_START(sm3_transform_avx)
  	/* input:
  	 *	%rdi: ctx, CTX
  	 *	%rsi: data (64*nblks bytes)
diff --cc arch/x86/crypto/sm4-aesni-avx-asm_64.S
index 22b6560eb9e1,e13c8537b2ec..000000000000
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@@ -420,8 -415,7 +416,7 @@@ SYM_FUNC_END(sm4_aesni_avx_crypt8
   * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -495,8 -489,7 +490,7 @@@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8
   * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -545,8 -538,7 +539,7 @@@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8
   * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
diff --cc arch/x86/crypto/sm4-aesni-avx2-asm_64.S
index 23ee39a8ada8,2212705f7da6..000000000000
--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
@@@ -282,8 -278,7 +279,7 @@@ SYM_FUNC_END(__sm4_crypt_blk16
   * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -395,8 -390,7 +391,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk
   * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -449,8 -443,7 +444,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk
   * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)

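For completeness, a conceptual model of the check those annotations
satisfy; this is only a sketch of what clang's kCFI instrumentation
emits inline at each call site, and KCFI_TYPEID_OF plus the exact load
offset are made up for illustration:

typedef void sha1_block_fn(struct sha1_state *sst, const u8 *src,
			   int blocks);

static void cfi_checked_call(sha1_block_fn *fn, struct sha1_state *st,
			     const u8 *src, int blocks)
{
	/* Hash of the pointer's own prototype (hypothetical macro;
	 * clang computes the real value at compile time). */
	u32 expected = KCFI_TYPEID_OF(sha1_block_fn);

	/* kCFI stores the callee's hash in the bytes just before its
	 * entry point (modulo prefix details).  SYM_TYPED_FUNC_START
	 * emits this word for asm code; with plain SYM_FUNC_START the
	 * load sees unrelated bytes, the compare fails, and the kernel
	 * traps: the "possible crash" named in the crypto tree commits. */
	u32 found = *((const u32 *)fn - 1);

	if (found != expected)
		BUG();

	fn(st, src, blocks);
}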