Message-Id: <20171008205053.13201-1-Jason@zx2c4.com>
Date:   Sun,  8 Oct 2017 22:50:53 +0200
From:   "Jason A. Donenfeld" <Jason@...c4.com>
To:     Herbert Xu <herbert@...dor.apana.org.au>,
        linux-crypto@...r.kernel.org, linux-kernel@...r.kernel.org,
        martin@...ongswan.org
Cc:     "Jason A. Donenfeld" <Jason@...c4.com>
Subject: [PATCH] chacha20-ssse3/avx2: satisfy stack validation 2.0

The new stack validator in objtool doesn't like %rsp being saved to and
restored from a plain scratch register (%r8/%r11 here), warning with
something like:

warning: objtool: chacha20_4block_xor_ssse3()+0xa: unsupported stack pointer realignment
warning: objtool: chacha20_8block_xor_avx2()+0x6: unsupported stack pointer realignment

This fixes things up to use code similar to gcc's DRAP register, so that
objtool remains happy.
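
For reference, a minimal sketch of the before/after pattern (the function
name below is made up for illustration and is not part of this patch):

	ENTRY(example_xor_fn)
	#	mov	%rsp,%r11	# old: stash %rsp in a scratch reg,
	#	...			#      which objtool cannot track
	#	mov	%r11,%rsp
		lea	8(%rsp),%r10	# new: %r10 = %rsp before the call
		and	$~63,%rsp	# realign the stack
		sub	$0x80,%rsp	# local scratch space
		# ... work on the aligned stack ...
		lea	-8(%r10),%rsp	# recompute the original %rsp
		ret
	ENDPROC(example_xor_fn)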

Signed-off-by: Jason A. Donenfeld <Jason@...c4.com>
Fixes: baa41469a7b9 ("objtool: Implement stack validation 2.0")
---
 arch/x86/crypto/chacha20-avx2-x86_64.S  | 4 ++--
 arch/x86/crypto/chacha20-ssse3-x86_64.S | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
 
 	vzeroupper
 	# 4 * 32 byte stack, 32-byte aligned
-	mov		%rsp, %r8
+	lea		8(%rsp),%r10
 	and		$~31, %rsp
 	sub		$0x80, %rsp
 
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
 	vmovdqu		%ymm15,0x01e0(%rsi)
 
 	vzeroupper
-	mov		%r8,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
 	# done with the slightly better performing SSSE3 byte shuffling,
 	# 7/12-bit word rotation uses traditional shift+OR.
 
-	mov		%rsp,%r11
+	lea		8(%rsp),%r10
 	sub		$0x80,%rsp
 	and		$~63,%rsp
 
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
 	pxor		%xmm1,%xmm15
 	movdqu		%xmm15,0xf0(%rsi)
 
-	mov		%r11,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_4block_xor_ssse3)
-- 
2.14.2
