Date:   Mon, 10 Dec 2018 19:59:11 +0000
From:   Dave Watson <davejwatson@...com>
To:     Herbert Xu <herbert@...dor.apana.org.au>,
        Junaid Shahid <junaids@...gle.com>,
        Steffen Klassert <steffen.klassert@...unet.com>,
        "linux-crypto@...r.kernel.org" <linux-crypto@...r.kernel.org>
CC:     Doron Roberts-Kedes <doronrk@...com>,
        Sabrina Dubroca <sd@...asysnail.net>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        Stephan Mueller <smueller@...onox.de>
Subject: [PATCH 09/12] x86/crypto: aesni: Move ghash_mul to GCM_COMPLETE

Prepare to handle partial blocks between scatter/gather calls.
For the last partial block, we want to calculate the aadhash only
in GCM_COMPLETE; a new partial-block macro will handle both the
aadhash update and the encryption of partial blocks between calls.
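
Conceptually, the trailing partial block is now folded into the hash
exactly once, in GCM_COMPLETE, and only when PBlockLen is non-zero.
Below is a minimal C sketch of that control flow, not the actual aesni
glue code: gcm_ctx, ghash_mul_sw, gcm_update_tail and gcm_complete are
illustrative names, and ghash_mul_sw is a plain bitwise GF(2^128)
multiply standing in for the PCLMULQDQ-based \GHASH_MUL macro.

#include <stdint.h>
#include <string.h>

struct gcm_ctx {
	uint8_t  aad_hash[16];  /* running GHASH state (AadHash in the asm) */
	uint8_t  hash_key[16];  /* H = AES_K(0^128)   (HashKey in the asm)  */
	uint64_t pblock_len;    /* bytes pending in the partial block       */
};

/* Software GF(2^128) multiply, x = x * y, in the standard GHASH bit
 * order of SP 800-38D (bit 0 = MSB of byte 0). */
static void ghash_mul_sw(uint8_t x[16], const uint8_t y[16])
{
	uint8_t z[16] = { 0 };
	uint8_t v[16];
	int i, j, lsb;

	memcpy(v, y, 16);
	for (i = 0; i < 128; i++) {
		if (x[i / 8] & (0x80 >> (i % 8)))       /* bit i of x set? */
			for (j = 0; j < 16; j++)
				z[j] ^= v[j];
		/* v = v >> 1, reduced by R = 0xE1 || 0^120 on carry-out */
		lsb = v[15] & 1;
		for (j = 15; j > 0; j--)
			v[j] = (uint8_t)((v[j] >> 1) | (v[j - 1] << 7));
		v[0] >>= 1;
		if (lsb)
			v[0] ^= 0xE1;
	}
	memcpy(x, z, 16);
}

/* Per scatter/gather call, tail path (_final_ghash_mul): XOR the masked
 * trailing bytes into the running hash but do NOT multiply yet -- a later
 * call may still add bytes to the same partial block. */
static void gcm_update_tail(struct gcm_ctx *ctx, const uint8_t *tail,
			    size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		ctx->aad_hash[i] ^= tail[i];
	ctx->pblock_len = len;
	/* before this patch: ghash_mul_sw(ctx->aad_hash, ctx->hash_key); */
}

/* GCM_COMPLETE: fold a pending partial block into the hash exactly once,
 * then go on to hash len(A) || len(C) and compute the tag. */
static void gcm_complete(struct gcm_ctx *ctx)
{
	if (ctx->pblock_len)
		ghash_mul_sw(ctx->aad_hash, ctx->hash_key);
	/* ... append the length block, final multiply, encrypt J0 ... */
}

The real assembly keeps the state byte-reflected via SHUF_MASK so the
reduction can use carry-less multiplies; the sketch uses the standard
GHASH bit order for readability. Deferring the multiply lets the
forthcoming partial-block macro continue a partial block across
scatter/gather calls, with the final reduction done once at completion.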

Signed-off-by: Dave Watson <davejwatson@...com>
---
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 0a9cdcfdd987..44a4a8b43ca4 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -488,8 +488,7 @@ _final_ghash_mul\@:
         vpand   %xmm1, %xmm2, %xmm2
         vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
         vpxor   %xmm2, %xmm14, %xmm14
-	#GHASH computation for the last <16 Byte block
-        \GHASH_MUL       %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+
         vmovdqu %xmm14, AadHash(arg2)
         sub     %r13, %r11
         add     $16, %r11
@@ -500,8 +499,7 @@ _final_ghash_mul\@:
         vpand   %xmm1, %xmm9, %xmm9                  # mask out top 16-r13 bytes of xmm9
         vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
         vpxor   %xmm9, %xmm14, %xmm14
-	#GHASH computation for the last <16 Byte block
-        \GHASH_MUL       %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+
         vmovdqu %xmm14, AadHash(arg2)
         sub     %r13, %r11
         add     $16, %r11
@@ -541,6 +539,14 @@ _multiple_of_16_bytes\@:
         vmovdqu AadHash(arg2), %xmm14
         vmovdqu HashKey(arg2), %xmm13
 
+        mov PBlockLen(arg2), %r12
+        cmp $0, %r12
+        je _partial_done\@
+
+	#GHASH computation for the last <16 Byte block
+        \GHASH_MUL       %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+
+_partial_done\@:
         mov AadLen(arg2), %r12                          # r12 = aadLen (number of bytes)
         shl     $3, %r12                             # convert into number of bits
         vmovd   %r12d, %xmm15                        # len(A) in xmm15
-- 
2.17.1
