# Haswell 4.43[+3.6(4.2)] 8.00(8.58) 4.55(5.21) +75%(+65%)
# Skylake 2.63[+3.5(4.1)] 6.17(6.69) 4.23(4.44) +46%(+51%)
# Bulldozer 5.77[+6.0] 11.72 6.37 +84%
+# Ryzen(**) 2.71[+1.93] 4.64 2.74 +69%
+# Goldmont(**) 3.82[+1.70] 5.52 4.20 +31%
#
# AES-192-CBC
# Westmere 4.51 9.81 6.80 +44%
# Sandy Bridge 7.05 12.06(13.15) 7.12(7.72) +69%(+70%)
# Ivy Bridge 7.05 11.65 7.12 +64%
# Haswell 6.19 9.76(10.34) 6.21(6.25) +57%(+65%)
-# Skylake 3.62 7.16(7.68) 4.56(4.76) +57%(+61$)
+# Skylake 3.62 7.16(7.68) 4.56(4.76) +57%(+61%)
# Bulldozer 8.00 13.95 8.25 +69%
+# Ryzen(**) 3.71 5.64 3.72 +52%
+# Goldmont(**) 5.35 7.05 5.76 +22%
#
-# (*) There are two code paths: SSSE3 and AVX. See sha1-568.pl for
+# (*) There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
# background information. Above numbers in parentheses are SSSE3
# results collected on AVX-capable CPU, i.e. apply on OSes that
# don't support AVX.
+# (**) SHAEXT results.
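+#	(SHAEXT denotes the SHA extensions, a.k.a. SHA-NI, code path,
+#	which Goldmont and Ryzen provide.)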
#
-# Needless to mention that it makes no sense to implement "stitched"
+# Needless to say, it makes no sense to implement a "stitched"
# *decrypt* subroutine. Because *both* AESNI-CBC decrypt and SHA1
.type aesni_cbc_sha1_enc_ssse3,\@function,6
.align 32
aesni_cbc_sha1_enc_ssse3:
+.cfi_startproc
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
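+	# (SysV spills the 7th argument to 8(%rsp); Win64 stacks it at
+	# 56(%rsp), past the return address and the 32-byte shadow space)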
#shr \$6,$len # debugging artefact
#jz .Lepilogue_ssse3 # debugging artefact
push %rbx
+.cfi_push %rbx
push %rbp
+.cfi_push %rbp
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
lea `-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset `104+($win64?10*16:0)`
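+	# (frame: 104 bytes of scratch plus, on Win64, a 10*16-byte area
+	# to preserve the non-volatile %xmm6-%xmm15)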
#mov $in0,$inp # debugging artefact
#lea 64(%rsp),$ctx # debugging artefact
___
$r++; unshift(@rndkey,pop(@rndkey));
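+  # (rotates @rndkey so the two round-key registers ping-pong between
+  # iterations)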
};
-sub Xupdate_ssse3_16_31() # recall that $Xi starts wtih 4
+sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
{ use integer;
my $body = shift;
my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
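+  # (the four $body calls yield the scalar SHA-1+AES instruction stream
+  # that is interleaved, a few ops at a time, with the SIMD message-
+  # schedule updates below)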
___
$code.=<<___;
lea `104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa %rsi,56
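+	# (%rsi points at the six saved GP registers; the CFA is 56 bytes
+	# above it: 48 bytes of saves plus the 8-byte return address)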
mov 0(%rsi),%r15
+.cfi_restore %r15
mov 8(%rsi),%r14
+.cfi_restore %r14
mov 16(%rsi),%r13
+.cfi_restore %r13
mov 24(%rsi),%r12
+.cfi_restore %r12
mov 32(%rsi),%rbp
+.cfi_restore %rbp
mov 40(%rsi),%rbx
+.cfi_restore %rbx
lea 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_ssse3:
ret
+.cfi_endproc
.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
___
.type aesni256_cbc_sha1_dec_ssse3,\@function,6
.align 32
aesni256_cbc_sha1_dec_ssse3:
+.cfi_startproc
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
push %rbx
+.cfi_push %rbx
push %rbp
+.cfi_push %rbp
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
lea `-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset `104+($win64?10*16:0)`
___
$code.=<<___ if ($win64);
movaps %xmm6,96+0(%rsp)
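+	# (Win64 treats %xmm6 and up as callee-saved, hence the save)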
___
$code.=<<___;
lea `104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa %rsi,56
mov 0(%rsi),%r15
+.cfi_restore %r15
mov 8(%rsi),%r14
+.cfi_restore %r14
mov 16(%rsi),%r13
+.cfi_restore %r13
mov 24(%rsi),%r12
+.cfi_restore %r12
mov 32(%rsi),%rbp
+.cfi_restore %rbp
mov 40(%rsi),%rbx
+.cfi_restore %rbx
lea 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_dec_ssse3:
ret
+.cfi_endproc
.size aesni256_cbc_sha1_dec_ssse3,.-aesni256_cbc_sha1_dec_ssse3
___
}}}
.type aesni_cbc_sha1_enc_avx,\@function,6
.align 32
aesni_cbc_sha1_enc_avx:
+.cfi_startproc
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
#shr \$6,$len # debugging artefact
#jz .Lepilogue_avx # debugging artefact
push %rbx
+.cfi_push %rbx
push %rbp
+.cfi_push %rbp
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
lea `-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset `104+($win64?10*16:0)`
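+	# (same frame layout as the SSSE3 path above)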
#mov $in0,$inp # debugging artefact
#lea 64(%rsp),$ctx # debugging artefact
___
$r++; unshift(@rndkey,pop(@rndkey));
};
-sub Xupdate_avx_16_31() # recall that $Xi starts wtih 4
+sub Xupdate_avx_16_31() # recall that $Xi starts with 4
{ use integer;
my $body = shift;
my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
___
$code.=<<___;
lea `104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa %rsi,56
mov 0(%rsi),%r15
+.cfi_restore %r15
mov 8(%rsi),%r14
+.cfi_restore %r14
mov 16(%rsi),%r13
+.cfi_restore %r13
mov 24(%rsi),%r12
+.cfi_restore %r12
mov 32(%rsi),%rbp
+.cfi_restore %rbp
mov 40(%rsi),%rbx
+.cfi_restore %rbx
lea 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_avx:
ret
+.cfi_endproc
.size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
___
.type aesni256_cbc_sha1_dec_avx,\@function,6
.align 32
aesni256_cbc_sha1_dec_avx:
+.cfi_startproc
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
push %rbx
+.cfi_push %rbx
push %rbp
+.cfi_push %rbp
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
lea `-104-($win64?10*16:0)`(%rsp),%rsp
+.cfi_adjust_cfa_offset `104+($win64?10*16:0)`
___
$code.=<<___ if ($win64);
movaps %xmm6,96+0(%rsp)
___
$code.=<<___;
lea `104+($win64?10*16:0)`(%rsp),%rsi
+.cfi_def_cfa %rsi,56
mov 0(%rsi),%r15
+.cfi_restore %r15
mov 8(%rsi),%r14
+.cfi_restore %r14
mov 16(%rsi),%r13
+.cfi_restore %r13
mov 24(%rsi),%r12
+.cfi_restore %r12
mov 32(%rsi),%rbp
+.cfi_restore %rbp
mov 40(%rsi),%rbx
+.cfi_restore %rbx
lea 48(%rsi),%rsp
+.cfi_def_cfa %rsp,8
.Lepilogue_dec_avx:
ret
+.cfi_endproc
.size aesni256_cbc_sha1_dec_avx,.-aesni256_cbc_sha1_dec_avx
___
}}}
mov 240($key),$rounds
sub $in0,$out
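+	# (AES_KEY->rounds sits at byte offset 240; $out becomes a delta
+	# from $in0 so one index register can step both buffers)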
movups ($key),$rndkey0 # $key[0]
+ movups ($ivp),$iv # load IV
movups 16($key),$rndkey[0] # forward reference
lea 112($key),$key # size optimization
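+	# (the 112-byte bias keeps all round-key offsets within signed
+	# 8-bit displacements, shortening the load encodings)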