#!/usr/bin/env perl
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# in CTR mode AES instruction interleave factor was chosen to be 6x.
######################################################################
-# For reference, AMD Bulldozer spends 5.77 cycles per byte processed
-# with 128-bit key in CBC encrypt and 0.70 cycles in CBC decrypt, 0.70
-# in ECB, 0.71 in CTR, 0.90 in XTS... This means that aes[enc|dec]
-# instruction latency is 9 cycles and that they can be issued every
-# cycle.
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+# CBC en-/decrypt CTR XTS ECB
+# Westmere 3.77/1.25 1.25 1.25 1.26
+# * Bridge 5.07/0.74 0.75 0.90 0.85
+# Haswell 4.44/0.63 0.63 0.73 0.63
+# Atom 5.75/3.54 3.56 4.12 3.87(*)
+# Bulldozer 5.77/0.70 0.72 0.90 0.70
+#
+# (*) Atom ECB result is suboptimal because of penalties incurred
+# by operations on %xmm8-15. As ECB is not considered
+# critical, nothing was done to mitigate the problem.
$PREFIX="aesni"; # if $PREFIX is set to "AES", the script
# generates drop-in replacement for
("%rdi","%rsi","%rdx","%rcx"); # Unix order
$code=".text\n";
+$code.=".extern OPENSSL_ia32cap_P\n";
$rounds="%eax"; # input to and changed by aesni_[en|de]cryptN !!!
# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
# every *2nd* cycle. Thus 3x interleave was the one providing optimal
# utilization, i.e. when subroutine's throughput is virtually same as
# of non-interleaved subroutine [for number of input blocks up to 3].
-# This is why it makes no sense to implement 2x subroutine.
-# aes[enc|dec] latency in next processor generation is 8, but the
-# instructions can be scheduled every cycle. Optimal interleave for
-# new processor is therefore 8x...
+# This is why it originally made no sense to implement 2x subroutine.
+# But times change and it became appropriate to spend extra 192 bytes
+# on 2x subroutine on Atom Silvermont account. For processors that
+# can schedule aes[enc|dec] every cycle optimal interleave factor
+# equals to corresponding instructions latency. 8x is optimal for
+# * Bridge and "super-optimal" for other Intel CPUs...
+
+sub aesni_generate2 {
+my $dir=shift;	# "enc" or "dec": selects aesenc/aesdec flavour of the routine
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-1] is cipher/clear text...
+#
+# Emits _aesni_[en|de]crypt2, the 2x interleaved AES subroutine added
+# on Atom Silvermont account.  The key schedule is addressed relative
+# to its *end*: $key is advanced past the schedule and %rax (negated,
+# pre-scaled $rounds) walks upward toward zero, so one register serves
+# as both loop counter and key-schedule index.
+$code.=<<___;
+.type	_aesni_${dir}rypt2,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt2:
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds		# scale $rounds to a byte offset (16 bytes/key)
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0	# whiten both blocks with round[0] key
+	xorps	$rndkey0,$inout1
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key	# end of key schedule
+	neg	%rax			# $rounds
+	add	\$16,%rax		# negative offset of next round key from end
+
+.L${dir}_loop2:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	$movkey	($key,%rax),$rndkey1	# fetch next round key
+	add	\$32,%rax		# two round keys consumed per iteration
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	$movkey	-16($key,%rax),$rndkey0
+	jnz	.L${dir}_loop2		# loop until %rax wraps up to zero
+
+	aes${dir}	$rndkey1,$inout0	# penultimate round
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}last	$rndkey0,$inout0	# final round
+	aes${dir}last	$rndkey0,$inout1
+	ret
+.size	_aesni_${dir}rypt2,.-_aesni_${dir}rypt2
+___
+}
sub aesni_generate3 {
my $dir=shift;
# As already mentioned it takes in $key and $rounds, which are *not*
.align 16
_aesni_${dir}rypt3:
$movkey ($key),$rndkey0
- shr \$1,$rounds
+ shl \$4,$rounds
$movkey 16($key),$rndkey1
- lea 32($key),$key
xorps $rndkey0,$inout0
xorps $rndkey0,$inout1
xorps $rndkey0,$inout2
- $movkey ($key),$rndkey0
+ $movkey 32($key),$rndkey0
+ lea 32($key,$rounds),$key
+ neg %rax # $rounds
+ add \$16,%rax
.L${dir}_loop3:
aes${dir} $rndkey1,$inout0
aes${dir} $rndkey1,$inout1
- dec $rounds
aes${dir} $rndkey1,$inout2
- $movkey 16($key),$rndkey1
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aes${dir} $rndkey0,$inout0
aes${dir} $rndkey0,$inout1
- lea 32($key),$key
aes${dir} $rndkey0,$inout2
- $movkey ($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .L${dir}_loop3
aes${dir} $rndkey1,$inout0
.align 16
_aesni_${dir}rypt4:
$movkey ($key),$rndkey0
- shr \$1,$rounds
+ shl \$4,$rounds
$movkey 16($key),$rndkey1
- lea 32($key),$key
xorps $rndkey0,$inout0
xorps $rndkey0,$inout1
xorps $rndkey0,$inout2
xorps $rndkey0,$inout3
- $movkey ($key),$rndkey0
+ $movkey 32($key),$rndkey0
+ lea 32($key,$rounds),$key
+ neg %rax # $rounds
+ .byte 0x0f,0x1f,0x00
+ add \$16,%rax
.L${dir}_loop4:
aes${dir} $rndkey1,$inout0
aes${dir} $rndkey1,$inout1
- dec $rounds
aes${dir} $rndkey1,$inout2
aes${dir} $rndkey1,$inout3
- $movkey 16($key),$rndkey1
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aes${dir} $rndkey0,$inout0
aes${dir} $rndkey0,$inout1
- lea 32($key),$key
aes${dir} $rndkey0,$inout2
aes${dir} $rndkey0,$inout3
- $movkey ($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .L${dir}_loop4
aes${dir} $rndkey1,$inout0
.align 16
_aesni_${dir}rypt6:
$movkey ($key),$rndkey0
- shr \$1,$rounds
+ shl \$4,$rounds
$movkey 16($key),$rndkey1
- lea 32($key),$key
xorps $rndkey0,$inout0
pxor $rndkey0,$inout1
- aes${dir} $rndkey1,$inout0
pxor $rndkey0,$inout2
+ aes${dir} $rndkey1,$inout0
+ lea 32($key,$rounds),$key
+ neg %rax # $rounds
aes${dir} $rndkey1,$inout1
pxor $rndkey0,$inout3
- aes${dir} $rndkey1,$inout2
pxor $rndkey0,$inout4
- aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout2
pxor $rndkey0,$inout5
- dec $rounds
+ add \$16,%rax
+ aes${dir} $rndkey1,$inout3
aes${dir} $rndkey1,$inout4
- $movkey ($key),$rndkey0
aes${dir} $rndkey1,$inout5
+ $movkey -16($key,%rax),$rndkey0
jmp .L${dir}_loop6_enter
.align 16
.L${dir}_loop6:
aes${dir} $rndkey1,$inout0
aes${dir} $rndkey1,$inout1
- dec $rounds
aes${dir} $rndkey1,$inout2
aes${dir} $rndkey1,$inout3
aes${dir} $rndkey1,$inout4
aes${dir} $rndkey1,$inout5
-.L${dir}_loop6_enter: # happens to be 16-byte aligned
- $movkey 16($key),$rndkey1
+.L${dir}_loop6_enter:
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aes${dir} $rndkey0,$inout0
aes${dir} $rndkey0,$inout1
- lea 32($key),$key
aes${dir} $rndkey0,$inout2
aes${dir} $rndkey0,$inout3
aes${dir} $rndkey0,$inout4
aes${dir} $rndkey0,$inout5
- $movkey ($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .L${dir}_loop6
aes${dir} $rndkey1,$inout0
.align 16
_aesni_${dir}rypt8:
$movkey ($key),$rndkey0
- shr \$1,$rounds
+ shl \$4,$rounds
$movkey 16($key),$rndkey1
- lea 32($key),$key
xorps $rndkey0,$inout0
xorps $rndkey0,$inout1
- aes${dir} $rndkey1,$inout0
pxor $rndkey0,$inout2
- aes${dir} $rndkey1,$inout1
pxor $rndkey0,$inout3
- aes${dir} $rndkey1,$inout2
pxor $rndkey0,$inout4
- aes${dir} $rndkey1,$inout3
+ lea 32($key,$rounds),$key
+ neg %rax # $rounds
+ aes${dir} $rndkey1,$inout0
+ add \$16,%rax
pxor $rndkey0,$inout5
- dec $rounds
- aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout1
pxor $rndkey0,$inout6
- aes${dir} $rndkey1,$inout5
pxor $rndkey0,$inout7
- $movkey ($key),$rndkey0
+ aes${dir} $rndkey1,$inout2
+ aes${dir} $rndkey1,$inout3
+ aes${dir} $rndkey1,$inout4
+ aes${dir} $rndkey1,$inout5
aes${dir} $rndkey1,$inout6
aes${dir} $rndkey1,$inout7
- $movkey 16($key),$rndkey1
+ $movkey -16($key,%rax),$rndkey0
jmp .L${dir}_loop8_enter
.align 16
.L${dir}_loop8:
aes${dir} $rndkey1,$inout0
aes${dir} $rndkey1,$inout1
- dec $rounds
aes${dir} $rndkey1,$inout2
aes${dir} $rndkey1,$inout3
aes${dir} $rndkey1,$inout4
aes${dir} $rndkey1,$inout5
aes${dir} $rndkey1,$inout6
aes${dir} $rndkey1,$inout7
- $movkey 16($key),$rndkey1
-.L${dir}_loop8_enter: # happens to be 16-byte aligned
+.L${dir}_loop8_enter:
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aes${dir} $rndkey0,$inout0
aes${dir} $rndkey0,$inout1
- lea 32($key),$key
aes${dir} $rndkey0,$inout2
aes${dir} $rndkey0,$inout3
aes${dir} $rndkey0,$inout4
aes${dir} $rndkey0,$inout5
aes${dir} $rndkey0,$inout6
aes${dir} $rndkey0,$inout7
- $movkey ($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .L${dir}_loop8
aes${dir} $rndkey1,$inout0
.size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
___
}
+&aesni_generate2("enc") if ($PREFIX eq "aesni");
+&aesni_generate2("dec");
&aesni_generate3("enc") if ($PREFIX eq "aesni");
&aesni_generate3("dec");
&aesni_generate4("enc") if ($PREFIX eq "aesni");
jmp .Lecb_ret
.align 16
.Lecb_enc_two:
- xorps $inout2,$inout2
- call _aesni_encrypt3
+ call _aesni_encrypt2
movups $inout0,($out)
movups $inout1,0x10($out)
jmp .Lecb_ret
jmp .Lecb_ret
.align 16
.Lecb_dec_two:
- xorps $inout2,$inout2
- call _aesni_decrypt3
+ call _aesni_decrypt2
movups $inout0,($out)
movups $inout1,0x10($out)
jmp .Lecb_ret
{
my $cmac="%r9"; # 6th argument
-my $increment="%xmm6";
+my $increment="%xmm9";
+my $iv="%xmm6";
my $bswap_mask="%xmm7";
$code.=<<___;
movdqa .Lincrement64(%rip),$increment
movdqa .Lbswap_mask(%rip),$bswap_mask
- shr \$1,$rounds
+ shl \$4,$rounds
+ mov \$16,$rnds_
lea 0($key),$key_
movdqu ($cmac),$inout1
movdqa $iv,$inout0
- mov $rounds,$rnds_
+ lea 32($key,$rounds),$key # end of key schedule
pshufb $bswap_mask,$iv
+ sub %rax,%r10 # twisted $rounds
jmp .Lccm64_enc_outer
.align 16
.Lccm64_enc_outer:
$movkey ($key_),$rndkey0
- mov $rnds_,$rounds
+ mov %r10,%rax
movups ($inp),$in0 # load inp
xorps $rndkey0,$inout0 # counter
$movkey 16($key_),$rndkey1
xorps $in0,$rndkey0
- lea 32($key_),$key
xorps $rndkey0,$inout1 # cmac^=inp
- $movkey ($key),$rndkey0
+ $movkey 32($key_),$rndkey0
.Lccm64_enc2_loop:
aesenc $rndkey1,$inout0
- dec $rounds
aesenc $rndkey1,$inout1
- $movkey 16($key),$rndkey1
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aesenc $rndkey0,$inout0
- lea 32($key),$key
aesenc $rndkey0,$inout1
- $movkey 0($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .Lccm64_enc2_loop
aesenc $rndkey1,$inout0
aesenc $rndkey1,$inout1
paddq $increment,$iv
+ dec $len
aesenclast $rndkey0,$inout0
aesenclast $rndkey0,$inout1
- dec $len
lea 16($inp),$inp
xorps $inout0,$in0 # inp ^= E(iv)
movdqa $iv,$inout0
movups $in0,($out) # save output
- lea 16($out),$out
pshufb $bswap_mask,$inout0
+ lea 16($out),$out
jnz .Lccm64_enc_outer
movups $inout1,($cmac)
___
&aesni_generate1("enc",$key,$rounds);
$code.=<<___;
+ shl \$4,$rnds_
+ mov \$16,$rounds
movups ($inp),$in0 # load inp
paddq $increment,$iv
lea 16($inp),$inp
+ sub %r10,%rax # twisted $rounds
+ lea 32($key_,$rnds_),$key # end of key schedule
+ mov %rax,%r10
jmp .Lccm64_dec_outer
.align 16
.Lccm64_dec_outer:
xorps $inout0,$in0 # inp ^= E(iv)
movdqa $iv,$inout0
- mov $rnds_,$rounds
movups $in0,($out) # save output
lea 16($out),$out
pshufb $bswap_mask,$inout0
jz .Lccm64_dec_break
$movkey ($key_),$rndkey0
- shr \$1,$rounds
+ mov %r10,%rax
$movkey 16($key_),$rndkey1
xorps $rndkey0,$in0
- lea 32($key_),$key
xorps $rndkey0,$inout0
xorps $in0,$inout1 # cmac^=out
- $movkey ($key),$rndkey0
-
+ $movkey 32($key_),$rndkey0
+ jmp .Lccm64_dec2_loop
+.align 16
.Lccm64_dec2_loop:
aesenc $rndkey1,$inout0
- dec $rounds
aesenc $rndkey1,$inout1
- $movkey 16($key),$rndkey1
+ $movkey ($key,%rax),$rndkey1
+ add \$32,%rax
aesenc $rndkey0,$inout0
- lea 32($key),$key
aesenc $rndkey0,$inout1
- $movkey 0($key),$rndkey0
+ $movkey -16($key,%rax),$rndkey0
jnz .Lccm64_dec2_loop
movups ($inp),$in0 # load inp
paddq $increment,$iv
aesenc $rndkey1,$inout0
aesenc $rndkey1,$inout1
- lea 16($inp),$inp
aesenclast $rndkey0,$inout0
aesenclast $rndkey0,$inout1
+ lea 16($inp),$inp
jmp .Lccm64_dec_outer
.align 16
.Lccm64_dec_break:
#xorps $in0,$inout1 # cmac^=out
+ mov 240($key_),$rounds
___
&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
$code.=<<___;
movdqa $inout0,0x40(%rsp)
movdqa $inout0,0x50(%rsp)
movdqa $inout0,0x60(%rsp)
+ mov %rdx,%r10 # borrow %rdx
movdqa $inout0,0x70(%rsp)
- mov 240($key),$rounds # key->rounds
-
- lea 1($ctr),%r9
- lea 2($ctr),%r10
- bswap %r9d
- bswap %r10d
- xor $key0,%r9d
- xor $key0,%r10d
- pinsrd \$3,%r9d,$inout1
- lea 3($ctr),%r9
+ lea 1($ctr),%rax
+ lea 2($ctr),%rdx
+ bswap %eax
+ bswap %edx
+ xor $key0,%eax
+ xor $key0,%edx
+ pinsrd \$3,%eax,$inout1
+ lea 3($ctr),%rax
movdqa $inout1,0x10(%rsp)
- pinsrd \$3,%r10d,$inout2
- bswap %r9d
+ pinsrd \$3,%edx,$inout2
+ bswap %eax
+ mov %r10,%rdx # restore %rdx
lea 4($ctr),%r10
movdqa $inout2,0x20(%rsp)
- xor $key0,%r9d
+ xor $key0,%eax
bswap %r10d
- pinsrd \$3,%r9d,$inout3
+ pinsrd \$3,%eax,$inout3
xor $key0,%r10d
movdqa $inout3,0x30(%rsp)
lea 5($ctr),%r9
mov %r10d,0x40+12(%rsp)
bswap %r9d
lea 6($ctr),%r10
+ mov 240($key),$rounds # key->rounds
xor $key0,%r9d
bswap %r10d
mov %r9d,0x50+12(%rsp)
lea 7($ctr),%r9
mov %r10d,0x60+12(%rsp)
bswap %r9d
+ mov OPENSSL_ia32cap_P+4(%rip),%r10d
xor $key0,%r9d
+ and \$`1<<26|1<<22`,%r10d # isolate XSAVE+MOVBE
mov %r9d,0x70+12(%rsp)
$movkey 0x10($key),$rndkey1
cmp \$8,$len
jb .Lctr32_tail
+ sub \$6,$len
+ cmp \$`1<<22`,%r10d # check for MOVBE without XSAVE
+ je .Lctr32_6x
+
lea 0x80($key),$key # size optimization
- sub \$8,$len
+ sub \$2,$len
jmp .Lctr32_loop8
+.align 16
+.Lctr32_6x:
+ shl \$4,$rounds
+ mov \$48,$rnds_
+ bswap $key0
+ lea 32($key,$rounds),$key # end of key schedule
+ sub %rax,%r10 # twisted $rounds
+ jmp .Lctr32_loop6
+
+.align 16
+.Lctr32_loop6:
+ add \$6,$ctr
+ $movkey -48($key,$rnds_),$rndkey0
+ aesenc $rndkey1,$inout0
+ mov $ctr,%eax
+ xor $key0,%eax
+ aesenc $rndkey1,$inout1
+ movbe %eax,`0x00+12`(%rsp)
+ lea 1($ctr),%eax
+ aesenc $rndkey1,$inout2
+ xor $key0,%eax
+ movbe %eax,`0x10+12`(%rsp)
+ aesenc $rndkey1,$inout3
+ lea 2($ctr),%eax
+ xor $key0,%eax
+ aesenc $rndkey1,$inout4
+ movbe %eax,`0x20+12`(%rsp)
+ lea 3($ctr),%eax
+ aesenc $rndkey1,$inout5
+ $movkey -32($key,$rnds_),$rndkey1
+ xor $key0,%eax
+
+ aesenc $rndkey0,$inout0
+ movbe %eax,`0x30+12`(%rsp)
+ lea 4($ctr),%eax
+ aesenc $rndkey0,$inout1
+ xor $key0,%eax
+ movbe %eax,`0x40+12`(%rsp)
+ aesenc $rndkey0,$inout2
+ lea 5($ctr),%eax
+ xor $key0,%eax
+ aesenc $rndkey0,$inout3
+ movbe %eax,`0x50+12`(%rsp)
+ mov %r10,%rax # mov $rnds_,$rounds
+ aesenc $rndkey0,$inout4
+ aesenc $rndkey0,$inout5
+ $movkey -16($key,$rnds_),$rndkey0
+
+ call .Lenc_loop6
+
+ movdqu ($inp),$inout6
+ movdqu 0x10($inp),$inout7
+ movdqu 0x20($inp),$in0
+ movdqu 0x30($inp),$in1
+ movdqu 0x40($inp),$in2
+ movdqu 0x50($inp),$in3
+ lea 0x60($inp),$inp
+ $movkey -64($key,$rnds_),$rndkey1
+ pxor $inout0,$inout6
+ movaps 0x00(%rsp),$inout0
+ pxor $inout1,$inout7
+ movaps 0x10(%rsp),$inout1
+ pxor $inout2,$in0
+ movaps 0x20(%rsp),$inout2
+ pxor $inout3,$in1
+ movaps 0x30(%rsp),$inout3
+ pxor $inout4,$in2
+ movaps 0x40(%rsp),$inout4
+ pxor $inout5,$in3
+ movaps 0x50(%rsp),$inout5
+ movdqu $inout6,($out)
+ movdqu $inout7,0x10($out)
+ movdqu $in0,0x20($out)
+ movdqu $in1,0x30($out)
+ movdqu $in2,0x40($out)
+ movdqu $in3,0x50($out)
+ lea 0x60($out),$out
+
+ sub \$6,$len
+ jnc .Lctr32_loop6
+
+ add \$6,$len
+ jz .Lctr32_done
+
+ lea -48($rnds_),$rounds
+ lea -80($key,$rnds_),$key # restore $key
+ neg $rounds
+ shr \$4,$rounds # restore $rounds
+ jmp .Lctr32_tail
+
.align 32
.Lctr32_loop8:
add \$8,$ctr
$movkey 0x20-0x80($key),$rndkey0
aesenc $rndkey1,$inout2
xor $key0,%r9d
+ nop
aesenc $rndkey1,$inout3
mov %r9d,0x00+12(%rsp)
lea 1($ctr),%r9
for($i=2;$i<8;$i++) {
my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
$code.=<<___;
+ bswap %r9d
aesenc $rndkeyx,$inout0
aesenc $rndkeyx,$inout1
- bswap %r9d
- aesenc $rndkeyx,$inout2
xor $key0,%r9d
+ .byte 0x66,0x90
+ aesenc $rndkeyx,$inout2
aesenc $rndkeyx,$inout3
mov %r9d,`0x10*($i-1)`+12(%rsp)
lea $i($ctr),%r9
___
}
$code.=<<___;
+ bswap %r9d
aesenc $rndkey0,$inout0
aesenc $rndkey0,$inout1
- bswap %r9d
aesenc $rndkey0,$inout2
xor $key0,%r9d
+ movdqu 0x00($inp),$in0
aesenc $rndkey0,$inout3
mov %r9d,0x70+12(%rsp)
+ cmp \$11,$rounds
aesenc $rndkey0,$inout4
aesenc $rndkey0,$inout5
aesenc $rndkey0,$inout6
- movdqu 0x00($inp),$in0
aesenc $rndkey0,$inout7
$movkey 0xa0-0x80($key),$rndkey0
- cmp \$11,$rounds
jb .Lctr32_enc_done
aesenc $rndkey1,$inout0
aesenc $rndkey0,$inout6
aesenc $rndkey0,$inout7
$movkey 0xe0-0x80($key),$rndkey0
+ jmp .Lctr32_enc_done
+.align 16
.Lctr32_enc_done:
movdqu 0x10($inp),$in1
pxor $rndkey0,$in0
pxor $rndkey0,$in3
movdqu 0x50($inp),$in5
pxor $rndkey0,$in4
- aesenc $rndkey1,$inout0
pxor $rndkey0,$in5
+ aesenc $rndkey1,$inout0
aesenc $rndkey1,$inout1
aesenc $rndkey1,$inout2
aesenc $rndkey1,$inout3
aesenc $rndkey1,$inout6
aesenc $rndkey1,$inout7
movdqu 0x60($inp),$rndkey1
+ lea 0x80($inp),$inp
aesenclast $in0,$inout0
pxor $rndkey0,$rndkey1
- movdqu 0x70($inp),$in0
- lea 0x80($inp),$inp
+ movdqu 0x70-0x80($inp),$in0
aesenclast $in1,$inout1
pxor $rndkey0,$in0
movdqa 0x00(%rsp),$in1 # load next counter block
aesenclast $in2,$inout2
- movdqa 0x10(%rsp),$in2
aesenclast $in3,$inout3
+ movdqa 0x10(%rsp),$in2
movdqa 0x20(%rsp),$in3
aesenclast $in4,$inout4
- movdqa 0x30(%rsp),$in4
aesenclast $in5,$inout5
+ movdqa 0x30(%rsp),$in4
movdqa 0x40(%rsp),$in5
aesenclast $rndkey1,$inout6
movdqa 0x50(%rsp),$rndkey0
- aesenclast $in0,$inout7
$movkey 0x10-0x80($key),$rndkey1
+ aesenclast $in0,$inout7
movups $inout0,($out) # store output
movdqa $in1,$inout0
jb .Lctr32_loop3
je .Lctr32_loop4
+ shl \$4,$rounds
movdqa 0x60(%rsp),$inout6
pxor $inout7,$inout7
$movkey 16($key),$rndkey0
aesenc $rndkey1,$inout0
- lea 16($key),$key
aesenc $rndkey1,$inout1
- shr \$1,$rounds
+ lea 32-16($key,$rounds),$key
+ neg %rax
aesenc $rndkey1,$inout2
- dec $rounds
- aesenc $rndkey1,$inout3
+ add \$16,%rax
movups ($inp),$in0
+ aesenc $rndkey1,$inout3
aesenc $rndkey1,$inout4
movups 0x10($inp),$in1
- aesenc $rndkey1,$inout5
movups 0x20($inp),$in2
+ aesenc $rndkey1,$inout5
aesenc $rndkey1,$inout6
- $movkey 16($key),$rndkey1
call .Lenc_loop8_enter
.Lctr32_loop4:
aesenc $rndkey1,$inout0
lea 16($key),$key
+ dec $rounds
aesenc $rndkey1,$inout1
aesenc $rndkey1,$inout2
aesenc $rndkey1,$inout3
$movkey ($key),$rndkey1
- dec $rounds
jnz .Lctr32_loop4
aesenclast $rndkey1,$inout0
- movups ($inp),$in0
aesenclast $rndkey1,$inout1
+ movups ($inp),$in0
movups 0x10($inp),$in1
aesenclast $rndkey1,$inout2
- movups 0x20($inp),$in2
aesenclast $rndkey1,$inout3
+ movups 0x20($inp),$in2
movups 0x30($inp),$in3
xorps $in0,$inout0
.Lctr32_loop3:
aesenc $rndkey1,$inout0
lea 16($key),$key
+ dec $rounds
aesenc $rndkey1,$inout1
aesenc $rndkey1,$inout2
$movkey ($key),$rndkey1
- dec $rounds
jnz .Lctr32_loop3
aesenclast $rndkey1,$inout0
aesenclast $rndkey1,$inout1
___
$code.=<<___;
lea -8(%rax),%rbp
- movups ($ivp),@tweak[5] # load clear-text tweak
+ movups ($ivp),$inout0 # load clear-text tweak
mov 240(%r8),$rounds # key2->rounds
mov 240($key),$rnds_ # key1->rounds
___
# generate the tweak
- &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+ &aesni_generate1("enc",$key2,$rounds,$inout0);
$code.=<<___;
$movkey ($key),$rndkey0 # zero round key
mov $key,$key_ # backup $key
and \$-16,$len
$movkey 16($key,$rnds_),$rndkey1 # last round key
- mov $rounds,$rnds_
movdqa .Lxts_magic(%rip),$twmask
- pshufd \$0x5f,@tweak[5],$twres
+ movdqa $inout0,@tweak[5]
+ pshufd \$0x5f,$inout0,$twres
pxor $rndkey0,$rndkey1
___
# alternative tweak calculation algorithm is based on suggestions
sub \$16*6,$len
jc .Lxts_enc_short
- shr \$1,$rounds
- sub \$3,$rounds
+ mov \$16+96,$rounds
+ lea 32($key_,$rnds_),$key # end of key schedule
+ sub %r10,%rax # twisted $rounds
$movkey 16($key_),$rndkey1
- mov $rounds,$rnds_
+ mov %rax,%r10 # backup twisted $rounds
lea .Lxts_magic(%rip),%r8
jmp .Lxts_enc_grandloop
movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
aesenc $rndkey1,$inout5
$movkey 48($key_),$rndkey1
+ pxor $twres,@tweak[2]
aesenc $rndkey0,$inout0
- pxor $twres,@tweak[2]
+ pxor $twres,@tweak[3]
movdqa @tweak[1],`16*1`(%rsp)
aesenc $rndkey0,$inout1
- pxor $twres,@tweak[3]
+ pxor $twres,@tweak[4]
movdqa @tweak[2],`16*2`(%rsp)
aesenc $rndkey0,$inout2
- pxor $twres,@tweak[4]
aesenc $rndkey0,$inout3
pxor $twres,$twmask
movdqa @tweak[4],`16*4`(%rsp)
aesenc $rndkey0,$inout4
- movdqa $twmask,`16*5`(%rsp)
aesenc $rndkey0,$inout5
$movkey 64($key_),$rndkey0
- lea 64($key_),$key
+ movdqa $twmask,`16*5`(%rsp)
pshufd \$0x5f,@tweak[5],$twres
jmp .Lxts_enc_loop6
.align 32
aesenc $rndkey1,$inout3
aesenc $rndkey1,$inout4
aesenc $rndkey1,$inout5
- $movkey 16($key),$rndkey1
- lea 32($key),$key
+ $movkey -64($key,%rax),$rndkey1
+ add \$32,%rax
aesenc $rndkey0,$inout0
aesenc $rndkey0,$inout1
aesenc $rndkey0,$inout3
aesenc $rndkey0,$inout4
aesenc $rndkey0,$inout5
- $movkey ($key),$rndkey0
- dec $rounds
+ $movkey -80($key,%rax),$rndkey0
jnz .Lxts_enc_loop6
movdqa (%r8),$twmask
$movkey ($key_),@tweak[0] # load round[0]
aesenc $rndkey1,$inout2
aesenc $rndkey1,$inout3
- pxor $twtmp,@tweak[5]
aesenc $rndkey1,$inout4
+ pxor $twtmp,@tweak[5]
movaps @tweak[0],@tweak[1] # copy round[0]
aesenc $rndkey1,$inout5
- $movkey 16($key),$rndkey1
+ $movkey -64($key),$rndkey1
movdqa $twres,$twtmp
- paddd $twres,$twres
aesenc $rndkey0,$inout0
+ paddd $twres,$twres
pxor @tweak[5],@tweak[0]
- psrad \$31,$twtmp
aesenc $rndkey0,$inout1
+ psrad \$31,$twtmp
paddq @tweak[5],@tweak[5]
- pand $twmask,$twtmp
aesenc $rndkey0,$inout2
aesenc $rndkey0,$inout3
- pxor $twtmp,@tweak[5]
- aesenc $rndkey0,$inout4
+ pand $twmask,$twtmp
movaps @tweak[1],@tweak[2]
+ aesenc $rndkey0,$inout4
+ pxor $twtmp,@tweak[5]
+ movdqa $twres,$twtmp
aesenc $rndkey0,$inout5
- $movkey 32($key),$rndkey0
+ $movkey -48($key),$rndkey0
- movdqa $twres,$twtmp
paddd $twres,$twres
aesenc $rndkey1,$inout0
pxor @tweak[5],@tweak[1]
paddq @tweak[5],@tweak[5]
pand $twmask,$twtmp
aesenc $rndkey1,$inout2
- movdqa @tweak[3],`16*3`(%rsp)
aesenc $rndkey1,$inout3
+ movdqa @tweak[3],`16*3`(%rsp)
pxor $twtmp,@tweak[5]
aesenc $rndkey1,$inout4
movaps @tweak[2],@tweak[3]
+ movdqa $twres,$twtmp
aesenc $rndkey1,$inout5
- $movkey 48($key),$rndkey1
+ $movkey -32($key),$rndkey1
- movdqa $twres,$twtmp
paddd $twres,$twres
aesenc $rndkey0,$inout0
pxor @tweak[5],@tweak[2]
pand $twmask,$twtmp
aesenc $rndkey0,$inout2
aesenc $rndkey0,$inout3
- pxor $twtmp,@tweak[5]
aesenc $rndkey0,$inout4
+ pxor $twtmp,@tweak[5]
movaps @tweak[3],@tweak[4]
aesenc $rndkey0,$inout5
$movkey 16($key_),$rndkey1
pxor @tweak[5],@tweak[4]
- psrad \$31,$twres
aesenclast `16*0`(%rsp),$inout0
+ psrad \$31,$twres
paddq @tweak[5],@tweak[5]
- pand $twmask,$twres
aesenclast `16*1`(%rsp),$inout1
aesenclast `16*2`(%rsp),$inout2
- pxor $twres,@tweak[5]
+ pand $twmask,$twres
+ mov %r10,%rax # restore $rounds
aesenclast `16*3`(%rsp),$inout3
aesenclast `16*4`(%rsp),$inout4
aesenclast `16*5`(%rsp),$inout5
- mov $rnds_,$rounds # restore $rounds
+ pxor $twres,@tweak[5]
lea `16*6`($out),$out
movups $inout0,`-16*6`($out) # write output
sub \$16*6,$len
jnc .Lxts_enc_grandloop
- lea 7($rounds,$rounds),$rounds # restore original value
+ mov \$16+96,$rounds
+ sub $rnds_,$rounds
mov $key_,$key # restore $key
- mov $rounds,$rnds_ # backup $rounds
+ shr \$4,$rounds # restore original value
.Lxts_enc_short:
+ mov $rounds,$rnds_ # backup $rounds
pxor $rndkey0,@tweak[0]
add \$16*6,$len
jz .Lxts_enc_done
xorps @tweak[0],$inout0
xorps @tweak[1],$inout1
- call _aesni_encrypt3
+ call _aesni_encrypt2
xorps @tweak[0],$inout0
movdqa @tweak[2],@tweak[0]
___
$code.=<<___;
lea -8(%rax),%rbp
- movups ($ivp),@tweak[5] # load clear-text tweak
+ movups ($ivp),$inout0 # load clear-text tweak
mov 240($key2),$rounds # key2->rounds
mov 240($key),$rnds_ # key1->rounds
___
# generate the tweak
- &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+ &aesni_generate1("enc",$key2,$rounds,$inout0);
$code.=<<___;
xor %eax,%eax # if ($len%16) len-=16;
test \$15,$len
and \$-16,$len
$movkey 16($key,$rnds_),$rndkey1 # last round key
- mov $rounds,$rnds_
movdqa .Lxts_magic(%rip),$twmask
- pshufd \$0x5f,@tweak[5],$twres
+ movdqa $inout0,@tweak[5]
+ pshufd \$0x5f,$inout0,$twres
pxor $rndkey0,$rndkey1
___
for ($i=0;$i<4;$i++) {
sub \$16*6,$len
jc .Lxts_dec_short
- shr \$1,$rounds
- sub \$3,$rounds
+ mov \$16+96,$rounds
+ lea 32($key_,$rnds_),$key # end of key schedule
+ sub %r10,%rax # twisted $rounds
$movkey 16($key_),$rndkey1
- mov $rounds,$rnds_
+ mov %rax,%r10 # backup twisted $rounds
lea .Lxts_magic(%rip),%r8
jmp .Lxts_dec_grandloop
movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
aesdec $rndkey1,$inout5
$movkey 48($key_),$rndkey1
+ pxor $twres,@tweak[2]
aesdec $rndkey0,$inout0
- pxor $twres,@tweak[2]
+ pxor $twres,@tweak[3]
movdqa @tweak[1],`16*1`(%rsp)
aesdec $rndkey0,$inout1
- pxor $twres,@tweak[3]
+ pxor $twres,@tweak[4]
movdqa @tweak[2],`16*2`(%rsp)
aesdec $rndkey0,$inout2
- pxor $twres,@tweak[4]
aesdec $rndkey0,$inout3
pxor $twres,$twmask
movdqa @tweak[4],`16*4`(%rsp)
aesdec $rndkey0,$inout4
- movdqa $twmask,`16*5`(%rsp)
aesdec $rndkey0,$inout5
$movkey 64($key_),$rndkey0
- lea 64($key_),$key
+ movdqa $twmask,`16*5`(%rsp)
pshufd \$0x5f,@tweak[5],$twres
jmp .Lxts_dec_loop6
.align 32
aesdec $rndkey1,$inout3
aesdec $rndkey1,$inout4
aesdec $rndkey1,$inout5
- $movkey 16($key),$rndkey1
- lea 32($key),$key
+ $movkey -64($key,%rax),$rndkey1
+ add \$32,%rax
aesdec $rndkey0,$inout0
aesdec $rndkey0,$inout1
aesdec $rndkey0,$inout3
aesdec $rndkey0,$inout4
aesdec $rndkey0,$inout5
- $movkey ($key),$rndkey0
- dec $rounds
+ $movkey -80($key,%rax),$rndkey0
jnz .Lxts_dec_loop6
movdqa (%r8),$twmask
$movkey ($key_),@tweak[0] # load round[0]
aesdec $rndkey1,$inout2
aesdec $rndkey1,$inout3
- pxor $twtmp,@tweak[5]
aesdec $rndkey1,$inout4
+ pxor $twtmp,@tweak[5]
movaps @tweak[0],@tweak[1] # copy round[0]
aesdec $rndkey1,$inout5
- $movkey 16($key),$rndkey1
+ $movkey -64($key),$rndkey1
movdqa $twres,$twtmp
- paddd $twres,$twres
aesdec $rndkey0,$inout0
+ paddd $twres,$twres
pxor @tweak[5],@tweak[0]
- psrad \$31,$twtmp
aesdec $rndkey0,$inout1
+ psrad \$31,$twtmp
paddq @tweak[5],@tweak[5]
- pand $twmask,$twtmp
aesdec $rndkey0,$inout2
aesdec $rndkey0,$inout3
- pxor $twtmp,@tweak[5]
- aesdec $rndkey0,$inout4
+ pand $twmask,$twtmp
movaps @tweak[1],@tweak[2]
+ aesdec $rndkey0,$inout4
+ pxor $twtmp,@tweak[5]
+ movdqa $twres,$twtmp
aesdec $rndkey0,$inout5
- $movkey 32($key),$rndkey0
+ $movkey -48($key),$rndkey0
- movdqa $twres,$twtmp
paddd $twres,$twres
aesdec $rndkey1,$inout0
pxor @tweak[5],@tweak[1]
paddq @tweak[5],@tweak[5]
pand $twmask,$twtmp
aesdec $rndkey1,$inout2
- movdqa @tweak[3],`16*3`(%rsp)
aesdec $rndkey1,$inout3
+ movdqa @tweak[3],`16*3`(%rsp)
pxor $twtmp,@tweak[5]
aesdec $rndkey1,$inout4
movaps @tweak[2],@tweak[3]
+ movdqa $twres,$twtmp
aesdec $rndkey1,$inout5
- $movkey 48($key),$rndkey1
+ $movkey -32($key),$rndkey1
- movdqa $twres,$twtmp
paddd $twres,$twres
aesdec $rndkey0,$inout0
pxor @tweak[5],@tweak[2]
pand $twmask,$twtmp
aesdec $rndkey0,$inout2
aesdec $rndkey0,$inout3
- pxor $twtmp,@tweak[5]
aesdec $rndkey0,$inout4
+ pxor $twtmp,@tweak[5]
movaps @tweak[3],@tweak[4]
aesdec $rndkey0,$inout5
$movkey 16($key_),$rndkey1
pxor @tweak[5],@tweak[4]
- psrad \$31,$twres
aesdeclast `16*0`(%rsp),$inout0
+ psrad \$31,$twres
paddq @tweak[5],@tweak[5]
- pand $twmask,$twres
aesdeclast `16*1`(%rsp),$inout1
aesdeclast `16*2`(%rsp),$inout2
- pxor $twres,@tweak[5]
+ pand $twmask,$twres
+ mov %r10,%rax # restore $rounds
aesdeclast `16*3`(%rsp),$inout3
aesdeclast `16*4`(%rsp),$inout4
aesdeclast `16*5`(%rsp),$inout5
- mov $rnds_,$rounds # restore $rounds
+ pxor $twres,@tweak[5]
lea `16*6`($out),$out
movups $inout0,`-16*6`($out) # write output
sub \$16*6,$len
jnc .Lxts_dec_grandloop
- lea 7($rounds,$rounds),$rounds # restore original value
+ mov \$16+96,$rounds
+ sub $rnds_,$rounds
mov $key_,$key # restore $key
- mov $rounds,$rnds_ # backup $rounds
+ shr \$4,$rounds # restore original value
.Lxts_dec_short:
+ mov $rounds,$rnds_ # backup $rounds
pxor $rndkey0,@tweak[0]
pxor $rndkey0,@tweak[1]
add \$16*6,$len
xorps @tweak[0],$inout0
xorps @tweak[1],$inout1
- call _aesni_decrypt3
+ call _aesni_decrypt2
xorps @tweak[0],$inout0
movdqa @tweak[2],@tweak[0]
movdqa $inout3,$in3
movdqu 0x50($inp),$inout5
movdqa $inout4,$in4
+ mov OPENSSL_ia32cap_P+4(%rip),%r9d
cmp \$0x70,$len
jbe .Lcbc_dec_six_or_seven
- sub \$0x70,$len
+ and \$`1<<26|1<<22`,%r9d # isolate XSAVE+MOVBE
+ sub \$0x50,$len
+ cmp \$`1<<22`,%r9d # check for MOVBE without XSAVE
+ je .Lcbc_dec_loop6_enter
+ sub \$0x20,$len
lea 0x70($key),$key # size optimization
jmp .Lcbc_dec_loop8_enter
.align 16
aesdec $rndkey1,$inout3
aesdec $rndkey1,$inout4
aesdec $rndkey1,$inout5
- setnc ${inp_}b
aesdec $rndkey1,$inout6
+ setnc ${inp_}b
shl \$7,$inp_
aesdec $rndkey1,$inout7
add $inp,$inp_
___
for($i=1;$i<12;$i++) {
my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
+$code.=<<___ if ($i==7);
+ cmp \$11,$rounds
+___
$code.=<<___;
aesdec $rndkeyx,$inout0
aesdec $rndkeyx,$inout1
aesdec $rndkeyx,$inout7
$movkey `0x30+0x10*$i`-0x70($key),$rndkeyx
___
+$code.=<<___ if ($i<6 || (!($i&1) && $i>7));
+ nop
+___
$code.=<<___ if ($i==7);
- cmp \$11,$rounds
jb .Lcbc_dec_done
___
$code.=<<___ if ($i==9);
je .Lcbc_dec_done
___
+$code.=<<___ if ($i==11);
+ jmp .Lcbc_dec_done
+___
}
$code.=<<___;
+.align 16
.Lcbc_dec_done:
aesdec $rndkey1,$inout0
- pxor $rndkey0,$iv
aesdec $rndkey1,$inout1
+ pxor $rndkey0,$iv
pxor $rndkey0,$in0
aesdec $rndkey1,$inout2
- pxor $rndkey0,$in1
aesdec $rndkey1,$inout3
+ pxor $rndkey0,$in1
pxor $rndkey0,$in2
aesdec $rndkey1,$inout4
- pxor $rndkey0,$in3
aesdec $rndkey1,$inout5
+ pxor $rndkey0,$in3
pxor $rndkey0,$in4
aesdec $rndkey1,$inout6
aesdec $rndkey1,$inout7
aesdeclast $in0,$inout1
pxor $rndkey0,$iv
movdqu 0x70($inp),$rndkey0 # next IV
- lea 0x80($inp),$inp
aesdeclast $in1,$inout2
+ lea 0x80($inp),$inp
movdqu 0x00($inp_),$in0
aesdeclast $in2,$inout3
- movdqu 0x10($inp_),$in1
aesdeclast $in3,$inout4
+ movdqu 0x10($inp_),$in1
movdqu 0x20($inp_),$in2
aesdeclast $in4,$inout5
- movdqu 0x30($inp_),$in3
aesdeclast $rndkey1,$inout6
+ movdqu 0x30($inp_),$in3
movdqu 0x40($inp_),$in4
aesdeclast $iv,$inout7
movdqa $rndkey0,$iv # return $iv
movdqa $inout6,$inout0
jmp .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_loop6:
+ movups $inout5,($out)
+ lea 0x10($out),$out
+ movdqu 0x00($inp),$inout0 # load input
+ movdqu 0x10($inp),$inout1
+ movdqa $inout0,$in0
+ movdqu 0x20($inp),$inout2
+ movdqa $inout1,$in1
+ movdqu 0x30($inp),$inout3
+ movdqa $inout2,$in2
+ movdqu 0x40($inp),$inout4
+ movdqa $inout3,$in3
+ movdqu 0x50($inp),$inout5
+ movdqa $inout4,$in4
+.Lcbc_dec_loop6_enter:
+ lea 0x60($inp),$inp
+ movdqa $inout5,$inout6
+
+ call _aesni_decrypt6
+
+ pxor $iv,$inout0 # ^= IV
+ movdqa $inout6,$iv
+ pxor $in0,$inout1
+ movdqu $inout0,($out)
+ pxor $in1,$inout2
+ movdqu $inout1,0x10($out)
+ pxor $in2,$inout3
+ movdqu $inout2,0x20($out)
+ pxor $in3,$inout4
+ mov $key_,$key
+ movdqu $inout3,0x30($out)
+ pxor $in4,$inout5
+ mov $rnds_,$rounds
+ movdqu $inout4,0x40($out)
+ lea 0x50($out),$out
+ sub \$0x60,$len
+ ja .Lcbc_dec_loop6
+
+ movdqa $inout5,$inout0
+ add \$0x50,$len
+ jle .Lcbc_dec_tail_collected
+ movups $inout5,($out)
+ lea 0x10($out),$out
+
.Lcbc_dec_tail:
movups ($inp),$inout0
sub \$0x10,$len
.align 16
.Lcbc_dec_two:
movaps $inout1,$in1
- xorps $inout2,$inout2
- call _aesni_decrypt3
+ call _aesni_decrypt2
pxor $iv,$inout0
movaps $in1,$iv
pxor $in0,$inout1
return $line;
}
+sub movbe {
+# Hand-encode "movbe %eax,disp8(%rsp)" (opcode 0F 38 F1 /r) so the
+# script does not rely on assembler support for the MOVBE mnemonic
+# (used by the Silvermont code paths, see substitution below).
+# 0x44,0x24 is the ModRM/SIB pair for eax -> disp8(%rsp); the sub's
+# argument (the captured decimal %rsp offset) supplies the trailing
+# displacement byte.
+	".byte	0x0f,0x38,0xf1,0x44,0x24,".shift;
+}
+
$code =~ s/\`([^\`]*)\`/eval($1)/gem;
$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+#$code =~ s/\bmovbe\s+%eax/bswap %eax; mov %eax/gm; # debugging artefact
+$code =~ s/\bmovbe\s+%eax,\s*([0-9]+)\(%rsp\)/movbe($1)/gem;
print $code;