AES-NI backport from HEAD. Note that e_aes.c doesn't implement all modes
from HEAD yet, more will be back-ported later.

author    Andy Polyakov <appro@openssl.org>  Tue, 28 Jun 2011 14:49:35 +0000 (14:49 +0000)
committer Andy Polyakov <appro@openssl.org>  Tue, 28 Jun 2011 14:49:35 +0000 (14:49 +0000)

Configure
TABLE
crypto/aes/Makefile
crypto/aes/asm/aesni-x86.pl [new file with mode: 0644]
crypto/aes/asm/aesni-x86_64.pl [new file with mode: 0644]
crypto/evp/e_aes.c
crypto/modes/ctr128.c
crypto/modes/modes.h
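
None of the new assembler is wired in unconditionally: the aesni-* objects are
always built on x86/x86_64 targets, but the fast paths are only taken when the
CPU actually advertises AES-NI. The x86cpuid/x86_64cpuid code caches
CPUID.1:ECX in the second word of OPENSSL_ia32cap_P, so the AES-NI flag (ECX
bit 25, i.e. "bit 57" of the combined capability vector) is what the e_aes.c
changes gate on before routing EVP AES through the aesni_* routines; the
portable aes-586/aes-x86_64 code remains the fallback.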

diff --git a/Configure b/Configure
index 962a7480330bff21d48874551ca8cea7a0a7db56..bd9a7ba789f45a63b84835dd261633aeb9b1b762 100755 (executable)
--- a/Configure
+++ b/Configure
@@ -123,11 +123,11 @@ my $tlib="-lnsl -lsocket";
 my $bits1="THIRTY_TWO_BIT ";
 my $bits2="SIXTY_FOUR_BIT ";
 
-my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o:des-586.o crypt586.o:aes-586.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o:cmll-x86.o";
+my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o:des-586.o crypt586.o:aes-586.o aesni-x86.o:bf-586.o:md5-586.o:sha1-586.o sha256-586.o sha512-586.o:cast-586.o:rc4-586.o:rmd-586.o:rc5-586.o:wp_block.o wp-mmx.o:cmll-x86.o";
 
 my $x86_elf_asm="$x86_asm:elf";
 
-my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o::aes-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o";
+my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o::aes-x86_64.o aesni-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o";
 my $ia64_asm="ia64cpuid.o:bn-ia64.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::void";
 my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o:des_enc-sparc.o fcrypt_b.o:aes_core.o aes_cbc.o aes-sparcv9.o:::sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o:::::::void";
 my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::void";
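
For reference, the colon-separated fields of $x86_asm and $x86_64_asm map
positionally onto the per-target variables that Configure expands into TABLE:
cpuid_obj : bn_obj : des_obj : aes_obj : bf_obj : md5_obj : sha1_obj :
cast_obj : rc4_obj : rmd160_obj : rc5_obj : wp_obj : cmll_obj. A sketch of the
split (illustrative, not Configure's exact code; assumes $x86_asm as above):

    my ($cpuid_obj, $bn_obj, $des_obj, $aes_obj, $bf_obj, $md5_obj,
        $sha1_obj, $cast_obj, $rc4_obj, $rmd160_obj, $rc5_obj,
        $wp_obj, $cmll_obj) = split(/:/, $x86_asm);
    # $aes_obj now reads "aes-586.o aesni-x86.o", which is exactly what
    # the TABLE entries below publish.
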
diff --git a/TABLE b/TABLE
index c6a5ef75db671ddc0822f99498ee095fa27ec46f..22bb1d602e0628b72e4f9bad9cddc6c40549b02b 100644 (file)
--- a/TABLE
+++ b/TABLE
@@ -228,7 +228,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -259,7 +259,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -290,7 +290,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -321,7 +321,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -383,7 +383,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -538,7 +538,7 @@ $bn_ops       = DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -724,7 +724,7 @@ $bn_ops       = BN_LLONG RC4_INDEX EXPORT_VAR_AS_FN RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -961,6 +961,68 @@ $ranlib       =
 $arflags      = -X64
 $multilib     = 
 
+*** android
+$cc           = gcc
+$cflags       = -mandroid -I$(ANDROID_DEV)/include -B$(ANDROID_DEV)/lib -O3 -fomit-frame-pointer -Wall
+$unistd       = 
+$thread_cflag = -D_REENTRANT
+$sys_id       = 
+$lflags       = -ldl
+$bn_ops       = BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR
+$cpuid_obj    = 
+$bn_obj       = 
+$des_obj      = 
+$aes_obj      = 
+$bf_obj       = 
+$md5_obj      = 
+$sha1_obj     = 
+$cast_obj     = 
+$rc4_obj      = 
+$rmd160_obj   = 
+$rc5_obj      = 
+$wp_obj       = 
+$cmll_obj     = 
+$perlasm_scheme = void
+$dso_scheme   = dlfcn
+$shared_target= linux-shared
+$shared_cflag = -fPIC
+$shared_ldflag = 
+$shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR)
+$ranlib       = 
+$arflags      = 
+$multilib     = 
+
+*** android-armv7
+$cc           = gcc
+$cflags       = -march=armv7-a -mandroid -I$(ANDROID_DEV)/include -B$(ANDROID_DEV)/lib -O3 -fomit-frame-pointer -Wall
+$unistd       = 
+$thread_cflag = -D_REENTRANT
+$sys_id       = 
+$lflags       = -ldl
+$bn_ops       = BN_LLONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR
+$cpuid_obj    = 
+$bn_obj       = bn_asm.o armv4-mont.o
+$des_obj      = 
+$aes_obj      = aes_cbc.o aes-armv4.o
+$bf_obj       = 
+$md5_obj      = 
+$sha1_obj     = sha1-armv4-large.o sha256-armv4.o sha512-armv4.o
+$cast_obj     = 
+$rc4_obj      = 
+$rmd160_obj   = 
+$rc5_obj      = 
+$wp_obj       = 
+$cmll_obj     = 
+$perlasm_scheme = void
+$dso_scheme   = dlfcn
+$shared_target= linux-shared
+$shared_cflag = -fPIC
+$shared_ldflag = 
+$shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR)
+$ranlib       = 
+$arflags      = 
+$multilib     = 
+
 *** aux3-gcc
 $cc           = gcc
 $cflags       = -O2 -DTERMIO
@@ -1003,7 +1065,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1034,7 +1096,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1065,7 +1127,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1189,7 +1251,7 @@ $bn_ops       = BN_LLONG RC4_INT RC4_CHUNK DES_UNROLL BF_PTR
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1282,7 +1344,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -1344,7 +1406,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1406,7 +1468,7 @@ $bn_ops       = BN_LLONG RC4_INDEX EXPORT_VAR_AS_FN RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1685,7 +1747,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1716,7 +1778,7 @@ $bn_ops       = BN_LLONG RC4_INT RC4_CHUNK DES_UNROLL BF_PTR
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1840,7 +1902,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1871,7 +1933,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1964,7 +2026,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -1995,7 +2057,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -2119,7 +2181,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -2150,7 +2212,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -2181,7 +2243,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -2212,7 +2274,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -2367,7 +2429,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -2398,7 +2460,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -2429,7 +2491,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -2584,7 +2646,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3111,7 +3173,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3452,7 +3514,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3514,7 +3576,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3607,7 +3669,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3886,7 +3948,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -3948,7 +4010,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT EXPORT_V
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -3979,7 +4041,7 @@ $bn_ops       = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -4537,7 +4599,7 @@ $bn_ops       = DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -4568,7 +4630,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -4816,7 +4878,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -4909,7 +4971,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -4940,7 +5002,7 @@ $bn_ops       = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL
 $cpuid_obj    = x86_64cpuid.o
 $bn_obj       = x86_64-gcc.o x86_64-mont.o
 $des_obj      = 
-$aes_obj      = aes-x86_64.o
+$aes_obj      = aes-x86_64.o aesni-x86_64.o
 $bf_obj       = 
 $md5_obj      = md5-x86_64.o
 $sha1_obj     = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o
@@ -5250,7 +5312,7 @@ $bn_ops       = BN_LLONG MD2_CHAR RC4_INDEX DES_PTR DES_RISC1 DES_UNROLL
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
@@ -5281,7 +5343,7 @@ $bn_ops       = BN_LLONG DES_PTR DES_RISC1 DES_UNROLL RC4_INDEX MD2_INT
 $cpuid_obj    = x86cpuid.o
 $bn_obj       = bn-586.o co-586.o x86-mont.o
 $des_obj      = des-586.o crypt586.o
-$aes_obj      = aes-586.o
+$aes_obj      = aes-586.o aesni-x86.o
 $bf_obj       = bf-586.o
 $md5_obj      = md5-586.o
 $sha1_obj     = sha1-586.o sha256-586.o sha512-586.o
diff --git a/crypto/aes/Makefile b/crypto/aes/Makefile
index c501a43a8f6b703df54bdb5a4ce246686883822c..3517465bd072593e2fbe3f17f6c4ba0f855ea5f9 100644 (file)
--- a/crypto/aes/Makefile
+++ b/crypto/aes/Makefile
@@ -50,9 +50,13 @@ aes-ia64.s: asm/aes-ia64.S
 
 aes-586.s:     asm/aes-586.pl ../perlasm/x86asm.pl
       $(PERL) asm/aes-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
+aesni-x86.s:   asm/aesni-x86.pl ../perlasm/x86asm.pl
+       $(PERL) asm/aesni-x86.pl $(PERLASM_SCHEME) $(CFLAGS) $(PROCESSOR) > $@
 
 aes-x86_64.s: asm/aes-x86_64.pl
       $(PERL) asm/aes-x86_64.pl $(PERLASM_SCHEME) > $@
+aesni-x86_64.s: asm/aesni-x86_64.pl
+       $(PERL) asm/aesni-x86_64.pl $(PERLASM_SCHEME) > $@
 
 aes-sparcv9.s: asm/aes-sparcv9.pl
       $(PERL) asm/aes-sparcv9.pl $(CFLAGS) > $@
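
As with the existing rules, the build runs each Perl module with the target's
$(PERLASM_SCHEME) (for example "elf", as selected via $x86_elf_asm in
Configure) and captures the emitted assembler in the .s file; assembling those
is what yields the aesni-x86.o and aesni-x86_64.o objects referenced from
Configure and TABLE above.
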
diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl
new file mode 100644 (file)
index 0000000..b3c8d1f
--- /dev/null
+++ b/crypto/aes/asm/aesni-x86.pl
@@ -0,0 +1,2187 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for Intel AES-NI extension. In
+# OpenSSL context it's used with Intel engine, but can also be used as
+# drop-in replacement for crypto/aes/asm/aes-586.pl [see below for
+# details].
+#
+# Performance.
+#
+# To start with see corresponding paragraph in aesni-x86_64.pl...
+# Instead of filling table similar to one found there I've chosen to
+# summarize *comparison* results for raw ECB, CTR and CBC benchmarks.
+# The simplified table below represents 32-bit performance relative
+# to 64-bit one in every given point. Ratios vary for different
+# encryption modes, therefore interval values.
+#
+#      16-byte     64-byte     256-byte    1-KB        8-KB
+#      53-67%      67-84%      91-94%      95-98%      97-99.5%
+#
+# Lower ratios for smaller block sizes are perfectly understandable,
+# because function call overhead is higher in 32-bit mode. Largest
+# 8-KB block performance is virtually same: 32-bit code is less than
+# 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
+
+# January 2011
+#
+# See aesni-x86_64.pl for details. Unlike x86_64 version this module
+# interleaves at most 6 aes[enc|dec] instructions, because there are
+# not enough registers for 8x interleave [which should be optimal for
+# Sandy Bridge]. Actually, performance results for 6x interleave
+# factor presented in aesni-x86_64.pl (except for CTR) are for this
+# module.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 1.09.
+
+$PREFIX="aesni";       # if $PREFIX is set to "AES", the script
+                       # generates drop-in replacement for
+                       # crypto/aes/asm/aes-586.pl:-)
+$inline=1;             # inline _aesni_[en|de]crypt
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],$0);
+
+if ($PREFIX eq "aesni")        { $movekey=*movups; }
+else                   { $movekey=*movups; }
+
+$len="eax";
+$rounds="ecx";
+$key="edx";
+$inp="esi";
+$out="edi";
+$rounds_="ebx";        # backup copy for $rounds
+$key_="ebp";   # backup copy for $key
+
+$rndkey0="xmm0";
+$rndkey1="xmm1";
+$inout0="xmm2";
+$inout1="xmm3";
+$inout2="xmm4";
+$inout3="xmm5";        $in1="xmm5";
+$inout4="xmm6";        $in0="xmm6";
+$inout5="xmm7";        $ivec="xmm7";
+
+# AESNI extension
+sub aeskeygenassist
+{ my($dst,$src,$imm)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {  &data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm);   }
+}
+sub aescommon
+{ my($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {  &data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
+}
+sub aesimc     { aescommon(0xdb,@_); }
+sub aesenc     { aescommon(0xdc,@_); }
+sub aesenclast { aescommon(0xdd,@_); }
+sub aesdec     { aescommon(0xde,@_); }
+sub aesdeclast { aescommon(0xdf,@_); }
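
These five subs hand-assemble the AES-NI instructions as literal bytes
(66 0F 38 xx for the two-operand forms; aeskeygenassist is 66 0F 3A DF plus
an immediate) because assemblers of the day did not yet know the mnemonics;
the register-to-register ModRM byte is 0xC0 | (dst << 3) | src. A worked
sketch of the encoding, outside the module:

    # Sketch (not part of the commit): mirror of aescommon()'s byte emission.
    sub aes_rr {
        my ($opcodelet, $dst, $src) = @_;   # xmm register numbers, 0..7
        return (0x66, 0x0f, 0x38, $opcodelet, 0xc0 | ($dst << 3) | $src);
    }
    # &aesenc($inout0,$rndkey1) is "aesenc %xmm1,%xmm2" (dst=2, src=1):
    printf "%02x %02x %02x %02x %02x\n", aes_rr(0xdc, 2, 1);  # 66 0f 38 dc d1
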
+\f
+# Inline version of internal aesni_[en|de]crypt1
+{ my $sn;
+sub aesni_inline_generate1
+{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+  $sn++;
+
+    &$movekey          ($rndkey0,&QWP(0,$key));
+    &$movekey          ($rndkey1,&QWP(16,$key));
+    &xorps             ($ivec,$rndkey0)        if (defined($ivec));
+    &lea               ($key,&DWP(32,$key));
+    &xorps             ($inout,$ivec)          if (defined($ivec));
+    &xorps             ($inout,$rndkey0)       if (!defined($ivec));
+    &set_label("${p}1_loop_$sn");
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &dec            ($rounds);
+       &$movekey       ($rndkey1,&QWP(0,$key));
+       &lea            ($key,&DWP(16,$key));
+    &jnz               (&label("${p}1_loop_$sn"));
+    eval"&aes${p}last  ($inout,$rndkey1)";
+}}
+
+sub aesni_generate1    # fully unrolled loop
+{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
+
+    &function_begin_B("_aesni_${p}rypt1");
+       &movups         ($rndkey0,&QWP(0,$key));
+       &$movekey       ($rndkey1,&QWP(0x10,$key));
+       &xorps          ($inout,$rndkey0);
+       &$movekey       ($rndkey0,&QWP(0x20,$key));
+       &lea            ($key,&DWP(0x30,$key));
+       &cmp            ($rounds,11);
+       &jb             (&label("${p}128"));
+       &lea            ($key,&DWP(0x20,$key));
+       &je             (&label("${p}192"));
+       &lea            ($key,&DWP(0x20,$key));
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(-0x40,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(-0x30,$key));
+    &set_label("${p}192");
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(-0x20,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(-0x10,$key));
+    &set_label("${p}128");
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(0,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0x10,$key));
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(0x20,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0x30,$key));
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(0x40,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0x50,$key));
+       eval"&aes${p}   ($inout,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(0x60,$key));
+       eval"&aes${p}   ($inout,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0x70,$key));
+       eval"&aes${p}   ($inout,$rndkey1)";
+    eval"&aes${p}last  ($inout,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt1");
+}
+\f
+# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("enc") if (!$inline);
+&function_begin_B("${PREFIX}_encrypt");
+       &mov    ("eax",&wparam(0));
+       &mov    ($key,&wparam(2));
+       &movups ($inout0,&QWP(0,"eax"));
+       &mov    ($rounds,&DWP(240,$key));
+       &mov    ("eax",&wparam(1));
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+       &movups (&QWP(0,"eax"),$inout0);
+       &ret    ();
+&function_end_B("${PREFIX}_encrypt");
+
+# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("dec") if(!$inline);
+&function_begin_B("${PREFIX}_decrypt");
+       &mov    ("eax",&wparam(0));
+       &mov    ($key,&wparam(2));
+       &movups ($inout0,&QWP(0,"eax"));
+       &mov    ($rounds,&DWP(240,$key));
+       &mov    ("eax",&wparam(1));
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &movups (&QWP(0,"eax"),$inout0);
+       &ret    ();
+&function_end_B("${PREFIX}_decrypt");
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6, it could be scheduled only
+# every *2nd* cycle. Thus 3x interleave was the one providing optimal
+# utilization, i.e. when subroutine's throughput is virtually same as
+# of non-interleaved subroutine [for number of input blocks up to 3].
+# This is why it makes no sense to implement 2x subroutine.
+# aes[enc|dec] latency in next processor generation is 8, but the
+# instructions can be scheduled every cycle. Optimal interleave for
+# new processor is therefore 8x, but it's unfeasible to accommodate it
+# in XMM registers addressable in 32-bit mode and therefore 6x is
+# used instead...
+
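
In plain arithmetic: the useful interleave factor is instruction latency
divided by issue interval, so 6/2 = 3 on the original parts and 8/1 = 8 on the
newer ones; with only eight XMM registers addressable in 32-bit mode, two of
them tied up by round keys, 6x is the practical ceiling settled on above.
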
+sub aesni_generate3
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt3");
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &shr            ($rounds,1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &lea            ($key,&DWP(32,$key));
+       &xorps          ($inout0,$rndkey0);
+       &pxor           ($inout1,$rndkey0);
+       &pxor           ($inout2,$rndkey0);
+       &$movekey       ($rndkey0,&QWP(0,$key));
+
+    &set_label("${p}3_loop");
+       eval"&aes${p}   ($inout0,$rndkey1)";
+       eval"&aes${p}   ($inout1,$rndkey1)";
+       &dec            ($rounds);
+       eval"&aes${p}   ($inout2,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       eval"&aes${p}   ($inout0,$rndkey0)";
+       eval"&aes${p}   ($inout1,$rndkey0)";
+       &lea            ($key,&DWP(32,$key));
+       eval"&aes${p}   ($inout2,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &jnz            (&label("${p}3_loop"));
+    eval"&aes${p}      ($inout0,$rndkey1)";
+    eval"&aes${p}      ($inout1,$rndkey1)";
+    eval"&aes${p}      ($inout2,$rndkey1)";
+    eval"&aes${p}last  ($inout0,$rndkey0)";
+    eval"&aes${p}last  ($inout1,$rndkey0)";
+    eval"&aes${p}last  ($inout2,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt3");
+}
+
+# 4x interleave is implemented to improve small block performance,
+# most notably [and naturally] 4 block by ~30%. One can argue that one
+# should have implemented 5x as well, but improvement would be <20%,
+# so it's not worth it...
+sub aesni_generate4
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt4");
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &shr            ($rounds,1);
+       &lea            ($key,&DWP(32,$key));
+       &xorps          ($inout0,$rndkey0);
+       &pxor           ($inout1,$rndkey0);
+       &pxor           ($inout2,$rndkey0);
+       &pxor           ($inout3,$rndkey0);
+       &$movekey       ($rndkey0,&QWP(0,$key));
+
+    &set_label("${p}4_loop");
+       eval"&aes${p}   ($inout0,$rndkey1)";
+       eval"&aes${p}   ($inout1,$rndkey1)";
+       &dec            ($rounds);
+       eval"&aes${p}   ($inout2,$rndkey1)";
+       eval"&aes${p}   ($inout3,$rndkey1)";
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       eval"&aes${p}   ($inout0,$rndkey0)";
+       eval"&aes${p}   ($inout1,$rndkey0)";
+       &lea            ($key,&DWP(32,$key));
+       eval"&aes${p}   ($inout2,$rndkey0)";
+       eval"&aes${p}   ($inout3,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0,$key));
+    &jnz               (&label("${p}4_loop"));
+
+    eval"&aes${p}      ($inout0,$rndkey1)";
+    eval"&aes${p}      ($inout1,$rndkey1)";
+    eval"&aes${p}      ($inout2,$rndkey1)";
+    eval"&aes${p}      ($inout3,$rndkey1)";
+    eval"&aes${p}last  ($inout0,$rndkey0)";
+    eval"&aes${p}last  ($inout1,$rndkey0)";
+    eval"&aes${p}last  ($inout2,$rndkey0)";
+    eval"&aes${p}last  ($inout3,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt4");
+}
+
+sub aesni_generate6
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt6");
+    &static_label("_aesni_${p}rypt6_enter");
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &shr            ($rounds,1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &lea            ($key,&DWP(32,$key));
+       &xorps          ($inout0,$rndkey0);
+       &pxor           ($inout1,$rndkey0);     # pxor does better here
+       eval"&aes${p}   ($inout0,$rndkey1)";
+       &pxor           ($inout2,$rndkey0);
+       eval"&aes${p}   ($inout1,$rndkey1)";
+       &pxor           ($inout3,$rndkey0);
+       &dec            ($rounds);
+       eval"&aes${p}   ($inout2,$rndkey1)";
+       &pxor           ($inout4,$rndkey0);
+       eval"&aes${p}   ($inout3,$rndkey1)";
+       &pxor           ($inout5,$rndkey0);
+       eval"&aes${p}   ($inout4,$rndkey1)";
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       eval"&aes${p}   ($inout5,$rndkey1)";
+       &jmp            (&label("_aesni_${p}rypt6_enter"));
+
+    &set_label("${p}6_loop",16);
+       eval"&aes${p}   ($inout0,$rndkey1)";
+       eval"&aes${p}   ($inout1,$rndkey1)";
+       &dec            ($rounds);
+       eval"&aes${p}   ($inout2,$rndkey1)";
+       eval"&aes${p}   ($inout3,$rndkey1)";
+       eval"&aes${p}   ($inout4,$rndkey1)";
+       eval"&aes${p}   ($inout5,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_enter",16);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       eval"&aes${p}   ($inout0,$rndkey0)";
+       eval"&aes${p}   ($inout1,$rndkey0)";
+       &lea            ($key,&DWP(32,$key));
+       eval"&aes${p}   ($inout2,$rndkey0)";
+       eval"&aes${p}   ($inout3,$rndkey0)";
+       eval"&aes${p}   ($inout4,$rndkey0)";
+       eval"&aes${p}   ($inout5,$rndkey0)";
+       &$movekey       ($rndkey0,&QWP(0,$key));
+    &jnz               (&label("${p}6_loop"));
+
+    eval"&aes${p}      ($inout0,$rndkey1)";
+    eval"&aes${p}      ($inout1,$rndkey1)";
+    eval"&aes${p}      ($inout2,$rndkey1)";
+    eval"&aes${p}      ($inout3,$rndkey1)";
+    eval"&aes${p}      ($inout4,$rndkey1)";
+    eval"&aes${p}      ($inout5,$rndkey1)";
+    eval"&aes${p}last  ($inout0,$rndkey0)";
+    eval"&aes${p}last  ($inout1,$rndkey0)";
+    eval"&aes${p}last  ($inout2,$rndkey0)";
+    eval"&aes${p}last  ($inout3,$rndkey0)";
+    eval"&aes${p}last  ($inout4,$rndkey0)";
+    eval"&aes${p}last  ($inout5,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt6");
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+\f
+if ($PREFIX eq "aesni") {
+######################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#                         size_t length, const AES_KEY *key,
+#                         int enc);
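
Two contract details before the code: length is silently rounded down to
whole 16-byte blocks (the "and $len,-16" below), and enc selects direction
(zero means decrypt). Tails shorter than six blocks are dispatched to smaller
subroutines, padding odd counts with a zeroed register; a sketch of that
mapping:

    # Sketch: remaining whole blocks -> subroutine used by the tail code below.
    my %ecb_tail = (
        1 => "_aesni_encrypt1 (or the inlined variant)",
        2 => "_aesni_encrypt3, third block zeroed",
        3 => "_aesni_encrypt3",
        4 => "_aesni_encrypt4",
        5 => "_aesni_encrypt6, sixth block zeroed",
    );
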
+&function_begin("aesni_ecb_encrypt");
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));
+       &mov    ($rounds_,&wparam(4));
+       &and    ($len,-16);
+       &jz     (&label("ecb_ret"));
+       &mov    ($rounds,&DWP(240,$key));
+       &test   ($rounds_,$rounds_);
+       &jz     (&label("ecb_decrypt"));
+
+       &mov    ($key_,$key);           # backup $key
+       &mov    ($rounds_,$rounds);     # backup $rounds
+       &cmp    ($len,0x60);
+       &jb     (&label("ecb_enc_tail"));
+
+       &movdqu ($inout0,&QWP(0,$inp));
+       &movdqu ($inout1,&QWP(0x10,$inp));
+       &movdqu ($inout2,&QWP(0x20,$inp));
+       &movdqu ($inout3,&QWP(0x30,$inp));
+       &movdqu ($inout4,&QWP(0x40,$inp));
+       &movdqu ($inout5,&QWP(0x50,$inp));
+       &lea    ($inp,&DWP(0x60,$inp));
+       &sub    ($len,0x60);
+       &jmp    (&label("ecb_enc_loop6_enter"));
+
+&set_label("ecb_enc_loop6",16);
+       &movups (&QWP(0,$out),$inout0);
+       &movdqu ($inout0,&QWP(0,$inp));
+       &movups (&QWP(0x10,$out),$inout1);
+       &movdqu ($inout1,&QWP(0x10,$inp));
+       &movups (&QWP(0x20,$out),$inout2);
+       &movdqu ($inout2,&QWP(0x20,$inp));
+       &movups (&QWP(0x30,$out),$inout3);
+       &movdqu ($inout3,&QWP(0x30,$inp));
+       &movups (&QWP(0x40,$out),$inout4);
+       &movdqu ($inout4,&QWP(0x40,$inp));
+       &movups (&QWP(0x50,$out),$inout5);
+       &lea    ($out,&DWP(0x60,$out));
+       &movdqu ($inout5,&QWP(0x50,$inp));
+       &lea    ($inp,&DWP(0x60,$inp));
+&set_label("ecb_enc_loop6_enter");
+
+       &call   ("_aesni_encrypt6");
+
+       &mov    ($key,$key_);           # restore $key
+       &mov    ($rounds,$rounds_);     # restore $rounds
+       &sub    ($len,0x60);
+       &jnc    (&label("ecb_enc_loop6"));
+
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &movups (&QWP(0x40,$out),$inout4);
+       &movups (&QWP(0x50,$out),$inout5);
+       &lea    ($out,&DWP(0x60,$out));
+       &add    ($len,0x60);
+       &jz     (&label("ecb_ret"));
+
+&set_label("ecb_enc_tail");
+       &movups ($inout0,&QWP(0,$inp));
+       &cmp    ($len,0x20);
+       &jb     (&label("ecb_enc_one"));
+       &movups ($inout1,&QWP(0x10,$inp));
+       &je     (&label("ecb_enc_two"));
+       &movups ($inout2,&QWP(0x20,$inp));
+       &cmp    ($len,0x40);
+       &jb     (&label("ecb_enc_three"));
+       &movups ($inout3,&QWP(0x30,$inp));
+       &je     (&label("ecb_enc_four"));
+       &movups ($inout4,&QWP(0x40,$inp));
+       &xorps  ($inout5,$inout5);
+       &call   ("_aesni_encrypt6");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &movups (&QWP(0x40,$out),$inout4);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_enc_one",16);
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+       &movups (&QWP(0,$out),$inout0);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_enc_two",16);
+       &xorps  ($inout2,$inout2);
+       &call   ("_aesni_encrypt3");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_enc_three",16);
+       &call   ("_aesni_encrypt3");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_enc_four",16);
+       &call   ("_aesni_encrypt4");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &jmp    (&label("ecb_ret"));
+######################################################################
+&set_label("ecb_decrypt",16);
+       &mov    ($key_,$key);           # backup $key
+       &mov    ($rounds_,$rounds);     # backup $rounds
+       &cmp    ($len,0x60);
+       &jb     (&label("ecb_dec_tail"));
+
+       &movdqu ($inout0,&QWP(0,$inp));
+       &movdqu ($inout1,&QWP(0x10,$inp));
+       &movdqu ($inout2,&QWP(0x20,$inp));
+       &movdqu ($inout3,&QWP(0x30,$inp));
+       &movdqu ($inout4,&QWP(0x40,$inp));
+       &movdqu ($inout5,&QWP(0x50,$inp));
+       &lea    ($inp,&DWP(0x60,$inp));
+       &sub    ($len,0x60);
+       &jmp    (&label("ecb_dec_loop6_enter"));
+
+&set_label("ecb_dec_loop6",16);
+       &movups (&QWP(0,$out),$inout0);
+       &movdqu ($inout0,&QWP(0,$inp));
+       &movups (&QWP(0x10,$out),$inout1);
+       &movdqu ($inout1,&QWP(0x10,$inp));
+       &movups (&QWP(0x20,$out),$inout2);
+       &movdqu ($inout2,&QWP(0x20,$inp));
+       &movups (&QWP(0x30,$out),$inout3);
+       &movdqu ($inout3,&QWP(0x30,$inp));
+       &movups (&QWP(0x40,$out),$inout4);
+       &movdqu ($inout4,&QWP(0x40,$inp));
+       &movups (&QWP(0x50,$out),$inout5);
+       &lea    ($out,&DWP(0x60,$out));
+       &movdqu ($inout5,&QWP(0x50,$inp));
+       &lea    ($inp,&DWP(0x60,$inp));
+&set_label("ecb_dec_loop6_enter");
+
+       &call   ("_aesni_decrypt6");
+
+       &mov    ($key,$key_);           # restore $key
+       &mov    ($rounds,$rounds_);     # restore $rounds
+       &sub    ($len,0x60);
+       &jnc    (&label("ecb_dec_loop6"));
+
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &movups (&QWP(0x40,$out),$inout4);
+       &movups (&QWP(0x50,$out),$inout5);
+       &lea    ($out,&DWP(0x60,$out));
+       &add    ($len,0x60);
+       &jz     (&label("ecb_ret"));
+
+&set_label("ecb_dec_tail");
+       &movups ($inout0,&QWP(0,$inp));
+       &cmp    ($len,0x20);
+       &jb     (&label("ecb_dec_one"));
+       &movups ($inout1,&QWP(0x10,$inp));
+       &je     (&label("ecb_dec_two"));
+       &movups ($inout2,&QWP(0x20,$inp));
+       &cmp    ($len,0x40);
+       &jb     (&label("ecb_dec_three"));
+       &movups ($inout3,&QWP(0x30,$inp));
+       &je     (&label("ecb_dec_four"));
+       &movups ($inout4,&QWP(0x40,$inp));
+       &xorps  ($inout5,$inout5);
+       &call   ("_aesni_decrypt6");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &movups (&QWP(0x40,$out),$inout4);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_dec_one",16);
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &movups (&QWP(0,$out),$inout0);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_dec_two",16);
+       &xorps  ($inout2,$inout2);
+       &call   ("_aesni_decrypt3");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_dec_three",16);
+       &call   ("_aesni_decrypt3");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &jmp    (&label("ecb_ret"));
+
+&set_label("ecb_dec_four",16);
+       &call   ("_aesni_decrypt4");
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+
+&set_label("ecb_ret");
+&function_end("aesni_ecb_encrypt");
+\f
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on 64-bit counter and
+# does not update *ivec! Nor does it finalize CMAC value
+# (see engine/eng_aesni.c for details)
+#
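
Put differently, the caller owns all CCM state: it formats the initial counter
block and the CMAC-so-far, passes both in, and after the last call must still
produce the final tag itself (in CCM, by masking the accumulated MAC with the
encrypted A0 counter block); the routine only advances a private copy of the
64-bit counter and folds whole blocks into the MAC.
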
+{ my $cmac=$inout1;
+&function_begin("aesni_ccm64_encrypt_blocks");
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));
+       &mov    ($rounds_,&wparam(4));
+       &mov    ($rounds,&wparam(5));
+       &mov    ($key_,"esp");
+       &sub    ("esp",60);
+       &and    ("esp",-16);                    # align stack
+       &mov    (&DWP(48,"esp"),$key_);
+
+       &movdqu ($ivec,&QWP(0,$rounds_));       # load ivec
+       &movdqu ($cmac,&QWP(0,$rounds));        # load cmac
+
+       # compose byte-swap control mask for pshufb on stack
+       &mov    (&DWP(0,"esp"),0x0c0d0e0f);
+       &mov    (&DWP(4,"esp"),0x08090a0b);
+       &mov    (&DWP(8,"esp"),0x04050607);
+       &mov    (&DWP(12,"esp"),0x00010203);
+
+       # compose counter increment vector on stack
+       &mov    ($rounds,1);
+       &xor    ($key_,$key_);
+       &mov    (&DWP(16,"esp"),$rounds);
+       &mov    (&DWP(20,"esp"),$key_);
+       &mov    (&DWP(24,"esp"),$key_);
+       &mov    (&DWP(28,"esp"),$key_);
+
+       &movdqa ($inout3,&QWP(0,"esp"));
+       &pshufb ($ivec,$inout3);                # keep iv in reverse order
+
+       &mov    ($rounds,&DWP(240,$key));
+       &mov    ($key_,$key);
+       &mov    ($rounds_,$rounds);
+       &movdqa ($inout0,$ivec);
+
+&set_label("ccm64_enc_outer");
+       &movups         ($in0,&QWP(0,$inp));
+       &pshufb         ($inout0,$inout3);
+       &mov            ($key,$key_);
+       &mov            ($rounds,$rounds_);
+
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &shr            ($rounds,1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &xorps          ($in0,$rndkey0);
+       &lea            ($key,&DWP(32,$key));
+       &xorps          ($inout0,$rndkey0);
+       &xorps          ($cmac,$in0);           # cmac^=inp
+       &$movekey       ($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_enc2_loop");
+       &aesenc         ($inout0,$rndkey1);
+       &dec            ($rounds);
+       &aesenc         ($cmac,$rndkey1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &aesenc         ($inout0,$rndkey0);
+       &lea            ($key,&DWP(32,$key));
+       &aesenc         ($cmac,$rndkey0);
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &jnz            (&label("ccm64_enc2_loop"));
+       &aesenc         ($inout0,$rndkey1);
+       &aesenc         ($cmac,$rndkey1);
+       &aesenclast     ($inout0,$rndkey0);
+       &aesenclast     ($cmac,$rndkey0);
+
+       &paddq  ($ivec,&QWP(16,"esp"));
+       &dec    ($len);
+       &lea    ($inp,&DWP(16,$inp));
+       &xorps  ($in0,$inout0);                 # inp^=E(ivec)
+       &movdqa ($inout0,$ivec);
+       &movups (&QWP(0,$out),$in0);
+       &lea    ($out,&DWP(16,$out));
+       &jnz    (&label("ccm64_enc_outer"));
+
+       &mov    ("esp",&DWP(48,"esp"));
+       &mov    ($out,&wparam(5));
+       &movups (&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_encrypt_blocks");
+
+&function_begin("aesni_ccm64_decrypt_blocks");
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));
+       &mov    ($rounds_,&wparam(4));
+       &mov    ($rounds,&wparam(5));
+       &mov    ($key_,"esp");
+       &sub    ("esp",60);
+       &and    ("esp",-16);                    # align stack
+       &mov    (&DWP(48,"esp"),$key_);
+
+       &movdqu ($ivec,&QWP(0,$rounds_));       # load ivec
+       &movdqu ($cmac,&QWP(0,$rounds));        # load cmac
+
+       # compose byte-swap control mask for pshufb on stack
+       &mov    (&DWP(0,"esp"),0x0c0d0e0f);
+       &mov    (&DWP(4,"esp"),0x08090a0b);
+       &mov    (&DWP(8,"esp"),0x04050607);
+       &mov    (&DWP(12,"esp"),0x00010203);
+
+       # compose counter increment vector on stack
+       &mov    ($rounds,1);
+       &xor    ($key_,$key_);
+       &mov    (&DWP(16,"esp"),$rounds);
+       &mov    (&DWP(20,"esp"),$key_);
+       &mov    (&DWP(24,"esp"),$key_);
+       &mov    (&DWP(28,"esp"),$key_);
+
+       &movdqa ($inout3,&QWP(0,"esp"));        # bswap mask
+       &movdqa ($inout0,$ivec);
+       &pshufb ($ivec,$inout3);                # keep iv in reverse order
+
+       &mov    ($rounds,&DWP(240,$key));
+       &mov    ($key_,$key);
+       &mov    ($rounds_,$rounds);
+
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+
+&set_label("ccm64_dec_outer");
+       &paddq  ($ivec,&QWP(16,"esp"));
+       &movups ($in0,&QWP(0,$inp));            # load inp
+       &xorps  ($in0,$inout0);
+       &movdqa ($inout0,$ivec);
+       &lea    ($inp,&QWP(16,$inp));
+       &pshufb ($inout0,$inout3);
+       &mov    ($key,$key_);
+       &mov    ($rounds,$rounds_);
+       &movups (&QWP(0,$out),$in0);
+       &lea    ($out,&DWP(16,$out));
+
+       &sub    ($len,1);
+       &jz     (&label("ccm64_dec_break"));
+
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &shr            ($rounds,1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &xorps          ($in0,$rndkey0);
+       &lea            ($key,&DWP(32,$key));
+       &xorps          ($inout0,$rndkey0);
+       &xorps          ($cmac,$in0);           # cmac^=out
+       &$movekey       ($rndkey0,&QWP(0,$key));
+
+&set_label("ccm64_dec2_loop");
+       &aesenc         ($inout0,$rndkey1);
+       &dec            ($rounds);
+       &aesenc         ($cmac,$rndkey1);
+       &$movekey       ($rndkey1,&QWP(16,$key));
+       &aesenc         ($inout0,$rndkey0);
+       &lea            ($key,&DWP(32,$key));
+       &aesenc         ($cmac,$rndkey0);
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &jnz            (&label("ccm64_dec2_loop"));
+       &aesenc         ($inout0,$rndkey1);
+       &aesenc         ($cmac,$rndkey1);
+       &aesenclast     ($inout0,$rndkey0);
+       &aesenclast     ($cmac,$rndkey0);
+       &jmp    (&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_break",16);
+       if ($inline)
+       {   &aesni_inline_generate1("enc",$cmac,$in0);  }
+       else
+       {   &call       ("_aesni_encrypt1",$cmac);      }
+
+       &mov    ("esp",&DWP(48,"esp"));
+       &mov    ($out,&wparam(5));
+       &movups (&QWP(0,$out),$cmac);
+&function_end("aesni_ccm64_decrypt_blocks");
+}
+\f
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
+# stack layout:
+#      0       pshufb mask
+#      16      vector addend: 0,6,6,6 (dwords, most to least significant)
+#      32      counter-less ivec
+#      48      1st triplet of counter vector
+#      64      2nd triplet of counter vector
+#      80      saved %esp
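+#
+# For reference, a rough C sketch of the semantics implemented below
+# (an illustration only, with the generic AES_encrypt standing in for
+# the AES-NI code path):
+#
+#      void ctr32_ref(const unsigned char *in, unsigned char *out,
+#                      size_t blocks, const AES_KEY *key,
+#                      const unsigned char ivec[16])
+#      {
+#              unsigned char ctr[16], ks[16];
+#              unsigned int c = (ivec[12]<<24)|(ivec[13]<<16)|
+#                               (ivec[14]<<8)|ivec[15];
+#              memcpy(ctr,ivec,16);
+#              while (blocks--) {
+#                      AES_encrypt(ctr,ks,key);
+#                      for (int i=0;i<16;i++) out[i] = in[i]^ks[i];
+#                      in += 16; out += 16;
+#                      c++;            /* 32-bit counter, wraps */
+#                      ctr[12] = (unsigned char)(c>>24);
+#                      ctr[13] = (unsigned char)(c>>16);
+#                      ctr[14] = (unsigned char)(c>>8);
+#                      ctr[15] = (unsigned char)c;
+#              }                       /* caller's ivec left untouched */
+#      }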
+
+&function_begin("aesni_ctr32_encrypt_blocks");
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));
+       &mov    ($rounds_,&wparam(4));
+       &mov    ($key_,"esp");
+       &sub    ("esp",88);
+       &and    ("esp",-16);                    # align stack
+       &mov    (&DWP(80,"esp"),$key_);
+
+       &cmp    ($len,1);
+       &je     (&label("ctr32_one_shortcut"));
+
+       &movdqu ($inout5,&QWP(0,$rounds_));     # load ivec
+
+       # compose byte-swap control mask for pshufb on stack
+       &mov    (&DWP(0,"esp"),0x0c0d0e0f);
+       &mov    (&DWP(4,"esp"),0x08090a0b);
+       &mov    (&DWP(8,"esp"),0x04050607);
+       &mov    (&DWP(12,"esp"),0x00010203);
+
+       # compose counter increment vector on stack
+       &mov    ($rounds,6);
+       &xor    ($key_,$key_);
+       &mov    (&DWP(16,"esp"),$rounds);
+       &mov    (&DWP(20,"esp"),$rounds);
+       &mov    (&DWP(24,"esp"),$rounds);
+       &mov    (&DWP(28,"esp"),$key_);
+
+       &pextrd ($rounds_,$inout5,3);           # pull 32-bit counter
+       &pinsrd ($inout5,$key_,3);              # wipe 32-bit counter
+
+       &mov    ($rounds,&DWP(240,$key));       # key->rounds
+
+       # compose 2 vectors of 3x32-bit counters
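+       # i.e. {n,n+1,n+2} and {n+3,n+4,n+5} with n pulled from ivec;
+       # the triplets are kept in host byte order at 48(%esp)/64(%esp)
+       # so they can be bumped with plain paddd, while byte-swapped
+       # copies feed the actual counter blocks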
+       &bswap  ($rounds_);
+       &pxor   ($rndkey1,$rndkey1);
+       &pxor   ($rndkey0,$rndkey0);
+       &movdqa ($inout0,&QWP(0,"esp"));        # load byte-swap mask
+       &pinsrd ($rndkey1,$rounds_,0);
+       &lea    ($key_,&DWP(3,$rounds_));
+       &pinsrd ($rndkey0,$key_,0);
+       &inc    ($rounds_);
+       &pinsrd ($rndkey1,$rounds_,1);
+       &inc    ($key_);
+       &pinsrd ($rndkey0,$key_,1);
+       &inc    ($rounds_);
+       &pinsrd ($rndkey1,$rounds_,2);
+       &inc    ($key_);
+       &pinsrd ($rndkey0,$key_,2);
+       &movdqa (&QWP(48,"esp"),$rndkey1);      # save 1st triplet
+       &pshufb ($rndkey1,$inout0);             # byte swap
+       &movdqa (&QWP(64,"esp"),$rndkey0);      # save 2nd triplet
+       &pshufb ($rndkey0,$inout0);             # byte swap
+
+       &pshufd ($inout0,$rndkey1,3<<6);        # place counter to upper dword
+       &pshufd ($inout1,$rndkey1,2<<6);
+       &cmp    ($len,6);
+       &jb     (&label("ctr32_tail"));
+       &movdqa (&QWP(32,"esp"),$inout5);       # save counter-less ivec
+       &shr    ($rounds,1);
+       &mov    ($key_,$key);                   # backup $key
+       &mov    ($rounds_,$rounds);             # backup $rounds
+       &sub    ($len,6);
+       &jmp    (&label("ctr32_loop6"));
+
+&set_label("ctr32_loop6",16);
+       &pshufd ($inout2,$rndkey1,1<<6);
+       &movdqa ($rndkey1,&QWP(32,"esp"));      # pull counter-less ivec
+       &pshufd ($inout3,$rndkey0,3<<6);
+       &por    ($inout0,$rndkey1);             # merge counter-less ivec
+       &pshufd ($inout4,$rndkey0,2<<6);
+       &por    ($inout1,$rndkey1);
+       &pshufd ($inout5,$rndkey0,1<<6);
+       &por    ($inout2,$rndkey1);
+       &por    ($inout3,$rndkey1);
+       &por    ($inout4,$rndkey1);
+       &por    ($inout5,$rndkey1);
+
+       # inlining _aesni_encrypt6's prologue gives ~4% improvement...
+       &$movekey       ($rndkey0,&QWP(0,$key_));
+       &$movekey       ($rndkey1,&QWP(16,$key_));
+       &lea            ($key,&DWP(32,$key_));
+       &dec            ($rounds);
+       &pxor           ($inout0,$rndkey0);
+       &pxor           ($inout1,$rndkey0);
+       &aesenc         ($inout0,$rndkey1);
+       &pxor           ($inout2,$rndkey0);
+       &aesenc         ($inout1,$rndkey1);
+       &pxor           ($inout3,$rndkey0);
+       &aesenc         ($inout2,$rndkey1);
+       &pxor           ($inout4,$rndkey0);
+       &aesenc         ($inout3,$rndkey1);
+       &pxor           ($inout5,$rndkey0);
+       &aesenc         ($inout4,$rndkey1);
+       &$movekey       ($rndkey0,&QWP(0,$key));
+       &aesenc         ($inout5,$rndkey1);
+
+       &call           (&label("_aesni_encrypt6_enter"));
+
+       &movups ($rndkey1,&QWP(0,$inp));
+       &movups ($rndkey0,&QWP(0x10,$inp));
+       &xorps  ($inout0,$rndkey1);
+       &movups ($rndkey1,&QWP(0x20,$inp));
+       &xorps  ($inout1,$rndkey0);
+       &movups (&QWP(0,$out),$inout0);
+       &movdqa ($rndkey0,&QWP(16,"esp"));      # load increment
+       &xorps  ($inout2,$rndkey1);
+       &movdqa ($rndkey1,&QWP(48,"esp"));      # load 1st triplet
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+
+       &paddd  ($rndkey1,$rndkey0);            # 1st triplet increment
+       &paddd  ($rndkey0,&QWP(64,"esp"));      # 2nd triplet increment
+       &movdqa ($inout0,&QWP(0,"esp"));        # load byte swap mask
+
+       &movups ($inout1,&QWP(0x30,$inp));
+       &movups ($inout2,&QWP(0x40,$inp));
+       &xorps  ($inout3,$inout1);
+       &movups ($inout1,&QWP(0x50,$inp));
+       &lea    ($inp,&DWP(0x60,$inp));
+       &movdqa (&QWP(48,"esp"),$rndkey1);      # save 1st triplet
+       &pshufb ($rndkey1,$inout0);             # byte swap
+       &xorps  ($inout4,$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &xorps  ($inout5,$inout1);
+       &movdqa (&QWP(64,"esp"),$rndkey0);      # save 2nd triplet
+       &pshufb ($rndkey0,$inout0);             # byte swap
+       &movups (&QWP(0x40,$out),$inout4);
+       &pshufd ($inout0,$rndkey1,3<<6);
+       &movups (&QWP(0x50,$out),$inout5);
+       &lea    ($out,&DWP(0x60,$out));
+
+       &mov    ($rounds,$rounds_);
+       &pshufd ($inout1,$rndkey1,2<<6);
+       &sub    ($len,6);
+       &jnc    (&label("ctr32_loop6"));
+
+       &add    ($len,6);
+       &jz     (&label("ctr32_ret"));
+       &mov    ($key,$key_);
+       &lea    ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+       &movdqa ($inout5,&QWP(32,"esp"));       # pull counter-less ivec
+
+&set_label("ctr32_tail");
+       &por    ($inout0,$inout5);
+       &cmp    ($len,2);
+       &jb     (&label("ctr32_one"));
+
+       &pshufd ($inout2,$rndkey1,1<<6);
+       &por    ($inout1,$inout5);
+       &je     (&label("ctr32_two"));
+
+       &pshufd ($inout3,$rndkey0,3<<6);
+       &por    ($inout2,$inout5);
+       &cmp    ($len,4);
+       &jb     (&label("ctr32_three"));
+
+       &pshufd ($inout4,$rndkey0,2<<6);
+       &por    ($inout3,$inout5);
+       &je     (&label("ctr32_four"));
+
+       &por    ($inout4,$inout5);
+       &call   ("_aesni_encrypt6");
+       &movups ($rndkey1,&QWP(0,$inp));
+       &movups ($rndkey0,&QWP(0x10,$inp));
+       &xorps  ($inout0,$rndkey1);
+       &movups ($rndkey1,&QWP(0x20,$inp));
+       &xorps  ($inout1,$rndkey0);
+       &movups ($rndkey0,&QWP(0x30,$inp));
+       &xorps  ($inout2,$rndkey1);
+       &movups ($rndkey1,&QWP(0x40,$inp));
+       &xorps  ($inout3,$rndkey0);
+       &movups (&QWP(0,$out),$inout0);
+       &xorps  ($inout4,$rndkey1);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &movups (&QWP(0x40,$out),$inout4);
+       &jmp    (&label("ctr32_ret"));
+
+&set_label("ctr32_one_shortcut",16);
+       &movups ($inout0,&QWP(0,$rounds_));     # load ivec
+       &mov    ($rounds,&DWP(240,$key));
+
+&set_label("ctr32_one");
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+       &movups ($in0,&QWP(0,$inp));
+       &xorps  ($in0,$inout0);
+       &movups (&QWP(0,$out),$in0);
+       &jmp    (&label("ctr32_ret"));
+
+&set_label("ctr32_two",16);
+       &call   ("_aesni_encrypt3");
+       &movups ($inout3,&QWP(0,$inp));
+       &movups ($inout4,&QWP(0x10,$inp));
+       &xorps  ($inout0,$inout3);
+       &xorps  ($inout1,$inout4);
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &jmp    (&label("ctr32_ret"));
+
+&set_label("ctr32_three",16);
+       &call   ("_aesni_encrypt3");
+       &movups ($inout3,&QWP(0,$inp));
+       &movups ($inout4,&QWP(0x10,$inp));
+       &xorps  ($inout0,$inout3);
+       &movups ($inout5,&QWP(0x20,$inp));
+       &xorps  ($inout1,$inout4);
+       &movups (&QWP(0,$out),$inout0);
+       &xorps  ($inout2,$inout5);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &jmp    (&label("ctr32_ret"));
+
+&set_label("ctr32_four",16);
+       &call   ("_aesni_encrypt4");
+       &movups ($inout4,&QWP(0,$inp));
+       &movups ($inout5,&QWP(0x10,$inp));
+       &movups ($rndkey1,&QWP(0x20,$inp));
+       &xorps  ($inout0,$inout4);
+       &movups ($rndkey0,&QWP(0x30,$inp));
+       &xorps  ($inout1,$inout5);
+       &movups (&QWP(0,$out),$inout0);
+       &xorps  ($inout2,$rndkey1);
+       &movups (&QWP(0x10,$out),$inout1);
+       &xorps  ($inout3,$rndkey0);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+
+&set_label("ctr32_ret");
+       &mov    ("esp",&DWP(80,"esp"));
+&function_end("aesni_ctr32_encrypt_blocks");
+\f
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#      const AES_KEY *key1, const AES_KEY *key2,
+#      const unsigned char iv[16]);
+#
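+# Consecutive tweaks are obtained by multiplying the previous one by x
+# in GF(2^128) modulo x^128+x^7+x^2+x+1.  The pshufd/pcmpgtd/pand/pxor
+# sequences below implement this doubling branchlessly; in rough C over
+# a little-endian pair of 64-bit halves (an illustration only):
+#
+#      void xts_double_tweak(unsigned long long t[2])
+#      {
+#              unsigned long long carry = (t[1]>>63) ? 0x87 : 0;
+#              t[1] = (t[1]<<1) | (t[0]>>63);
+#              t[0] = (t[0]<<1) ^ carry;
+#      }
+#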
+{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
+
+&function_begin("aesni_xts_encrypt");
+       &mov    ($key,&wparam(4));              # key2
+       &mov    ($inp,&wparam(5));              # clear-text tweak
+
+       &mov    ($rounds,&DWP(240,$key));       # key2->rounds
+       &movups ($inout0,&QWP(0,$inp));
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));              # key1
+
+       &mov    ($key_,"esp");
+       &sub    ("esp",16*7+8);
+       &mov    ($rounds,&DWP(240,$key));       # key1->rounds
+       &and    ("esp",-16);                    # align stack
+
+       &mov    (&DWP(16*6+0,"esp"),0x87);      # compose the magic constant
+       &mov    (&DWP(16*6+4,"esp"),0);
+       &mov    (&DWP(16*6+8,"esp"),1);
+       &mov    (&DWP(16*6+12,"esp"),0);
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save original $len
+       &mov    (&DWP(16*7+4,"esp"),$key_);     # save original %esp
+
+       &movdqa ($tweak,$inout0);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($twmask,&QWP(6*16,"esp"));     # 0x0...010...87
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+
+       &and    ($len,-16);
+       &mov    ($key_,$key);                   # backup $key
+       &mov    ($rounds_,$rounds);             # backup $rounds
+       &sub    ($len,16*6);
+       &jc     (&label("xts_enc_short"));
+
+       &shr    ($rounds,1);
+       &mov    ($rounds_,$rounds);
+       &jmp    (&label("xts_enc_loop6"));
+
+&set_label("xts_enc_loop6",16);
+       for ($i=0;$i<4;$i++) {
+           &pshufd     ($twres,$twtmp,0x13);
+           &pxor       ($twtmp,$twtmp);
+           &movdqa     (&QWP(16*$i,"esp"),$tweak);
+           &paddq      ($tweak,$tweak);        # &psllq($tweak,1);
+           &pand       ($twres,$twmask);       # isolate carry and residue
+           &pcmpgtd    ($twtmp,$tweak);        # broadcast upper bits
+           &pxor       ($tweak,$twres);
+       }
+       &pshufd ($inout5,$twtmp,0x13);
+       &movdqa (&QWP(16*$i++,"esp"),$tweak);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+        &$movekey      ($rndkey0,&QWP(0,$key_));
+       &pand   ($inout5,$twmask);              # isolate carry and residue
+        &movups        ($inout0,&QWP(0,$inp)); # load input
+       &pxor   ($inout5,$tweak);
+
+       # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+       &movdqu ($inout1,&QWP(16*1,$inp));
+        &xorps         ($inout0,$rndkey0);     # input^=rndkey[0]
+       &movdqu ($inout2,&QWP(16*2,$inp));
+        &pxor          ($inout1,$rndkey0);
+       &movdqu ($inout3,&QWP(16*3,$inp));
+        &pxor          ($inout2,$rndkey0);
+       &movdqu ($inout4,&QWP(16*4,$inp));
+        &pxor          ($inout3,$rndkey0);
+       &movdqu ($rndkey1,&QWP(16*5,$inp));
+        &pxor          ($inout4,$rndkey0);
+       &lea    ($inp,&DWP(16*6,$inp));
+       &pxor   ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movdqa (&QWP(16*$i,"esp"),$inout5);    # save last tweak
+       &pxor   ($inout5,$rndkey1);
+
+        &$movekey      ($rndkey1,&QWP(16,$key_));
+        &lea           ($key,&DWP(32,$key_));
+       &pxor   ($inout1,&QWP(16*1,"esp"));
+        &aesenc        ($inout0,$rndkey1);
+       &pxor   ($inout2,&QWP(16*2,"esp"));
+        &aesenc        ($inout1,$rndkey1);
+       &pxor   ($inout3,&QWP(16*3,"esp"));
+        &dec           ($rounds);
+        &aesenc        ($inout2,$rndkey1);
+       &pxor   ($inout4,&QWP(16*4,"esp"));
+        &aesenc        ($inout3,$rndkey1);
+       &pxor           ($inout5,$rndkey0);
+        &aesenc        ($inout4,$rndkey1);
+        &$movekey      ($rndkey0,&QWP(0,$key));
+        &aesenc        ($inout5,$rndkey1);
+       &call           (&label("_aesni_encrypt6_enter"));
+
+       &movdqa ($tweak,&QWP(16*5,"esp"));      # last tweak
+       &pxor   ($twtmp,$twtmp);
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &pcmpgtd        ($twtmp,$tweak);                # broadcast upper bits
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout2,&QWP(16*2,"esp"));
+       &movups (&QWP(16*1,$out),$inout1);
+       &xorps  ($inout3,&QWP(16*3,"esp"));
+       &movups (&QWP(16*2,$out),$inout2);
+       &xorps  ($inout4,&QWP(16*4,"esp"));
+       &movups (&QWP(16*3,$out),$inout3);
+       &xorps  ($inout5,$tweak);
+       &movups (&QWP(16*4,$out),$inout4);
+       &pshufd ($twres,$twtmp,0x13);
+       &movups (&QWP(16*5,$out),$inout5);
+       &lea    ($out,&DWP(16*6,$out));
+       &movdqa ($twmask,&QWP(16*6,"esp"));     # 0x0...010...87
+
+       &pxor   ($twtmp,$twtmp);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &mov    ($rounds,$rounds_);             # restore $rounds
+       &pxor   ($tweak,$twres);
+
+       &sub    ($len,16*6);
+       &jnc    (&label("xts_enc_loop6"));
+
+       &lea    ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+       &mov    ($key,$key_);                   # restore $key
+       &mov    ($rounds_,$rounds);
+
+&set_label("xts_enc_short");
+       &add    ($len,16*6);
+       &jz     (&label("xts_enc_done6x"));
+
+       &movdqa ($inout3,$tweak);               # put aside previous tweak
+       &cmp    ($len,0x20);
+       &jb     (&label("xts_enc_one"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &je     (&label("xts_enc_two"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($inout4,$tweak);               # put aside previous tweak
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &cmp    ($len,0x40);
+       &jb     (&label("xts_enc_three"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($inout5,$tweak);               # put aside previous tweak
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &movdqa (&QWP(16*0,"esp"),$inout3);
+       &movdqa (&QWP(16*1,"esp"),$inout4);
+       &je     (&label("xts_enc_four"));
+
+       &movdqa (&QWP(16*2,"esp"),$inout5);
+       &pshufd ($inout5,$twtmp,0x13);
+       &movdqa (&QWP(16*3,"esp"),$tweak);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($inout5,$twmask);              # isolate carry and residue
+       &pxor   ($inout5,$tweak);
+
+       &movdqu ($inout0,&QWP(16*0,$inp));      # load input
+       &movdqu ($inout1,&QWP(16*1,$inp));
+       &movdqu ($inout2,&QWP(16*2,$inp));
+       &pxor   ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movdqu ($inout3,&QWP(16*3,$inp));
+       &pxor   ($inout1,&QWP(16*1,"esp"));
+       &movdqu ($inout4,&QWP(16*4,$inp));
+       &pxor   ($inout2,&QWP(16*2,"esp"));
+       &lea    ($inp,&DWP(16*5,$inp));
+       &pxor   ($inout3,&QWP(16*3,"esp"));
+       &movdqa (&QWP(16*4,"esp"),$inout5);     # save last tweak
+       &pxor   ($inout4,$inout5);
+
+       &call   ("_aesni_encrypt6");
+
+       &movaps ($tweak,&QWP(16*4,"esp"));      # last tweak
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,&QWP(16*2,"esp"));
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout3,&QWP(16*3,"esp"));
+       &movups (&QWP(16*1,$out),$inout1);
+       &xorps  ($inout4,$tweak);
+       &movups (&QWP(16*2,$out),$inout2);
+       &movups (&QWP(16*3,$out),$inout3);
+       &movups (&QWP(16*4,$out),$inout4);
+       &lea    ($out,&DWP(16*5,$out));
+       &jmp    (&label("xts_enc_done"));
+
+&set_label("xts_enc_one",16);
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &lea    ($inp,&DWP(16*1,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &lea    ($out,&DWP(16*1,$out));
+
+       &movdqa ($tweak,$inout3);               # last tweak
+       &jmp    (&label("xts_enc_done"));
+
+&set_label("xts_enc_two",16);
+       &movaps ($inout4,$tweak);               # put aside last tweak
+
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &lea    ($inp,&DWP(16*2,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       &xorps  ($inout1,$inout4);
+       &xorps  ($inout2,$inout2);
+
+       &call   ("_aesni_encrypt3");
+
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &xorps  ($inout1,$inout4);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &movups (&QWP(16*1,$out),$inout1);
+       &lea    ($out,&DWP(16*2,$out));
+
+       &movdqa ($tweak,$inout4);               # last tweak
+       &jmp    (&label("xts_enc_done"));
+
+&set_label("xts_enc_three",16);
+       &movaps ($inout5,$tweak);               # put aside last tweak
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &movups ($inout2,&QWP(16*2,$inp));
+       &lea    ($inp,&DWP(16*3,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       &xorps  ($inout1,$inout4);
+       &xorps  ($inout2,$inout5);
+
+       &call   ("_aesni_encrypt3");
+
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &xorps  ($inout1,$inout4);
+       &xorps  ($inout2,$inout5);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &movups (&QWP(16*1,$out),$inout1);
+       &movups (&QWP(16*2,$out),$inout2);
+       &lea    ($out,&DWP(16*3,$out));
+
+       &movdqa ($tweak,$inout5);               # last tweak
+       &jmp    (&label("xts_enc_done"));
+
+&set_label("xts_enc_four",16);
+       &movaps ($inout4,$tweak);               # put aside last tweak
+
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &movups ($inout2,&QWP(16*2,$inp));
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movups ($inout3,&QWP(16*3,$inp));
+       &lea    ($inp,&DWP(16*4,$inp));
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,$inout5);
+       &xorps  ($inout3,$inout4);
+
+       &call   ("_aesni_encrypt4");
+
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,$inout5);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout3,$inout4);
+       &movups (&QWP(16*1,$out),$inout1);
+       &movups (&QWP(16*2,$out),$inout2);
+       &movups (&QWP(16*3,$out),$inout3);
+       &lea    ($out,&DWP(16*4,$out));
+
+       &movdqa ($tweak,$inout4);               # last tweak
+       &jmp    (&label("xts_enc_done"));
+
+&set_label("xts_enc_done6x",16);               # $tweak is pre-calculated
+       &mov    ($len,&DWP(16*7+0,"esp"));      # restore original $len
+       &and    ($len,15);
+       &jz     (&label("xts_enc_ret"));
+       &movdqa ($inout3,$tweak);
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save $len%16
+       &jmp    (&label("xts_enc_steal"));
+
+&set_label("xts_enc_done",16);
+       &mov    ($len,&DWP(16*7+0,"esp"));      # restore original $len
+       &pxor   ($twtmp,$twtmp);
+       &and    ($len,15);
+       &jz     (&label("xts_enc_ret"));
+
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save $len%16
+       &pshufd ($inout3,$twtmp,0x13);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($inout3,&QWP(16*6,"esp"));     # isolate carry and residue
+       &pxor   ($inout3,$tweak);
+
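+# ciphertext stealing: the first len%16 bytes of the last full
+# ciphertext block (already written at $out-16) become the short final
+# block, the partial plaintext tail takes their place, and the mixed
+# block at $out-16 is then re-encrypted with one more tweak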
+&set_label("xts_enc_steal");
+       &movz   ($rounds,&BP(0,$inp));
+       &movz   ($key,&BP(-16,$out));
+       &lea    ($inp,&DWP(1,$inp));
+       &mov    (&BP(-16,$out),&LB($rounds));
+       &mov    (&BP(0,$out),&LB($key));
+       &lea    ($out,&DWP(1,$out));
+       &sub    ($len,1);
+       &jnz    (&label("xts_enc_steal"));
+
+       &sub    ($out,&DWP(16*7+0,"esp"));      # rewind $out
+       &mov    ($key,$key_);                   # restore $key
+       &mov    ($rounds,$rounds_);             # restore $rounds
+
+       &movups ($inout0,&QWP(-16,$out));       # load input
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &movups (&QWP(-16,$out),$inout0);       # write output
+
+&set_label("xts_enc_ret");
+       &mov    ("esp",&DWP(16*7+4,"esp"));     # restore %esp
+&function_end("aesni_xts_encrypt");
+
+&function_begin("aesni_xts_decrypt");
+       &mov    ($key,&wparam(4));              # key2
+       &mov    ($inp,&wparam(5));              # clear-text tweak
+
+       &mov    ($rounds,&DWP(240,$key));       # key2->rounds
+       &movups ($inout0,&QWP(0,$inp));
+       if ($inline)
+       {   &aesni_inline_generate1("enc");     }
+       else
+       {   &call       ("_aesni_encrypt1");    }
+
+       &mov    ($inp,&wparam(0));
+       &mov    ($out,&wparam(1));
+       &mov    ($len,&wparam(2));
+       &mov    ($key,&wparam(3));              # key1
+
+       &mov    ($key_,"esp");
+       &sub    ("esp",16*7+8);
+       &and    ("esp",-16);                    # align stack
+
+       &xor    ($rounds_,$rounds_);            # if(len%16) len-=16;
+       &test   ($len,15);
+       &setnz  (&LB($rounds_));
+       &shl    ($rounds_,4);
+       &sub    ($len,$rounds_);
+
+       &mov    (&DWP(16*6+0,"esp"),0x87);      # compose the magic constant
+       &mov    (&DWP(16*6+4,"esp"),0);
+       &mov    (&DWP(16*6+8,"esp"),1);
+       &mov    (&DWP(16*6+12,"esp"),0);
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save original $len
+       &mov    (&DWP(16*7+4,"esp"),$key_);     # save original %esp
+
+       &mov    ($rounds,&DWP(240,$key));       # key1->rounds
+       &mov    ($key_,$key);                   # backup $key
+       &mov    ($rounds_,$rounds);             # backup $rounds
+
+       &movdqa ($tweak,$inout0);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($twmask,&QWP(6*16,"esp"));     # 0x0...010...87
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+
+       &and    ($len,-16);
+       &sub    ($len,16*6);
+       &jc     (&label("xts_dec_short"));
+
+       &shr    ($rounds,1);
+       &mov    ($rounds_,$rounds);
+       &jmp    (&label("xts_dec_loop6"));
+
+&set_label("xts_dec_loop6",16);
+       for ($i=0;$i<4;$i++) {
+           &pshufd     ($twres,$twtmp,0x13);
+           &pxor       ($twtmp,$twtmp);
+           &movdqa     (&QWP(16*$i,"esp"),$tweak);
+           &paddq      ($tweak,$tweak);        # &psllq($tweak,1);
+           &pand       ($twres,$twmask);       # isolate carry and residue
+           &pcmpgtd    ($twtmp,$tweak);        # broadcast upper bits
+           &pxor       ($tweak,$twres);
+       }
+       &pshufd ($inout5,$twtmp,0x13);
+       &movdqa (&QWP(16*$i++,"esp"),$tweak);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+        &$movekey      ($rndkey0,&QWP(0,$key_));
+       &pand   ($inout5,$twmask);              # isolate carry and residue
+        &movups        ($inout0,&QWP(0,$inp)); # load input
+       &pxor   ($inout5,$tweak);
+
+       # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+       &movdqu ($inout1,&QWP(16*1,$inp));
+        &xorps         ($inout0,$rndkey0);     # input^=rndkey[0]
+       &movdqu ($inout2,&QWP(16*2,$inp));
+        &pxor          ($inout1,$rndkey0);
+       &movdqu ($inout3,&QWP(16*3,$inp));
+        &pxor          ($inout2,$rndkey0);
+       &movdqu ($inout4,&QWP(16*4,$inp));
+        &pxor          ($inout3,$rndkey0);
+       &movdqu ($rndkey1,&QWP(16*5,$inp));
+        &pxor          ($inout4,$rndkey0);
+       &lea    ($inp,&DWP(16*6,$inp));
+       &pxor   ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movdqa (&QWP(16*$i,"esp"),$inout5);    # save last tweak
+       &pxor   ($inout5,$rndkey1);
+
+        &$movekey      ($rndkey1,&QWP(16,$key_));
+        &lea           ($key,&DWP(32,$key_));
+       &pxor   ($inout1,&QWP(16*1,"esp"));
+        &aesdec        ($inout0,$rndkey1);
+       &pxor   ($inout2,&QWP(16*2,"esp"));
+        &aesdec        ($inout1,$rndkey1);
+       &pxor   ($inout3,&QWP(16*3,"esp"));
+        &dec           ($rounds);
+        &aesdec        ($inout2,$rndkey1);
+       &pxor   ($inout4,&QWP(16*4,"esp"));
+        &aesdec        ($inout3,$rndkey1);
+       &pxor           ($inout5,$rndkey0);
+        &aesdec        ($inout4,$rndkey1);
+        &$movekey      ($rndkey0,&QWP(0,$key));
+        &aesdec        ($inout5,$rndkey1);
+       &call           (&label("_aesni_decrypt6_enter"));
+
+       &movdqa ($tweak,&QWP(16*5,"esp"));      # last tweak
+       &pxor   ($twtmp,$twtmp);
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &pcmpgtd        ($twtmp,$tweak);                # broadcast upper bits
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout2,&QWP(16*2,"esp"));
+       &movups (&QWP(16*1,$out),$inout1);
+       &xorps  ($inout3,&QWP(16*3,"esp"));
+       &movups (&QWP(16*2,$out),$inout2);
+       &xorps  ($inout4,&QWP(16*4,"esp"));
+       &movups (&QWP(16*3,$out),$inout3);
+       &xorps  ($inout5,$tweak);
+       &movups (&QWP(16*4,$out),$inout4);
+       &pshufd ($twres,$twtmp,0x13);
+       &movups (&QWP(16*5,$out),$inout5);
+       &lea    ($out,&DWP(16*6,$out));
+       &movdqa ($twmask,&QWP(16*6,"esp"));     # 0x0...010...87
+
+       &pxor   ($twtmp,$twtmp);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &mov    ($rounds,$rounds_);             # restore $rounds
+       &pxor   ($tweak,$twres);
+
+       &sub    ($len,16*6);
+       &jnc    (&label("xts_dec_loop6"));
+
+       &lea    ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
+       &mov    ($key,$key_);                   # restore $key
+       &mov    ($rounds_,$rounds);
+
+&set_label("xts_dec_short");
+       &add    ($len,16*6);
+       &jz     (&label("xts_dec_done6x"));
+
+       &movdqa ($inout3,$tweak);               # put aside previous tweak
+       &cmp    ($len,0x20);
+       &jb     (&label("xts_dec_one"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &je     (&label("xts_dec_two"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($inout4,$tweak);               # put aside previous tweak
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &cmp    ($len,0x40);
+       &jb     (&label("xts_dec_three"));
+
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($inout5,$tweak);               # put aside previous tweak
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+       &movdqa (&QWP(16*0,"esp"),$inout3);
+       &movdqa (&QWP(16*1,"esp"),$inout4);
+       &je     (&label("xts_dec_four"));
+
+       &movdqa (&QWP(16*2,"esp"),$inout5);
+       &pshufd ($inout5,$twtmp,0x13);
+       &movdqa (&QWP(16*3,"esp"),$tweak);
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($inout5,$twmask);              # isolate carry and residue
+       &pxor   ($inout5,$tweak);
+
+       &movdqu ($inout0,&QWP(16*0,$inp));      # load input
+       &movdqu ($inout1,&QWP(16*1,$inp));
+       &movdqu ($inout2,&QWP(16*2,$inp));
+       &pxor   ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movdqu ($inout3,&QWP(16*3,$inp));
+       &pxor   ($inout1,&QWP(16*1,"esp"));
+       &movdqu ($inout4,&QWP(16*4,$inp));
+       &pxor   ($inout2,&QWP(16*2,"esp"));
+       &lea    ($inp,&DWP(16*5,$inp));
+       &pxor   ($inout3,&QWP(16*3,"esp"));
+       &movdqa (&QWP(16*4,"esp"),$inout5);     # save last tweak
+       &pxor   ($inout4,$inout5);
+
+       &call   ("_aesni_decrypt6");
+
+       &movaps ($tweak,&QWP(16*4,"esp"));      # last tweak
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,&QWP(16*2,"esp"));
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout3,&QWP(16*3,"esp"));
+       &movups (&QWP(16*1,$out),$inout1);
+       &xorps  ($inout4,$tweak);
+       &movups (&QWP(16*2,$out),$inout2);
+       &movups (&QWP(16*3,$out),$inout3);
+       &movups (&QWP(16*4,$out),$inout4);
+       &lea    ($out,&DWP(16*5,$out));
+       &jmp    (&label("xts_dec_done"));
+
+&set_label("xts_dec_one",16);
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &lea    ($inp,&DWP(16*1,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &lea    ($out,&DWP(16*1,$out));
+
+       &movdqa ($tweak,$inout3);               # last tweak
+       &jmp    (&label("xts_dec_done"));
+
+&set_label("xts_dec_two",16);
+       &movaps ($inout4,$tweak);               # put aside last tweak
+
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &lea    ($inp,&DWP(16*2,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       &xorps  ($inout1,$inout4);
+
+       &call   ("_aesni_decrypt3");
+
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &xorps  ($inout1,$inout4);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &movups (&QWP(16*1,$out),$inout1);
+       &lea    ($out,&DWP(16*2,$out));
+
+       &movdqa ($tweak,$inout4);               # last tweak
+       &jmp    (&label("xts_dec_done"));
+
+&set_label("xts_dec_three",16);
+       &movaps ($inout5,$tweak);               # put aside last tweak
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &movups ($inout2,&QWP(16*2,$inp));
+       &lea    ($inp,&DWP(16*3,$inp));
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       &xorps  ($inout1,$inout4);
+       &xorps  ($inout2,$inout5);
+
+       &call   ("_aesni_decrypt3");
+
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &xorps  ($inout1,$inout4);
+       &xorps  ($inout2,$inout5);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &movups (&QWP(16*1,$out),$inout1);
+       &movups (&QWP(16*2,$out),$inout2);
+       &lea    ($out,&DWP(16*3,$out));
+
+       &movdqa ($tweak,$inout5);               # last tweak
+       &jmp    (&label("xts_dec_done"));
+
+&set_label("xts_dec_four",16);
+       &movaps ($inout4,$tweak);               # put aside last tweak
+
+       &movups ($inout0,&QWP(16*0,$inp));      # load input
+       &movups ($inout1,&QWP(16*1,$inp));
+       &movups ($inout2,&QWP(16*2,$inp));
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # input^=tweak
+       &movups ($inout3,&QWP(16*3,$inp));
+       &lea    ($inp,&DWP(16*4,$inp));
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,$inout5);
+       &xorps  ($inout3,$inout4);
+
+       &call   ("_aesni_decrypt4");
+
+       &xorps  ($inout0,&QWP(16*0,"esp"));     # output^=tweak
+       &xorps  ($inout1,&QWP(16*1,"esp"));
+       &xorps  ($inout2,$inout5);
+       &movups (&QWP(16*0,$out),$inout0);      # write output
+       &xorps  ($inout3,$inout4);
+       &movups (&QWP(16*1,$out),$inout1);
+       &movups (&QWP(16*2,$out),$inout2);
+       &movups (&QWP(16*3,$out),$inout3);
+       &lea    ($out,&DWP(16*4,$out));
+
+       &movdqa ($tweak,$inout4);               # last tweak
+       &jmp    (&label("xts_dec_done"));
+
+&set_label("xts_dec_done6x",16);               # $tweak is pre-calculated
+       &mov    ($len,&DWP(16*7+0,"esp"));      # restore original $len
+       &and    ($len,15);
+       &jz     (&label("xts_dec_ret"));
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save $len%16
+       &jmp    (&label("xts_dec_only_one_more"));
+
+&set_label("xts_dec_done",16);
+       &mov    ($len,&DWP(16*7+0,"esp"));      # restore original $len
+       &pxor   ($twtmp,$twtmp);
+       &and    ($len,15);
+       &jz     (&label("xts_dec_ret"));
+
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &mov    (&DWP(16*7+0,"esp"),$len);      # save $len%16
+       &pshufd ($twres,$twtmp,0x13);
+       &pxor   ($twtmp,$twtmp);
+       &movdqa ($twmask,&QWP(16*6,"esp"));
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($twres,$twmask);               # isolate carry and residue
+       &pcmpgtd($twtmp,$tweak);                # broadcast upper bits
+       &pxor   ($tweak,$twres);
+
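+# note that tweaks are consumed out of order here: the last full
+# ciphertext block is decrypted with the *final* tweak, and the
+# reassembled stolen block with the tweak preceding it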
+&set_label("xts_dec_only_one_more");
+       &pshufd ($inout3,$twtmp,0x13);
+       &movdqa ($inout4,$tweak);               # put aside previous tweak
+       &paddq  ($tweak,$tweak);                # &psllq($tweak,1);
+       &pand   ($inout3,$twmask);              # isolate carry and residue
+       &pxor   ($inout3,$tweak);
+
+       &mov    ($key,$key_);                   # restore $key
+       &mov    ($rounds,$rounds_);             # restore $rounds
+
+       &movups ($inout0,&QWP(0,$inp));         # load input
+       &xorps  ($inout0,$inout3);              # input^=tweak
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &xorps  ($inout0,$inout3);              # output^=tweak
+       &movups (&QWP(0,$out),$inout0);         # write output
+
+&set_label("xts_dec_steal");
+       &movz   ($rounds,&BP(16,$inp));
+       &movz   ($key,&BP(0,$out));
+       &lea    ($inp,&DWP(1,$inp));
+       &mov    (&BP(0,$out),&LB($rounds));
+       &mov    (&BP(16,$out),&LB($key));
+       &lea    ($out,&DWP(1,$out));
+       &sub    ($len,1);
+       &jnz    (&label("xts_dec_steal"));
+
+       &sub    ($out,&DWP(16*7+0,"esp"));      # rewind $out
+       &mov    ($key,$key_);                   # restore $key
+       &mov    ($rounds,$rounds_);             # restore $rounds
+
+       &movups ($inout0,&QWP(0,$out));         # load input
+       &xorps  ($inout0,$inout4);              # input^=tweak
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &xorps  ($inout0,$inout4);              # output^=tweak
+       &movups (&QWP(0,$out),$inout0);         # write output
+
+&set_label("xts_dec_ret");
+       &mov    ("esp",&DWP(16*7+4,"esp"));     # restore %esp
+&function_end("aesni_xts_decrypt");
+}
+}
+\f
+######################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#                           size_t length, const AES_KEY *key,
+#                           unsigned char *ivp,const int enc);
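+#
+# For reference, the chaining implemented below in rough equation form
+# (an illustration only; enc selects the direction and the caller's
+# ivp is updated with the last ciphertext block either way):
+#
+#      encrypt: C[i] = Encrypt(P[i] ^ C[i-1]),  C[-1] = *ivp
+#      decrypt: P[i] = Decrypt(C[i]) ^ C[i-1],  C[-1] = *ivp
+#      *ivp = C[n-1]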
+&function_begin("${PREFIX}_cbc_encrypt");
+       &mov    ($inp,&wparam(0));
+       &mov    ($rounds_,"esp");
+       &mov    ($out,&wparam(1));
+       &sub    ($rounds_,24);
+       &mov    ($len,&wparam(2));
+       &and    ($rounds_,-16);
+       &mov    ($key,&wparam(3));
+       &mov    ($key_,&wparam(4));
+       &test   ($len,$len);
+       &jz     (&label("cbc_abort"));
+
+       &cmp    (&wparam(5),0);
+       &xchg   ($rounds_,"esp");               # alloca
+       &movups ($ivec,&QWP(0,$key_));          # load IV
+       &mov    ($rounds,&DWP(240,$key));
+       &mov    ($key_,$key);                   # backup $key
+       &mov    (&DWP(16,"esp"),$rounds_);      # save original %esp
+       &mov    ($rounds_,$rounds);             # backup $rounds
+       &je     (&label("cbc_decrypt"));
+
+       &movaps ($inout0,$ivec);
+       &cmp    ($len,16);
+       &jb     (&label("cbc_enc_tail"));
+       &sub    ($len,16);
+       &jmp    (&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+       &movups ($ivec,&QWP(0,$inp));           # input actually
+       &lea    ($inp,&DWP(16,$inp));
+       if ($inline)
+       {   &aesni_inline_generate1("enc",$inout0,$ivec);       }
+       else
+       {   &xorps($inout0,$ivec); &call("_aesni_encrypt1");    }
+       &mov    ($rounds,$rounds_);     # restore $rounds
+       &mov    ($key,$key_);           # restore $key
+       &movups (&QWP(0,$out),$inout0); # store output
+       &lea    ($out,&DWP(16,$out));
+       &sub    ($len,16);
+       &jnc    (&label("cbc_enc_loop"));
+       &add    ($len,16);
+       &jnz    (&label("cbc_enc_tail"));
+       &movaps ($ivec,$inout0);
+       &jmp    (&label("cbc_ret"));
+
+&set_label("cbc_enc_tail");
+       &mov    ("ecx",$len);           # zaps $rounds
+       &data_word(0xA4F3F689);         # rep movsb
+       &mov    ("ecx",16);             # zero tail
+       &sub    ("ecx",$len);
+       &xor    ("eax","eax");          # zaps $len
+       &data_word(0xAAF3F689);         # rep stosb
+       &lea    ($out,&DWP(-16,$out));  # rewind $out by 1 block
+       &mov    ($rounds,$rounds_);     # restore $rounds
+       &mov    ($inp,$out);            # $inp and $out are the same
+       &mov    ($key,$key_);           # restore $key
+       &jmp    (&label("cbc_enc_loop"));
+######################################################################
+&set_label("cbc_decrypt",16);
+       &cmp    ($len,0x50);
+       &jbe    (&label("cbc_dec_tail"));
+       &movaps (&QWP(0,"esp"),$ivec);          # save IV
+       &sub    ($len,0x50);
+       &jmp    (&label("cbc_dec_loop6_enter"));
+
+&set_label("cbc_dec_loop6",16);
+       &movaps (&QWP(0,"esp"),$rndkey0);       # save IV
+       &movups (&QWP(0,$out),$inout5);
+       &lea    ($out,&DWP(0x10,$out));
+&set_label("cbc_dec_loop6_enter");
+       &movdqu ($inout0,&QWP(0,$inp));
+       &movdqu ($inout1,&QWP(0x10,$inp));
+       &movdqu ($inout2,&QWP(0x20,$inp));
+       &movdqu ($inout3,&QWP(0x30,$inp));
+       &movdqu ($inout4,&QWP(0x40,$inp));
+       &movdqu ($inout5,&QWP(0x50,$inp));
+
+       &call   ("_aesni_decrypt6");
+
+       &movups ($rndkey1,&QWP(0,$inp));
+       &movups ($rndkey0,&QWP(0x10,$inp));
+       &xorps  ($inout0,&QWP(0,"esp"));        # ^=IV
+       &xorps  ($inout1,$rndkey1);
+       &movups ($rndkey1,&QWP(0x20,$inp));
+       &xorps  ($inout2,$rndkey0);
+       &movups ($rndkey0,&QWP(0x30,$inp));
+       &xorps  ($inout3,$rndkey1);
+       &movups ($rndkey1,&QWP(0x40,$inp));
+       &xorps  ($inout4,$rndkey0);
+       &movups ($rndkey0,&QWP(0x50,$inp));     # IV
+       &xorps  ($inout5,$rndkey1);
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &lea    ($inp,&DWP(0x60,$inp));
+       &movups (&QWP(0x20,$out),$inout2);
+       &mov    ($rounds,$rounds_);             # restore $rounds
+       &movups (&QWP(0x30,$out),$inout3);
+       &mov    ($key,$key_);                   # restore $key
+       &movups (&QWP(0x40,$out),$inout4);
+       &lea    ($out,&DWP(0x50,$out));
+       &sub    ($len,0x60);
+       &ja     (&label("cbc_dec_loop6"));
+
+       &movaps ($inout0,$inout5);
+       &movaps ($ivec,$rndkey0);
+       &add    ($len,0x50);
+       &jle    (&label("cbc_dec_tail_collected"));
+       &movups (&QWP(0,$out),$inout0);
+       &lea    ($out,&DWP(0x10,$out));
+&set_label("cbc_dec_tail");
+       &movups ($inout0,&QWP(0,$inp));
+       &movaps ($in0,$inout0);
+       &cmp    ($len,0x10);
+       &jbe    (&label("cbc_dec_one"));
+
+       &movups ($inout1,&QWP(0x10,$inp));
+       &movaps ($in1,$inout1);
+       &cmp    ($len,0x20);
+       &jbe    (&label("cbc_dec_two"));
+
+       &movups ($inout2,&QWP(0x20,$inp));
+       &cmp    ($len,0x30);
+       &jbe    (&label("cbc_dec_three"));
+
+       &movups ($inout3,&QWP(0x30,$inp));
+       &cmp    ($len,0x40);
+       &jbe    (&label("cbc_dec_four"));
+
+       &movups ($inout4,&QWP(0x40,$inp));
+       &movaps (&QWP(0,"esp"),$ivec);          # save IV
+       &movups ($inout0,&QWP(0,$inp));
+       &xorps  ($inout5,$inout5);
+       &call   ("_aesni_decrypt6");
+       &movups ($rndkey1,&QWP(0,$inp));
+       &movups ($rndkey0,&QWP(0x10,$inp));
+       &xorps  ($inout0,&QWP(0,"esp"));        # ^= IV
+       &xorps  ($inout1,$rndkey1);
+       &movups ($rndkey1,&QWP(0x20,$inp));
+       &xorps  ($inout2,$rndkey0);
+       &movups ($rndkey0,&QWP(0x30,$inp));
+       &xorps  ($inout3,$rndkey1);
+       &movups ($ivec,&QWP(0x40,$inp));        # IV
+       &xorps  ($inout4,$rndkey0);
+       &movups (&QWP(0,$out),$inout0);
+       &movups (&QWP(0x10,$out),$inout1);
+       &movups (&QWP(0x20,$out),$inout2);
+       &movups (&QWP(0x30,$out),$inout3);
+       &lea    ($out,&DWP(0x40,$out));
+       &movaps ($inout0,$inout4);
+       &sub    ($len,0x50);
+       &jmp    (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_one",16);
+       if ($inline)
+       {   &aesni_inline_generate1("dec");     }
+       else
+       {   &call       ("_aesni_decrypt1");    }
+       &xorps  ($inout0,$ivec);
+       &movaps ($ivec,$in0);
+       &sub    ($len,0x10);
+       &jmp    (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_two",16);
+       &xorps  ($inout2,$inout2);
+       &call   ("_aesni_decrypt3");
+       &xorps  ($inout0,$ivec);
+       &xorps  ($inout1,$in0);
+       &movups (&QWP(0,$out),$inout0);
+       &movaps ($inout0,$inout1);
+       &lea    ($out,&DWP(0x10,$out));
+       &movaps ($ivec,$in1);
+       &sub    ($len,0x20);
+       &jmp    (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_three",16);
+       &call   ("_aesni_decrypt3");
+       &xorps  ($inout0,$ivec);
+       &xorps  ($inout1,$in0);
+       &xorps  ($inout2,$in1);
+       &movups (&QWP(0,$out),$inout0);
+       &movaps ($inout0,$inout2);
+       &movups (&QWP(0x10,$out),$inout1);
+       &lea    ($out,&DWP(0x20,$out));
+       &movups ($ivec,&QWP(0x20,$inp));
+       &sub    ($len,0x30);
+       &jmp    (&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_four",16);
+       &call   ("_aesni_decrypt4");
+       &movups ($rndkey1,&QWP(0x10,$inp));
+       &movups ($rndkey0,&QWP(0x20,$inp));
+       &xorps  ($inout0,$ivec);
+       &movups ($ivec,&QWP(0x30,$inp));
+       &xorps  ($inout1,$in0);
+       &movups (&QWP(0,$out),$inout0);
+       &xorps  ($inout2,$rndkey1);
+       &movups (&QWP(0x10,$out),$inout1);
+       &xorps  ($inout3,$rndkey0);
+       &movups (&QWP(0x20,$out),$inout2);
+       &lea    ($out,&DWP(0x30,$out));
+       &movaps ($inout0,$inout3);
+       &sub    ($len,0x40);
+
+&set_label("cbc_dec_tail_collected");
+       &and    ($len,15);
+       &jnz    (&label("cbc_dec_tail_partial"));
+       &movups (&QWP(0,$out),$inout0);
+       &jmp    (&label("cbc_ret"));
+
+&set_label("cbc_dec_tail_partial",16);
+       &movaps (&QWP(0,"esp"),$inout0);
+       &mov    ("ecx",16);
+       &mov    ($inp,"esp");
+       &sub    ("ecx",$len);
+       &data_word(0xA4F3F689);         # rep movsb
+
+&set_label("cbc_ret");
+       &mov    ("esp",&DWP(16,"esp")); # pull original %esp
+       &mov    ($key_,&wparam(4));
+       &movups (&QWP(0,$key_),$ivec);  # output IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+\f
+######################################################################
+# Mechanical port from aesni-x86_64.pl.
+#
+# _aesni_set_encrypt_key is a private interface,
+# input:
+#      "eax"   const unsigned char *userKey
+#      $rounds int bits
+#      $key    AES_KEY *key
+# output:
+#      "eax"   return code
+#      $rounds rounds-1
+
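+# The shufps/xorps ladder in key_128* below is the standard FIPS-197
+# expansion step: with the previous round key as 32-bit words w[0..3]
+# and t = SubWord(RotWord(w[3]))^rcon delivered by aeskeygenassist,
+# it computes, roughly (an illustration only):
+#
+#      w[0] ^= t; w[1] ^= w[0]; w[2] ^= w[1]; w[3] ^= w[2];
+#
+# The 192- and 256-bit paths apply the same idea to wider schedules.
+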
+&function_begin_B("_aesni_set_encrypt_key");
+       &test   ("eax","eax");
+       &jz     (&label("bad_pointer"));
+       &test   ($key,$key);
+       &jz     (&label("bad_pointer"));
+
+       &movups ("xmm0",&QWP(0,"eax")); # pull first 128 bits of *userKey
+       &xorps  ("xmm4","xmm4");        # low dword of xmm4 is assumed 0
+       &lea    ($key,&DWP(16,$key));
+       &cmp    ($rounds,256);
+       &je     (&label("14rounds"));
+       &cmp    ($rounds,192);
+       &je     (&label("12rounds"));
+       &cmp    ($rounds,128);
+       &jne    (&label("bad_keybits"));
+
+&set_label("10rounds",16);
+       &mov            ($rounds,9);
+       &$movekey       (&QWP(-16,$key),"xmm0");        # round 0
+       &aeskeygenassist("xmm1","xmm0",0x01);           # round 1
+       &call           (&label("key_128_cold"));
+       &aeskeygenassist("xmm1","xmm0",0x2);            # round 2
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x04);           # round 3
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x08);           # round 4
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x10);           # round 5
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x20);           # round 6
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x40);           # round 7
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x80);           # round 8
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x1b);           # round 9
+       &call           (&label("key_128"));
+       &aeskeygenassist("xmm1","xmm0",0x36);           # round 10
+       &call           (&label("key_128"));
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &mov            (&DWP(80,$key),$rounds);
+       &xor            ("eax","eax");
+       &ret();
+
+&set_label("key_128",16);
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &lea            ($key,&DWP(16,$key));
+&set_label("key_128_cold");
+       &shufps         ("xmm4","xmm0",0b00010000);
+       &xorps          ("xmm0","xmm4");
+       &shufps         ("xmm4","xmm0",0b10001100);
+       &xorps          ("xmm0","xmm4");
+       &shufps         ("xmm1","xmm1",0b11111111);     # critical path
+       &xorps          ("xmm0","xmm1");
+       &ret();
+
+&set_label("12rounds",16);
+       &movq           ("xmm2",&QWP(16,"eax"));        # remaining 1/3 of *userKey
+       &mov            ($rounds,11);
+       &$movekey       (&QWP(-16,$key),"xmm0")         # round 0
+       &aeskeygenassist("xmm1","xmm2",0x01);           # round 1,2
+       &call           (&label("key_192a_cold"));
+       &aeskeygenassist("xmm1","xmm2",0x02);           # round 2,3
+       &call           (&label("key_192b"));
+       &aeskeygenassist("xmm1","xmm2",0x04);           # round 4,5
+       &call           (&label("key_192a"));
+       &aeskeygenassist("xmm1","xmm2",0x08);           # round 5,6
+       &call           (&label("key_192b"));
+       &aeskeygenassist("xmm1","xmm2",0x10);           # round 7,8
+       &call           (&label("key_192a"));
+       &aeskeygenassist("xmm1","xmm2",0x20);           # round 8,9
+       &call           (&label("key_192b"));
+       &aeskeygenassist("xmm1","xmm2",0x40);           # round 10,11
+       &call           (&label("key_192a"));
+       &aeskeygenassist("xmm1","xmm2",0x80);           # round 11,12
+       &call           (&label("key_192b"));
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &mov            (&DWP(48,$key),$rounds);
+       &xor            ("eax","eax");
+       &ret();
+
+&set_label("key_192a",16);
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &lea            ($key,&DWP(16,$key));
+&set_label("key_192a_cold",16);
+       &movaps         ("xmm5","xmm2");
+&set_label("key_192b_warm");
+       &shufps         ("xmm4","xmm0",0b00010000);
+       &movdqa         ("xmm3","xmm2");
+       &xorps          ("xmm0","xmm4");
+       &shufps         ("xmm4","xmm0",0b10001100);
+       &pslldq         ("xmm3",4);
+       &xorps          ("xmm0","xmm4");
+       &pshufd         ("xmm1","xmm1",0b01010101);     # critical path
+       &pxor           ("xmm2","xmm3");
+       &pxor           ("xmm0","xmm1");
+       &pshufd         ("xmm3","xmm0",0b11111111);
+       &pxor           ("xmm2","xmm3");
+       &ret();
+
+&set_label("key_192b",16);
+       &movaps         ("xmm3","xmm0");
+       &shufps         ("xmm5","xmm0",0b01000100);
+       &$movekey       (&QWP(0,$key),"xmm5");
+       &shufps         ("xmm3","xmm2",0b01001110);
+       &$movekey       (&QWP(16,$key),"xmm3");
+       &lea            ($key,&DWP(32,$key));
+       &jmp            (&label("key_192b_warm"));
+
+&set_label("14rounds",16);
+       &movups         ("xmm2",&QWP(16,"eax"));        # remaining half of *userKey
+       &mov            ($rounds,13);
+       &lea            ($key,&DWP(16,$key));
+       &$movekey       (&QWP(-32,$key),"xmm0");        # round 0
+       &$movekey       (&QWP(-16,$key),"xmm2");        # round 1
+       &aeskeygenassist("xmm1","xmm2",0x01);           # round 2
+       &call           (&label("key_256a_cold"));
+       &aeskeygenassist("xmm1","xmm0",0x01);           # round 3
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x02);           # round 4
+       &call           (&label("key_256a"));
+       &aeskeygenassist("xmm1","xmm0",0x02);           # round 5
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x04);           # round 6
+       &call           (&label("key_256a"));
+       &aeskeygenassist("xmm1","xmm0",0x04);           # round 7
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x08);           # round 8
+       &call           (&label("key_256a"));
+       &aeskeygenassist("xmm1","xmm0",0x08);           # round 9
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x10);           # round 10
+       &call           (&label("key_256a"));
+       &aeskeygenassist("xmm1","xmm0",0x10);           # round 11
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x20);           # round 12
+       &call           (&label("key_256a"));
+       &aeskeygenassist("xmm1","xmm0",0x20);           # round 13
+       &call           (&label("key_256b"));
+       &aeskeygenassist("xmm1","xmm2",0x40);           # round 14
+       &call           (&label("key_256a"));
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &mov            (&DWP(16,$key),$rounds);
+       &xor            ("eax","eax");
+       &ret();
+
+&set_label("key_256a",16);
+       &$movekey       (&QWP(0,$key),"xmm2");
+       &lea            ($key,&DWP(16,$key));
+&set_label("key_256a_cold");
+       &shufps         ("xmm4","xmm0",0b00010000);
+       &xorps          ("xmm0","xmm4");
+       &shufps         ("xmm4","xmm0",0b10001100);
+       &xorps          ("xmm0","xmm4");
+       &shufps         ("xmm1","xmm1",0b11111111);     # critical path
+       &xorps          ("xmm0","xmm1");
+       &ret();
+
+&set_label("key_256b",16);
+       &$movekey       (&QWP(0,$key),"xmm0");
+       &lea            ($key,&DWP(16,$key));
+
+       &shufps         ("xmm4","xmm2",0b00010000);
+       &xorps          ("xmm2","xmm4");
+       &shufps         ("xmm4","xmm2",0b10001100);
+       &xorps          ("xmm2","xmm4");
+       &shufps         ("xmm1","xmm1",0b10101010);     # critical path
+       &xorps          ("xmm2","xmm1");
+       &ret();
+
+&set_label("bad_pointer",4);
+       &mov    ("eax",-1);
+       &ret    ();
+&set_label("bad_keybits",4);
+       &mov    ("eax",-2);
+       &ret    ();
+&function_end_B("_aesni_set_encrypt_key");
+
+# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_encrypt_key");
+       &mov    ("eax",&wparam(0));
+       &mov    ($rounds,&wparam(1));
+       &mov    ($key,&wparam(2));
+       &call   ("_aesni_set_encrypt_key");
+       &ret    ();
+&function_end_B("${PREFIX}_set_encrypt_key");
+
+# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_decrypt_key");
+       &mov    ("eax",&wparam(0));
+       &mov    ($rounds,&wparam(1));
+       &mov    ($key,&wparam(2));
+       &call   ("_aesni_set_encrypt_key");
+       &mov    ($key,&wparam(2));
+       &shl    ($rounds,4);            # rounds-1 after _aesni_set_encrypt_key
+       &test   ("eax","eax");
+       &jnz    (&label("dec_key_ret"));
+       &lea    ("eax",&DWP(16,$key,$rounds));  # end of key schedule
+
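+       # The decrypt schedule is the encrypt schedule taken in reverse
+       # order, with aesimc (InvMixColumns) applied to every round key
+       # except the first and the last: the "equivalent inverse cipher"
+       # key layout that aesdec expects.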
+       &$movekey       ("xmm0",&QWP(0,$key));  # just swap
+       &$movekey       ("xmm1",&QWP(0,"eax"));
+       &$movekey       (&QWP(0,"eax"),"xmm0");
+       &$movekey       (&QWP(0,$key),"xmm1");
+       &lea            ($key,&DWP(16,$key));
+       &lea            ("eax",&DWP(-16,"eax"));
+
+&set_label("dec_key_inverse");
+       &$movekey       ("xmm0",&QWP(0,$key));  # swap and inverse
+       &$movekey       ("xmm1",&QWP(0,"eax"));
+       &aesimc         ("xmm0","xmm0");
+       &aesimc         ("xmm1","xmm1");
+       &lea            ($key,&DWP(16,$key));
+       &lea            ("eax",&DWP(-16,"eax"));
+       &$movekey       (&QWP(16,"eax"),"xmm0");
+       &$movekey       (&QWP(-16,$key),"xmm1");
+       &cmp            ("eax",$key);
+       &ja             (&label("dec_key_inverse"));
+
+       &$movekey       ("xmm0",&QWP(0,$key));  # inverse middle
+       &aesimc         ("xmm0","xmm0");
+       &$movekey       (&QWP(0,$key),"xmm0");
+
+       &xor            ("eax","eax");          # return success
+&set_label("dec_key_ret");
+       &ret    ();
+&function_end_B("${PREFIX}_set_decrypt_key");
+&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
new file mode 100644 (file)
index 0000000..ae0ad7f
--- /dev/null
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -0,0 +1,3062 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for Intel AES-NI extension. In
+# OpenSSL context it's used with Intel engine, but can also be used as
+# drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
+# details].
+#
+# Performance.
+#
+# Given aes(enc|dec) instructions' latency asymptotic performance for
+# non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
+# processed with 128-bit key. And given their throughput asymptotic
+# performance for parallelizable modes is 1.25 cycles per byte. Being
+# asymptotic limit it's not something you commonly achieve in reality,
+# but how close does one get? Below are results collected for
+# different modes and block sizes. Pairs of numbers are for en-/
+# decryption.
+#
+#      16-byte     64-byte     256-byte    1-KB        8-KB
+# ECB  4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26   1.26/1.26
+# CTR  5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
+# CBC  4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
+# CCM  5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07   
+# OFB  5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
+# CFB  5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
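+#
+# (The limits follow directly from the instruction characteristics:
+# with 128-bit key aes[enc|dec] is issued 10 times per 16-byte block,
+# so 6-cycle latency gives 6*10/16=3.75 cycles per byte for serial
+# modes, while one instruction per 2 cycles gives 2*10/16=1.25 for
+# parallelizable ones.)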
+#
+# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
+# that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
+# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
+# The results were collected with specially crafted speed.c benchmark
+# in order to compare them with results reported in "Intel Advanced
+# Encryption Standard (AES) New Instruction Set" White Paper Revision
+# 3.0 dated May 2010. All above results are consistently better. This
+# module also provides better performance for block sizes smaller than
+# 128 bytes at points *not* represented in the above table.
+#
+# Looking at the results for the 8-KB buffer.
+#
+# CFB and OFB results are far from the limit, because implementation
+# uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
+# single-block aesni_encrypt, which is not the optimal way to go.
+# CBC encrypt result is unexpectedly high and there is no documented
+# explanation for it. Seemingly there is a small penalty for feeding
+# the result back to AES unit the way it's done in CBC mode. There is
+# nothing one can do and the result appears optimal. CCM result is
+# identical to CBC, because CBC-MAC is essentially CBC encrypt without
+# saving output. CCM CTR "stays invisible," because it's neatly
+# interleaved with CBC-MAC. This provides ~30% improvement over a
+# "straightforward" CCM implementation with CTR and CBC-MAC performed
+# disjointly. Parallelizable modes practically achieve the theoretical
+# limit.
+#
+# Looking at how results vary with buffer size.
+#
+# Curves are practically saturated at 1-KB buffer size. In most cases
+# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
+# The CTR curve doesn't follow this pattern and is the slowest-
+# changing one, with the "256-byte" result being 87% of the "8-KB"
+# one. This is because the per-block overhead in CTR mode is the most
+# computationally intensive. Small-block CCM decrypt is slower than
+# encrypt, because the first CTR and the last CBC-MAC iterations
+# can't be interleaved.
+#
+# Results for 192- and 256-bit keys.
+#
+# EVP-free results were observed to scale perfectly with the number
+# of rounds for larger block sizes, i.e. the 192-bit result being
+# 10/12 of the 128-bit one and the 256-bit one 10/14. Well, in the
+# CBC encrypt case the differences are a tad smaller, because the
+# above-mentioned penalty biases all results by the same constant
+# value. In a similar way function call overhead affects small-block
+# performance, as well as OFB and CFB results. The differences are
+# not large; the most common coefficients are 10/11.7 and 10/13.4 (as
+# opposed to 10/12.0 and 10/14.0), but one observes even 10/11.2 and
+# 10/12.4 (CTR, OFB, CFB)...
+
+# January 2011
+#
+# While the Westmere processor features 6-cycle latency for
+# aes[enc|dec] instructions, which can be scheduled every second
+# cycle, Sandy Bridge spends 8 cycles per instruction but can
+# schedule them every cycle. This means that code targeting Westmere
+# would perform suboptimally on Sandy Bridge. Therefore this update.
+#
+# In addition, non-parallelizable CBC encrypt (as well as CCM) is
+# optimized. Relative improvement might appear modest, 8% on Westmere,
+# but in absolute terms it's 3.77 cycles per byte encrypted with
+# 128-bit key on Westmere, and 5.07 on Sandy Bridge. These numbers
+# should be compared to asymptotic limits of 3.75 for Westmere and
+# 5.00 for Sandy Bridge. Actually, the fact that they get this close
+# to asymptotic limits is quite amazing. Indeed, the limit is
+# calculated as latency times number of rounds, 10 for 128-bit key,
+# and divided by 16, the number of bytes in block, or in other words
+# it accounts *solely* for aesenc instructions. But there are extra
+# instructions, and numbers so close to the asymptotic limits mean
+# that it's as if it takes as little as *one* additional cycle to
+# execute all of them. How is it possible? It is possible thanks to
+# out-of-order execution logic, which manages to overlap post-
+# processing of previous block, things like saving the output, with
+# actual encryption of current block, as well as pre-processing of
+# current block, things like fetching input and xor-ing it with
+# 0-round element of the key schedule, with actual encryption of
+# previous block. Keep this in mind...
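+#
+# A minimal Perl sketch of the limit arithmetic described above, for
+# illustration only (not used by the generator; the cycle and round
+# counts are the ones quoted in this comment):
+#
+#      sub aes_limit { my ($cycles,$rounds)=@_; $cycles*$rounds/16 }
+#      print aes_limit(6,10);  # Westmere serial limit:     3.75
+#      print aes_limit(8,10);  # Sandy Bridge serial limit: 5.00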
+#
+# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
+# performance is achieved by interleaving instructions working on
+# independent blocks. In which case asymptotic limit for such modes
+# can be obtained by dividing the above-mentioned numbers by the AES
+# instructions' interleave factor. Westmere can execute at most 3
+# AES instructions at a time, meaning that the optimal interleave
+# factor is 3, and that's where the "magic" number of 1.25 comes
+# from. "Optimal interleave factor" means that increasing the
+# interleave factor further does not improve performance. The formula
+# has proven to reflect reality
+# pretty well on Westmere... Sandy Bridge on the other hand can
+# execute up to 8 AES instructions at a time, so how does varying
+# interleave factor affect the performance? Here is table for ECB
+# (numbers are cycles per byte processed with 128-bit key):
+#
+# instruction interleave factor                3x      6x      8x
+# theoretical asymptotic limit         1.67    0.83    0.625
+# measured performance for 8KB block   1.05    0.86    0.84
+#
+# "as if" interleave factor            4.7x    5.8x    6.0x
+#
+# Further data for other parallelizable modes:
+#
+# CBC decrypt                          1.16    0.93    0.93
+# CTR                                  1.14    0.91    n/a
+#
+# Well, given the 3x column it's probably inappropriate to call the
+# limit asymptotic if it can be surpassed, isn't it? What happens
+# there?
+# Rewind to CBC paragraph for the answer. Yes, out-of-order execution
+# magic is responsible for this. The processor overlaps not only the
+# additional instructions with AES ones, but even AES instructions
+# processing adjacent triplets of independent blocks. In the 6x case
+# the additional instructions still claim a disproportionately small
+# amount of additional cycles, but in the 8x case the number of
+# instructions must be a tad too high for the out-of-order logic to
+# cope with, and the AES unit remains underutilized... As you can see
+# 8x interleave is hardly justifiable, so there is no need to feel bad
+# that 32-bit aesni-x86.pl utilizes 6x interleave because of limited
+# register bank capacity.
+#
+# Higher interleave factors do have a negative impact on Westmere
+# performance. While for ECB mode it's a negligible ~1.5%, other
+# parallelizable modes perform ~5% worse, which is outweighed by the
+# ~25% improvement on Sandy Bridge. To balance the regression on
+# Westmere, CTR mode was implemented with a 6x aesenc interleave
+# factor.
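+#
+# Extending the sketch above with an interleave factor reproduces the
+# theoretical rows quoted in the table (illustration only):
+#
+#      sub aes_limit_ilv { my ($cycles,$rounds,$ilv)=@_;
+#                          $cycles*$rounds/16/$ilv }
+#      print aes_limit_ilv(6,10,3);    # Westmere 3x:     1.25
+#      print aes_limit_ilv(8,10,6);    # Sandy Bridge 6x: 0.83
+#      print aes_limit_ilv(8,10,8);    # Sandy Bridge 8x: 0.625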
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
+# one byte out of an 8-KB buffer with a 128-bit key, Sandy Bridge
+# 0.97. Just like in CTR mode, the AES instruction interleave factor
+# was chosen to be 6x.
+
+$PREFIX="aesni";       # if $PREFIX is set to "AES", the script
+                       # generates drop-in replacement for
+                       # crypto/aes/asm/aes-x86_64.pl:-)
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
+@_4args=$win64?        ("%rcx","%rdx","%r8", "%r9") :  # Win64 order
+               ("%rdi","%rsi","%rdx","%rcx");  # Unix order
+
+$code=".text\n";
+
+$rounds="%eax";        # input to and changed by aesni_[en|de]cryptN !!!
+# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
+$inp="%rdi";
+$out="%rsi";
+$len="%rdx";
+$key="%rcx";   # input to and changed by aesni_[en|de]cryptN !!!
+$ivp="%r8";    # cbc, ctr, ...
+
+$rnds_="%r10d";        # backup copy for $rounds
+$key_="%r11";  # backup copy for $key
+
+# %xmm register layout
+$rndkey0="%xmm0";      $rndkey1="%xmm1";
+$inout0="%xmm2";       $inout1="%xmm3";
+$inout2="%xmm4";       $inout3="%xmm5";
+$inout4="%xmm6";       $inout5="%xmm7";
+$inout6="%xmm8";       $inout7="%xmm9";
+
+$in2="%xmm6";          $in1="%xmm7";   # used in CBC decrypt, CTR, ...
+$in0="%xmm8";          $iv="%xmm9";
+\f
+# Inline version of internal aesni_[en|de]crypt1.
+#
+# Why folded loop? Because aes[enc|dec] is slow enough to accommodate
+# cycles which take care of loop variables...
+{ my $sn;
+sub aesni_generate1 {
+my ($p,$key,$rounds,$inout,$ivec)=@_;  $inout=$inout0 if (!defined($inout));
+++$sn;
+$code.=<<___;
+       $movkey ($key),$rndkey0
+       $movkey 16($key),$rndkey1
+___
+$code.=<<___ if (defined($ivec));
+       xorps   $rndkey0,$ivec
+       lea     32($key),$key
+       xorps   $ivec,$inout
+___
+$code.=<<___ if (!defined($ivec));
+       lea     32($key),$key
+       xorps   $rndkey0,$inout
+___
+$code.=<<___;
+.Loop_${p}1_$sn:
+       aes${p} $rndkey1,$inout
+       dec     $rounds
+       $movkey ($key),$rndkey1
+       lea     16($key),$key
+       jnz     .Loop_${p}1_$sn # loop body is 16 bytes
+       aes${p}last     $rndkey1,$inout
+___
+}}
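+# Note that the $rounds value loaded from 240($key) is one less than
+# the textbook round count: this module's key setup stores rounds-1
+# (e.g. 13 for 256-bit keys, cf. the set_encrypt_key code), so the
+# folded loop above issues exactly rounds-1 aes[enc|dec] instructions
+# before the closing aes[enc|dec]last.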
+# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
+#
+{ my ($inp,$out,$key) = @_4args;
+
+$code.=<<___;
+.globl ${PREFIX}_encrypt
+.type  ${PREFIX}_encrypt,\@abi-omnipotent
+.align 16
+${PREFIX}_encrypt:
+       movups  ($inp),$inout0          # load input
+       mov     240($key),$rounds       # key->rounds
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+       movups  $inout0,($out)          # output
+       ret
+.size  ${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl ${PREFIX}_decrypt
+.type  ${PREFIX}_decrypt,\@abi-omnipotent
+.align 16
+${PREFIX}_decrypt:
+       movups  ($inp),$inout0          # load input
+       mov     240($key),$rounds       # key->rounds
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       movups  $inout0,($out)          # output
+       ret
+.size  ${PREFIX}_decrypt, .-${PREFIX}_decrypt
+___
+}
+\f
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even
+# though aes[enc|dec] latency was originally 6, it could be scheduled
+# only every *2nd* cycle. Thus 3x interleave was the one providing
+# optimal utilization, i.e. when the subroutine's throughput is
+# virtually the same as that of a non-interleaved subroutine [for up
+# to 3 input blocks]. This is why it makes no sense to implement a 2x
+# subroutine. The aes[enc|dec] latency in the next processor
+# generation is 8, but the instructions can be scheduled every cycle.
+# The optimal interleave for the new processor is therefore 8x...
+sub aesni_generate3 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-2] is cipher/clear text...
+$code.=<<___;
+.type  _aesni_${dir}rypt3,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt3:
+       $movkey ($key),$rndkey0
+       shr     \$1,$rounds
+       $movkey 16($key),$rndkey1
+       lea     32($key),$key
+       xorps   $rndkey0,$inout0
+       xorps   $rndkey0,$inout1
+       xorps   $rndkey0,$inout2
+       $movkey         ($key),$rndkey0
+
+.L${dir}_loop3:
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout2
+       $movkey         16($key),$rndkey1
+       aes${dir}       $rndkey0,$inout0
+       aes${dir}       $rndkey0,$inout1
+       lea             32($key),$key
+       aes${dir}       $rndkey0,$inout2
+       $movkey         ($key),$rndkey0
+       jnz             .L${dir}_loop3
+
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}last   $rndkey0,$inout0
+       aes${dir}last   $rndkey0,$inout1
+       aes${dir}last   $rndkey0,$inout2
+       ret
+.size  _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
+___
+}
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] 4-block performance, by ~30%. One can
+# argue that 5x should have been implemented as well, but the
+# improvement would be <20%, so it's not worth it...
+sub aesni_generate4 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-3] is cipher/clear text...
+$code.=<<___;
+.type  _aesni_${dir}rypt4,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt4:
+       $movkey ($key),$rndkey0
+       shr     \$1,$rounds
+       $movkey 16($key),$rndkey1
+       lea     32($key),$key
+       xorps   $rndkey0,$inout0
+       xorps   $rndkey0,$inout1
+       xorps   $rndkey0,$inout2
+       xorps   $rndkey0,$inout3
+       $movkey ($key),$rndkey0
+
+.L${dir}_loop4:
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       $movkey         16($key),$rndkey1
+       aes${dir}       $rndkey0,$inout0
+       aes${dir}       $rndkey0,$inout1
+       lea             32($key),$key
+       aes${dir}       $rndkey0,$inout2
+       aes${dir}       $rndkey0,$inout3
+       $movkey         ($key),$rndkey0
+       jnz             .L${dir}_loop4
+
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       aes${dir}last   $rndkey0,$inout0
+       aes${dir}last   $rndkey0,$inout1
+       aes${dir}last   $rndkey0,$inout2
+       aes${dir}last   $rndkey0,$inout3
+       ret
+.size  _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
+___
+}
+sub aesni_generate6 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-5] is cipher/clear text...
+$code.=<<___;
+.type  _aesni_${dir}rypt6,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt6:
+       $movkey         ($key),$rndkey0
+       shr             \$1,$rounds
+       $movkey         16($key),$rndkey1
+       lea             32($key),$key
+       xorps           $rndkey0,$inout0
+       pxor            $rndkey0,$inout1
+       aes${dir}       $rndkey1,$inout0
+       pxor            $rndkey0,$inout2
+       aes${dir}       $rndkey1,$inout1
+       pxor            $rndkey0,$inout3
+       aes${dir}       $rndkey1,$inout2
+       pxor            $rndkey0,$inout4
+       aes${dir}       $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout4
+       $movkey         ($key),$rndkey0
+       aes${dir}       $rndkey1,$inout5
+       jmp             .L${dir}_loop6_enter
+.align 16
+.L${dir}_loop6:
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       aes${dir}       $rndkey1,$inout4
+       aes${dir}       $rndkey1,$inout5
+.L${dir}_loop6_enter:                          # happens to be 16-byte aligned
+       $movkey         16($key),$rndkey1
+       aes${dir}       $rndkey0,$inout0
+       aes${dir}       $rndkey0,$inout1
+       lea             32($key),$key
+       aes${dir}       $rndkey0,$inout2
+       aes${dir}       $rndkey0,$inout3
+       aes${dir}       $rndkey0,$inout4
+       aes${dir}       $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       jnz             .L${dir}_loop6
+
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       aes${dir}       $rndkey1,$inout4
+       aes${dir}       $rndkey1,$inout5
+       aes${dir}last   $rndkey0,$inout0
+       aes${dir}last   $rndkey0,$inout1
+       aes${dir}last   $rndkey0,$inout2
+       aes${dir}last   $rndkey0,$inout3
+       aes${dir}last   $rndkey0,$inout4
+       aes${dir}last   $rndkey0,$inout5
+       ret
+.size  _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
+___
+}
+sub aesni_generate8 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-7] is cipher/clear text...
+$code.=<<___;
+.type  _aesni_${dir}rypt8,\@abi-omnipotent
+.align 16
+_aesni_${dir}rypt8:
+       $movkey         ($key),$rndkey0
+       shr             \$1,$rounds
+       $movkey         16($key),$rndkey1
+       lea             32($key),$key
+       xorps           $rndkey0,$inout0
+       xorps           $rndkey0,$inout1
+       aes${dir}       $rndkey1,$inout0
+       pxor            $rndkey0,$inout2
+       aes${dir}       $rndkey1,$inout1
+       pxor            $rndkey0,$inout3
+       aes${dir}       $rndkey1,$inout2
+       pxor            $rndkey0,$inout4
+       aes${dir}       $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout4
+       pxor            $rndkey0,$inout6
+       aes${dir}       $rndkey1,$inout5
+       pxor            $rndkey0,$inout7
+       $movkey         ($key),$rndkey0
+       aes${dir}       $rndkey1,$inout6
+       aes${dir}       $rndkey1,$inout7
+       $movkey         16($key),$rndkey1
+       jmp             .L${dir}_loop8_enter
+.align 16
+.L${dir}_loop8:
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       dec             $rounds
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       aes${dir}       $rndkey1,$inout4
+       aes${dir}       $rndkey1,$inout5
+       aes${dir}       $rndkey1,$inout6
+       aes${dir}       $rndkey1,$inout7
+       $movkey         16($key),$rndkey1
+.L${dir}_loop8_enter:                          # happens to be 16-byte aligned
+       aes${dir}       $rndkey0,$inout0
+       aes${dir}       $rndkey0,$inout1
+       lea             32($key),$key
+       aes${dir}       $rndkey0,$inout2
+       aes${dir}       $rndkey0,$inout3
+       aes${dir}       $rndkey0,$inout4
+       aes${dir}       $rndkey0,$inout5
+       aes${dir}       $rndkey0,$inout6
+       aes${dir}       $rndkey0,$inout7
+       $movkey         ($key),$rndkey0
+       jnz             .L${dir}_loop8
+
+       aes${dir}       $rndkey1,$inout0
+       aes${dir}       $rndkey1,$inout1
+       aes${dir}       $rndkey1,$inout2
+       aes${dir}       $rndkey1,$inout3
+       aes${dir}       $rndkey1,$inout4
+       aes${dir}       $rndkey1,$inout5
+       aes${dir}       $rndkey1,$inout6
+       aes${dir}       $rndkey1,$inout7
+       aes${dir}last   $rndkey0,$inout0
+       aes${dir}last   $rndkey0,$inout1
+       aes${dir}last   $rndkey0,$inout2
+       aes${dir}last   $rndkey0,$inout3
+       aes${dir}last   $rndkey0,$inout4
+       aes${dir}last   $rndkey0,$inout5
+       aes${dir}last   $rndkey0,$inout6
+       aes${dir}last   $rndkey0,$inout7
+       ret
+.size  _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
+___
+}
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+&aesni_generate8("enc") if ($PREFIX eq "aesni");
+&aesni_generate8("dec");
+\f
+if ($PREFIX eq "aesni") {
+########################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#                        size_t length, const AES_KEY *key,
+#                        int enc);
+$code.=<<___;
+.globl aesni_ecb_encrypt
+.type  aesni_ecb_encrypt,\@function,5
+.align 16
+aesni_ecb_encrypt:
+       and     \$-16,$len
+       jz      .Lecb_ret
+
+       mov     240($key),$rounds       # key->rounds
+       $movkey ($key),$rndkey0
+       mov     $key,$key_              # backup $key
+       mov     $rounds,$rnds_          # backup $rounds
+       test    %r8d,%r8d               # 5th argument
+       jz      .Lecb_decrypt
+#--------------------------- ECB ENCRYPT ------------------------------#
+       cmp     \$0x80,$len
+       jb      .Lecb_enc_tail
+
+       movdqu  ($inp),$inout0
+       movdqu  0x10($inp),$inout1
+       movdqu  0x20($inp),$inout2
+       movdqu  0x30($inp),$inout3
+       movdqu  0x40($inp),$inout4
+       movdqu  0x50($inp),$inout5
+       movdqu  0x60($inp),$inout6
+       movdqu  0x70($inp),$inout7
+       lea     0x80($inp),$inp
+       sub     \$0x80,$len
+       jmp     .Lecb_enc_loop8_enter
+.align 16
+.Lecb_enc_loop8:
+       movups  $inout0,($out)
+       mov     $key_,$key              # restore $key
+       movdqu  ($inp),$inout0
+       mov     $rnds_,$rounds          # restore $rounds
+       movups  $inout1,0x10($out)
+       movdqu  0x10($inp),$inout1
+       movups  $inout2,0x20($out)
+       movdqu  0x20($inp),$inout2
+       movups  $inout3,0x30($out)
+       movdqu  0x30($inp),$inout3
+       movups  $inout4,0x40($out)
+       movdqu  0x40($inp),$inout4
+       movups  $inout5,0x50($out)
+       movdqu  0x50($inp),$inout5
+       movups  $inout6,0x60($out)
+       movdqu  0x60($inp),$inout6
+       movups  $inout7,0x70($out)
+       lea     0x80($out),$out
+       movdqu  0x70($inp),$inout7
+       lea     0x80($inp),$inp
+.Lecb_enc_loop8_enter:
+
+       call    _aesni_encrypt8
+
+       sub     \$0x80,$len
+       jnc     .Lecb_enc_loop8
+
+       movups  $inout0,($out)
+       mov     $key_,$key              # restore $key
+       movups  $inout1,0x10($out)
+       mov     $rnds_,$rounds          # restore $rounds
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       movups  $inout6,0x60($out)
+       movups  $inout7,0x70($out)
+       lea     0x80($out),$out
+       add     \$0x80,$len
+       jz      .Lecb_ret
+
+.Lecb_enc_tail:
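+       # dispatch on the 1..7 remaining blocks: each cmp resolves two
+       # cases, "below" selecting the odd count and "equal" the even
+       # one, so three comparisons cover all seven tails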
+       movups  ($inp),$inout0
+       cmp     \$0x20,$len
+       jb      .Lecb_enc_one
+       movups  0x10($inp),$inout1
+       je      .Lecb_enc_two
+       movups  0x20($inp),$inout2
+       cmp     \$0x40,$len
+       jb      .Lecb_enc_three
+       movups  0x30($inp),$inout3
+       je      .Lecb_enc_four
+       movups  0x40($inp),$inout4
+       cmp     \$0x60,$len
+       jb      .Lecb_enc_five
+       movups  0x50($inp),$inout5
+       je      .Lecb_enc_six
+       movdqu  0x60($inp),$inout6
+       call    _aesni_encrypt8
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       movups  $inout6,0x60($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_one:
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+       movups  $inout0,($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_two:
+       xorps   $inout2,$inout2
+       call    _aesni_encrypt3
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_three:
+       call    _aesni_encrypt3
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_four:
+       call    _aesni_encrypt4
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_five:
+       xorps   $inout5,$inout5
+       call    _aesni_encrypt6
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_enc_six:
+       call    _aesni_encrypt6
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       jmp     .Lecb_ret
+\f#--------------------------- ECB DECRYPT ------------------------------#
+.align 16
+.Lecb_decrypt:
+       cmp     \$0x80,$len
+       jb      .Lecb_dec_tail
+
+       movdqu  ($inp),$inout0
+       movdqu  0x10($inp),$inout1
+       movdqu  0x20($inp),$inout2
+       movdqu  0x30($inp),$inout3
+       movdqu  0x40($inp),$inout4
+       movdqu  0x50($inp),$inout5
+       movdqu  0x60($inp),$inout6
+       movdqu  0x70($inp),$inout7
+       lea     0x80($inp),$inp
+       sub     \$0x80,$len
+       jmp     .Lecb_dec_loop8_enter
+.align 16
+.Lecb_dec_loop8:
+       movups  $inout0,($out)
+       mov     $key_,$key              # restore $key
+       movdqu  ($inp),$inout0
+       mov     $rnds_,$rounds          # restore $rounds
+       movups  $inout1,0x10($out)
+       movdqu  0x10($inp),$inout1
+       movups  $inout2,0x20($out)
+       movdqu  0x20($inp),$inout2
+       movups  $inout3,0x30($out)
+       movdqu  0x30($inp),$inout3
+       movups  $inout4,0x40($out)
+       movdqu  0x40($inp),$inout4
+       movups  $inout5,0x50($out)
+       movdqu  0x50($inp),$inout5
+       movups  $inout6,0x60($out)
+       movdqu  0x60($inp),$inout6
+       movups  $inout7,0x70($out)
+       lea     0x80($out),$out
+       movdqu  0x70($inp),$inout7
+       lea     0x80($inp),$inp
+.Lecb_dec_loop8_enter:
+
+       call    _aesni_decrypt8
+
+       $movkey ($key_),$rndkey0
+       sub     \$0x80,$len
+       jnc     .Lecb_dec_loop8
+
+       movups  $inout0,($out)
+       mov     $key_,$key              # restore $key
+       movups  $inout1,0x10($out)
+       mov     $rnds_,$rounds          # restore $rounds
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       movups  $inout6,0x60($out)
+       movups  $inout7,0x70($out)
+       lea     0x80($out),$out
+       add     \$0x80,$len
+       jz      .Lecb_ret
+
+.Lecb_dec_tail:
+       movups  ($inp),$inout0
+       cmp     \$0x20,$len
+       jb      .Lecb_dec_one
+       movups  0x10($inp),$inout1
+       je      .Lecb_dec_two
+       movups  0x20($inp),$inout2
+       cmp     \$0x40,$len
+       jb      .Lecb_dec_three
+       movups  0x30($inp),$inout3
+       je      .Lecb_dec_four
+       movups  0x40($inp),$inout4
+       cmp     \$0x60,$len
+       jb      .Lecb_dec_five
+       movups  0x50($inp),$inout5
+       je      .Lecb_dec_six
+       movups  0x60($inp),$inout6
+       $movkey ($key),$rndkey0
+       call    _aesni_decrypt8
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       movups  $inout6,0x60($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_one:
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       movups  $inout0,($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_two:
+       xorps   $inout2,$inout2
+       call    _aesni_decrypt3
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_three:
+       call    _aesni_decrypt3
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_four:
+       call    _aesni_decrypt4
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_five:
+       xorps   $inout5,$inout5
+       call    _aesni_decrypt6
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       jmp     .Lecb_ret
+.align 16
+.Lecb_dec_six:
+       call    _aesni_decrypt6
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+
+.Lecb_ret:
+       ret
+.size  aesni_ecb_encrypt,.-aesni_ecb_encrypt
+___
+\f
+{
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on a 64-bit counter and
+# does not update *ivec! Nor does it finalize the CMAC value
+# (see engine/eng_aesni.c for details)
+#
+{
+my $cmac="%r9";        # 6th argument
+
+my $increment="%xmm8";
+my $bswap_mask="%xmm9";
+
+$code.=<<___;
+.globl aesni_ccm64_encrypt_blocks
+.type  aesni_ccm64_encrypt_blocks,\@function,6
+.align 16
+aesni_ccm64_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+       lea     -0x58(%rsp),%rsp
+       movaps  %xmm6,(%rsp)
+       movaps  %xmm7,0x10(%rsp)
+       movaps  %xmm8,0x20(%rsp)
+       movaps  %xmm9,0x30(%rsp)
+.Lccm64_enc_body:
+___
+$code.=<<___;
+       movdqu  ($ivp),$iv
+       movdqu  ($cmac),$inout1
+       movdqa  .Lincrement64(%rip),$increment
+       movdqa  .Lbswap_mask(%rip),$bswap_mask
+       pshufb  $bswap_mask,$iv                 # keep iv in reverse order
+
+       mov     240($key),$rounds               # key->rounds
+       mov     $key,$key_
+       mov     $rounds,$rnds_
+       movdqa  $iv,$inout0
+
+.Lccm64_enc_outer:
+       movups  ($inp),$in0                     # load inp
+       pshufb  $bswap_mask,$inout0
+       mov     $key_,$key
+       mov     $rnds_,$rounds
+
+       $movkey ($key),$rndkey0
+       shr     \$1,$rounds
+       $movkey 16($key),$rndkey1
+       xorps   $rndkey0,$inout0
+       lea     32($key),$key
+       xorps   $in0,$rndkey0
+       xorps   $rndkey0,$inout1                # cmac^=inp
+       $movkey ($key),$rndkey0
+
+.Lccm64_enc2_loop:
+       aesenc  $rndkey1,$inout0
+       dec     $rounds
+       aesenc  $rndkey1,$inout1
+       $movkey 16($key),$rndkey1
+       aesenc  $rndkey0,$inout0
+       lea     32($key),$key
+       aesenc  $rndkey0,$inout1
+       $movkey 0($key),$rndkey0
+       jnz     .Lccm64_enc2_loop
+       aesenc  $rndkey1,$inout0
+       aesenc  $rndkey1,$inout1
+       aesenclast      $rndkey0,$inout0
+       aesenclast      $rndkey0,$inout1
+
+       paddq   $increment,$iv
+       dec     $len
+       lea     16($inp),$inp
+       xorps   $inout0,$in0                    # inp ^= E(iv)
+       movdqa  $iv,$inout0
+       movups  $in0,($out)                     # save output
+       lea     16($out),$out
+       jnz     .Lccm64_enc_outer
+
+       movups  $inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+       movaps  (%rsp),%xmm6
+       movaps  0x10(%rsp),%xmm7
+       movaps  0x20(%rsp),%xmm8
+       movaps  0x30(%rsp),%xmm9
+       lea     0x58(%rsp),%rsp
+.Lccm64_enc_ret:
+___
+$code.=<<___;
+       ret
+.size  aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
+___
+######################################################################
+$code.=<<___;
+.globl aesni_ccm64_decrypt_blocks
+.type  aesni_ccm64_decrypt_blocks,\@function,6
+.align 16
+aesni_ccm64_decrypt_blocks:
+___
+$code.=<<___ if ($win64);
+       lea     -0x58(%rsp),%rsp
+       movaps  %xmm6,(%rsp)
+       movaps  %xmm7,0x10(%rsp)
+       movaps  %xmm8,0x20(%rsp)
+       movaps  %xmm9,0x30(%rsp)
+.Lccm64_dec_body:
+___
+$code.=<<___;
+       movdqu  ($ivp),$iv
+       movdqu  ($cmac),$inout1
+       movdqa  .Lincrement64(%rip),$increment
+       movdqa  .Lbswap_mask(%rip),$bswap_mask
+
+       mov     240($key),$rounds               # key->rounds
+       movdqa  $iv,$inout0
+       pshufb  $bswap_mask,$iv                 # keep iv in reverse order
+       mov     $rounds,$rnds_
+       mov     $key,$key_
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+.Lccm64_dec_outer:
+       paddq   $increment,$iv
+       movups  ($inp),$in0                     # load inp
+       xorps   $inout0,$in0
+       movdqa  $iv,$inout0
+       lea     16($inp),$inp
+       pshufb  $bswap_mask,$inout0
+       mov     $key_,$key
+       mov     $rnds_,$rounds
+       movups  $in0,($out)
+       lea     16($out),$out
+
+       sub     \$1,$len
+       jz      .Lccm64_dec_break
+
+       $movkey ($key),$rndkey0
+       shr     \$1,$rounds
+       $movkey 16($key),$rndkey1
+       xorps   $rndkey0,$in0
+       lea     32($key),$key
+       xorps   $rndkey0,$inout0
+       xorps   $in0,$inout1                    # cmac^=out
+       $movkey ($key),$rndkey0
+
+.Lccm64_dec2_loop:
+       aesenc  $rndkey1,$inout0
+       dec     $rounds
+       aesenc  $rndkey1,$inout1
+       $movkey 16($key),$rndkey1
+       aesenc  $rndkey0,$inout0
+       lea     32($key),$key
+       aesenc  $rndkey0,$inout1
+       $movkey 0($key),$rndkey0
+       jnz     .Lccm64_dec2_loop
+       aesenc  $rndkey1,$inout0
+       aesenc  $rndkey1,$inout1
+       aesenclast      $rndkey0,$inout0
+       aesenclast      $rndkey0,$inout1
+       jmp     .Lccm64_dec_outer
+
+.align 16
+.Lccm64_dec_break:
+___
+       &aesni_generate1("enc",$key,$rounds,$inout1,$in0);
+$code.=<<___;
+       movups  $inout1,($cmac)
+___
+$code.=<<___ if ($win64);
+       movaps  (%rsp),%xmm6
+       movaps  0x10(%rsp),%xmm7
+       movaps  0x20(%rsp),%xmm8
+       movaps  0x30(%rsp),%xmm9
+       lea     0x58(%rsp),%rsp
+.Lccm64_dec_ret:
+___
+$code.=<<___;
+       ret
+.size  aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
+___
+}\f
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on a 32-bit counter and
+# does not update *ivec! (see engine/eng_aesni.c for details)
+#
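+# The counter occupies the last four bytes of *ivec. Per 16-byte
+# block i the function effectively computes (a sketch of the
+# semantics only):
+#
+#      out[i] = inp[i] ^ E(ivec[0..11] || ctr+i)
+#
+# with six counter blocks kept in flight in .Lctr32_loop6.
+#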
+{
+my $reserved = $win64?0:-0x28;
+my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
+my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
+my $bswap_mask="%xmm15";
+
+$code.=<<___;
+.globl aesni_ctr32_encrypt_blocks
+.type  aesni_ctr32_encrypt_blocks,\@function,5
+.align 16
+aesni_ctr32_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+       lea     -0xc8(%rsp),%rsp
+       movaps  %xmm6,0x20(%rsp)
+       movaps  %xmm7,0x30(%rsp)
+       movaps  %xmm8,0x40(%rsp)
+       movaps  %xmm9,0x50(%rsp)
+       movaps  %xmm10,0x60(%rsp)
+       movaps  %xmm11,0x70(%rsp)
+       movaps  %xmm12,0x80(%rsp)
+       movaps  %xmm13,0x90(%rsp)
+       movaps  %xmm14,0xa0(%rsp)
+       movaps  %xmm15,0xb0(%rsp)
+.Lctr32_body:
+___
+$code.=<<___;
+       cmp     \$1,$len
+       je      .Lctr32_one_shortcut
+
+       movdqu  ($ivp),$ivec
+       movdqa  .Lbswap_mask(%rip),$bswap_mask
+       xor     $rounds,$rounds
+       pextrd  \$3,$ivec,$rnds_                # pull 32-bit counter
+       pinsrd  \$3,$rounds,$ivec               # wipe 32-bit counter
+
+       mov     240($key),$rounds               # key->rounds
+       bswap   $rnds_
+       pxor    $iv0,$iv0                       # vector of 3 32-bit counters
+       pxor    $iv1,$iv1                       # vector of 3 32-bit counters
+       pinsrd  \$0,$rnds_,$iv0
+       lea     3($rnds_),$key_
+       pinsrd  \$0,$key_,$iv1
+       inc     $rnds_
+       pinsrd  \$1,$rnds_,$iv0
+       inc     $key_
+       pinsrd  \$1,$key_,$iv1
+       inc     $rnds_
+       pinsrd  \$2,$rnds_,$iv0
+       inc     $key_
+       pinsrd  \$2,$key_,$iv1
+       movdqa  $iv0,$reserved(%rsp)
+       pshufb  $bswap_mask,$iv0
+       movdqa  $iv1,`$reserved+0x10`(%rsp)
+       pshufb  $bswap_mask,$iv1
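+       # the stack copies of $iv0/$iv1 keep the arithmetic-friendly
+       # byte order for the paddd increments, while the byte-swapped
+       # register copies supply counter dwords ready to be merged
+       # into the counter-less $ivec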
+
+       pshufd  \$`3<<6`,$iv0,$inout0           # place counter to upper dword
+       pshufd  \$`2<<6`,$iv0,$inout1
+       pshufd  \$`1<<6`,$iv0,$inout2
+       cmp     \$6,$len
+       jb      .Lctr32_tail
+       shr     \$1,$rounds
+       mov     $key,$key_                      # backup $key
+       mov     $rounds,$rnds_                  # backup $rounds
+       sub     \$6,$len
+       jmp     .Lctr32_loop6
+
+.align 16
+.Lctr32_loop6:
+       pshufd  \$`3<<6`,$iv1,$inout3
+       por     $ivec,$inout0                   # merge counter-less ivec
+        $movkey        ($key_),$rndkey0
+       pshufd  \$`2<<6`,$iv1,$inout4
+       por     $ivec,$inout1
+        $movkey        16($key_),$rndkey1
+       pshufd  \$`1<<6`,$iv1,$inout5
+       por     $ivec,$inout2
+       por     $ivec,$inout3
+        xorps          $rndkey0,$inout0
+       por     $ivec,$inout4
+       por     $ivec,$inout5
+
+       # inline _aesni_encrypt6 and interleave last rounds
+       # with own code...
+
+       pxor            $rndkey0,$inout1
+       aesenc          $rndkey1,$inout0
+       lea             32($key_),$key
+       pxor            $rndkey0,$inout2
+       aesenc          $rndkey1,$inout1
+        movdqa         .Lincrement32(%rip),$iv1
+       pxor            $rndkey0,$inout3
+       aesenc          $rndkey1,$inout2
+        movdqa         $reserved(%rsp),$iv0
+       pxor            $rndkey0,$inout4
+       aesenc          $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       dec             $rounds
+       aesenc          $rndkey1,$inout4
+       aesenc          $rndkey1,$inout5
+       jmp             .Lctr32_enc_loop6_enter
+.align 16
+.Lctr32_enc_loop6:
+       aesenc          $rndkey1,$inout0
+       aesenc          $rndkey1,$inout1
+       dec             $rounds
+       aesenc          $rndkey1,$inout2
+       aesenc          $rndkey1,$inout3
+       aesenc          $rndkey1,$inout4
+       aesenc          $rndkey1,$inout5
+.Lctr32_enc_loop6_enter:
+       $movkey         16($key),$rndkey1
+       aesenc          $rndkey0,$inout0
+       aesenc          $rndkey0,$inout1
+       lea             32($key),$key
+       aesenc          $rndkey0,$inout2
+       aesenc          $rndkey0,$inout3
+       aesenc          $rndkey0,$inout4
+       aesenc          $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       jnz             .Lctr32_enc_loop6
+
+       aesenc          $rndkey1,$inout0
+        paddd          $iv1,$iv0               # increment counter vector
+       aesenc          $rndkey1,$inout1
+        paddd          `$reserved+0x10`(%rsp),$iv1
+       aesenc          $rndkey1,$inout2
+        movdqa         $iv0,$reserved(%rsp)    # save counter vector
+       aesenc          $rndkey1,$inout3
+        movdqa         $iv1,`$reserved+0x10`(%rsp)
+       aesenc          $rndkey1,$inout4
+        pshufb         $bswap_mask,$iv0        # byte swap
+       aesenc          $rndkey1,$inout5
+        pshufb         $bswap_mask,$iv1
+
+       aesenclast      $rndkey0,$inout0
+        movups         ($inp),$in0             # load input
+       aesenclast      $rndkey0,$inout1
+        movups         0x10($inp),$in1
+       aesenclast      $rndkey0,$inout2
+        movups         0x20($inp),$in2
+       aesenclast      $rndkey0,$inout3
+        movups         0x30($inp),$in3
+       aesenclast      $rndkey0,$inout4
+        movups         0x40($inp),$rndkey1
+       aesenclast      $rndkey0,$inout5
+        movups         0x50($inp),$rndkey0
+        lea    0x60($inp),$inp
+
+       xorps   $inout0,$in0                    # xor
+        pshufd \$`3<<6`,$iv0,$inout0
+       xorps   $inout1,$in1
+        pshufd \$`2<<6`,$iv0,$inout1
+       movups  $in0,($out)                     # store output
+       xorps   $inout2,$in2
+        pshufd \$`1<<6`,$iv0,$inout2
+       movups  $in1,0x10($out)
+       xorps   $inout3,$in3
+       movups  $in2,0x20($out)
+       xorps   $inout4,$rndkey1
+       movups  $in3,0x30($out)
+       xorps   $inout5,$rndkey0
+       movups  $rndkey1,0x40($out)
+       movups  $rndkey0,0x50($out)
+       lea     0x60($out),$out
+       mov     $rnds_,$rounds
+       sub     \$6,$len
+       jnc     .Lctr32_loop6
+
+       add     \$6,$len
+       jz      .Lctr32_done
+       mov     $key_,$key                      # restore $key
+       lea     1($rounds,$rounds),$rounds      # restore original value
+
+.Lctr32_tail:
+       por     $ivec,$inout0
+       movups  ($inp),$in0
+       cmp     \$2,$len
+       jb      .Lctr32_one
+
+       por     $ivec,$inout1
+       movups  0x10($inp),$in1
+       je      .Lctr32_two
+
+       pshufd  \$`3<<6`,$iv1,$inout3
+       por     $ivec,$inout2
+       movups  0x20($inp),$in2
+       cmp     \$4,$len
+       jb      .Lctr32_three
+
+       pshufd  \$`2<<6`,$iv1,$inout4
+       por     $ivec,$inout3
+       movups  0x30($inp),$in3
+       je      .Lctr32_four
+
+       por     $ivec,$inout4
+       xorps   $inout5,$inout5
+
+       call    _aesni_encrypt6
+
+       movups  0x40($inp),$rndkey1
+       xorps   $inout0,$in0
+       xorps   $inout1,$in1
+       movups  $in0,($out)
+       xorps   $inout2,$in2
+       movups  $in1,0x10($out)
+       xorps   $inout3,$in3
+       movups  $in2,0x20($out)
+       xorps   $inout4,$rndkey1
+       movups  $in3,0x30($out)
+       movups  $rndkey1,0x40($out)
+       jmp     .Lctr32_done
+
+.align 16
+.Lctr32_one_shortcut:
+       movups  ($ivp),$inout0
+       movups  ($inp),$in0
+       mov     240($key),$rounds               # key->rounds
+.Lctr32_one:
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+       xorps   $inout0,$in0
+       movups  $in0,($out)
+       jmp     .Lctr32_done
+
+.align 16
+.Lctr32_two:
+       xorps   $inout2,$inout2
+       call    _aesni_encrypt3
+       xorps   $inout0,$in0
+       xorps   $inout1,$in1
+       movups  $in0,($out)
+       movups  $in1,0x10($out)
+       jmp     .Lctr32_done
+
+.align 16
+.Lctr32_three:
+       call    _aesni_encrypt3
+       xorps   $inout0,$in0
+       xorps   $inout1,$in1
+       movups  $in0,($out)
+       xorps   $inout2,$in2
+       movups  $in1,0x10($out)
+       movups  $in2,0x20($out)
+       jmp     .Lctr32_done
+
+.align 16
+.Lctr32_four:
+       call    _aesni_encrypt4
+       xorps   $inout0,$in0
+       xorps   $inout1,$in1
+       movups  $in0,($out)
+       xorps   $inout2,$in2
+       movups  $in1,0x10($out)
+       xorps   $inout3,$in3
+       movups  $in2,0x20($out)
+       movups  $in3,0x30($out)
+
+.Lctr32_done:
+___
+$code.=<<___ if ($win64);
+       movaps  0x20(%rsp),%xmm6
+       movaps  0x30(%rsp),%xmm7
+       movaps  0x40(%rsp),%xmm8
+       movaps  0x50(%rsp),%xmm9
+       movaps  0x60(%rsp),%xmm10
+       movaps  0x70(%rsp),%xmm11
+       movaps  0x80(%rsp),%xmm12
+       movaps  0x90(%rsp),%xmm13
+       movaps  0xa0(%rsp),%xmm14
+       movaps  0xb0(%rsp),%xmm15
+       lea     0xc8(%rsp),%rsp
+.Lctr32_ret:
+___
+$code.=<<___;
+       ret
+.size  aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
+___
+}
+\f
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#      const AES_KEY *key1, const AES_KEY *key2,
+#      const unsigned char iv[16]);
+#
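+# Each consecutive tweak is the previous one multiplied by x in
+# GF(2^128) modulo x^128+x^7+x^2+x+1. A sketch of what the
+# paddq/pcmpgtd/pshufd/pand/pxor sequences below compute:
+#
+#      carry = tweak >> 127            # top bit, as pcmpgtd sign mask
+#      tweak = (tweak << 1) ^ (carry ? 0x87 : 0)
+#
+# with .Lxts_magic supplying the reduction constant.
+#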
+{
+my @tweak=map("%xmm$_",(10..15));
+my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
+my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
+my $frame_size = 0x68 + ($win64?160:0);
+
+$code.=<<___;
+.globl aesni_xts_encrypt
+.type  aesni_xts_encrypt,\@function,6
+.align 16
+aesni_xts_encrypt:
+       lea     -$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+       movaps  %xmm6,0x60(%rsp)
+       movaps  %xmm7,0x70(%rsp)
+       movaps  %xmm8,0x80(%rsp)
+       movaps  %xmm9,0x90(%rsp)
+       movaps  %xmm10,0xa0(%rsp)
+       movaps  %xmm11,0xb0(%rsp)
+       movaps  %xmm12,0xc0(%rsp)
+       movaps  %xmm13,0xd0(%rsp)
+       movaps  %xmm14,0xe0(%rsp)
+       movaps  %xmm15,0xf0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+       movups  ($ivp),@tweak[5]                # load clear-text tweak
+       mov     240(%r8),$rounds                # key2->rounds
+       mov     240($key),$rnds_                # key1->rounds
+___
+       # generate the tweak
+       &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+       mov     $key,$key_                      # backup $key
+       mov     $rnds_,$rounds                  # backup $rounds
+       mov     $len,$len_                      # backup $len
+       and     \$-16,$len
+
+       movdqa  .Lxts_magic(%rip),$twmask
+       pxor    $twtmp,$twtmp
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+___
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[$i]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+       pand    $twmask,$twres                  # isolate carry and residue
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+       pxor    $twres,@tweak[5]
+___
+    }
+$code.=<<___;
+       sub     \$16*6,$len
+       jc      .Lxts_enc_short
+
+       shr     \$1,$rounds
+       sub     \$1,$rounds
+       mov     $rounds,$rnds_
+       jmp     .Lxts_enc_grandloop
+
+.align 16
+.Lxts_enc_grandloop:
+       pshufd  \$0x13,$twtmp,$twres
+       movdqa  @tweak[5],@tweak[4]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+       movdqu  `16*0`($inp),$inout0            # load input
+       pand    $twmask,$twres                  # isolate carry and residue
+       movdqu  `16*1`($inp),$inout1
+       pxor    $twres,@tweak[5]
+
+       movdqu  `16*2`($inp),$inout2
+       pxor    @tweak[0],$inout0               # input^=tweak
+       movdqu  `16*3`($inp),$inout3
+       pxor    @tweak[1],$inout1
+       movdqu  `16*4`($inp),$inout4
+       pxor    @tweak[2],$inout2
+       movdqu  `16*5`($inp),$inout5
+       lea     `16*6`($inp),$inp
+       pxor    @tweak[3],$inout3
+       $movkey         ($key_),$rndkey0
+       pxor    @tweak[4],$inout4
+       pxor    @tweak[5],$inout5
+
+       # inline _aesni_encrypt6 and interleave first and last rounds
+       # with own code...
+       $movkey         16($key_),$rndkey1
+       pxor            $rndkey0,$inout0
+       pxor            $rndkey0,$inout1
+        movdqa @tweak[0],`16*0`(%rsp)          # put aside tweaks
+       aesenc          $rndkey1,$inout0
+       lea             32($key_),$key
+       pxor            $rndkey0,$inout2
+        movdqa @tweak[1],`16*1`(%rsp)
+       aesenc          $rndkey1,$inout1
+       pxor            $rndkey0,$inout3
+        movdqa @tweak[2],`16*2`(%rsp)
+       aesenc          $rndkey1,$inout2
+       pxor            $rndkey0,$inout4
+        movdqa @tweak[3],`16*3`(%rsp)
+       aesenc          $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       dec             $rounds
+        movdqa @tweak[4],`16*4`(%rsp)
+       aesenc          $rndkey1,$inout4
+        movdqa @tweak[5],`16*5`(%rsp)
+       aesenc          $rndkey1,$inout5
+       pxor    $twtmp,$twtmp
+       pcmpgtd @tweak[5],$twtmp
+       jmp             .Lxts_enc_loop6_enter
+
+.align 16
+.Lxts_enc_loop6:
+       aesenc          $rndkey1,$inout0
+       aesenc          $rndkey1,$inout1
+       dec             $rounds
+       aesenc          $rndkey1,$inout2
+       aesenc          $rndkey1,$inout3
+       aesenc          $rndkey1,$inout4
+       aesenc          $rndkey1,$inout5
+.Lxts_enc_loop6_enter:
+       $movkey         16($key),$rndkey1
+       aesenc          $rndkey0,$inout0
+       aesenc          $rndkey0,$inout1
+       lea             32($key),$key
+       aesenc          $rndkey0,$inout2
+       aesenc          $rndkey0,$inout3
+       aesenc          $rndkey0,$inout4
+       aesenc          $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       jnz             .Lxts_enc_loop6
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesenc         $rndkey1,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesenc         $rndkey1,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesenc         $rndkey1,$inout2
+       pxor    $twres,@tweak[5]
+        aesenc         $rndkey1,$inout3
+        aesenc         $rndkey1,$inout4
+        aesenc         $rndkey1,$inout5
+        $movkey        16($key),$rndkey1
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[0]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesenc         $rndkey0,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesenc         $rndkey0,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesenc         $rndkey0,$inout2
+       pxor    $twres,@tweak[5]
+        aesenc         $rndkey0,$inout3
+        aesenc         $rndkey0,$inout4
+        aesenc         $rndkey0,$inout5
+        $movkey        32($key),$rndkey0
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[1]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesenc         $rndkey1,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesenc         $rndkey1,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesenc         $rndkey1,$inout2
+       pxor    $twres,@tweak[5]
+        aesenc         $rndkey1,$inout3
+        aesenc         $rndkey1,$inout4
+        aesenc         $rndkey1,$inout5
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[2]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesenclast     $rndkey0,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesenclast     $rndkey0,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesenclast     $rndkey0,$inout2
+       pxor    $twres,@tweak[5]
+        aesenclast     $rndkey0,$inout3
+        aesenclast     $rndkey0,$inout4
+        aesenclast     $rndkey0,$inout5
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[3]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        xorps  `16*0`(%rsp),$inout0            # output^=tweak
+       pand    $twmask,$twres                  # isolate carry and residue
+        xorps  `16*1`(%rsp),$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+       pxor    $twres,@tweak[5]
+
+       xorps   `16*2`(%rsp),$inout2
+       movups  $inout0,`16*0`($out)            # write output
+       xorps   `16*3`(%rsp),$inout3
+       movups  $inout1,`16*1`($out)
+       xorps   `16*4`(%rsp),$inout4
+       movups  $inout2,`16*2`($out)
+       xorps   `16*5`(%rsp),$inout5
+       movups  $inout3,`16*3`($out)
+       mov     $rnds_,$rounds                  # restore $rounds
+       movups  $inout4,`16*4`($out)
+       movups  $inout5,`16*5`($out)
+       lea     `16*6`($out),$out
+       sub     \$16*6,$len
+       jnc     .Lxts_enc_grandloop
+
+       lea     3($rounds,$rounds),$rounds      # restore original value
+       mov     $key_,$key                      # restore $key
+       mov     $rounds,$rnds_                  # backup $rounds
+
+.Lxts_enc_short:
+       add     \$16*6,$len
+       jz      .Lxts_enc_done
+
+       cmp     \$0x20,$len
+       jb      .Lxts_enc_one
+       je      .Lxts_enc_two
+
+       cmp     \$0x40,$len
+       jb      .Lxts_enc_three
+       je      .Lxts_enc_four
+
+       pshufd  \$0x13,$twtmp,$twres
+       movdqa  @tweak[5],@tweak[4]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        movdqu ($inp),$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        movdqu 16*1($inp),$inout1
+       pxor    $twres,@tweak[5]
+
+       movdqu  16*2($inp),$inout2
+       pxor    @tweak[0],$inout0
+       movdqu  16*3($inp),$inout3
+       pxor    @tweak[1],$inout1
+       movdqu  16*4($inp),$inout4
+       lea     16*5($inp),$inp
+       pxor    @tweak[2],$inout2
+       pxor    @tweak[3],$inout3
+       pxor    @tweak[4],$inout4
+
+       call    _aesni_encrypt6
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[5],@tweak[0]
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       movdqu  $inout0,($out)
+       xorps   @tweak[3],$inout3
+       movdqu  $inout1,16*1($out)
+       xorps   @tweak[4],$inout4
+       movdqu  $inout2,16*2($out)
+       movdqu  $inout3,16*3($out)
+       movdqu  $inout4,16*4($out)
+       lea     16*5($out),$out
+       jmp     .Lxts_enc_done
+
+.align 16
+.Lxts_enc_one:
+       movups  ($inp),$inout0
+       lea     16*1($inp),$inp
+       xorps   @tweak[0],$inout0
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[1],@tweak[0]
+       movups  $inout0,($out)
+       lea     16*1($out),$out
+       jmp     .Lxts_enc_done
+
+.align 16
+.Lxts_enc_two:
+       movups  ($inp),$inout0
+       movups  16($inp),$inout1
+       lea     32($inp),$inp
+       xorps   @tweak[0],$inout0
+       xorps   @tweak[1],$inout1
+
+       call    _aesni_encrypt3
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[2],@tweak[0]
+       xorps   @tweak[1],$inout1
+       movups  $inout0,($out)
+       movups  $inout1,16*1($out)
+       lea     16*2($out),$out
+       jmp     .Lxts_enc_done
+
+.align 16
+.Lxts_enc_three:
+       movups  ($inp),$inout0
+       movups  16*1($inp),$inout1
+       movups  16*2($inp),$inout2
+       lea     16*3($inp),$inp
+       xorps   @tweak[0],$inout0
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+
+       call    _aesni_encrypt3
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[3],@tweak[0]
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       movups  $inout0,($out)
+       movups  $inout1,16*1($out)
+       movups  $inout2,16*2($out)
+       lea     16*3($out),$out
+       jmp     .Lxts_enc_done
+
+.align 16
+.Lxts_enc_four:
+       movups  ($inp),$inout0
+       movups  16*1($inp),$inout1
+       movups  16*2($inp),$inout2
+       xorps   @tweak[0],$inout0
+       movups  16*3($inp),$inout3
+       lea     16*4($inp),$inp
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       xorps   @tweak[3],$inout3
+
+       call    _aesni_encrypt4
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[5],@tweak[0]
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       movups  $inout0,($out)
+       xorps   @tweak[3],$inout3
+       movups  $inout1,16*1($out)
+       movups  $inout2,16*2($out)
+       movups  $inout3,16*3($out)
+       lea     16*4($out),$out
+       jmp     .Lxts_enc_done
+
+.align 16
+.Lxts_enc_done:
+       and     \$15,$len_
+       jz      .Lxts_enc_ret
+       mov     $len_,$len
+
+.Lxts_enc_steal:
+       movzb   ($inp),%eax                     # borrow $rounds ...
+       movzb   -16($out),%ecx                  # ... and $key
+       lea     1($inp),$inp
+       mov     %al,-16($out)
+       mov     %cl,0($out)
+       lea     1($out),$out
+       sub     \$1,$len
+       jnz     .Lxts_enc_steal
+
+       sub     $len_,$out                      # rewind $out
+       mov     $key_,$key                      # restore $key
+       mov     $rnds_,$rounds                  # restore $rounds
+
+       movups  -16($out),$inout0
+       xorps   @tweak[0],$inout0
+___
+       &aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+       xorps   @tweak[0],$inout0
+       movups  $inout0,-16($out)
+
+.Lxts_enc_ret:
+___
+$code.=<<___ if ($win64);
+       movaps  0x60(%rsp),%xmm6
+       movaps  0x70(%rsp),%xmm7
+       movaps  0x80(%rsp),%xmm8
+       movaps  0x90(%rsp),%xmm9
+       movaps  0xa0(%rsp),%xmm10
+       movaps  0xb0(%rsp),%xmm11
+       movaps  0xc0(%rsp),%xmm12
+       movaps  0xd0(%rsp),%xmm13
+       movaps  0xe0(%rsp),%xmm14
+       movaps  0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+       lea     $frame_size(%rsp),%rsp
+.Lxts_enc_epilogue:
+       ret
+.size  aesni_xts_encrypt,.-aesni_xts_encrypt
+___
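Throughout both XTS paths the recurring pshufd/pcmpgtd/pand/paddq/pxor cluster (the lines commented "psllq 1,$tweak" and "isolate carry and residue") multiplies the 128-bit tweak by alpha in GF(2^128), folding the carry back in with the 0x87 constant from .Lxts_magic. A minimal C sketch of that one step, assuming a little-endian two-word tweak (not part of the diff):

	#include <stdint.h>

	/* tweak = tweak * alpha in GF(2^128), polynomial x^128+x^7+x^2+x+1 */
	static void xts_mul_alpha(uint64_t t[2])
	{
		uint64_t carry = t[1] >> 63;		/* bit shifted out of the top */

		t[1] = (t[1] << 1) | (t[0] >> 63);	/* 128-bit left shift by one */
		t[0] <<= 1;
		t[0] ^= 0x87 & (0 - carry);		/* fold the carry back in as 0x87 */
	}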
+
+$code.=<<___;
+.globl aesni_xts_decrypt
+.type  aesni_xts_decrypt,\@function,6
+.align 16
+aesni_xts_decrypt:
+       lea     -$frame_size(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+       movaps  %xmm6,0x60(%rsp)
+       movaps  %xmm7,0x70(%rsp)
+       movaps  %xmm8,0x80(%rsp)
+       movaps  %xmm9,0x90(%rsp)
+       movaps  %xmm10,0xa0(%rsp)
+       movaps  %xmm11,0xb0(%rsp)
+       movaps  %xmm12,0xc0(%rsp)
+       movaps  %xmm13,0xd0(%rsp)
+       movaps  %xmm14,0xe0(%rsp)
+       movaps  %xmm15,0xf0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+       movups  ($ivp),@tweak[5]                # load clear-text tweak
+       mov     240($key2),$rounds              # key2->rounds
+       mov     240($key),$rnds_                # key1->rounds
+___
+       # generate the tweak
+       &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
+$code.=<<___;
+       xor     %eax,%eax                       # if ($len%16) len-=16;
+       test    \$15,$len
+       setnz   %al
+       shl     \$4,%rax
+       sub     %rax,$len
+
+       mov     $key,$key_                      # backup $key
+       mov     $rnds_,$rounds                  # backup $rounds
+       mov     $len,$len_                      # backup $len
+       and     \$-16,$len
+
+       movdqa  .Lxts_magic(%rip),$twmask
+       pxor    $twtmp,$twtmp
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+___
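The xor/test/setnz/shl/sub sequence above is a branch-free form of "if (len % 16) len -= 16": when the input ends in a partial block, one full block is held back so it can take part in ciphertext stealing later on. A C equivalent (sketch):

	len -= 16 * ((len & 15) != 0);	/* defer the last full block for stealing */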
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[$i]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+       pand    $twmask,$twres                  # isolate carry and residue
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+       pxor    $twres,@tweak[5]
+___
+    }
+$code.=<<___;
+       sub     \$16*6,$len
+       jc      .Lxts_dec_short
+
+       shr     \$1,$rounds
+       sub     \$1,$rounds
+       mov     $rounds,$rnds_
+       jmp     .Lxts_dec_grandloop
+
+.align 16
+.Lxts_dec_grandloop:
+       pshufd  \$0x13,$twtmp,$twres
+       movdqa  @tweak[5],@tweak[4]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+       movdqu  `16*0`($inp),$inout0            # load input
+       pand    $twmask,$twres                  # isolate carry and residue
+       movdqu  `16*1`($inp),$inout1
+       pxor    $twres,@tweak[5]
+
+       movdqu  `16*2`($inp),$inout2
+       pxor    @tweak[0],$inout0               # input^=tweak
+       movdqu  `16*3`($inp),$inout3
+       pxor    @tweak[1],$inout1
+       movdqu  `16*4`($inp),$inout4
+       pxor    @tweak[2],$inout2
+       movdqu  `16*5`($inp),$inout5
+       lea     `16*6`($inp),$inp
+       pxor    @tweak[3],$inout3
+       $movkey         ($key_),$rndkey0
+       pxor    @tweak[4],$inout4
+       pxor    @tweak[5],$inout5
+
+       # inline _aesni_decrypt6 and interleave first and last rounds
+       # with own code...
+       $movkey         16($key_),$rndkey1
+       pxor            $rndkey0,$inout0
+       pxor            $rndkey0,$inout1
+        movdqa @tweak[0],`16*0`(%rsp)          # put aside tweaks
+       aesdec          $rndkey1,$inout0
+       lea             32($key_),$key
+       pxor            $rndkey0,$inout2
+        movdqa @tweak[1],`16*1`(%rsp)
+       aesdec          $rndkey1,$inout1
+       pxor            $rndkey0,$inout3
+        movdqa @tweak[2],`16*2`(%rsp)
+       aesdec          $rndkey1,$inout2
+       pxor            $rndkey0,$inout4
+        movdqa @tweak[3],`16*3`(%rsp)
+       aesdec          $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       dec             $rounds
+        movdqa @tweak[4],`16*4`(%rsp)
+       aesdec          $rndkey1,$inout4
+        movdqa @tweak[5],`16*5`(%rsp)
+       aesdec          $rndkey1,$inout5
+       pxor    $twtmp,$twtmp
+       pcmpgtd @tweak[5],$twtmp
+       jmp             .Lxts_dec_loop6_enter
+
+.align 16
+.Lxts_dec_loop6:
+       aesdec          $rndkey1,$inout0
+       aesdec          $rndkey1,$inout1
+       dec             $rounds
+       aesdec          $rndkey1,$inout2
+       aesdec          $rndkey1,$inout3
+       aesdec          $rndkey1,$inout4
+       aesdec          $rndkey1,$inout5
+.Lxts_dec_loop6_enter:
+       $movkey         16($key),$rndkey1
+       aesdec          $rndkey0,$inout0
+       aesdec          $rndkey0,$inout1
+       lea             32($key),$key
+       aesdec          $rndkey0,$inout2
+       aesdec          $rndkey0,$inout3
+       aesdec          $rndkey0,$inout4
+       aesdec          $rndkey0,$inout5
+       $movkey         ($key),$rndkey0
+       jnz             .Lxts_dec_loop6
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesdec         $rndkey1,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesdec         $rndkey1,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesdec         $rndkey1,$inout2
+       pxor    $twres,@tweak[5]
+        aesdec         $rndkey1,$inout3
+        aesdec         $rndkey1,$inout4
+        aesdec         $rndkey1,$inout5
+        $movkey        16($key),$rndkey1
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[0]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesdec         $rndkey0,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesdec         $rndkey0,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesdec         $rndkey0,$inout2
+       pxor    $twres,@tweak[5]
+        aesdec         $rndkey0,$inout3
+        aesdec         $rndkey0,$inout4
+        aesdec         $rndkey0,$inout5
+        $movkey        32($key),$rndkey0
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[1]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesdec         $rndkey1,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesdec         $rndkey1,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesdec         $rndkey1,$inout2
+       pxor    $twres,@tweak[5]
+        aesdec         $rndkey1,$inout3
+        aesdec         $rndkey1,$inout4
+        aesdec         $rndkey1,$inout5
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[2]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        aesdeclast     $rndkey0,$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        aesdeclast     $rndkey0,$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+        aesdeclast     $rndkey0,$inout2
+       pxor    $twres,@tweak[5]
+        aesdeclast     $rndkey0,$inout3
+        aesdeclast     $rndkey0,$inout4
+        aesdeclast     $rndkey0,$inout5
+
+       pshufd  \$0x13,$twtmp,$twres
+       pxor    $twtmp,$twtmp
+       movdqa  @tweak[5],@tweak[3]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        xorps  `16*0`(%rsp),$inout0            # output^=tweak
+       pand    $twmask,$twres                  # isolate carry and residue
+        xorps  `16*1`(%rsp),$inout1
+       pcmpgtd @tweak[5],$twtmp                # broadcast upper bits
+       pxor    $twres,@tweak[5]
+
+       xorps   `16*2`(%rsp),$inout2
+       movups  $inout0,`16*0`($out)            # write output
+       xorps   `16*3`(%rsp),$inout3
+       movups  $inout1,`16*1`($out)
+       xorps   `16*4`(%rsp),$inout4
+       movups  $inout2,`16*2`($out)
+       xorps   `16*5`(%rsp),$inout5
+       movups  $inout3,`16*3`($out)
+       mov     $rnds_,$rounds                  # restore $rounds
+       movups  $inout4,`16*4`($out)
+       movups  $inout5,`16*5`($out)
+       lea     `16*6`($out),$out
+       sub     \$16*6,$len
+       jnc     .Lxts_dec_grandloop
+
+       lea     3($rounds,$rounds),$rounds      # restore original value
+       mov     $key_,$key                      # restore $key
+       mov     $rounds,$rnds_                  # backup $rounds
+
+.Lxts_dec_short:
+       add     \$16*6,$len
+       jz      .Lxts_dec_done
+
+       cmp     \$0x20,$len
+       jb      .Lxts_dec_one
+       je      .Lxts_dec_two
+
+       cmp     \$0x40,$len
+       jb      .Lxts_dec_three
+       je      .Lxts_dec_four
+
+       pshufd  \$0x13,$twtmp,$twres
+       movdqa  @tweak[5],@tweak[4]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        movdqu ($inp),$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        movdqu 16*1($inp),$inout1
+       pxor    $twres,@tweak[5]
+
+       movdqu  16*2($inp),$inout2
+       pxor    @tweak[0],$inout0
+       movdqu  16*3($inp),$inout3
+       pxor    @tweak[1],$inout1
+       movdqu  16*4($inp),$inout4
+       lea     16*5($inp),$inp
+       pxor    @tweak[2],$inout2
+       pxor    @tweak[3],$inout3
+       pxor    @tweak[4],$inout4
+
+       call    _aesni_decrypt6
+
+       xorps   @tweak[0],$inout0
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       movdqu  $inout0,($out)
+       xorps   @tweak[3],$inout3
+       movdqu  $inout1,16*1($out)
+       xorps   @tweak[4],$inout4
+       movdqu  $inout2,16*2($out)
+        pxor           $twtmp,$twtmp
+       movdqu  $inout3,16*3($out)
+        pcmpgtd        @tweak[5],$twtmp
+       movdqu  $inout4,16*4($out)
+       lea     16*5($out),$out
+        pshufd         \$0x13,$twtmp,@tweak[1] # $twres
+       and     \$15,$len_
+       jz      .Lxts_dec_ret
+
+       movdqa  @tweak[5],@tweak[0]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+       pand    $twmask,@tweak[1]               # isolate carry and residue
+       pxor    @tweak[5],@tweak[1]
+       jmp     .Lxts_dec_done2
+
+.align 16
+.Lxts_dec_one:
+       movups  ($inp),$inout0
+       lea     16*1($inp),$inp
+       xorps   @tweak[0],$inout0
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[1],@tweak[0]
+       movups  $inout0,($out)
+       movdqa  @tweak[2],@tweak[1]
+       lea     16*1($out),$out
+       jmp     .Lxts_dec_done
+
+.align 16
+.Lxts_dec_two:
+       movups  ($inp),$inout0
+       movups  16($inp),$inout1
+       lea     32($inp),$inp
+       xorps   @tweak[0],$inout0
+       xorps   @tweak[1],$inout1
+
+       call    _aesni_decrypt3
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[2],@tweak[0]
+       xorps   @tweak[1],$inout1
+       movdqa  @tweak[3],@tweak[1]
+       movups  $inout0,($out)
+       movups  $inout1,16*1($out)
+       lea     16*2($out),$out
+       jmp     .Lxts_dec_done
+
+.align 16
+.Lxts_dec_three:
+       movups  ($inp),$inout0
+       movups  16*1($inp),$inout1
+       movups  16*2($inp),$inout2
+       lea     16*3($inp),$inp
+       xorps   @tweak[0],$inout0
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+
+       call    _aesni_decrypt3
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[3],@tweak[0]
+       xorps   @tweak[1],$inout1
+       movdqa  @tweak[5],@tweak[1]
+       xorps   @tweak[2],$inout2
+       movups  $inout0,($out)
+       movups  $inout1,16*1($out)
+       movups  $inout2,16*2($out)
+       lea     16*3($out),$out
+       jmp     .Lxts_dec_done
+
+.align 16
+.Lxts_dec_four:
+       pshufd  \$0x13,$twtmp,$twres
+       movdqa  @tweak[5],@tweak[4]
+       paddq   @tweak[5],@tweak[5]             # psllq 1,$tweak
+        movups ($inp),$inout0
+       pand    $twmask,$twres                  # isolate carry and residue
+        movups 16*1($inp),$inout1
+       pxor    $twres,@tweak[5]
+
+       movups  16*2($inp),$inout2
+       xorps   @tweak[0],$inout0
+       movups  16*3($inp),$inout3
+       lea     16*4($inp),$inp
+       xorps   @tweak[1],$inout1
+       xorps   @tweak[2],$inout2
+       xorps   @tweak[3],$inout3
+
+       call    _aesni_decrypt4
+
+       xorps   @tweak[0],$inout0
+       movdqa  @tweak[4],@tweak[0]
+       xorps   @tweak[1],$inout1
+       movdqa  @tweak[5],@tweak[1]
+       xorps   @tweak[2],$inout2
+       movups  $inout0,($out)
+       xorps   @tweak[3],$inout3
+       movups  $inout1,16*1($out)
+       movups  $inout2,16*2($out)
+       movups  $inout3,16*3($out)
+       lea     16*4($out),$out
+       jmp     .Lxts_dec_done
+
+.align 16
+.Lxts_dec_done:
+       and     \$15,$len_
+       jz      .Lxts_dec_ret
+.Lxts_dec_done2:
+       mov     $len_,$len
+       mov     $key_,$key                      # restore $key
+       mov     $rnds_,$rounds                  # restore $rounds
+
+       movups  ($inp),$inout0
+       xorps   @tweak[1],$inout0
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       xorps   @tweak[1],$inout0
+       movups  $inout0,($out)
+
+.Lxts_dec_steal:
+       movzb   16($inp),%eax                   # borrow $rounds ...
+       movzb   ($out),%ecx                     # ... and $key
+       lea     1($inp),$inp
+       mov     %al,($out)
+       mov     %cl,16($out)
+       lea     1($out),$out
+       sub     \$1,$len
+       jnz     .Lxts_dec_steal
+
+       sub     $len_,$out                      # rewind $out
+       mov     $key_,$key                      # restore $key
+       mov     $rnds_,$rounds                  # restore $rounds
+
+       movups  ($out),$inout0
+       xorps   @tweak[0],$inout0
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       xorps   @tweak[0],$inout0
+       movups  $inout0,($out)
+
+.Lxts_dec_ret:
+___
+$code.=<<___ if ($win64);
+       movaps  0x60(%rsp),%xmm6
+       movaps  0x70(%rsp),%xmm7
+       movaps  0x80(%rsp),%xmm8
+       movaps  0x90(%rsp),%xmm9
+       movaps  0xa0(%rsp),%xmm10
+       movaps  0xb0(%rsp),%xmm11
+       movaps  0xc0(%rsp),%xmm12
+       movaps  0xd0(%rsp),%xmm13
+       movaps  0xe0(%rsp),%xmm14
+       movaps  0xf0(%rsp),%xmm15
+___
+$code.=<<___;
+       lea     $frame_size(%rsp),%rsp
+.Lxts_dec_epilogue:
+       ret
+.size  aesni_xts_decrypt,.-aesni_xts_decrypt
+___
+} }}
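.Lxts_enc_steal and .Lxts_dec_steal above are the standard XTS ciphertext-stealing swap, done one byte at a time: the plaintext tail is spliced into the last full ciphertext block, the displaced ciphertext bytes become the short final block, and the merged block is then put through the cipher once more (decryption performs the analogous swap with the tweak order reversed). A hedged C sketch of the encrypt-side loop, with enc_block standing in for the single-block AES-NI path:

	#include <stddef.h>
	#include <stdint.h>

	/* out points just past C(m-1), which has already been written;
	 * inp holds the taillen (< 16) remaining plaintext bytes */
	static void xts_enc_steal(uint8_t *out, const uint8_t *inp, size_t taillen,
	                          const uint8_t tweak[16],
	                          void (*enc_block)(uint8_t block[16]))
	{
		uint8_t *last = out - 16;	/* last full ciphertext block */
		uint8_t merged[16];
		size_t i;

		for (i = 0; i < taillen; i++) {
			uint8_t c = last[i];	/* byte displaced from C(m-1) */
			last[i] = inp[i];	/* splice the plaintext tail in */
			out[i]  = c;		/* stolen byte joins the final block */
		}
		for (i = 0; i < 16; i++)
			merged[i] = last[i] ^ tweak[i];
		enc_block(merged);		/* one more block, under the next tweak */
		for (i = 0; i < 16; i++)
			last[i] = merged[i] ^ tweak[i];
	}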
+\f
+########################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#                          size_t length, const AES_KEY *key,
+#                          unsigned char *ivp,const int enc);
+{
+my $reserved = $win64?0x40:-0x18;      # used in decrypt
+$code.=<<___;
+.globl ${PREFIX}_cbc_encrypt
+.type  ${PREFIX}_cbc_encrypt,\@function,6
+.align 16
+${PREFIX}_cbc_encrypt:
+       test    $len,$len               # check length
+       jz      .Lcbc_ret
+
+       mov     240($key),$rnds_        # key->rounds
+       mov     $key,$key_              # backup $key
+       test    %r9d,%r9d               # 6th argument
+       jz      .Lcbc_decrypt
+#--------------------------- CBC ENCRYPT ------------------------------#
+       movups  ($ivp),$inout0          # load iv as initial state
+       mov     $rnds_,$rounds
+       cmp     \$16,$len
+       jb      .Lcbc_enc_tail
+       sub     \$16,$len
+       jmp     .Lcbc_enc_loop
+.align 16
+.Lcbc_enc_loop:
+       movups  ($inp),$inout1          # load input
+       lea     16($inp),$inp
+       #xorps  $inout1,$inout0
+___
+       &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
+$code.=<<___;
+       mov     $rnds_,$rounds          # restore $rounds
+       mov     $key_,$key              # restore $key
+       movups  $inout0,0($out)         # store output
+       lea     16($out),$out
+       sub     \$16,$len
+       jnc     .Lcbc_enc_loop
+       add     \$16,$len
+       jnz     .Lcbc_enc_tail
+       movups  $inout0,($ivp)
+       jmp     .Lcbc_ret
+
+.Lcbc_enc_tail:
+       mov     $len,%rcx       # zaps $key
+       xchg    $inp,$out       # $inp is %rsi and $out is %rdi now
+       .long   0x9066A4F3      # rep movsb
+       mov     \$16,%ecx       # zero tail
+       sub     $len,%rcx
+       xor     %eax,%eax
+       .long   0x9066AAF3      # rep stosb
+       lea     -16(%rdi),%rdi  # rewind $out by 1 block
+       mov     $rnds_,$rounds  # restore $rounds
+       mov     %rdi,%rsi       # $inp and $out are the same
+       mov     $key_,$key      # restore $key
+       xor     $len,$len       # len=16
+       jmp     .Lcbc_enc_loop  # one more spin
+\f#--------------------------- CBC DECRYPT ------------------------------#
+.align 16
+.Lcbc_decrypt:
+___
+$code.=<<___ if ($win64);
+       lea     -0x58(%rsp),%rsp
+       movaps  %xmm6,(%rsp)
+       movaps  %xmm7,0x10(%rsp)
+       movaps  %xmm8,0x20(%rsp)
+       movaps  %xmm9,0x30(%rsp)
+.Lcbc_decrypt_body:
+___
+$code.=<<___;
+       movups  ($ivp),$iv
+       mov     $rnds_,$rounds
+       cmp     \$0x70,$len
+       jbe     .Lcbc_dec_tail
+       shr     \$1,$rnds_
+       sub     \$0x70,$len
+       mov     $rnds_,$rounds
+       movaps  $iv,$reserved(%rsp)
+       jmp     .Lcbc_dec_loop8_enter
+.align 16
+.Lcbc_dec_loop8:
+       movaps  $rndkey0,$reserved(%rsp)        # save IV
+       movups  $inout7,($out)
+       lea     0x10($out),$out
+.Lcbc_dec_loop8_enter:
+       $movkey         ($key),$rndkey0
+       movups  ($inp),$inout0                  # load input
+       movups  0x10($inp),$inout1
+       $movkey         16($key),$rndkey1
+
+       lea             32($key),$key
+       movdqu  0x20($inp),$inout2
+       xorps           $rndkey0,$inout0
+       movdqu  0x30($inp),$inout3
+       xorps           $rndkey0,$inout1
+       movdqu  0x40($inp),$inout4
+       aesdec          $rndkey1,$inout0
+       pxor            $rndkey0,$inout2
+       movdqu  0x50($inp),$inout5
+       aesdec          $rndkey1,$inout1
+       pxor            $rndkey0,$inout3
+       movdqu  0x60($inp),$inout6
+       aesdec          $rndkey1,$inout2
+       pxor            $rndkey0,$inout4
+       movdqu  0x70($inp),$inout7
+       aesdec          $rndkey1,$inout3
+       pxor            $rndkey0,$inout5
+       dec             $rounds
+       aesdec          $rndkey1,$inout4
+       pxor            $rndkey0,$inout6
+       aesdec          $rndkey1,$inout5
+       pxor            $rndkey0,$inout7
+       $movkey         ($key),$rndkey0
+       aesdec          $rndkey1,$inout6
+       aesdec          $rndkey1,$inout7
+       $movkey         16($key),$rndkey1
+
+       call            .Ldec_loop8_enter
+
+       movups  ($inp),$rndkey1         # re-load input
+       movups  0x10($inp),$rndkey0
+       xorps   $reserved(%rsp),$inout0 # ^= IV
+       xorps   $rndkey1,$inout1
+       movups  0x20($inp),$rndkey1
+       xorps   $rndkey0,$inout2
+       movups  0x30($inp),$rndkey0
+       xorps   $rndkey1,$inout3
+       movups  0x40($inp),$rndkey1
+       xorps   $rndkey0,$inout4
+       movups  0x50($inp),$rndkey0
+       xorps   $rndkey1,$inout5
+       movups  0x60($inp),$rndkey1
+       xorps   $rndkey0,$inout6
+       movups  0x70($inp),$rndkey0     # IV
+       xorps   $rndkey1,$inout7
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       mov     $rnds_,$rounds          # restore $rounds
+       movups  $inout4,0x40($out)
+       mov     $key_,$key              # restore $key
+       movups  $inout5,0x50($out)
+       lea     0x80($inp),$inp
+       movups  $inout6,0x60($out)
+       lea     0x70($out),$out
+       sub     \$0x80,$len
+       ja      .Lcbc_dec_loop8
+
+       movaps  $inout7,$inout0
+       movaps  $rndkey0,$iv
+       add     \$0x70,$len
+       jle     .Lcbc_dec_tail_collected
+       movups  $inout0,($out)
+       lea     1($rnds_,$rnds_),$rounds
+       lea     0x10($out),$out
+.Lcbc_dec_tail:
+       movups  ($inp),$inout0
+       movaps  $inout0,$in0
+       cmp     \$0x10,$len
+       jbe     .Lcbc_dec_one
+
+       movups  0x10($inp),$inout1
+       movaps  $inout1,$in1
+       cmp     \$0x20,$len
+       jbe     .Lcbc_dec_two
+
+       movups  0x20($inp),$inout2
+       movaps  $inout2,$in2
+       cmp     \$0x30,$len
+       jbe     .Lcbc_dec_three
+
+       movups  0x30($inp),$inout3
+       cmp     \$0x40,$len
+       jbe     .Lcbc_dec_four
+
+       movups  0x40($inp),$inout4
+       cmp     \$0x50,$len
+       jbe     .Lcbc_dec_five
+
+       movups  0x50($inp),$inout5
+       cmp     \$0x60,$len
+       jbe     .Lcbc_dec_six
+
+       movups  0x60($inp),$inout6
+       movaps  $iv,$reserved(%rsp)     # save IV
+       call    _aesni_decrypt8
+       movups  ($inp),$rndkey1
+       movups  0x10($inp),$rndkey0
+       xorps   $reserved(%rsp),$inout0 # ^= IV
+       xorps   $rndkey1,$inout1
+       movups  0x20($inp),$rndkey1
+       xorps   $rndkey0,$inout2
+       movups  0x30($inp),$rndkey0
+       xorps   $rndkey1,$inout3
+       movups  0x40($inp),$rndkey1
+       xorps   $rndkey0,$inout4
+       movups  0x50($inp),$rndkey0
+       xorps   $rndkey1,$inout5
+       movups  0x60($inp),$iv          # IV
+       xorps   $rndkey0,$inout6
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       movups  $inout5,0x50($out)
+       lea     0x60($out),$out
+       movaps  $inout6,$inout0
+       sub     \$0x70,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_one:
+___
+       &aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+       xorps   $iv,$inout0
+       movaps  $in0,$iv
+       sub     \$0x10,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_two:
+       xorps   $inout2,$inout2
+       call    _aesni_decrypt3
+       xorps   $iv,$inout0
+       xorps   $in0,$inout1
+       movups  $inout0,($out)
+       movaps  $in1,$iv
+       movaps  $inout1,$inout0
+       lea     0x10($out),$out
+       sub     \$0x20,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_three:
+       call    _aesni_decrypt3
+       xorps   $iv,$inout0
+       xorps   $in0,$inout1
+       movups  $inout0,($out)
+       xorps   $in1,$inout2
+       movups  $inout1,0x10($out)
+       movaps  $in2,$iv
+       movaps  $inout2,$inout0
+       lea     0x20($out),$out
+       sub     \$0x30,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_four:
+       call    _aesni_decrypt4
+       xorps   $iv,$inout0
+       movups  0x30($inp),$iv
+       xorps   $in0,$inout1
+       movups  $inout0,($out)
+       xorps   $in1,$inout2
+       movups  $inout1,0x10($out)
+       xorps   $in2,$inout3
+       movups  $inout2,0x20($out)
+       movaps  $inout3,$inout0
+       lea     0x30($out),$out
+       sub     \$0x40,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_five:
+       xorps   $inout5,$inout5
+       call    _aesni_decrypt6
+       movups  0x10($inp),$rndkey1
+       movups  0x20($inp),$rndkey0
+       xorps   $iv,$inout0
+       xorps   $in0,$inout1
+       xorps   $rndkey1,$inout2
+       movups  0x30($inp),$rndkey1
+       xorps   $rndkey0,$inout3
+       movups  0x40($inp),$iv
+       xorps   $rndkey1,$inout4
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       lea     0x40($out),$out
+       movaps  $inout4,$inout0
+       sub     \$0x50,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_six:
+       call    _aesni_decrypt6
+       movups  0x10($inp),$rndkey1
+       movups  0x20($inp),$rndkey0
+       xorps   $iv,$inout0
+       xorps   $in0,$inout1
+       xorps   $rndkey1,$inout2
+       movups  0x30($inp),$rndkey1
+       xorps   $rndkey0,$inout3
+       movups  0x40($inp),$rndkey0
+       xorps   $rndkey1,$inout4
+       movups  0x50($inp),$iv
+       xorps   $rndkey0,$inout5
+       movups  $inout0,($out)
+       movups  $inout1,0x10($out)
+       movups  $inout2,0x20($out)
+       movups  $inout3,0x30($out)
+       movups  $inout4,0x40($out)
+       lea     0x50($out),$out
+       movaps  $inout5,$inout0
+       sub     \$0x60,$len
+       jmp     .Lcbc_dec_tail_collected
+.align 16
+.Lcbc_dec_tail_collected:
+       and     \$15,$len
+       movups  $iv,($ivp)
+       jnz     .Lcbc_dec_tail_partial
+       movups  $inout0,($out)
+       jmp     .Lcbc_dec_ret
+.align 16
+.Lcbc_dec_tail_partial:
+       movaps  $inout0,$reserved(%rsp)
+       mov     \$16,%rcx
+       mov     $out,%rdi
+       sub     $len,%rcx
+       lea     $reserved(%rsp),%rsi
+       .long   0x9066A4F3      # rep movsb
+
+.Lcbc_dec_ret:
+___
+$code.=<<___ if ($win64);
+       movaps  (%rsp),%xmm6
+       movaps  0x10(%rsp),%xmm7
+       movaps  0x20(%rsp),%xmm8
+       movaps  0x30(%rsp),%xmm9
+       lea     0x58(%rsp),%rsp
+___
+$code.=<<___;
+.Lcbc_ret:
+       ret
+.size  ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+} \f
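The asymmetry above, a strictly serial one-block encrypt loop against an eight-block decrypt pipeline, falls directly out of the CBC equations: each ciphertext block feeds the next encryption, while every plaintext block depends only on two ciphertext blocks that are already known, so all the aesdec chains can run in flight at once. In C (illustrative sketch, dec stands in for the raw block decrypt):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	typedef void (*block_f)(const uint8_t in[16], uint8_t out[16], const void *key);

	/* P[i] = D(C[i]) ^ C[i-1]: the D() calls are independent of each other */
	static void cbc_decrypt(const uint8_t *ct, uint8_t *pt, size_t blocks,
	                        const uint8_t iv[16], block_f dec, const void *key)
	{
		uint8_t prev[16], cur[16];
		size_t i, j;

		memcpy(prev, iv, 16);
		for (i = 0; i < blocks; i++) {
			memcpy(cur, ct + 16*i, 16);	/* keep C[i]; pt may alias ct */
			dec(cur, pt + 16*i, key);
			for (j = 0; j < 16; j++)
				pt[16*i + j] ^= prev[j];
			memcpy(prev, cur, 16);
		}
	}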
+# int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
+#                              int bits, AES_KEY *key)
+{ my ($inp,$bits,$key) = @_4args;
+  $bits =~ s/%r/%e/;
+
+$code.=<<___;
+.globl ${PREFIX}_set_decrypt_key
+.type  ${PREFIX}_set_decrypt_key,\@abi-omnipotent
+.align 16
+${PREFIX}_set_decrypt_key:
+       .byte   0x48,0x83,0xEC,0x08     # sub rsp,8
+       call    __aesni_set_encrypt_key
+       shl     \$4,$bits               # rounds-1 after _aesni_set_encrypt_key
+       test    %eax,%eax
+       jnz     .Ldec_key_ret
+       lea     16($key,$bits),$inp     # points at the end of key schedule
+
+       $movkey ($key),%xmm0            # just swap
+       $movkey ($inp),%xmm1
+       $movkey %xmm0,($inp)
+       $movkey %xmm1,($key)
+       lea     16($key),$key
+       lea     -16($inp),$inp
+
+.Ldec_key_inverse:
+       $movkey ($key),%xmm0            # swap and inverse
+       $movkey ($inp),%xmm1
+       aesimc  %xmm0,%xmm0
+       aesimc  %xmm1,%xmm1
+       lea     16($key),$key
+       lea     -16($inp),$inp
+       $movkey %xmm0,16($inp)
+       $movkey %xmm1,-16($key)
+       cmp     $key,$inp
+       ja      .Ldec_key_inverse
+
+       $movkey ($key),%xmm0            # inverse middle
+       aesimc  %xmm0,%xmm0
+       $movkey %xmm0,($inp)
+.Ldec_key_ret:
+       add     \$8,%rsp
+       ret
+.LSEH_end_set_decrypt_key:
+.size  ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+___
+\f
+# This is based on a submission by
+#
+#      Huang Ying <ying.huang@intel.com>
+#      Vinodh Gopal <vinodh.gopal@intel.com>
+#      Kahraman Akdemir
+#
+# Aggressively optimized with respect to aeskeygenassist's critical path,
+# and contained in %xmm0-5 to meet the Win64 ABI requirement.
+#
+$code.=<<___;
+.globl ${PREFIX}_set_encrypt_key
+.type  ${PREFIX}_set_encrypt_key,\@abi-omnipotent
+.align 16
+${PREFIX}_set_encrypt_key:
+__aesni_set_encrypt_key:
+       .byte   0x48,0x83,0xEC,0x08     # sub rsp,8
+       mov     \$-1,%rax
+       test    $inp,$inp
+       jz      .Lenc_key_ret
+       test    $key,$key
+       jz      .Lenc_key_ret
+
+       movups  ($inp),%xmm0            # pull first 128 bits of *userKey
+       xorps   %xmm4,%xmm4             # low dword of xmm4 is assumed 0
+       lea     16($key),%rax
+       cmp     \$256,$bits
+       je      .L14rounds
+       cmp     \$192,$bits
+       je      .L12rounds
+       cmp     \$128,$bits
+       jne     .Lbad_keybits
+
+.L10rounds:
+       mov     \$9,$bits                       # 10 rounds for 128-bit key
+       $movkey %xmm0,($key)                    # round 0
+       aeskeygenassist \$0x1,%xmm0,%xmm1       # round 1
+       call            .Lkey_expansion_128_cold
+       aeskeygenassist \$0x2,%xmm0,%xmm1       # round 2
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x4,%xmm0,%xmm1       # round 3
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x8,%xmm0,%xmm1       # round 4
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x10,%xmm0,%xmm1      # round 5
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x20,%xmm0,%xmm1      # round 6
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x40,%xmm0,%xmm1      # round 7
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x80,%xmm0,%xmm1      # round 8
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x1b,%xmm0,%xmm1      # round 9
+       call            .Lkey_expansion_128
+       aeskeygenassist \$0x36,%xmm0,%xmm1      # round 10
+       call            .Lkey_expansion_128
+       $movkey %xmm0,(%rax)
+       mov     $bits,80(%rax)  # 240(%rdx)
+       xor     %eax,%eax
+       jmp     .Lenc_key_ret
+
+.align 16
+.L12rounds:
+       movq    16($inp),%xmm2                  # remaining 1/3 of *userKey
+       mov     \$11,$bits                      # 12 rounds for 192
+       $movkey %xmm0,($key)                    # round 0
+       aeskeygenassist \$0x1,%xmm2,%xmm1       # round 1,2
+       call            .Lkey_expansion_192a_cold
+       aeskeygenassist \$0x2,%xmm2,%xmm1       # round 2,3
+       call            .Lkey_expansion_192b
+       aeskeygenassist \$0x4,%xmm2,%xmm1       # round 4,5
+       call            .Lkey_expansion_192a
+       aeskeygenassist \$0x8,%xmm2,%xmm1       # round 5,6
+       call            .Lkey_expansion_192b
+       aeskeygenassist \$0x10,%xmm2,%xmm1      # round 7,8
+       call            .Lkey_expansion_192a
+       aeskeygenassist \$0x20,%xmm2,%xmm1      # round 8,9
+       call            .Lkey_expansion_192b
+       aeskeygenassist \$0x40,%xmm2,%xmm1      # round 10,11
+       call            .Lkey_expansion_192a
+       aeskeygenassist \$0x80,%xmm2,%xmm1      # round 11,12
+       call            .Lkey_expansion_192b
+       $movkey %xmm0,(%rax)
+       mov     $bits,48(%rax)  # 240(%rdx)
+       xor     %rax, %rax
+       jmp     .Lenc_key_ret
+
+.align 16
+.L14rounds:
+       movups  16($inp),%xmm2                  # remaining half of *userKey
+       mov     \$13,$bits                      # 14 rounds for 256
+       lea     16(%rax),%rax
+       $movkey %xmm0,($key)                    # round 0
+       $movkey %xmm2,16($key)                  # round 1
+       aeskeygenassist \$0x1,%xmm2,%xmm1       # round 2
+       call            .Lkey_expansion_256a_cold
+       aeskeygenassist \$0x1,%xmm0,%xmm1       # round 3
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x2,%xmm2,%xmm1       # round 4
+       call            .Lkey_expansion_256a
+       aeskeygenassist \$0x2,%xmm0,%xmm1       # round 5
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x4,%xmm2,%xmm1       # round 6
+       call            .Lkey_expansion_256a
+       aeskeygenassist \$0x4,%xmm0,%xmm1       # round 7
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x8,%xmm2,%xmm1       # round 8
+       call            .Lkey_expansion_256a
+       aeskeygenassist \$0x8,%xmm0,%xmm1       # round 9
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x10,%xmm2,%xmm1      # round 10
+       call            .Lkey_expansion_256a
+       aeskeygenassist \$0x10,%xmm0,%xmm1      # round 11
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x20,%xmm2,%xmm1      # round 12
+       call            .Lkey_expansion_256a
+       aeskeygenassist \$0x20,%xmm0,%xmm1      # round 13
+       call            .Lkey_expansion_256b
+       aeskeygenassist \$0x40,%xmm2,%xmm1      # round 14
+       call            .Lkey_expansion_256a
+       $movkey %xmm0,(%rax)
+       mov     $bits,16(%rax)  # 240(%rdx)
+       xor     %rax,%rax
+       jmp     .Lenc_key_ret
+
+.align 16
+.Lbad_keybits:
+       mov     \$-2,%rax
+.Lenc_key_ret:
+       add     \$8,%rsp
+       ret
+.LSEH_end_set_encrypt_key:
+\f
+.align 16
+.Lkey_expansion_128:
+       $movkey %xmm0,(%rax)
+       lea     16(%rax),%rax
+.Lkey_expansion_128_cold:
+       shufps  \$0b00010000,%xmm0,%xmm4
+       xorps   %xmm4, %xmm0
+       shufps  \$0b10001100,%xmm0,%xmm4
+       xorps   %xmm4, %xmm0
+       shufps  \$0b11111111,%xmm1,%xmm1        # critical path
+       xorps   %xmm1,%xmm0
+       ret
+
+.align 16
+.Lkey_expansion_192a:
+       $movkey %xmm0,(%rax)
+       lea     16(%rax),%rax
+.Lkey_expansion_192a_cold:
+       movaps  %xmm2, %xmm5
+.Lkey_expansion_192b_warm:
+       shufps  \$0b00010000,%xmm0,%xmm4
+       movdqa  %xmm2,%xmm3
+       xorps   %xmm4,%xmm0
+       shufps  \$0b10001100,%xmm0,%xmm4
+       pslldq  \$4,%xmm3
+       xorps   %xmm4,%xmm0
+       pshufd  \$0b01010101,%xmm1,%xmm1        # critical path
+       pxor    %xmm3,%xmm2
+       pxor    %xmm1,%xmm0
+       pshufd  \$0b11111111,%xmm0,%xmm3
+       pxor    %xmm3,%xmm2
+       ret
+
+.align 16
+.Lkey_expansion_192b:
+       movaps  %xmm0,%xmm3
+       shufps  \$0b01000100,%xmm0,%xmm5
+       $movkey %xmm5,(%rax)
+       shufps  \$0b01001110,%xmm2,%xmm3
+       $movkey %xmm3,16(%rax)
+       lea     32(%rax),%rax
+       jmp     .Lkey_expansion_192b_warm
+
+.align 16
+.Lkey_expansion_256a:
+       $movkey %xmm2,(%rax)
+       lea     16(%rax),%rax
+.Lkey_expansion_256a_cold:
+       shufps  \$0b00010000,%xmm0,%xmm4
+       xorps   %xmm4,%xmm0
+       shufps  \$0b10001100,%xmm0,%xmm4
+       xorps   %xmm4,%xmm0
+       shufps  \$0b11111111,%xmm1,%xmm1        # critical path
+       xorps   %xmm1,%xmm0
+       ret
+
+.align 16
+.Lkey_expansion_256b:
+       $movkey %xmm0,(%rax)
+       lea     16(%rax),%rax
+
+       shufps  \$0b00010000,%xmm2,%xmm4
+       xorps   %xmm4,%xmm2
+       shufps  \$0b10001100,%xmm2,%xmm4
+       xorps   %xmm4,%xmm2
+       shufps  \$0b10101010,%xmm1,%xmm1        # critical path
+       xorps   %xmm1,%xmm2
+       ret
+.size  ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+.size  __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
+___
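.Lkey_expansion_128 packs one round of the AES-128 key schedule into two shufps/xorps pairs, which build the running XOR prefix of the previous round key entirely in-register, plus one shufps to broadcast the word delivered by aeskeygenassist. Written out in scalar C (sketch; t is the rotated, substituted, rcon-mixed word the instruction produces):

	#include <stdint.h>

	/* one AES-128 key-schedule round: w[] is the previous round key,
	 * t = SubWord(RotWord(w[3])) ^ rcon, as supplied by aeskeygenassist */
	static void key128_round(uint32_t w[4], uint32_t t)
	{
		w[0] ^= t;	/* shufps $0b11111111 broadcasts t to all lanes; */
		w[1] ^= w[0];	/* the two shufps/xorps pairs compute this      */
		w[2] ^= w[1];	/* prefix XOR without leaving the registers     */
		w[3] ^= w[2];
	}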
+}
+\f
+$code.=<<___;
+.align 64
+.Lbswap_mask:
+       .byte   15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+.Lincrement32:
+       .long   6,6,6,0
+.Lincrement64:
+       .long   1,0,0,0
+.Lxts_magic:
+       .long   0x87,0,1,0
+
+.asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#              CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern        __imp_RtlVirtualUnwind
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.type  ecb_se_handler,\@abi-omnipotent
+.align 16
+ecb_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       jmp     .Lcommon_seh_tail
+.size  ecb_se_handler,.-ecb_se_handler
+
+.type  ccm64_se_handler,\@abi-omnipotent
+.align 16
+ccm64_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lcommon_seh_tail
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lcommon_seh_tail
+
+       lea     0(%rax),%rsi            # %xmm save area
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$8,%ecx                # 4*sizeof(%xmm0)/sizeof(%rax)
+       .long   0xa548f3fc              # cld; rep movsq
+       lea     0x58(%rax),%rax         # adjust stack pointer
+
+       jmp     .Lcommon_seh_tail
+.size  ccm64_se_handler,.-ccm64_se_handler
+
+.type  ctr32_se_handler,\@abi-omnipotent
+.align 16
+ctr32_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       lea     .Lctr32_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<"prologue" label
+       jb      .Lcommon_seh_tail
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       lea     .Lctr32_ret(%rip),%r10
+       cmp     %r10,%rbx
+       jae     .Lcommon_seh_tail
+
+       lea     0x20(%rax),%rsi         # %xmm save area
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
+       .long   0xa548f3fc              # cld; rep movsq
+       lea     0xc8(%rax),%rax         # adjust stack pointer
+
+       jmp     .Lcommon_seh_tail
+.size  ctr32_se_handler,.-ctr32_se_handler
+
+.type  xts_se_handler,\@abi-omnipotent
+.align 16
+xts_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     120($context),%rax      # pull context->Rax
+       mov     248($context),%rbx      # pull context->Rip
+
+       mov     8($disp),%rsi           # disp->ImageBase
+       mov     56($disp),%r11          # disp->HandlerData
+
+       mov     0(%r11),%r10d           # HandlerData[0]
+       lea     (%rsi,%r10),%r10        # prologue label
+       cmp     %r10,%rbx               # context->Rip<prologue label
+       jb      .Lcommon_seh_tail
+
+       mov     152($context),%rax      # pull context->Rsp
+
+       mov     4(%r11),%r10d           # HandlerData[1]
+       lea     (%rsi,%r10),%r10        # epilogue label
+       cmp     %r10,%rbx               # context->Rip>=epilogue label
+       jae     .Lcommon_seh_tail
+
+       lea     0x60(%rax),%rsi         # %xmm save area
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
+       .long   0xa548f3fc              # cld; rep movsq
+       lea     0x68+160(%rax),%rax     # adjust stack pointer
+
+       jmp     .Lcommon_seh_tail
+.size  xts_se_handler,.-xts_se_handler
+___
+$code.=<<___;
+.type  cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+       push    %rsi
+       push    %rdi
+       push    %rbx
+       push    %rbp
+       push    %r12
+       push    %r13
+       push    %r14
+       push    %r15
+       pushfq
+       sub     \$64,%rsp
+
+       mov     152($context),%rax      # pull context->Rsp
+       mov     248($context),%rbx      # pull context->Rip
+
+       lea     .Lcbc_decrypt(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<"prologue" label
+       jb      .Lcommon_seh_tail
+
+       lea     .Lcbc_decrypt_body(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip<cbc_decrypt_body
+       jb      .Lrestore_cbc_rax
+
+       lea     .Lcbc_ret(%rip),%r10
+       cmp     %r10,%rbx               # context->Rip>="epilogue" label
+       jae     .Lcommon_seh_tail
+
+       lea     0(%rax),%rsi            # top of stack
+       lea     512($context),%rdi      # &context.Xmm6
+       mov     \$8,%ecx                # 4*sizeof(%xmm0)/sizeof(%rax)
+       .long   0xa548f3fc              # cld; rep movsq
+       lea     0x58(%rax),%rax         # adjust stack pointer
+       jmp     .Lcommon_seh_tail
+
+.Lrestore_cbc_rax:
+       mov     120($context),%rax
+
+.Lcommon_seh_tail:
+       mov     8(%rax),%rdi
+       mov     16(%rax),%rsi
+       mov     %rax,152($context)      # restore context->Rsp
+       mov     %rsi,168($context)      # restore context->Rsi
+       mov     %rdi,176($context)      # restore context->Rdi
+
+       mov     40($disp),%rdi          # disp->ContextRecord
+       mov     $context,%rsi           # context
+       mov     \$154,%ecx              # sizeof(CONTEXT)
+       .long   0xa548f3fc              # cld; rep movsq
+
+       mov     $disp,%rsi
+       xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
+       mov     8(%rsi),%rdx            # arg2, disp->ImageBase
+       mov     0(%rsi),%r8             # arg3, disp->ControlPc
+       mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
+       mov     40(%rsi),%r10           # disp->ContextRecord
+       lea     56(%rsi),%r11           # &disp->HandlerData
+       lea     24(%rsi),%r12           # &disp->EstablisherFrame
+       mov     %r10,32(%rsp)           # arg5
+       mov     %r11,40(%rsp)           # arg6
+       mov     %r12,48(%rsp)           # arg7
+       mov     %rcx,56(%rsp)           # arg8, (NULL)
+       call    *__imp_RtlVirtualUnwind(%rip)
+
+       mov     \$1,%eax                # ExceptionContinueSearch
+       add     \$64,%rsp
+       popfq
+       pop     %r15
+       pop     %r14
+       pop     %r13
+       pop     %r12
+       pop     %rbp
+       pop     %rbx
+       pop     %rdi
+       pop     %rsi
+       ret
+.size  cbc_se_handler,.-cbc_se_handler
+
+.section       .pdata
+.align 4
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+       .rva    .LSEH_begin_aesni_ecb_encrypt
+       .rva    .LSEH_end_aesni_ecb_encrypt
+       .rva    .LSEH_info_ecb
+
+       .rva    .LSEH_begin_aesni_ccm64_encrypt_blocks
+       .rva    .LSEH_end_aesni_ccm64_encrypt_blocks
+       .rva    .LSEH_info_ccm64_enc
+
+       .rva    .LSEH_begin_aesni_ccm64_decrypt_blocks
+       .rva    .LSEH_end_aesni_ccm64_decrypt_blocks
+       .rva    .LSEH_info_ccm64_dec
+
+       .rva    .LSEH_begin_aesni_ctr32_encrypt_blocks
+       .rva    .LSEH_end_aesni_ctr32_encrypt_blocks
+       .rva    .LSEH_info_ctr32
+
+       .rva    .LSEH_begin_aesni_xts_encrypt
+       .rva    .LSEH_end_aesni_xts_encrypt
+       .rva    .LSEH_info_xts_enc
+
+       .rva    .LSEH_begin_aesni_xts_decrypt
+       .rva    .LSEH_end_aesni_xts_decrypt
+       .rva    .LSEH_info_xts_dec
+___
+$code.=<<___;
+       .rva    .LSEH_begin_${PREFIX}_cbc_encrypt
+       .rva    .LSEH_end_${PREFIX}_cbc_encrypt
+       .rva    .LSEH_info_cbc
+
+       .rva    ${PREFIX}_set_decrypt_key
+       .rva    .LSEH_end_set_decrypt_key
+       .rva    .LSEH_info_key
+
+       .rva    ${PREFIX}_set_encrypt_key
+       .rva    .LSEH_end_set_encrypt_key
+       .rva    .LSEH_info_key
+.section       .xdata
+.align 8
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.LSEH_info_ecb:
+       .byte   9,0,0,0
+       .rva    ecb_se_handler
+.LSEH_info_ccm64_enc:
+       .byte   9,0,0,0
+       .rva    ccm64_se_handler
+       .rva    .Lccm64_enc_body,.Lccm64_enc_ret        # HandlerData[]
+.LSEH_info_ccm64_dec:
+       .byte   9,0,0,0
+       .rva    ccm64_se_handler
+       .rva    .Lccm64_dec_body,.Lccm64_dec_ret        # HandlerData[]
+.LSEH_info_ctr32:
+       .byte   9,0,0,0
+       .rva    ctr32_se_handler
+.LSEH_info_xts_enc:
+       .byte   9,0,0,0
+       .rva    xts_se_handler
+       .rva    .Lxts_enc_body,.Lxts_enc_epilogue       # HandlerData[]
+.LSEH_info_xts_dec:
+       .byte   9,0,0,0
+       .rva    xts_se_handler
+       .rva    .Lxts_dec_body,.Lxts_dec_epilogue       # HandlerData[]
+___
+$code.=<<___;
+.LSEH_info_cbc:
+       .byte   9,0,0,0
+       .rva    cbc_se_handler
+.LSEH_info_key:
+       .byte   0x01,0x04,0x01,0x00
+       .byte   0x04,0x02,0x00,0x00     # sub rsp,8
+___
+}
+
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04                 if($dst>=8);
+    $rex|=0x01                 if($src>=8);
+    push @opcode,$rex|0x40     if($rex);
+}
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+       rex(\@opcode,$4,$3);
+       push @opcode,0x0f,0x3a,0xdf;
+       push @opcode,0xc0|($3&7)|(($4&7)<<3);   # ModR/M
+       my $c=$2;
+       push @opcode,$c=~/^0/?oct($c):$c;
+       return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+       my %opcodelet = (
+               "aesimc" => 0xdb,
+               "aesenc" => 0xdc,       "aesenclast" => 0xdd,
+               "aesdec" => 0xde,       "aesdeclast" => 0xdf
+       );
+       return undef if (!defined($opcodelet{$1}));
+       rex(\@opcode,$3,$2);
+       push @opcode,0x0f,0x38,$opcodelet{$1};
+       push @opcode,0xc0|($2&7)|(($3&7)<<3);   # ModR/M
+       return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
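The rex()/aesni() filters keep the module buildable with assemblers that predate AES-NI: each mnemonic the regular expressions match is lowered to raw bytes, namely the 0x66 prefix, an optional REX byte when either register is %xmm8-%xmm15, the 0x0f,0x38 escape (0x0f,0x3a for aeskeygenassist), the per-instruction opcode from %opcodelet, and a register-to-register ModR/M byte. Two worked examples of what the filter emits (illustrative, derived from the table above):

	aesenc     %xmm1,%xmm0	->  .byte 0x66,0x0f,0x38,0xdc,0xc1
	aesdeclast %xmm9,%xmm2	->  .byte 0x66,0x41,0x0f,0x38,0xdf,0xd1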
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+
+print $code;
+
+close STDOUT;
index 33679ca6b025ae123f71dfdd48177821b7e84809..08468f755b61bbdc4fb02342e41aece814bef826 100644 (file)
--- a/crypto/evp/e_aes.c
+++ b/crypto/evp/e_aes.c
@@ -1,5 +1,5 @@
 /* ====================================================================
- * Copyright (c) 2001 The OpenSSL Project.  All rights reserved.
+ * Copyright (c) 2001-2011 The OpenSSL Project.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 #ifndef OPENSSL_FIPS
 
-static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
-                                       const unsigned char *iv, int enc);
-
 typedef struct
        {
        AES_KEY ks;
        } EVP_AES_KEY;
 
-#define data(ctx)      EVP_C_DATA(EVP_AES_KEY,ctx)
-
-IMPLEMENT_BLOCK_CIPHER(aes_128, ks, AES, EVP_AES_KEY,
-                      NID_aes_128, 16, 16, 16, 128,
-                      0, aes_init_key, NULL, 
-                      EVP_CIPHER_set_asn1_iv,
-                      EVP_CIPHER_get_asn1_iv,
-                      NULL)
-IMPLEMENT_BLOCK_CIPHER(aes_192, ks, AES, EVP_AES_KEY,
-                      NID_aes_192, 16, 24, 16, 128,
-                      0, aes_init_key, NULL, 
-                      EVP_CIPHER_set_asn1_iv,
-                      EVP_CIPHER_get_asn1_iv,
-                      NULL)
-IMPLEMENT_BLOCK_CIPHER(aes_256, ks, AES, EVP_AES_KEY,
-                      NID_aes_256, 16, 32, 16, 128,
-                      0, aes_init_key, NULL, 
-                      EVP_CIPHER_set_asn1_iv,
-                      EVP_CIPHER_get_asn1_iv,
-                      NULL)
-
-#define IMPLEMENT_AES_CFBR(ksize,cbits)        IMPLEMENT_CFBR(aes,AES,EVP_AES_KEY,ks,ksize,cbits,16)
-
-IMPLEMENT_AES_CFBR(128,1)
-IMPLEMENT_AES_CFBR(192,1)
-IMPLEMENT_AES_CFBR(256,1)
-
-IMPLEMENT_AES_CFBR(128,8)
-IMPLEMENT_AES_CFBR(192,8)
-IMPLEMENT_AES_CFBR(256,8)
-
-static int aes_counter (EVP_CIPHER_CTX *ctx, unsigned char *out,
+#define MAXBITCHUNK    ((size_t)1<<(sizeof(size_t)*8-4))
+
+#if    defined(AES_ASM) && !defined(I386_ONLY) &&      (  \
+       ((defined(__i386)       || defined(__i386__)    || \
+         defined(_M_IX86)) && defined(OPENSSL_IA32_SSE2))|| \
+       defined(__x86_64)       || defined(__x86_64__)  || \
+       defined(_M_AMD64)       || defined(_M_X64)      || \
+       defined(__INTEL__)                              )
+/*
+ * AES-NI section
+ */
+extern unsigned int OPENSSL_ia32cap_P[2];
+#define        AESNI_CAPABLE   (1<<(57-32))
+
+int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
+                       AES_KEY *key);
+int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
+                       AES_KEY *key);
+
+void aesni_encrypt(const unsigned char *in, unsigned char *out,
+                       const AES_KEY *key);
+void aesni_decrypt(const unsigned char *in, unsigned char *out,
+                       const AES_KEY *key);
+
+void aesni_ecb_encrypt(const unsigned char *in,
+                       unsigned char *out,
+                       size_t length,
+                       const AES_KEY *key,
+                       int enc);
+void aesni_cbc_encrypt(const unsigned char *in,
+                       unsigned char *out,
+                       size_t length,
+                       const AES_KEY *key,
+                       unsigned char *ivec, int enc);
+
+void aesni_ctr32_encrypt_blocks(const unsigned char *in,
+                       unsigned char *out,
+                       size_t blocks,
+                       const void *key,
+                       const unsigned char *ivec);
+
+static int aesni_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
+                  const unsigned char *iv, int enc)
+       {
+       int ret;
+
+       if (((ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_ECB_MODE
+           || (ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_CBC_MODE)
+           && !enc) 
+               ret = aesni_set_decrypt_key(key, ctx->key_len*8, ctx->cipher_data);
+       else
+               ret = aesni_set_encrypt_key(key, ctx->key_len*8, ctx->cipher_data);
+
+       if(ret < 0)
+               {
+               EVPerr(EVP_F_AES_INIT_KEY,EVP_R_AES_KEY_SETUP_FAILED);
+               return 0;
+               }
+
+       return 1;
+       }
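aesni_init_key builds a decryption schedule only for ECB and CBC because the feedback and counter modes always run the cipher in the forward direction, even when ctx->encrypt is 0. Nothing changes for callers; the usual EVP entry points land on the AES-NI routines transparently (hedged usage sketch, classic EVP API):

	EVP_CIPHER_CTX ctx;

	EVP_CIPHER_CTX_init(&ctx);
	/* EVP_aes_128_cbc() returns the AES-NI cipher table when bit 57 of
	 * OPENSSL_ia32cap_P is set, and the portable one otherwise */
	EVP_EncryptInit_ex(&ctx, EVP_aes_128_cbc(), NULL, key, iv);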
+
+static int aesni_cbc_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in, size_t len)
+{
+       aesni_cbc_encrypt(in,out,len,ctx->cipher_data,ctx->iv,ctx->encrypt);
+
+       return 1;
+}
+
+static int aesni_ecb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in, size_t len)
+{
+       size_t  bl = ctx->cipher->block_size;
+
+       if (len<bl)     return 1;
+
+       aesni_ecb_encrypt(in,out,len,ctx->cipher_data,ctx->encrypt);
+
+       return 1;
+}
+
+static int aesni_ofb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_ofb128_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,
+                       (block128_f)aesni_encrypt);
+       return 1;
+}
+
+static int aesni_cfb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_cfb128_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)aesni_encrypt);
+       return 1;
+}
+
+static int aesni_cfb8_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_cfb128_8_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)aesni_encrypt);
+       return 1;
+}
+
+static int aesni_cfb1_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       if (ctx->flags&EVP_CIPH_FLAG_LENGTH_BITS) {
+               CRYPTO_cfb128_1_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)aesni_encrypt);
+               return 1;
+       }
+
+       while (len>=MAXBITCHUNK) {
+               CRYPTO_cfb128_1_encrypt(in,out,MAXBITCHUNK*8,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)aesni_encrypt);
+               len-=MAXBITCHUNK;
+       }
+       if (len)
+               CRYPTO_cfb128_1_encrypt(in,out,len*8,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)aesni_encrypt);
+       
+       return 1;
+}
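CRYPTO_cfb128_1_encrypt takes its length in bits, so the byte count is multiplied by 8 before each call; MAXBITCHUNK caps the chunk size so that multiplication can never overflow size_t. For example (sketch, LP64 assumed):

	size_t max_bytes = (size_t)1 << 60;	/* MAXBITCHUNK on a 64-bit size_t */
	size_t max_bits  = max_bytes * 8;	/* 2^63, still representable      */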
+
+static int aesni_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                const unsigned char *in, size_t len)
 {
        unsigned int num;
        num = ctx->num;
-#ifdef AES_CTR_ASM
-       void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
-                       size_t blocks, const AES_KEY *key,
-                       const unsigned char ivec[AES_BLOCK_SIZE]);
 
        CRYPTO_ctr128_encrypt_ctr32(in,out,len,
-               &((EVP_AES_KEY *)ctx->cipher_data)->ks,
-               ctx->iv,ctx->buf,&num,(ctr128_f)AES_ctr32_encrypt);
-#else
-       CRYPTO_ctr128_encrypt(in,out,len,
-               &((EVP_AES_KEY *)ctx->cipher_data)->ks,
-               ctx->iv,ctx->buf,&num,(block128_f)AES_encrypt);
-#endif
+                       ctx->cipher_data,ctx->iv,ctx->buf,&num,
+                       (ctr128_f)aesni_ctr32_encrypt_blocks);
+
        ctx->num = (size_t)num;
        return 1;
 }
 
-static const EVP_CIPHER aes_128_ctr_cipher=
-       {
-       NID_aes_128_ctr,1,16,16,
-       EVP_CIPH_CTR_MODE,
-       aes_init_key,
-       aes_counter,
-       NULL,
-       sizeof(EVP_AES_KEY),
-       NULL,
-       NULL,
-       NULL,
-       NULL
-       };
-
-const EVP_CIPHER *EVP_aes_128_ctr (void)
-{      return &aes_128_ctr_cipher;     }
-
-static const EVP_CIPHER aes_192_ctr_cipher=
-       {
-       NID_aes_192_ctr,1,24,16,
-       EVP_CIPH_CTR_MODE,
-       aes_init_key,
-       aes_counter,
-       NULL,
-       sizeof(EVP_AES_KEY),
-       NULL,
-       NULL,
-       NULL,
-       NULL
-       };
-
-const EVP_CIPHER *EVP_aes_192_ctr (void)
-{      return &aes_192_ctr_cipher;     }
-
-static const EVP_CIPHER aes_256_ctr_cipher=
-       {
-       NID_aes_256_ctr,1,32,16,
-       EVP_CIPH_CTR_MODE,
-       aes_init_key,
-       aes_counter,
-       NULL,
-       sizeof(EVP_AES_KEY),
-       NULL,
-       NULL,
-       NULL,
-       NULL
-       };
-
-const EVP_CIPHER *EVP_aes_256_ctr (void)
-{      return &aes_256_ctr_cipher;     }
+#define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
+static const EVP_CIPHER aesni_##keylen##_##mode = { \
+       nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
+       flags|EVP_CIPH_##MODE##_MODE,   \
+       aesni_init_key,                 \
+       aesni_##mode##_cipher,          \
+       NULL,                           \
+       sizeof(EVP_AES_KEY),            \
+       NULL,NULL,NULL,NULL }; \
+static const EVP_CIPHER aes_##keylen##_##mode = { \
+       nid##_##keylen##_##nmode,blocksize,     \
+       keylen/8,ivlen, \
+       flags|EVP_CIPH_##MODE##_MODE,   \
+       aes_init_key,                   \
+       aes_##mode##_cipher,            \
+       NULL,                           \
+       sizeof(EVP_AES_KEY),            \
+       NULL,NULL,NULL,NULL }; \
+const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
+{ return (OPENSSL_ia32cap_P[1]&AESNI_CAPABLE)? \
+  &aesni_##keylen##_##mode:&aes_##keylen##_##mode; }
+
+#else
+
+#define BLOCK_CIPHER_generic(nid,keylen,blocksize,ivlen,nmode,mode,MODE,flags) \
+static const EVP_CIPHER aes_##keylen##_##mode = { \
+       nid##_##keylen##_##nmode,blocksize,keylen/8,ivlen, \
+       flags|EVP_CIPH_##MODE##_MODE,   \
+       aes_init_key,                   \
+       aes_##mode##_cipher,            \
+       NULL,                           \
+       sizeof(EVP_AES_KEY),            \
+       NULL,NULL,NULL,NULL }; \
+const EVP_CIPHER *EVP_aes_##keylen##_##mode(void) \
+{ return &aes_##keylen##_##mode; }
+#endif
+
+#define BLOCK_CIPHER_generic_pack(nid,keylen,flags)            \
+       BLOCK_CIPHER_generic(nid,keylen,16,16,cbc,cbc,CBC,flags|EVP_CIPH_FLAG_DEFAULT_ASN1)     \
+       BLOCK_CIPHER_generic(nid,keylen,16,0,ecb,ecb,ECB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1)      \
+       BLOCK_CIPHER_generic(nid,keylen,1,16,ofb128,ofb,OFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1)   \
+       BLOCK_CIPHER_generic(nid,keylen,1,16,cfb128,cfb,CFB,flags|EVP_CIPH_FLAG_DEFAULT_ASN1)   \
+       BLOCK_CIPHER_generic(nid,keylen,1,16,cfb1,cfb1,CFB,flags)       \
+       BLOCK_CIPHER_generic(nid,keylen,1,16,cfb8,cfb8,CFB,flags)       \
+       BLOCK_CIPHER_generic(nid,keylen,1,16,ctr,ctr,CTR,flags)
 
 static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
                   const unsigned char *iv, int enc)
@@ -180,9 +266,9 @@ static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
        if (((ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_ECB_MODE
            || (ctx->cipher->flags & EVP_CIPH_MODE) == EVP_CIPH_CBC_MODE)
            && !enc) 
-               ret=AES_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
+               ret = AES_set_decrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
        else
-               ret=AES_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
+               ret = AES_set_encrypt_key(key, ctx->key_len * 8, ctx->cipher_data);
 
        if(ret < 0)
                {
@@ -193,6 +279,109 @@ static int aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
        return 1;
        }
 
+static int aes_cbc_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in, size_t len)
+{
+       AES_cbc_encrypt(in,out,len,ctx->cipher_data,ctx->iv,ctx->encrypt);
+
+       return 1;
+}
+
+static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in, size_t len)
+{
+       size_t  bl = ctx->cipher->block_size;
+       size_t  i;
+
+       if (len<bl)     return 1;
+
+       if (ctx->encrypt) {
+               for (i=0,len-=bl;i<=len;i+=bl)
+                       AES_encrypt(in+i,out+i,ctx->cipher_data);
+       } else {
+               for (i=0,len-=bl;i<=len;i+=bl)
+                       AES_decrypt(in+i,out+i,ctx->cipher_data);
+       }
+
+       return 1;
+}
+
+static int aes_ofb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_ofb128_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,
+                       (block128_f)AES_encrypt);
+       return 1;
+}
+
+static int aes_cfb_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_cfb128_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)AES_encrypt);
+       return 1;
+}
+
+static int aes_cfb8_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       CRYPTO_cfb128_8_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)AES_encrypt);
+       return 1;
+}
+
+static int aes_cfb1_cipher(EVP_CIPHER_CTX *ctx,unsigned char *out,
+       const unsigned char *in,size_t len)
+{
+       if (ctx->flags&EVP_CIPH_FLAG_LENGTH_BITS) {
+               CRYPTO_cfb128_1_encrypt(in,out,len,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)AES_encrypt);
+               return 1;
+       }
+
+       while (len>=MAXBITCHUNK) {
+               CRYPTO_cfb128_1_encrypt(in,out,MAXBITCHUNK*8,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)AES_encrypt);
+               len-=MAXBITCHUNK;
+       }
+       if (len)
+               CRYPTO_cfb128_1_encrypt(in,out,len*8,ctx->cipher_data,
+                       ctx->iv,&ctx->num,ctx->encrypt,
+                       (block128_f)AES_encrypt);
+       
+       return 1;
+}
+
+static int aes_ctr_cipher (EVP_CIPHER_CTX *ctx, unsigned char *out,
+               const unsigned char *in, size_t len)
+{
+       unsigned int num;
+       num = ctx->num;
+#ifdef AES_CTR_ASM
+       void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
+                       size_t blocks, const AES_KEY *key,
+                       const unsigned char ivec[AES_BLOCK_SIZE]);
+
+       CRYPTO_ctr128_encrypt_ctr32(in,out,len,
+               &((EVP_AES_KEY *)ctx->cipher_data)->ks,
+               ctx->iv,ctx->buf,&num,(ctr128_f)AES_ctr32_encrypt);
+#else
+       CRYPTO_ctr128_encrypt(in,out,len,
+               &((EVP_AES_KEY *)ctx->cipher_data)->ks,
+               ctx->iv,ctx->buf,&num,(block128_f)AES_encrypt);
+#endif
+       ctx->num = (size_t)num;
+       return 1;
+}
+
+BLOCK_CIPHER_generic_pack(NID_aes,128,0)
+BLOCK_CIPHER_generic_pack(NID_aes,192,0)
+BLOCK_CIPHER_generic_pack(NID_aes,256,0)
+
+#endif
 #endif
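
For reference, here is roughly what one instantiation of the BLOCK_CIPHER_generic macro above expands to in the AESNI_CAPABLE branch; a sketch of the 128-bit CBC case only, with the flags argument folded in by hand, not an exact preprocessor dump:

    static const EVP_CIPHER aesni_128_cbc = {
            NID_aes_128_cbc, 16, 16, 16,    /* nid, block, key, iv bytes */
            EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CBC_MODE,
            aesni_init_key,                 /* AES-NI key schedule       */
            aesni_cbc_cipher,               /* AES-NI bulk routine       */
            NULL, sizeof(EVP_AES_KEY), NULL, NULL, NULL, NULL };

    static const EVP_CIPHER aes_128_cbc = {
            NID_aes_128_cbc, 16, 16, 16,
            EVP_CIPH_FLAG_DEFAULT_ASN1 | EVP_CIPH_CBC_MODE,
            aes_init_key,                   /* integer-only fallback     */
            aes_cbc_cipher,
            NULL, sizeof(EVP_AES_KEY), NULL, NULL, NULL, NULL };

    /* One runtime CPUID test picks the table; both variants share
     * EVP_AES_KEY, so the choice is invisible to EVP callers. */
    const EVP_CIPHER *EVP_aes_128_cbc(void)
    { return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE) ?
             &aesni_128_cbc : &aes_128_cbc; }
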
index 932037f5514dbdf4799168492b4f86f03df0049c..181614a98c2d522f8fc54ed98ce4061fe3d67c0b 100644 (file)
@@ -62,10 +62,10 @@ typedef unsigned int u32;
 typedef unsigned char u8;
 
 #define STRICT_ALIGNMENT
-#if defined(__i386)    || defined(__i386__)    || \
-    defined(__x86_64)  || defined(__x86_64__)  || \
-    defined(_M_IX86)   || defined(_M_AMD64)    || defined(_M_X64) || \
-    defined(__s390__)  || defined(__s390x__)
+#if defined(__i386)    || defined(__i386__)    || \
+    defined(__x86_64)  || defined(__x86_64__)  || \
+    defined(_M_IX86)   || defined(_M_AMD64)    || defined(_M_X64) || \
+    defined(__s390__)  || defined(__s390x__)
 #  undef STRICT_ALIGNMENT
 #endif
 
@@ -182,3 +182,84 @@ void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
 
        *num=n;
 }
+
+#define GETU32(p)      ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
+#define PUTU32(p,v)    ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
+
+/* increment upper 96 bits of 128-bit counter by 1 */
+static void ctr96_inc(unsigned char *counter) {
+       u32 n=12;
+       u8  c;
+
+       do {
+               --n;
+               c = counter[n];
+               ++c;
+               counter[n] = c;
+               if (c) return;
+       } while (n);
+}
+
+void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
+                       size_t len, const void *key,
+                       unsigned char ivec[16], unsigned char ecount_buf[16],
+                       unsigned int *num, ctr128_f func)
+{
+       unsigned int n,ctr32;
+
+       assert(in && out && key && ecount_buf && num);
+       assert(*num < 16);
+
+       n = *num;
+
+       while (n && len) {
+               *(out++) = *(in++) ^ ecount_buf[n];
+               --len;
+               n = (n+1) % 16;
+       }
+
+       ctr32 = GETU32(ivec+12);
+       while (len>=16) {
+               size_t blocks = len/16;
+               /*
+                * 1<<28 is just a not-so-small yet not-so-large number...
+                * The condition below is practically never met, but it has
+                * to be checked for code correctness.
+                */
+               if (sizeof(size_t)>sizeof(unsigned int) && blocks>(1U<<28))
+                       blocks = (1U<<28);
+               /*
+                * As (*func) operates on a 32-bit counter, the caller
+                * has to handle overflow. The 'if' below detects the
+                * overflow, which is then handled by limiting the
+                * number of blocks to the exact overflow point...
+                */
+               ctr32 += (u32)blocks;
+               if (ctr32 < blocks) {
+                       blocks -= ctr32;
+                       ctr32   = 0;
+               }
+               (*func)(in,out,blocks,key,ivec);
+               /* (*func) does not update ivec, the caller does: */
+               PUTU32(ivec+12,ctr32);
+               /* ... overflow was detected, propagate carry. */
+               if (ctr32 == 0) ctr96_inc(ivec);
+               blocks *= 16;
+               len -= blocks;
+               out += blocks;
+               in  += blocks;
+       }
+       if (len) {
+               memset(ecount_buf,0,16);
+               (*func)(ecount_buf,ecount_buf,1,key,ivec);
+               ++ctr32;
+               PUTU32(ivec+12,ctr32);
+               if (ctr32 == 0) ctr96_inc(ivec);
+               while (len--) {
+                       out[n] = in[n] ^ ecount_buf[n];
+                       ++n;
+               }
+       }
+
+       *num=n;
+}
index af8d97d795892f1bf51934c472bf7cf0cdaed558..261c4640bedecbffef6662b5f22ce8750fd43ff0 100644 (file)
@@ -15,6 +15,10 @@ typedef void (*cbc128_f)(const unsigned char *in, unsigned char *out,
                        size_t len, const void *key,
                        unsigned char ivec[16], int enc);
 
+typedef void (*ctr128_f)(const unsigned char *in, unsigned char *out,
+                       size_t blocks, const void *key,
+                       const unsigned char ivec[16]);
+
 void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
                        size_t len, const void *key,
                        unsigned char ivec[16], block128_f block);
@@ -27,6 +31,11 @@ void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
                        unsigned char ivec[16], unsigned char ecount_buf[16],
                        unsigned int *num, block128_f block);
 
+void CRYPTO_ctr128_encrypt_ctr32(const unsigned char *in, unsigned char *out,
+                       size_t len, const void *key,
+                       unsigned char ivec[16], unsigned char ecount_buf[16],
+                       unsigned int *num, ctr128_f ctr);
+
 void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
                        size_t len, const void *key,
                        unsigned char ivec[16], int *num,
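
A minimal caller-side sketch of the new ctr32 interface, modelled on aesni_ctr_cipher above. The aesni_* entry points are the internal assembler routines this patch declares in e_aes.c, not public API; the int-returning key-setup signature is assumed from its use there, and error handling is omitted:

    #include <string.h>
    #include <openssl/aes.h>
    #include "modes.h"              /* crypto/modes/modes.h */

    /* internal entry points from this patch, declared here for the sketch */
    int  aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                            AES_KEY *key);
    void aesni_ctr32_encrypt_blocks(const unsigned char *in, unsigned char *out,
                            size_t blocks, const void *key,
                            const unsigned char *ivec);

    static void ctr_oneshot(const unsigned char key[16],
                            const unsigned char iv[16], const unsigned char *in,
                            unsigned char *out, size_t len)
    {
            AES_KEY ks;
            unsigned char ivec[16], ecount[16] = {0};
            unsigned int num = 0;   /* leftover key-stream bytes in ecount */

            memcpy(ivec, iv, 16);   /* ivec is advanced in place           */
            aesni_set_encrypt_key(key, 128, &ks);
            CRYPTO_ctr128_encrypt_ctr32(in, out, len, &ks, ivec, ecount,
                            &num, (ctr128_f)aesni_ctr32_encrypt_blocks);
    }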