From: Andy Polyakov Date: Mon, 13 May 2013 20:49:58 +0000 (+0200) Subject: Add AES-SHA256 stitch. X-Git-Tag: master-post-reformat~1317 X-Git-Url: https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff_plain;h=8a97a33063d93be1130b762daefd729346af4d29;hp=22de0e6583b7f8764e383cf818db45863281eeec Add AES-SHA256 stitch. --- diff --git a/Configure b/Configure index e2497a4f99..601f8931a6 100755 --- a/Configure +++ b/Configure @@ -128,7 +128,7 @@ my $x86_asm="x86cpuid.o:bn-586.o co-586.o x86-mont.o x86-gf2m.o:des-586.o crypt5 my $x86_elf_asm="$x86_asm:elf"; -my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o aesni-gcm-x86_64.o:e_padlock-x86_64.o"; +my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o aesni-gcm-x86_64.o:e_padlock-x86_64.o"; my $ia64_asm="ia64cpuid.o:bn-ia64.o ia64-mont.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::ghash-ia64.o::void"; my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o vis3-mont.o sparct4-mont.o sparcv9-gf2m.o:des_enc-sparc.o fcrypt_b.o dest4-sparcv9.o:aes_core.o aes_cbc.o aes-sparcv9.o aest4-sparcv9.o::md5-sparcv9.o:sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o::::::camellia.o cmll_misc.o cmll_cbc.o cmllt4-sparcv9.o:ghash-sparcv9.o::void"; my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::::void"; diff --git a/TABLE b/TABLE index 46d148fb4f..1e90ee37d5 100644 --- a/TABLE +++ b/TABLE @@ -308,7 +308,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -803,7 +803,7 @@ $bn_ops = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN $cpuid_obj = x86_64cpuid.o $bn_obj = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -1496,7 +1496,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -1661,7 +1661,7 @@ $bn_ops 
= SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN $cpuid_obj = x86_64cpuid.o $bn_obj = bn_asm.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -1760,7 +1760,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -1826,7 +1826,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -2024,7 +2024,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -2552,7 +2552,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -2750,7 +2750,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -2816,7 +2816,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -4466,7 +4466,7 @@ $bn_ops = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT DES_UNROLL $cpuid_obj = 
x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -4499,7 +4499,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -4532,7 +4532,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -4730,7 +4730,7 @@ $bn_ops = SIXTY_FOUR_BIT RC4_CHUNK_LL DES_INT EXPORT_VAR_AS_FN $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -5720,7 +5720,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o @@ -5753,7 +5753,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL $cpuid_obj = x86_64cpuid.o $bn_obj = x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o $des_obj = -$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o +$aes_obj = aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o aesni-sha256-x86_64.o $bf_obj = $md5_obj = md5-x86_64.o $sha1_obj = sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o diff --git a/crypto/aes/Makefile b/crypto/aes/Makefile index e185ae75db..d2069e46d3 100644 --- a/crypto/aes/Makefile +++ b/crypto/aes/Makefile @@ -65,6 +65,8 @@ aesni-x86_64.s: asm/aesni-x86_64.pl $(PERL) asm/aesni-x86_64.pl $(PERLASM_SCHEME) > $@ aesni-sha1-x86_64.s: asm/aesni-sha1-x86_64.pl $(PERL) asm/aesni-sha1-x86_64.pl $(PERLASM_SCHEME) > $@ +aesni-sha256-x86_64.s: asm/aesni-sha256-x86_64.pl + $(PERL) asm/aesni-sha256-x86_64.pl $(PERLASM_SCHEME) > $@ aes-sparcv9.s: asm/aes-sparcv9.pl $(PERL) asm/aes-sparcv9.pl $(CFLAGS) > $@ diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl new file mode 100644 index 
0000000000..ebce021452
--- /dev/null
+++ b/crypto/aes/asm/aesni-sha256-x86_64.pl
@@ -0,0 +1,1358 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# January 2013
+#
+# This is an AESNI-CBC+SHA256 stitch implementation. The idea, as spelled
+# out in http://download.intel.com/design/intarch/papers/323686.pdf, is
+# that since AESNI-CBC encrypt exhibits *very* low instruction-level
+# parallelism, interleaving it with another algorithm allows processor
+# resources to be utilized better, achieving higher performance. The
+# SHA256 instruction sequences(*) are taken from sha512-x86_64.pl and
+# the AESNI code is woven into them. As SHA256 dominates execution time,
+# stitch performance does not depend on AES key length. Below are
+# performance numbers in cycles per processed byte, less is better,
+# for standalone AESNI-CBC encrypt, standalone SHA256, and stitched
+# subroutine:
+#
+#		AES-128/-192/-256+SHA256	this(**)	gain
+# Sandy Bridge	5.05/6.05/7.05+11.6		13.0		+28%/36%/43%
+# Ivy Bridge	5.05/6.05/7.05+10.3		11.6		+32%/41%/50%
+# Bulldozer	5.77/6.89/8.00+13.7		13.7		+42%/50%/58%
+#
+# (*)	there are XOP, AVX1 and AVX2 code paths; Westmere is omitted
+#	from the loop because the estimated gain was not high enough
+#	to justify the effort;
+# (**)	these are EVP-free results; results obtained with 'speed
+#	-evp aes-256-cbc-hmac-sha256' will vary by a percent or two;
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$func="aesni_cbc_sha256_enc";
+$TABLE="K256";
+$SZ=4;
+@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
+				"%r8d","%r9d","%r10d","%r11d");
+($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%esi");
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+$rounds=64;
+
+########################################################################
+# void aesni_cbc_sha256_enc(const void *inp,
+#			void *out,
+#			size_t length,
+#			const AES_KEY *key,
+#			unsigned char *iv,
+#			SHA256_CTX *ctx,
+#			const void *in0);
+($inp,  $out,  $len,  $key,  $ivp, $ctx, $in0) =
+("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
+
+$Tbl="%rbp";
+
+$_inp="16*$SZ+0*8(%rsp)";
+$_out="16*$SZ+1*8(%rsp)";
+$_end="16*$SZ+2*8(%rsp)";
+$_key="16*$SZ+3*8(%rsp)";
+$_ivp="16*$SZ+4*8(%rsp)";
+$_ctx="16*$SZ+5*8(%rsp)";
+$_in0="16*$SZ+6*8(%rsp)";
+$_rsp="16*$SZ+7*8(%rsp)";
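+#
+# The slots above describe the routine's stack frame; the layout they
+# imply, sketched here for orientation with $SZ==4 (inferred from the
+# definitions and prologues below, not spelled out in the original):
+#
+#	0(%rsp)			16*$SZ bytes of X[i]+K[i] message schedule
+#	16*$SZ+0*8(%rsp)	saved $inp
+#	16*$SZ+1*8(%rsp)	saved $out
+#	16*$SZ+2*8(%rsp)	end-of-input pointer ($_end)
+#	16*$SZ+3*8(%rsp)	slot for $key (unused, key stays in a register)
+#	16*$SZ+4*8(%rsp)	saved $ivp
+#	16*$SZ+5*8(%rsp)	saved $ctx
+#	16*$SZ+6*8(%rsp)	saved $in0
+#	16*$SZ+7*8(%rsp)	saved caller %rsp
+#	16*$SZ+8*8(%rsp)	%xmm6-%xmm15 save area (Win64 only)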
+$framesz=16*$SZ+8*8; + +$code=<<___; +.text + +.extern OPENSSL_ia32cap_P +.globl $func +.type $func,\@abi-omnipotent +.align 16 +$func: +___ +$code.=<<___ if ($avx); + lea OPENSSL_ia32cap_P(%rip),%r11 + mov \$1,%eax + cmp \$0,`$win64?"%rcx":"%rdi"` + je .Lprobe + mov 0(%r11),%eax + mov 4(%r11),%r10d + mov 8(%r11),%r11d + + test \$`1<<11`,%r10d # check for XOP + jnz ${func}_xop +___ +$code.=<<___ if ($avx>1); + and \$`1<<8|1<<5|1<<3`,%r11d # check for BMI2+AVX2+BMI1 + cmp \$`1<<8|1<<5|1<<3`,%r11d + je ${func}_avx2 +___ +$code.=<<___ if ($avx); + and \$`1<<30`,%eax # mask "Intel CPU" bit + and \$`1<<28|1<<9`,%r10d # mask AVX+SSSE3 bits + or %eax,%r10d + cmp \$`1<<28|1<<9|1<<30`,%r10d + je ${func}_avx + ud2 +___ +$code.=<<___; + xor %eax,%eax + cmp \$0,`$win64?"%rcx":"%rdi"` + je .Lprobe + ud2 +.Lprobe: + ret +.size $func,.-$func + +.align 64 +.type $TABLE,\@object +$TABLE: + .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + + .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f + .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f + .long 0,0,0,0, 0,0,0,0, -1,-1,-1,-1 + .long 0,0,0,0, 0,0,0,0 + .asciz "AESNI-CBC+SHA256 stitch for x86_64, CRYPTOGAMS by " +.align 64 +___ + +###################################################################### +# SIMD code paths +# +{{{ +($iv,$inout,$roundkey,$temp, + $mask10,$mask12,$mask14,$offload)=map("%xmm$_",(8..15)); + +$aesni_cbc_idx=0; +@aesni_cbc_block = ( +## &vmovdqu ($roundkey,"0x00-0x80($inp)");' +## &vmovdqu ($inout,($inp)); +## &mov ($_inp,$inp); + + '&vpxor ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x10-0x80($inp)");', + + '&vpxor ($inout,$inout,$iv);', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x20-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x30-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. 
+ ' &vmovdqu ($roundkey,"0x40-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x50-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x60-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x70-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x80-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x90-0x80($inp)");', + + '&vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0xa0-0x80($inp)");', + + '&vaesenclast ($temp,$inout,$roundkey);'. + ' &vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0xb0-0x80($inp)");', + + '&vpand ($iv,$temp,$mask10);'. + ' &vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0xc0-0x80($inp)");', + + '&vaesenclast ($temp,$inout,$roundkey);'. + ' &vaesenc ($inout,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0xd0-0x80($inp)");', + + '&vpand ($temp,$temp,$mask12);'. + ' &vaesenc ($inout,$inout,$roundkey);'. + '&vmovdqu ($roundkey,"0xe0-0x80($inp)");', + + '&vpor ($iv,$iv,$temp);'. + ' &vaesenclast ($temp,$inout,$roundkey);'. + ' &vmovdqu ($roundkey,"0x00-0x80($inp)");' + +## &mov ($inp,$_inp); +## &mov ($out,$_out); +## &vpand ($temp,$temp,$mask14); +## &vpor ($iv,$iv,$temp); +## &vmovdqu ($iv,($out,$inp); +## &lea (inp,16($inp)); +); + +my $a4=$T1; +my ($a,$b,$c,$d,$e,$f,$g,$h); + +sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm +{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; + my $arg = pop; + $arg = "\$$arg" if ($arg*1 eq $arg); + $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; +} + +sub body_00_15 () { + ( + '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'. + + '&ror ($a0,$Sigma1[2]-$Sigma1[1])', + '&mov ($a,$a1)', + '&mov ($a4,$f)', + + '&xor ($a0,$e)', + '&ror ($a1,$Sigma0[2]-$Sigma0[1])', + '&xor ($a4,$g)', # f^g + + '&ror ($a0,$Sigma1[1]-$Sigma1[0])', + '&xor ($a1,$a)', + '&and ($a4,$e)', # (f^g)&e + + @aesni_cbc_block[$aesni_cbc_idx++]. + '&xor ($a0,$e)', + '&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i] + '&mov ($a2,$a)', + + '&ror ($a1,$Sigma0[1]-$Sigma0[0])', + '&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g + '&xor ($a2,$b)', # a^b, b^c in next round + + '&ror ($a0,$Sigma1[0])', # Sigma1(e) + '&add ($h,$a4)', # h+=Ch(e,f,g) + '&and ($a3,$a2)', # (b^c)&(a^b) + + '&xor ($a1,$a)', + '&add ($h,$a0)', # h+=Sigma1(e) + '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b) + + '&add ($d,$h)', # d+=h + '&ror ($a1,$Sigma0[0])', # Sigma0(a) + '&add ($h,$a3)', # h+=Maj(a,b,c) + + '&mov ($a0,$d)', + '&add ($a1,$h);'. 
# h+=Sigma0(a) + '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;' + ); +} + +if ($avx) {{ +###################################################################### +# XOP code path +# +$code.=<<___; +.type ${func}_xop,\@function,6 +.align 64 +${func}_xop: +.Lxop_shortcut: + mov `($win64?56:8)`(%rsp),$in0 # load 7th parameter + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + mov %rsp,%r11 # copy %rsp + sub \$`$framesz+$win64*16*10`,%rsp + and \$-64,%rsp # align stack frame + + shl \$6,$len + sub $inp,$out # re-bias + sub $inp,$in0 + add $inp,$len # end of input + + #mov $inp,$_inp # saved later + mov $out,$_out + mov $len,$_end + #mov $key,$_key # remains resident in $inp register + mov $ivp,$_ivp + mov $ctx,$_ctx + mov $in0,$_in0 + mov %r11,$_rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,`$framesz+16*0`(%rsp) + movaps %xmm7,`$framesz+16*1`(%rsp) + movaps %xmm8,`$framesz+16*2`(%rsp) + movaps %xmm9,`$framesz+16*3`(%rsp) + movaps %xmm10,`$framesz+16*4`(%rsp) + movaps %xmm11,`$framesz+16*5`(%rsp) + movaps %xmm12,`$framesz+16*6`(%rsp) + movaps %xmm13,`$framesz+16*7`(%rsp) + movaps %xmm14,`$framesz+16*8`(%rsp) + movaps %xmm15,`$framesz+16*9`(%rsp) +___ +$code.=<<___; +.Lprologue_xop: + vzeroall + + mov $inp,%r12 # borrow $a4 + lea 0x80($key),$inp # size optimization, reassign + lea $TABLE+`$SZ*2*$rounds+32`(%rip),%r13 # borrow $a0 + mov 0xf0-0x80($inp),%r14d # rounds, borrow $a1 + mov $ctx,%r15 # borrow $a2 + mov $in0,%rsi # borrow $a3 + vmovdqu ($ivp),$iv # load IV + sub \$9,%r14 + + mov $SZ*0(%r15),$A + mov $SZ*1(%r15),$B + mov $SZ*2(%r15),$C + mov $SZ*3(%r15),$D + mov $SZ*4(%r15),$E + mov $SZ*5(%r15),$F + mov $SZ*6(%r15),$G + mov $SZ*7(%r15),$H + + vmovdqa 0x00(%r13,%r14,8),$mask14 + vmovdqa 0x10(%r13,%r14,8),$mask12 + vmovdqa 0x20(%r13,%r14,8),$mask10 + vmovdqu 0x00-0x80($inp),$roundkey + jmp .Lloop_xop +___ + if ($SZ==4) { # SHA256 + my @X = map("%xmm$_",(0..3)); + my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7)); + +$code.=<<___; +.align 16 +.Lloop_xop: + vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 + vmovdqu 0x00(%rsi,%r12),@X[0] + vmovdqu 0x10(%rsi,%r12),@X[1] + vmovdqu 0x20(%rsi,%r12),@X[2] + vmovdqu 0x30(%rsi,%r12),@X[3] + vpshufb $t3,@X[0],@X[0] + lea $TABLE(%rip),$Tbl + vpshufb $t3,@X[1],@X[1] + vpshufb $t3,@X[2],@X[2] + vpaddd 0x00($Tbl),@X[0],$t0 + vpshufb $t3,@X[3],@X[3] + vpaddd 0x20($Tbl),@X[1],$t1 + vpaddd 0x40($Tbl),@X[2],$t2 + vpaddd 0x60($Tbl),@X[3],$t3 + vmovdqa $t0,0x00(%rsp) + mov $A,$a1 + vmovdqa $t1,0x10(%rsp) + mov $B,$a3 + vmovdqa $t2,0x20(%rsp) + xor $C,$a3 # magic + vmovdqa $t3,0x30(%rsp) + mov $E,$a0 + jmp .Lxop_00_47 + +.align 16 +.Lxop_00_47: + sub \$-16*2*$SZ,$Tbl # size optimization + vmovdqu (%r12),$inout # $a4 + mov %r12,$_inp # $a4 +___ +sub XOP_256_00_47 () { +my $j = shift; +my $body = shift; +my @X = @_; +my @insns = (&$body,&$body,&$body,&$body); # 104 instructions + + &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4] + eval(shift(@insns)); + eval(shift(@insns)); + &vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12] + eval(shift(@insns)); + eval(shift(@insns)); + &vprotd ($t1,$t0,8*$SZ-$sigma0[1]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrld ($t0,$t0,$sigma0[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12] + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t0,$t0,$t1); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + 
eval(shift(@insns)); + &vprotd ($t3,@X[3],8*$SZ-$sigma1[1]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t0,$t0,$t2); # sigma0(X[1..4]) + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrld ($t2,@X[3],$sigma1[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4]) + eval(shift(@insns)); + eval(shift(@insns)); + &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t3,$t3,$t2); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t3,$t3,$t1); # sigma1(X[14..15]) + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrldq ($t3,$t3,8); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15]) + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vprotd ($t3,@X[0],8*$SZ-$sigma1[1]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpsrld ($t2,@X[0],$sigma1[2]); + eval(shift(@insns)); + eval(shift(@insns)); + &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t3,$t3,$t2); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpxor ($t3,$t3,$t1); # sigma1(X[16..17]) + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpslldq ($t3,$t3,8); # 22 instructions + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17]) + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); + foreach (@insns) { eval; } # remaining instructions + &vmovdqa (16*$j."(%rsp)",$t2); +} + + $aesni_cbc_idx=0; + for ($i=0,$j=0; $j<4; $j++) { + &XOP_256_00_47($j,\&body_00_15,@X); + push(@X,shift(@X)); # rotate(@X) + } + &mov ("%r12",$_inp); # borrow $a4 + &vpand ($temp,$temp,$mask14); + &mov ("%r15",$_out); # borrow $a2 + &vpor ($iv,$iv,$temp); + &vmovdqu ("(%r15,%r12)",$iv); # write output + &lea ("%r12","16(%r12)"); # inp++ + + &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0); + &jne (".Lxop_00_47"); + + &vmovdqu ($inout,"(%r12)"); + &mov ($_inp,"%r12"); + + $aesni_cbc_idx=0; + for ($i=0; $i<16; ) { + foreach(body_00_15()) { eval; } + } + } +$code.=<<___; + mov $_inp,%r12 # borrow $a4 + mov $_out,%r13 # borrow $a0 + mov $_ctx,%r15 # borrow $a2 + mov $_in0,%rsi # borrow $a3 + + vpand $mask14,$temp,$temp + mov $a1,$A + vpor $temp,$iv,$iv + vmovdqu $iv,(%r13,%r12) # write output + lea 16(%r12),%r12 # inp++ + + add $SZ*0(%r15),$A + add $SZ*1(%r15),$B + add $SZ*2(%r15),$C + add $SZ*3(%r15),$D + add $SZ*4(%r15),$E + add $SZ*5(%r15),$F + add $SZ*6(%r15),$G + add $SZ*7(%r15),$H + + cmp $_end,%r12 + + mov $A,$SZ*0(%r15) + mov $B,$SZ*1(%r15) + mov $C,$SZ*2(%r15) + mov $D,$SZ*3(%r15) + mov $E,$SZ*4(%r15) + mov $F,$SZ*5(%r15) + mov $G,$SZ*6(%r15) + mov $H,$SZ*7(%r15) + + jb .Lloop_xop + + mov $_ivp,$ivp + mov $_rsp,%rsi + vmovdqu $iv,($ivp) # output IV + vzeroall +___ +$code.=<<___ if ($win64); + movaps `$framesz+16*0`(%rsp),%xmm6 + movaps `$framesz+16*1`(%rsp),%xmm7 + movaps `$framesz+16*2`(%rsp),%xmm8 + movaps `$framesz+16*3`(%rsp),%xmm9 + movaps `$framesz+16*4`(%rsp),%xmm10 + movaps `$framesz+16*5`(%rsp),%xmm11 + movaps `$framesz+16*6`(%rsp),%xmm12 + movaps `$framesz+16*7`(%rsp),%xmm13 + movaps `$framesz+16*8`(%rsp),%xmm14 
+ movaps `$framesz+16*9`(%rsp),%xmm15 +___ +$code.=<<___; + mov (%rsi),%r15 + mov 8(%rsi),%r14 + mov 16(%rsi),%r13 + mov 24(%rsi),%r12 + mov 32(%rsi),%rbp + mov 40(%rsi),%rbx + lea 48(%rsi),%rsp +.Lepilogue_xop: + ret +.size ${func}_xop,.-${func}_xop +___ +###################################################################### +# AVX+shrd code path +# +local *ror = sub { &shrd(@_[0],@_) }; + +$code.=<<___; +.type ${func}_avx,\@function,6 +.align 64 +${func}_avx: +.Lavx_shortcut: + mov `($win64?56:8)`(%rsp),$in0 # load 7th parameter + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + mov %rsp,%r11 # copy %rsp + sub \$`$framesz+$win64*16*10`,%rsp + and \$-64,%rsp # align stack frame + + shl \$6,$len + sub $inp,$out # re-bias + sub $inp,$in0 + add $inp,$len # end of input + + #mov $inp,$_inp # saved later + mov $out,$_out + mov $len,$_end + #mov $key,$_key # remains resident in $inp register + mov $ivp,$_ivp + mov $ctx,$_ctx + mov $in0,$_in0 + mov %r11,$_rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,`$framesz+16*0`(%rsp) + movaps %xmm7,`$framesz+16*1`(%rsp) + movaps %xmm8,`$framesz+16*2`(%rsp) + movaps %xmm9,`$framesz+16*3`(%rsp) + movaps %xmm10,`$framesz+16*4`(%rsp) + movaps %xmm11,`$framesz+16*5`(%rsp) + movaps %xmm12,`$framesz+16*6`(%rsp) + movaps %xmm13,`$framesz+16*7`(%rsp) + movaps %xmm14,`$framesz+16*8`(%rsp) + movaps %xmm15,`$framesz+16*9`(%rsp) +___ +$code.=<<___; +.Lprologue_avx: + vzeroall + + mov $inp,%r12 # borrow $a4 + lea 0x80($key),$inp # size optimization, reassign + lea $TABLE+`$SZ*2*$rounds+32`(%rip),%r13 # borrow $a0 + mov 0xf0-0x80($inp),%r14d # rounds, borrow $a1 + mov $ctx,%r15 # borrow $a2 + mov $in0,%rsi # borrow $a3 + vmovdqu ($ivp),$iv # load IV + sub \$9,%r14 + + mov $SZ*0(%r15),$A + mov $SZ*1(%r15),$B + mov $SZ*2(%r15),$C + mov $SZ*3(%r15),$D + mov $SZ*4(%r15),$E + mov $SZ*5(%r15),$F + mov $SZ*6(%r15),$G + mov $SZ*7(%r15),$H + + vmovdqa 0x00(%r13,%r14,8),$mask14 + vmovdqa 0x10(%r13,%r14,8),$mask12 + vmovdqa 0x20(%r13,%r14,8),$mask10 + vmovdqu 0x00-0x80($inp),$roundkey +___ + if ($SZ==4) { # SHA256 + my @X = map("%xmm$_",(0..3)); + my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7)); + +$code.=<<___; + jmp .Lloop_avx +.align 16 +.Lloop_avx: + vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 + vmovdqu 0x00(%rsi,%r12),@X[0] + vmovdqu 0x10(%rsi,%r12),@X[1] + vmovdqu 0x20(%rsi,%r12),@X[2] + vmovdqu 0x30(%rsi,%r12),@X[3] + vpshufb $t3,@X[0],@X[0] + lea $TABLE(%rip),$Tbl + vpshufb $t3,@X[1],@X[1] + vpshufb $t3,@X[2],@X[2] + vpaddd 0x00($Tbl),@X[0],$t0 + vpshufb $t3,@X[3],@X[3] + vpaddd 0x20($Tbl),@X[1],$t1 + vpaddd 0x40($Tbl),@X[2],$t2 + vpaddd 0x60($Tbl),@X[3],$t3 + vmovdqa $t0,0x00(%rsp) + mov $A,$a1 + vmovdqa $t1,0x10(%rsp) + mov $B,$a3 + vmovdqa $t2,0x20(%rsp) + xor $C,$a3 # magic + vmovdqa $t3,0x30(%rsp) + mov $E,$a0 + jmp .Lavx_00_47 + +.align 16 +.Lavx_00_47: + sub \$-16*2*$SZ,$Tbl # size optimization + vmovdqu (%r12),$inout # $a4 + mov %r12,$_inp # $a4 +___ +sub Xupdate_256_AVX () { + ( + '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4] + '&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12] + '&vpsrld ($t2,$t0,$sigma0[0]);', + '&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12] + '&vpsrld ($t3,$t0,$sigma0[2])', + '&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);', + '&vpxor ($t0,$t3,$t2)', + '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15] + '&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);', + '&vpxor ($t0,$t0,$t1)', + '&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);', + '&vpxor ($t0,$t0,$t2)', + '&vpsrld ($t2,$t3,$sigma1[2]);', + '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4]) + '&vpsrlq 
($t3,$t3,$sigma1[0]);', + '&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4]) + '&vpxor ($t2,$t2,$t3);', + '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])', + '&vpxor ($t2,$t2,$t3)', # sigma1(X[14..15]) + '&vpshufd ($t2,$t2,0b10000100)', + '&vpsrldq ($t2,$t2,8)', + '&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15]) + '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17] + '&vpsrld ($t2,$t3,$sigma1[2])', + '&vpsrlq ($t3,$t3,$sigma1[0])', + '&vpxor ($t2,$t2,$t3);', + '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])', + '&vpxor ($t2,$t2,$t3)', + '&vpshufd ($t2,$t2,0b11101000)', + '&vpslldq ($t2,$t2,8)', + '&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17]) + ); +} + +sub AVX_256_00_47 () { +my $j = shift; +my $body = shift; +my @X = @_; +my @insns = (&$body,&$body,&$body,&$body); # 104 instructions + + foreach (Xupdate_256_AVX()) { # 29 instructions + eval; + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + } + &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); + foreach (@insns) { eval; } # remaining instructions + &vmovdqa (16*$j."(%rsp)",$t2); +} + + $aesni_cbc_idx=0; + for ($i=0,$j=0; $j<4; $j++) { + &AVX_256_00_47($j,\&body_00_15,@X); + push(@X,shift(@X)); # rotate(@X) + } + &mov ("%r12",$_inp); # borrow $a4 + &vpand ($temp,$temp,$mask14); + &mov ("%r15",$_out); # borrow $a2 + &vpor ($iv,$iv,$temp); + &vmovdqu ("(%r15,%r12)",$iv); # write output + &lea ("%r12","16(%r12)"); # inp++ + + &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0); + &jne (".Lavx_00_47"); + + &vmovdqu ($inout,"(%r12)"); + &mov ($_inp,"%r12"); + + $aesni_cbc_idx=0; + for ($i=0; $i<16; ) { + foreach(body_00_15()) { eval; } + } + + } +$code.=<<___; + mov $_inp,%r12 # borrow $a4 + mov $_out,%r13 # borrow $a0 + mov $_ctx,%r15 # borrow $a2 + mov $_in0,%rsi # borrow $a3 + + vpand $mask14,$temp,$temp + mov $a1,$A + vpor $temp,$iv,$iv + vmovdqu $iv,(%r13,%r12) # write output + lea 16(%r12),%r12 # inp++ + + add $SZ*0(%r15),$A + add $SZ*1(%r15),$B + add $SZ*2(%r15),$C + add $SZ*3(%r15),$D + add $SZ*4(%r15),$E + add $SZ*5(%r15),$F + add $SZ*6(%r15),$G + add $SZ*7(%r15),$H + + cmp $_end,%r12 + + mov $A,$SZ*0(%r15) + mov $B,$SZ*1(%r15) + mov $C,$SZ*2(%r15) + mov $D,$SZ*3(%r15) + mov $E,$SZ*4(%r15) + mov $F,$SZ*5(%r15) + mov $G,$SZ*6(%r15) + mov $H,$SZ*7(%r15) + jb .Lloop_avx + + mov $_ivp,$ivp + mov $_rsp,%rsi + vmovdqu $iv,($ivp) # output IV + vzeroall +___ +$code.=<<___ if ($win64); + movaps `$framesz+16*0`(%rsp),%xmm6 + movaps `$framesz+16*1`(%rsp),%xmm7 + movaps `$framesz+16*2`(%rsp),%xmm8 + movaps `$framesz+16*3`(%rsp),%xmm9 + movaps `$framesz+16*4`(%rsp),%xmm10 + movaps `$framesz+16*5`(%rsp),%xmm11 + movaps `$framesz+16*6`(%rsp),%xmm12 + movaps `$framesz+16*7`(%rsp),%xmm13 + movaps `$framesz+16*8`(%rsp),%xmm14 + movaps `$framesz+16*9`(%rsp),%xmm15 +___ +$code.=<<___; + mov (%rsi),%r15 + mov 8(%rsi),%r14 + mov 16(%rsi),%r13 + mov 24(%rsi),%r12 + mov 32(%rsi),%rbp + mov 40(%rsi),%rbx + lea 48(%rsi),%rsp +.Lepilogue_avx: + ret +.size ${func}_avx,.-${func}_avx +___ + +if ($avx>1) {{ +###################################################################### +# AVX2+BMI code path +# +my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp +my $PUSH8=8*2*$SZ; +use integer; + +sub bodyx_00_15 () { + # at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f + ( + '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'. 
+ + '&add ($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)', # h+=X[i]+K[i] + '&and ($a4,$e)', # f&e + '&rorx ($a0,$e,$Sigma1[2])', + '&rorx ($a2,$e,$Sigma1[1])', + + '&lea ($a,"($a,$a1)")', # h+=Sigma0(a) from the past + '&lea ($h,"($h,$a4)")', + '&andn ($a4,$e,$g)', # ~e&g + '&xor ($a0,$a2)', + + '&rorx ($a1,$e,$Sigma1[0])', + '&lea ($h,"($h,$a4)")', # h+=Ch(e,f,g)=(e&f)+(~e&g) + '&xor ($a0,$a1)', # Sigma1(e) + '&mov ($a2,$a)', + + '&rorx ($a4,$a,$Sigma0[2])', + '&lea ($h,"($h,$a0)")', # h+=Sigma1(e) + '&xor ($a2,$b)', # a^b, b^c in next round + '&rorx ($a1,$a,$Sigma0[1])', + + '&rorx ($a0,$a,$Sigma0[0])', + '&lea ($d,"($d,$h)")', # d+=h + '&and ($a3,$a2)', # (b^c)&(a^b) + @aesni_cbc_block[$aesni_cbc_idx++]. + '&xor ($a1,$a4)', + + '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b) + '&xor ($a1,$a0)', # Sigma0(a) + '&lea ($h,"($h,$a3)");'. # h+=Maj(a,b,c) + '&mov ($a4,$e)', # copy of f in future + + '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;' + ); + # and at the finish one has to $a+=$a1 +} + +$code.=<<___; +.type ${func}_avx2,\@function,6 +.align 64 +${func}_avx2: +.Lavx2_shortcut: + mov `($win64?56:8)`(%rsp),$in0 # load 7th parameter + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + mov %rsp,%r11 # copy %rsp + sub \$`2*$SZ*$rounds+8*8+$win64*16*10`,%rsp + and \$-256*$SZ,%rsp # align stack frame + add \$`2*$SZ*($rounds-8)`,%rsp + + shl \$6,$len + sub $inp,$out # re-bias + sub $inp,$in0 + add $inp,$len # end of input + + #mov $inp,$_inp # saved later + #mov $out,$_out # kept in $offload + mov $len,$_end + #mov $key,$_key # remains resident in $inp register + mov $ivp,$_ivp + mov $ctx,$_ctx + mov $in0,$_in0 + mov %r11,$_rsp +___ +$code.=<<___ if ($win64); + movaps %xmm6,`$framesz+16*0`(%rsp) + movaps %xmm7,`$framesz+16*1`(%rsp) + movaps %xmm8,`$framesz+16*2`(%rsp) + movaps %xmm9,`$framesz+16*3`(%rsp) + movaps %xmm10,`$framesz+16*4`(%rsp) + movaps %xmm11,`$framesz+16*5`(%rsp) + movaps %xmm12,`$framesz+16*6`(%rsp) + movaps %xmm13,`$framesz+16*7`(%rsp) + movaps %xmm14,`$framesz+16*8`(%rsp) + movaps %xmm15,`$framesz+16*9`(%rsp) +___ +$code.=<<___; +.Lprologue_avx2: + vzeroall + + mov $inp,%r13 # borrow $a0 + vpinsrq \$1,$out,$offload,$offload + lea 0x80($key),$inp # size optimization, reassign + lea $TABLE+`$SZ*2*$rounds+32`(%rip),%r12 # borrow $a4 + mov 0xf0-0x80($inp),%r14d # rounds, borrow $a1 + mov $ctx,%r15 # borrow $a2 + mov $in0,%rsi # borrow $a3 + vmovdqu ($ivp),$iv # load IV + lea -9(%r14),%r14 + + vmovdqa 0x00(%r12,%r14,8),$mask14 + vmovdqa 0x10(%r12,%r14,8),$mask12 + vmovdqa 0x20(%r12,%r14,8),$mask10 + + sub \$-16*$SZ,%r13 # inp++, size optimization + mov $SZ*0(%r15),$A + xor %r12,%r12 # borrow $a0 + mov $SZ*1(%r15),$B + cmp $len,%r13 # $_end + mov $SZ*2(%r15),$C + sete %r12b + mov $SZ*3(%r15),$D + mov $SZ*4(%r15),$E + mov $SZ*5(%r15),$F + mov $SZ*6(%r15),$G + mov $SZ*7(%r15),$H + vmovdqu 0x00-0x80($inp),$roundkey +___ + if ($SZ==4) { # SHA256 + my @X = map("%ymm$_",(0..3)); + my ($t0,$t1,$t2,$t3) = map("%ymm$_",(4..7)); + +$code.=<<___; + jmp .Loop_avx2 +.align 16 +.Loop_avx2: + shl \$`log(16*$SZ)/log(2)`,%r12 + vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3 + neg %r12 + vmovdqu -16*$SZ+0(%rsi,%r13),%xmm0 + add %rsi,%r12 # next or same input block + vmovdqu -16*$SZ+16(%rsi,%r13),%xmm1 + vmovdqu -16*$SZ+32(%rsi,%r13),%xmm2 + vmovdqu -16*$SZ+48(%rsi,%r13),%xmm3 + + vinserti128 \$1,(%r12,%r13),@X[0],@X[0] + vinserti128 \$1,16(%r12,%r13),@X[1],@X[1] + vpshufb $t3,@X[0],@X[0] + vinserti128 \$1,32(%r12,%r13),@X[2],@X[2] + vpshufb $t3,@X[1],@X[1] + vinserti128 
\$1,48(%r12,%r13),@X[3],@X[3] + + lea $TABLE(%rip),$Tbl + vpshufb $t3,@X[2],@X[2] + lea -16*$SZ(%r13),%r13 + vpaddd 0x00($Tbl),@X[0],$t0 + vpshufb $t3,@X[3],@X[3] + vpaddd 0x20($Tbl),@X[1],$t1 + vpaddd 0x40($Tbl),@X[2],$t2 + vpaddd 0x60($Tbl),@X[3],$t3 + vmovdqa $t0,0x00(%rsp) + xor $a1,$a1 + vmovdqa $t1,0x20(%rsp) + lea -$PUSH8(%rsp),%rsp + mov $B,$a3 + vmovdqa $t2,0x00(%rsp) + xor $C,$a3 # magic + vmovdqa $t3,0x20(%rsp) + mov $F,$a4 + sub \$-16*2*$SZ,$Tbl # size optimization + jmp .Lavx2_00_47 + +.align 16 +.Lavx2_00_47: + vmovdqu (%r13),$inout + vpinsrq \$0,%r13,$offload,$offload +___ + +sub AVX2_256_00_47 () { +my $j = shift; +my $body = shift; +my @X = @_; +my @insns = (&$body,&$body,&$body,&$body); # 96 instructions +my $base = "+2*$PUSH8(%rsp)"; + + &lea ("%rsp","-$PUSH8(%rsp)") if (($j%2)==0); + foreach (Xupdate_256_AVX()) { # 29 instructions + eval; + eval(shift(@insns)); + eval(shift(@insns)); + eval(shift(@insns)); + } + &vpaddd ($t2,@X[0],16*2*$j."($Tbl)"); + foreach (@insns) { eval; } # remaining instructions + &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2); +} + $aesni_cbc_idx=0; + for ($i=0,$j=0; $j<4; $j++) { + &AVX2_256_00_47($j,\&bodyx_00_15,@X); + push(@X,shift(@X)); # rotate(@X) + } + &vmovq ("%r13",$offload); # borrow $a0 + &vpextrq ("%r15",$offload,1); # borrow $a2 + &vpand ($temp,$temp,$mask14); + &vpor ($iv,$iv,$temp); + &vmovdqu ("(%r15,%r13)",$iv); # write output + &lea ("%r13","16(%r13)"); # inp++ + + &lea ($Tbl,16*2*$SZ."($Tbl)"); + &cmpb (($SZ-1)."($Tbl)",0); + &jne (".Lavx2_00_47"); + + &vmovdqu ($inout,"(%r13)"); + &vpinsrq ($offload,$offload,"%r13",0); + + $aesni_cbc_idx=0; + for ($i=0; $i<16; ) { + my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)"; + foreach(bodyx_00_15()) { eval; } + } + } +$code.=<<___; + vpextrq \$1,$offload,%r12 # $_out, borrow $a4 + vmovq $offload,%r13 # $_inp, borrow $a0 + mov `2*$SZ*$rounds+5*8`(%rsp),%r15 # $_ctx, borrow $a2 + add $a1,$A + lea `2*$SZ*($rounds-8)`(%rsp),$Tbl + + vpand $mask14,$temp,$temp + vpor $temp,$iv,$iv + vmovdqu $iv,(%r12,%r13) # write output + lea 16(%r13),%r13 + + add $SZ*0(%r15),$A + add $SZ*1(%r15),$B + add $SZ*2(%r15),$C + add $SZ*3(%r15),$D + add $SZ*4(%r15),$E + add $SZ*5(%r15),$F + add $SZ*6(%r15),$G + add $SZ*7(%r15),$H + + mov $A,$SZ*0(%r15) + mov $B,$SZ*1(%r15) + mov $C,$SZ*2(%r15) + mov $D,$SZ*3(%r15) + mov $E,$SZ*4(%r15) + mov $F,$SZ*5(%r15) + mov $G,$SZ*6(%r15) + mov $H,$SZ*7(%r15) + + cmp `$PUSH8+2*8`($Tbl),%r13 # $_end + je .Ldone_avx2 + + xor $a1,$a1 + mov $B,$a3 + mov $F,$a4 + xor $C,$a3 # magic + jmp .Lower_avx2 +.align 16 +.Lower_avx2: + vmovdqu (%r13),$inout + vpinsrq \$0,%r13,$offload,$offload +___ + $aesni_cbc_idx=0; + for ($i=0; $i<16; ) { + my $base="+16($Tbl)"; + foreach(bodyx_00_15()) { eval; } + &lea ($Tbl,"-$PUSH8($Tbl)") if ($i==8); + } +$code.=<<___; + vmovq $offload,%r13 # borrow $a0 + vpextrq \$1,$offload,%r15 # borrow $a2 + vpand $mask14,$temp,$temp + vpor $temp,$iv,$iv + lea -$PUSH8($Tbl),$Tbl + vmovdqu $iv,(%r15,%r13) # write output + lea 16(%r13),%r13 # inp++ + cmp %rsp,$Tbl + jae .Lower_avx2 + + mov `2*$SZ*$rounds+5*8`(%rsp),%r15 # $_ctx, borrow $a2 + lea 16*$SZ(%r13),%r13 + mov `2*$SZ*$rounds+6*8`(%rsp),%rsi # $_in0, borrow $a3 + add $a1,$A + lea `2*$SZ*($rounds-8)`(%rsp),%rsp + + add $SZ*0(%r15),$A + add $SZ*1(%r15),$B + add $SZ*2(%r15),$C + add $SZ*3(%r15),$D + add $SZ*4(%r15),$E + add $SZ*5(%r15),$F + add $SZ*6(%r15),$G + xor %r12,%r12 + add $SZ*7(%r15),$H + + cmp $_end,%r13 + + mov $A,$SZ*0(%r15) + mov $B,$SZ*1(%r15) + mov $C,$SZ*2(%r15) + mov $D,$SZ*3(%r15) + mov $E,$SZ*4(%r15) + mov 
$F,$SZ*5(%r15)
+	mov	$G,$SZ*6(%r15)
+	mov	$H,$SZ*7(%r15)
+
+	sete	%r12b
+	jbe	.Loop_avx2
+	lea	(%rsp),$Tbl
+
+.Ldone_avx2:
+	lea	($Tbl),%rsp
+	mov	$_ivp,$ivp
+	mov	$_rsp,%rsi
+	vmovdqu	$iv,($ivp)		# output IV
+	vzeroall
+___
+$code.=<<___ if ($win64);
+	movaps	`$framesz+16*0`(%rsp),%xmm6
+	movaps	`$framesz+16*1`(%rsp),%xmm7
+	movaps	`$framesz+16*2`(%rsp),%xmm8
+	movaps	`$framesz+16*3`(%rsp),%xmm9
+	movaps	`$framesz+16*4`(%rsp),%xmm10
+	movaps	`$framesz+16*5`(%rsp),%xmm11
+	movaps	`$framesz+16*6`(%rsp),%xmm12
+	movaps	`$framesz+16*7`(%rsp),%xmm13
+	movaps	`$framesz+16*8`(%rsp),%xmm14
+	movaps	`$framesz+16*9`(%rsp),%xmm15
+___
+$code.=<<___;
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lepilogue_avx2:
+	ret
+.size	${func}_avx2,.-${func}_avx2
+___
+}}
+}}}}}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___ if ($avx);
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+___
+$code.=<<___ if ($avx>1);
+	lea	.Lavx2_shortcut(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<avx2_shortcut
+	jb	.Lnot_in_avx2
+
+	and	\$-256*$SZ,%rax
+	add	\$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+	mov	%rax,%rsi		# put aside Rsp
+	mov	16*$SZ+7*8(%rax),%rax	# pull \$_rsp
+	lea	48(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx
+	jb	.Lin_prologue		# non-AVX code
+
+	lea	16*$SZ+8*8(%rsi),%rsi	# Xmm6- save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+	.rva	.LSEH_begin_${func}_xop
+	.rva	.LSEH_end_${func}_xop
+	.rva	.LSEH_info_${func}_xop
+
+	.rva	.LSEH_begin_${func}_avx
+	.rva	.LSEH_end_${func}_avx
+	.rva	.LSEH_info_${func}_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva
.LSEH_begin_${func}_avx2 + .rva .LSEH_end_${func}_avx2 + .rva .LSEH_info_${func}_avx2 +___ +$code.=<<___ if ($avx); +.section .xdata +.align 8 +.LSEH_info_${func}_xop: + .byte 9,0,0,0 + .rva se_handler + .rva .Lprologue_xop,.Lepilogue_xop # HandlerData[] + +.LSEH_info_${func}_avx: + .byte 9,0,0,0 + .rva se_handler + .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[] +___ +$code.=<<___ if ($avx>1); +.LSEH_info_${func}_avx2: + .byte 9,0,0,0 + .rva se_handler + .rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[] +___ +} + +$code =~ s/\`([^\`]*)\`/eval $1/gem; +print $code; +close STDOUT; diff --git a/crypto/evp/Makefile b/crypto/evp/Makefile index d6f8e906dc..98a7e8de56 100644 --- a/crypto/evp/Makefile +++ b/crypto/evp/Makefile @@ -29,7 +29,7 @@ LIBSRC= encode.c digest.c evp_enc.c evp_key.c evp_acnf.c evp_cnf.c \ c_all.c c_allc.c c_alld.c evp_lib.c bio_ok.c \ evp_pkey.c evp_pbe.c p5_crpt.c p5_crpt2.c \ e_old.c pmeth_lib.c pmeth_fn.c pmeth_gn.c m_sigver.c \ - e_aes_cbc_hmac_sha1.c e_rc4_hmac_md5.c + e_aes_cbc_hmac_sha1.c e_aes_cbc_hmac_sha256.c e_rc4_hmac_md5.c LIBOBJ= encode.o digest.o evp_enc.o evp_key.o evp_acnf.o evp_cnf.o \ e_des.o e_bf.o e_idea.o e_des3.o e_camellia.o\ @@ -42,7 +42,7 @@ LIBOBJ= encode.o digest.o evp_enc.o evp_key.o evp_acnf.o evp_cnf.o \ c_all.o c_allc.o c_alld.o evp_lib.o bio_ok.o \ evp_pkey.o evp_pbe.o p5_crpt.o p5_crpt2.o \ e_old.o pmeth_lib.o pmeth_fn.o pmeth_gn.o m_sigver.o \ - e_aes_cbc_hmac_sha1.o e_rc4_hmac_md5.o + e_aes_cbc_hmac_sha1.o e_aes_cbc_hmac_sha256.o e_rc4_hmac_md5.o SRC= $(LIBSRC) diff --git a/crypto/evp/c_allc.c b/crypto/evp/c_allc.c index 6a39d7a5d0..1e3a89e5a4 100644 --- a/crypto/evp/c_allc.c +++ b/crypto/evp/c_allc.c @@ -202,6 +202,10 @@ void OpenSSL_add_all_ciphers(void) EVP_add_cipher(EVP_aes_128_cbc_hmac_sha1()); EVP_add_cipher(EVP_aes_256_cbc_hmac_sha1()); #endif +#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256) + EVP_add_cipher(EVP_aes_128_cbc_hmac_sha256()); + EVP_add_cipher(EVP_aes_256_cbc_hmac_sha256()); +#endif #endif #ifndef OPENSSL_NO_CAMELLIA diff --git a/crypto/evp/e_aes_cbc_hmac_sha256.c b/crypto/evp/e_aes_cbc_hmac_sha256.c new file mode 100644 index 0000000000..53262fa8e0 --- /dev/null +++ b/crypto/evp/e_aes_cbc_hmac_sha256.c @@ -0,0 +1,598 @@ +/* ==================================================================== + * Copyright (c) 2011-2013 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * licensing@OpenSSL.org. + * + * 5. 
Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + */ + +#include + +#include +#include + +#if !defined(OPENSSL_NO_AES) && !defined(OPENSSL_NO_SHA256) + +#include +#include +#include +#include + +#ifndef EVP_CIPH_FLAG_AEAD_CIPHER +#define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000 +#define EVP_CTRL_AEAD_TLS1_AAD 0x16 +#define EVP_CTRL_AEAD_SET_MAC_KEY 0x17 +#endif + +#if !defined(EVP_CIPH_FLAG_DEFAULT_ASN1) +#define EVP_CIPH_FLAG_DEFAULT_ASN1 0 +#endif + +#define TLS1_1_VERSION 0x0302 + +typedef struct + { + AES_KEY ks; + SHA256_CTX head,tail,md; + size_t payload_length; /* AAD length in decrypt case */ + union { + unsigned int tls_ver; + unsigned char tls_aad[16]; /* 13 used */ + } aux; + } EVP_AES_HMAC_SHA256; + +#define NO_PAYLOAD_LENGTH ((size_t)-1) + +#if defined(AES_ASM) && ( \ + defined(__x86_64) || defined(__x86_64__) || \ + defined(_M_AMD64) || defined(_M_X64) || \ + defined(__INTEL__) ) + +#if defined(__GNUC__) && __GNUC__>=2 && !defined(PEDANTIC) +# define BSWAP(x) ({ unsigned int r=(x); asm ("bswapl %0":"=r"(r):"0"(r)); r; }) +#endif + +extern unsigned int OPENSSL_ia32cap_P[3]; +#define AESNI_AVX_CAPABLE (1<<(57-32)|1<<(60-32)) + +int aesni_set_encrypt_key(const unsigned char *userKey, int bits, + AES_KEY *key); +int aesni_set_decrypt_key(const unsigned char *userKey, int bits, + AES_KEY *key); + +void aesni_cbc_encrypt(const unsigned char *in, + unsigned char *out, + size_t length, + const AES_KEY *key, + unsigned char *ivec, int enc); + +int aesni_cbc_sha256_enc (const void *inp, void *out, size_t blocks, + const AES_KEY *key, unsigned char iv[16], + SHA256_CTX *ctx,const void *in0); + +#define data(ctx) ((EVP_AES_HMAC_SHA256 *)(ctx)->cipher_data) + +static int aesni_cbc_hmac_sha256_init_key(EVP_CIPHER_CTX *ctx, + const unsigned char *inkey, + const unsigned char *iv, int enc) + { + EVP_AES_HMAC_SHA256 *key = data(ctx); + int ret; + + if (enc) + memset(&key->ks,0,sizeof(key->ks.rd_key)), + ret=aesni_set_encrypt_key(inkey,ctx->key_len*8,&key->ks); + else + ret=aesni_set_decrypt_key(inkey,ctx->key_len*8,&key->ks); + + SHA256_Init(&key->head); /* handy when benchmarking */ + key->tail = key->head; + key->md = key->head; + + key->payload_length = NO_PAYLOAD_LENGTH; + + return ret<0?0:1; + } + +#define STITCHED_CALL + +#if !defined(STITCHED_CALL) +#define aes_off 0 +#endif + +void 
sha256_block_data_order (void *c,const void *p,size_t len); + +static void sha256_update(SHA256_CTX *c,const void *data,size_t len) +{ const unsigned char *ptr = data; + size_t res; + + if ((res = c->num)) { + res = SHA256_CBLOCK-res; + if (lenNh += len>>29; + c->Nl += len<<=3; + if (c->Nl<(unsigned int)len) c->Nh++; + } + + if (res) + SHA256_Update(c,ptr,res); +} + +#ifdef SHA256_Update +#undef SHA256_Update +#endif +#define SHA256_Update sha256_update + +static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, + const unsigned char *in, size_t len) + { + EVP_AES_HMAC_SHA256 *key = data(ctx); + unsigned int l; + size_t plen = key->payload_length, + iv = 0, /* explicit IV in TLS 1.1 and later */ + sha_off = 0; +#if defined(STITCHED_CALL) + size_t aes_off = 0, + blocks; + + sha_off = SHA256_CBLOCK-key->md.num; +#endif + + key->payload_length = NO_PAYLOAD_LENGTH; + + if (len%AES_BLOCK_SIZE) return 0; + + if (ctx->encrypt) { + if (plen==NO_PAYLOAD_LENGTH) + plen = len; + else if (len!=((plen+SHA256_DIGEST_LENGTH+AES_BLOCK_SIZE)&-AES_BLOCK_SIZE)) + return 0; + else if (key->aux.tls_ver >= TLS1_1_VERSION) + iv = AES_BLOCK_SIZE; + +#if defined(STITCHED_CALL) + if (plen>(sha_off+iv) && (blocks=(plen-(sha_off+iv))/SHA256_CBLOCK)) { + SHA256_Update(&key->md,in+iv,sha_off); + + (void)aesni_cbc_sha256_enc(in,out,blocks,&key->ks, + ctx->iv,&key->md,in+iv+sha_off); + blocks *= SHA256_CBLOCK; + aes_off += blocks; + sha_off += blocks; + key->md.Nh += blocks>>29; + key->md.Nl += blocks<<=3; + if (key->md.Nl<(unsigned int)blocks) key->md.Nh++; + } else { + sha_off = 0; + } +#endif + sha_off += iv; + SHA256_Update(&key->md,in+sha_off,plen-sha_off); + + if (plen!=len) { /* "TLS" mode of operation */ + if (in!=out) + memcpy(out+aes_off,in+aes_off,plen-aes_off); + + /* calculate HMAC and append it to payload */ + SHA256_Final(out+plen,&key->md); + key->md = key->tail; + SHA256_Update(&key->md,out+plen,SHA256_DIGEST_LENGTH); + SHA256_Final(out+plen,&key->md); + + /* pad the payload|hmac */ + plen += SHA256_DIGEST_LENGTH; + for (l=len-plen-1;plenks,ctx->iv,1); + } else { + aesni_cbc_encrypt(in+aes_off,out+aes_off,len-aes_off, + &key->ks,ctx->iv,1); + } + } else { + union { unsigned int u[SHA256_DIGEST_LENGTH/sizeof(unsigned int)]; + unsigned char c[64+SHA256_DIGEST_LENGTH]; } mac, *pmac; + + /* arrange cache line alignment */ + pmac = (void *)(((size_t)mac.c+63)&((size_t)0-64)); + + /* decrypt HMAC|padding at once */ + aesni_cbc_encrypt(in,out,len, + &key->ks,ctx->iv,0); + + if (plen) { /* "TLS" mode of operation */ + size_t inp_len, mask, j, i; + unsigned int res, maxpad, pad, bitlen; + int ret = 1; + union { unsigned int u[SHA_LBLOCK]; + unsigned char c[SHA256_CBLOCK]; } + *data = (void *)key->md.data; + + if ((key->aux.tls_aad[plen-4]<<8|key->aux.tls_aad[plen-3]) + >= TLS1_1_VERSION) + iv = AES_BLOCK_SIZE; + + if (len<(iv+SHA256_DIGEST_LENGTH+1)) + return 0; + + /* omit explicit iv */ + out += iv; + len -= iv; + + /* figure out payload length */ + pad = out[len-1]; + maxpad = len-(SHA256_DIGEST_LENGTH+1); + maxpad |= (255-maxpad)>>(sizeof(maxpad)*8-8); + maxpad &= 255; + + inp_len = len - (SHA256_DIGEST_LENGTH+pad+1); + mask = (0-((inp_len-len)>>(sizeof(inp_len)*8-1))); + inp_len &= mask; + ret &= (int)mask; + + key->aux.tls_aad[plen-2] = inp_len>>8; + key->aux.tls_aad[plen-1] = inp_len; + + /* calculate HMAC */ + key->md = key->head; + SHA256_Update(&key->md,key->aux.tls_aad,plen); + +#if 1 + len -= SHA256_DIGEST_LENGTH; /* amend mac */ + if (len>=(256+SHA256_CBLOCK)) { + j = 
(len-(256+SHA256_CBLOCK))&(0-SHA256_CBLOCK); + j += SHA256_CBLOCK-key->md.num; + SHA256_Update(&key->md,out,j); + out += j; + len -= j; + inp_len -= j; + } + + /* but pretend as if we hashed padded payload */ + bitlen = key->md.Nl+(inp_len<<3); /* at most 18 bits */ +#ifdef BSWAP + bitlen = BSWAP(bitlen); +#else + mac.c[0] = 0; + mac.c[1] = (unsigned char)(bitlen>>16); + mac.c[2] = (unsigned char)(bitlen>>8); + mac.c[3] = (unsigned char)bitlen; + bitlen = mac.u[0]; +#endif + + pmac->u[0]=0; + pmac->u[1]=0; + pmac->u[2]=0; + pmac->u[3]=0; + pmac->u[4]=0; + pmac->u[5]=0; + pmac->u[6]=0; + pmac->u[7]=0; + + for (res=key->md.num, j=0;j>(sizeof(j)*8-8); + c &= mask; + c |= 0x80&~mask&~((inp_len-j)>>(sizeof(j)*8-8)); + data->c[res++]=(unsigned char)c; + + if (res!=SHA256_CBLOCK) continue; + + /* j is not incremented yet */ + mask = 0-((inp_len+7-j)>>(sizeof(j)*8-1)); + data->u[SHA_LBLOCK-1] |= bitlen&mask; + sha256_block_data_order(&key->md,data,1); + mask &= 0-((j-inp_len-72)>>(sizeof(j)*8-1)); + pmac->u[0] |= key->md.h[0] & mask; + pmac->u[1] |= key->md.h[1] & mask; + pmac->u[2] |= key->md.h[2] & mask; + pmac->u[3] |= key->md.h[3] & mask; + pmac->u[4] |= key->md.h[4] & mask; + pmac->u[5] |= key->md.h[5] & mask; + pmac->u[6] |= key->md.h[6] & mask; + pmac->u[7] |= key->md.h[7] & mask; + res=0; + } + + for(i=res;ic[i]=0; + + if (res>SHA256_CBLOCK-8) { + mask = 0-((inp_len+8-j)>>(sizeof(j)*8-1)); + data->u[SHA_LBLOCK-1] |= bitlen&mask; + sha256_block_data_order(&key->md,data,1); + mask &= 0-((j-inp_len-73)>>(sizeof(j)*8-1)); + pmac->u[0] |= key->md.h[0] & mask; + pmac->u[1] |= key->md.h[1] & mask; + pmac->u[2] |= key->md.h[2] & mask; + pmac->u[3] |= key->md.h[3] & mask; + pmac->u[4] |= key->md.h[4] & mask; + pmac->u[5] |= key->md.h[5] & mask; + pmac->u[6] |= key->md.h[6] & mask; + pmac->u[7] |= key->md.h[7] & mask; + + memset(data,0,SHA256_CBLOCK); + j+=64; + } + data->u[SHA_LBLOCK-1] = bitlen; + sha256_block_data_order(&key->md,data,1); + mask = 0-((j-inp_len-73)>>(sizeof(j)*8-1)); + pmac->u[0] |= key->md.h[0] & mask; + pmac->u[1] |= key->md.h[1] & mask; + pmac->u[2] |= key->md.h[2] & mask; + pmac->u[3] |= key->md.h[3] & mask; + pmac->u[4] |= key->md.h[4] & mask; + pmac->u[5] |= key->md.h[5] & mask; + pmac->u[6] |= key->md.h[6] & mask; + pmac->u[7] |= key->md.h[7] & mask; + +#ifdef BSWAP + pmac->u[0] = BSWAP(pmac->u[0]); + pmac->u[1] = BSWAP(pmac->u[1]); + pmac->u[2] = BSWAP(pmac->u[2]); + pmac->u[3] = BSWAP(pmac->u[3]); + pmac->u[4] = BSWAP(pmac->u[4]); + pmac->u[5] = BSWAP(pmac->u[5]); + pmac->u[6] = BSWAP(pmac->u[6]); + pmac->u[7] = BSWAP(pmac->u[7]); +#else + for (i=0;i<8;;i++) { + res = pmac->u[i]; + pmac->c[4*i+0]=(unsigned char)(res>>24); + pmac->c[4*i+1]=(unsigned char)(res>>16); + pmac->c[4*i+2]=(unsigned char)(res>>8); + pmac->c[4*i+3]=(unsigned char)res; + } +#endif + len += SHA256_DIGEST_LENGTH; +#else + SHA256_Update(&key->md,out,inp_len); + res = key->md.num; + SHA256_Final(pmac->c,&key->md); + + { + unsigned int inp_blocks, pad_blocks; + + /* but pretend as if we hashed padded payload */ + inp_blocks = 1+((SHA256_CBLOCK-9-res)>>(sizeof(res)*8-1)); + res += (unsigned int)(len-inp_len); + pad_blocks = res / SHA256_CBLOCK; + res %= SHA256_CBLOCK; + pad_blocks += 1+((SHA256_CBLOCK-9-res)>>(sizeof(res)*8-1)); + for (;inp_blocksmd,data,1); + } +#endif + key->md = key->tail; + SHA256_Update(&key->md,pmac->c,SHA256_DIGEST_LENGTH); + SHA256_Final(pmac->c,&key->md); + + /* verify HMAC */ + out += inp_len; + len -= inp_len; +#if 1 + { + unsigned char *p = 
+			/* verify HMAC */
+			out += inp_len;
+			len -= inp_len;
+#if 1
+			{
+			unsigned char *p = out+len-1-maxpad-SHA256_DIGEST_LENGTH;
+			size_t off = out-p;
+			unsigned int c, cmask;
+
+			maxpad += SHA256_DIGEST_LENGTH;
+			for (res=0,i=0,j=0;j<maxpad;j++) {
+				c = p[j];
+				cmask = ((int)(j-off-SHA256_DIGEST_LENGTH))>>(sizeof(int)*8-1);
+				res |= (c^pad)&~cmask;	/* ... and padding */
+				cmask &= ((int)(off-1-j))>>(sizeof(int)*8-1);
+				res |= (c^pmac->c[i])&cmask;
+				i += 1&cmask;
+			}
+			maxpad -= SHA256_DIGEST_LENGTH;
+
+			res = 0-((0-res)>>(sizeof(res)*8-1));
+			ret &= (int)~res;
+			}
+#else
+			for (res=0,i=0;i<SHA256_DIGEST_LENGTH;i++)
+				res |= out[i]^pmac->c[i];
+			res = 0-((0-res)>>(sizeof(res)*8-1));
+			ret &= (int)~res;
+
+			/* verify padding */
+			pad = (pad&~res) | (maxpad&res);
+			out = out+len-1-pad;
+			for (res=0,i=0;i<pad;i++)
+				res |= out[i]^pad;
+
+			res = (0-res)>>(sizeof(res)*8-1);
+			ret &= (int)~res;
+#endif
+			return ret;
+		} else {
+			SHA256_Update(&key->md,out,len);
+		}
+	}
+
+	return 1;
+	}
+
+static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr)
+	{
+	EVP_AES_HMAC_SHA256 *key = data(ctx);
+
+	switch (type)
+		{
+	case EVP_CTRL_AEAD_SET_MAC_KEY:
+		{
+		unsigned int  i;
+		unsigned char hmac_key[64];
+
+		memset (hmac_key,0,sizeof(hmac_key));
+
+		if (arg > (int)sizeof(hmac_key)) {
+			SHA256_Init(&key->head);
+			SHA256_Update(&key->head,ptr,arg);
+			SHA256_Final(hmac_key,&key->head);
+		} else {
+			memcpy(hmac_key,ptr,arg);
+		}
+
+		for (i=0;i<sizeof(hmac_key);i++)
+			hmac_key[i] ^= 0x36;		/* ipad */
+		SHA256_Init(&key->head);
+		SHA256_Update(&key->head,hmac_key,sizeof(hmac_key));
+
+		for (i=0;i<sizeof(hmac_key);i++)
+			hmac_key[i] ^= 0x36^0x5c;	/* opad */
+		SHA256_Init(&key->tail);
+		SHA256_Update(&key->tail,hmac_key,sizeof(hmac_key));
+
+		OPENSSL_cleanse(hmac_key,sizeof(hmac_key));
+
+		return 1;
+		}
+	case EVP_CTRL_AEAD_TLS1_AAD:
+		{
+		unsigned char *p=ptr;
+		unsigned int   len=p[arg-2]<<8|p[arg-1];
+
+		if (ctx->encrypt)
+			{
+			key->payload_length = len;
+			if ((key->aux.tls_ver=p[arg-4]<<8|p[arg-3]) >= TLS1_1_VERSION) {
+				len -= AES_BLOCK_SIZE;
+				p[arg-2] = len>>8;
+				p[arg-1] = len;
+			}
+			key->md = key->head;
+			SHA256_Update(&key->md,p,arg);
+
+			return (int)(((len+SHA256_DIGEST_LENGTH+AES_BLOCK_SIZE)&-AES_BLOCK_SIZE)
+				- len);
+			}
+		else
+			{
+			if (arg>13) arg = 13;
+			memcpy(key->aux.tls_aad,ptr,arg);
+			key->payload_length = arg;
+
+			return SHA256_DIGEST_LENGTH;
+			}
+		}
+	default:
+		return -1;
+		}
+	}
+
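+/*
+ * The EVP_CIPHER definitions below advertise EVP_CIPH_FLAG_AEAD_CIPHER, so
+ * that the TLS record layer hands the additional authenticated data to the
+ * ctrl above; block and IV size are 16 in both cases, while key lengths of
+ * 16 and 32 select AES-128 and AES-256 respectively.
+ */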
+static EVP_CIPHER aesni_128_cbc_hmac_sha256_cipher =
+	{
+#ifdef NID_aes_128_cbc_hmac_sha256
+	NID_aes_128_cbc_hmac_sha256,
+#else
+	NID_undef,
+#endif
+	16,16,16,
+	EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+	aesni_cbc_hmac_sha256_init_key,
+	aesni_cbc_hmac_sha256_cipher,
+	NULL,
+	sizeof(EVP_AES_HMAC_SHA256),
+	EVP_CIPH_FLAG_DEFAULT_ASN1?NULL:EVP_CIPHER_set_asn1_iv,
+	EVP_CIPH_FLAG_DEFAULT_ASN1?NULL:EVP_CIPHER_get_asn1_iv,
+	aesni_cbc_hmac_sha256_ctrl,
+	NULL
+	};
+
+static EVP_CIPHER aesni_256_cbc_hmac_sha256_cipher =
+	{
+#ifdef NID_aes_256_cbc_hmac_sha256
+	NID_aes_256_cbc_hmac_sha256,
+#else
+	NID_undef,
+#endif
+	16,32,16,
+	EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+	aesni_cbc_hmac_sha256_init_key,
+	aesni_cbc_hmac_sha256_cipher,
+	NULL,
+	sizeof(EVP_AES_HMAC_SHA256),
+	EVP_CIPH_FLAG_DEFAULT_ASN1?NULL:EVP_CIPHER_set_asn1_iv,
+	EVP_CIPH_FLAG_DEFAULT_ASN1?NULL:EVP_CIPHER_get_asn1_iv,
+	aesni_cbc_hmac_sha256_ctrl,
+	NULL
+	};
+
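+/*
+ * Both getters below gate on the AESNI+AVX capability bits and additionally
+ * call the stitched routine with all-NULL arguments as a run-time probe;
+ * the composite ciphers are published only when the probe reports a usable
+ * code path.
+ */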
+const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
+	{
+	return((OPENSSL_ia32cap_P[1]&AESNI_AVX_CAPABLE)==AESNI_AVX_CAPABLE &&
+		aesni_cbc_sha256_enc(NULL,NULL,0,NULL,NULL,NULL,NULL) ?
+		&aesni_128_cbc_hmac_sha256_cipher:NULL);
+	}
+
+const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
+	{
+	return((OPENSSL_ia32cap_P[1]&AESNI_AVX_CAPABLE)==AESNI_AVX_CAPABLE &&
+		aesni_cbc_sha256_enc(NULL,NULL,0,NULL,NULL,NULL,NULL)?
+		&aesni_256_cbc_hmac_sha256_cipher:NULL);
+	}
+#else
+const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
+	{
+	return NULL;
+	}
+const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
+	{
+	return NULL;
+	}
+#endif
+#endif
diff --git a/crypto/objects/obj_dat.h b/crypto/objects/obj_dat.h
index ad2e1dbda0..241d4e148c 100644
--- a/crypto/objects/obj_dat.h
+++ b/crypto/objects/obj_dat.h
@@ -62,9 +62,9 @@
  * [including the GNU Public Licence.]
  */
 
-#define NUM_NID 935
-#define NUM_SN 928
-#define NUM_LN 928
+#define NUM_NID 938
+#define NUM_SN 931
+#define NUM_LN 931
 #define NUM_OBJ 872
 
 static const unsigned char lvalues[6113]={
@@ -2450,12 +2450,19 @@ static const ASN1_OBJECT nid_objs[NUM_NID]={
 	&(lvalues[6094]),0},
 {"brainpoolP512t1","brainpoolP512t1",NID_brainpoolP512t1,9,
 	&(lvalues[6103]),0},
+{"AES-128-CBC-HMAC-SHA256","aes-128-cbc-hmac-sha256",
+	NID_aes_128_cbc_hmac_sha256,0,NULL,0},
+{"AES-192-CBC-HMAC-SHA256","aes-192-cbc-hmac-sha256",
+	NID_aes_192_cbc_hmac_sha256,0,NULL,0},
+{"AES-256-CBC-HMAC-SHA256","aes-256-cbc-hmac-sha256",
+	NID_aes_256_cbc_hmac_sha256,0,NULL,0},
 };
 
 static const unsigned int sn_objs[NUM_SN]={
 364,	/* "AD_DVCS" */
 419,	/* "AES-128-CBC" */
 916,	/* "AES-128-CBC-HMAC-SHA1" */
+935,	/* "AES-128-CBC-HMAC-SHA256" */
 421,	/* "AES-128-CFB" */
 650,	/* "AES-128-CFB1" */
 653,	/* "AES-128-CFB8" */
@@ -2465,6 +2472,7 @@ static const unsigned int sn_objs[NUM_SN]={
 913,	/* "AES-128-XTS" */
 423,	/* "AES-192-CBC" */
 917,	/* "AES-192-CBC-HMAC-SHA1" */
+936,	/* "AES-192-CBC-HMAC-SHA256" */
 425,	/* "AES-192-CFB" */
 651,	/* "AES-192-CFB1" */
 654,	/* "AES-192-CFB8" */
@@ -2473,6 +2481,7 @@ static const unsigned int sn_objs[NUM_SN]={
 424,	/* "AES-192-OFB" */
 427,	/* "AES-256-CBC" */
 918,	/* "AES-256-CBC-HMAC-SHA1" */
+937,	/* "AES-256-CBC-HMAC-SHA256" */
 429,	/* "AES-256-CFB" */
 652,	/* "AES-256-CFB1" */
 655,	/* "AES-256-CFB8" */
@@ -3530,6 +3539,7 @@ static const unsigned int ln_objs[NUM_LN]={
 606,	/* "additional verification" */
 419,	/* "aes-128-cbc" */
 916,	/* "aes-128-cbc-hmac-sha1" */
+935,	/* "aes-128-cbc-hmac-sha256" */
 896,	/* "aes-128-ccm" */
 421,	/* "aes-128-cfb" */
 650,	/* "aes-128-cfb1" */
@@ -3541,6 +3551,7 @@ static const unsigned int ln_objs[NUM_LN]={
 913,	/* "aes-128-xts" */
 423,	/* "aes-192-cbc" */
 917,	/* "aes-192-cbc-hmac-sha1" */
+936,	/* "aes-192-cbc-hmac-sha256" */
 899,	/* "aes-192-ccm" */
 425,	/* "aes-192-cfb" */
 651,	/* "aes-192-cfb1" */
@@ -3551,6 +3562,7 @@ static const unsigned int ln_objs[NUM_LN]={
 424,	/* "aes-192-ofb" */
 427,	/* "aes-256-cbc" */
 918,	/* "aes-256-cbc-hmac-sha1" */
+937,	/* "aes-256-cbc-hmac-sha256" */
 902,	/* "aes-256-ccm" */
 429,	/* "aes-256-cfb" */
 652,	/* "aes-256-cfb1" */
diff --git a/crypto/objects/obj_mac.h b/crypto/objects/obj_mac.h
index 704697eee0..68063777d8 100644
--- a/crypto/objects/obj_mac.h
+++ b/crypto/objects/obj_mac.h
@@ -4030,6 +4030,18 @@
 #define LN_aes_256_cbc_hmac_sha1 "aes-256-cbc-hmac-sha1"
 #define NID_aes_256_cbc_hmac_sha1 918
 
+#define SN_aes_128_cbc_hmac_sha256 "AES-128-CBC-HMAC-SHA256"
+#define LN_aes_128_cbc_hmac_sha256 "aes-128-cbc-hmac-sha256"
+#define NID_aes_128_cbc_hmac_sha256 935
+
+#define SN_aes_192_cbc_hmac_sha256 "AES-192-CBC-HMAC-SHA256"
+#define LN_aes_192_cbc_hmac_sha256 "aes-192-cbc-hmac-sha256"
+#define NID_aes_192_cbc_hmac_sha256 936
+
+#define SN_aes_256_cbc_hmac_sha256 "AES-256-CBC-HMAC-SHA256"
+#define LN_aes_256_cbc_hmac_sha256 "aes-256-cbc-hmac-sha256"
+#define NID_aes_256_cbc_hmac_sha256 937
+
 #define SN_dhpublicnumber "dhpublicnumber"
 #define LN_dhpublicnumber "X9.42 DH"
 #define NID_dhpublicnumber 920
diff --git a/crypto/objects/obj_mac.num b/crypto/objects/obj_mac.num
index 929743572f..181532424d 100644
--- a/crypto/objects/obj_mac.num
+++ b/crypto/objects/obj_mac.num
@@ -932,3 +932,6 @@ brainpoolP384r1 931
 brainpoolP384t1 932
 brainpoolP512r1 933
 brainpoolP512t1 934
+aes_128_cbc_hmac_sha256 935
+aes_192_cbc_hmac_sha256 936
+aes_256_cbc_hmac_sha256 937
diff --git a/crypto/objects/objects.txt b/crypto/objects/objects.txt
index 42514695bd..1868446392 100644
--- a/crypto/objects/objects.txt
+++ b/crypto/objects/objects.txt
@@ -1290,6 +1290,9 @@ kisa 1 6 : SEED-OFB : seed-ofb
 
 	: AES-128-CBC-HMAC-SHA1	: aes-128-cbc-hmac-sha1
 	: AES-192-CBC-HMAC-SHA1	: aes-192-cbc-hmac-sha1
 	: AES-256-CBC-HMAC-SHA1	: aes-256-cbc-hmac-sha1
+	: AES-128-CBC-HMAC-SHA256	: aes-128-cbc-hmac-sha256
+	: AES-192-CBC-HMAC-SHA256	: aes-192-cbc-hmac-sha256
+	: AES-256-CBC-HMAC-SHA256	: aes-256-cbc-hmac-sha256
 
 ISO-US 10046 2 1 : dhpublicnumber	: X9.42 DH
diff --git a/ssl/ssl_algs.c b/ssl/ssl_algs.c
index d5a7a20e8a..d3646aef97 100644
--- a/ssl/ssl_algs.c
+++ b/ssl/ssl_algs.c
@@ -94,7 +94,10 @@ int SSL_library_init(void)
 	EVP_add_cipher(EVP_aes_128_cbc_hmac_sha1());
 	EVP_add_cipher(EVP_aes_256_cbc_hmac_sha1());
 #endif
-
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)
+	EVP_add_cipher(EVP_aes_128_cbc_hmac_sha256());
+	EVP_add_cipher(EVP_aes_256_cbc_hmac_sha256());
+#endif
 #endif
 #ifndef OPENSSL_NO_CAMELLIA
 	EVP_add_cipher(EVP_camellia_128_cbc());
diff --git a/ssl/ssl_ciph.c b/ssl/ssl_ciph.c
index 1683f7bb1f..afdc8ff5df 100644
--- a/ssl/ssl_ciph.c
+++ b/ssl/ssl_ciph.c
@@ -638,6 +638,14 @@ int ssl_cipher_get_evp(const SSL_SESSION *s, const EVP_CIPHER **enc,
 		    c->algorithm_mac == SSL_SHA1 &&
 		    (evp=EVP_get_cipherbyname("AES-256-CBC-HMAC-SHA1")))
 			*enc = evp, *md = NULL;
+		else if (c->algorithm_enc == SSL_AES128 &&
+		    c->algorithm_mac == SSL_SHA256 &&
+		    (evp=EVP_get_cipherbyname("AES-128-CBC-HMAC-SHA256")))
+			*enc = evp, *md = NULL;
+		else if (c->algorithm_enc == SSL_AES256 &&
+		    c->algorithm_mac == SSL_SHA256 &&
+		    (evp=EVP_get_cipherbyname("AES-256-CBC-HMAC-SHA256")))
+			*enc = evp, *md = NULL;
 		return(1);
 		}
 	else
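
Usage sketch (not part of the patch): a minimal illustration of how an
application outside ssl/ could drive the new composite cipher through EVP,
assuming the library has been initialized; key, iv and mac_key are
illustrative placeholders, and the cipher getter returns NULL on processors
without the required AESNI+AVX support.

	#include <openssl/evp.h>

	int demo(const unsigned char key[16], const unsigned char iv[16],
	         const unsigned char mac_key[32])
		{
		const EVP_CIPHER *c = EVP_aes_128_cbc_hmac_sha256();
		EVP_CIPHER_CTX ctx;
		int ok;

		if (c == NULL) return 0;	/* stitch unavailable on this CPU */

		EVP_CIPHER_CTX_init(&ctx);
		ok = EVP_EncryptInit_ex(&ctx, c, NULL, key, iv) &&
		     /* hand over the HMAC-SHA256 key, 32 bytes in this sketch */
		     EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_AEAD_SET_MAC_KEY, 32,
		                         (void *)mac_key) > 0;
		/* EVP_CTRL_AEAD_TLS1_AAD and EVP_Cipher() calls, as issued by
		 * the record layer in ssl/, would follow here */
		EVP_CIPHER_CTX_cleanup(&ctx);
		return ok;
		}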