From 50f1b47c7f30bb1cd6d91b0e43a6087014b30abe Mon Sep 17 00:00:00 2001
From: Andy Polyakov
Date: Sat, 1 Feb 2014 21:48:31 +0100
Subject: [PATCH] PPC assembly pack: jumbo update from master.

Add Vector Permutation AES and little-endian support.
---
 Configure                    |    5 +-
 TABLE                        |   61 +-
 crypto/aes/Makefile          |    2 +
 crypto/aes/asm/aes-ppc.pl    |   90 ++
 crypto/aes/asm/vpaes-ppc.pl  | 1507 ++++++++++++++++++++++++++++++++++
 crypto/evp/e_aes.c           |    5 +
 crypto/perlasm/ppc-xlate.pl  |   41 +-
 crypto/sha/asm/sha1-ppc.pl   |   29 +-
 crypto/sha/asm/sha512-ppc.pl |  202 +++--
 9 files changed, 1834 insertions(+), 108 deletions(-)
 create mode 100644 crypto/aes/asm/vpaes-ppc.pl

diff --git a/Configure b/Configure
index f8265d3d9a..7eb0c1a112 100755
--- a/Configure
+++ b/Configure
@@ -139,8 +139,8 @@ my $s390x_asm="s390xcap.o s390xcpuid.o:bn-s390x.o s390x-mont.o s390x-gf2m.o::aes
 my $armv4_asm="armcap.o armv4cpuid.o:bn_asm.o armv4-mont.o armv4-gf2m.o::aes_cbc.o aes-armv4.o bsaes-armv7.o:::sha1-armv4-large.o sha256-armv4.o sha512-armv4.o:::::::ghash-armv4.o::void";
 my $parisc11_asm="pariscid.o:bn_asm.o parisc-mont.o::aes_core.o aes_cbc.o aes-parisc.o:::sha1-parisc.o sha256-parisc.o sha512-parisc.o::rc4-parisc.o:::::ghash-parisc.o::32";
 my $parisc20_asm="pariscid.o:pa-risc2W.o parisc-mont.o::aes_core.o aes_cbc.o aes-parisc.o:::sha1-parisc.o sha256-parisc.o sha512-parisc.o::rc4-parisc.o:::::ghash-parisc.o::64";
-my $ppc32_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o::::::::";
-my $ppc64_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o::::::::";
+my $ppc64_asm="ppccpuid.o ppccap.o:bn-ppc.o ppc-mont.o ppc64-mont.o::aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o::::::::";
+my $ppc32_asm=$ppc64_asm;
 my $no_asm=":::::::::::::::void";

 # As for $BSDthreads.
Idea is to maintain "collective" set of flags, @@ -365,6 +365,7 @@ my %table=( #### "linux-generic64","gcc:-DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL BF_PTR:${no_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "linux-ppc64", "gcc:-m64 -DB_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL:${ppc64_asm}:linux64:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):::64", +"linux-ppc64le","gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL:$ppc64_asm:linux64le:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):::", "linux-ia64", "gcc:-DL_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_UNROLL DES_INT:${ia64_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "linux-ia64-icc","icc:-DL_ENDIAN -DTERMIO -O2 -Wall::-D_REENTRANT::-ldl -no_cpprt:SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_RISC1 DES_INT:${ia64_asm}:dlfcn:linux-shared:-fPIC::.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR)", "linux-x86_64", "gcc:-m64 -DL_ENDIAN -DTERMIO -O3 -Wall::-D_REENTRANT::-ldl:SIXTY_FOUR_BIT_LONG RC4_CHUNK DES_INT DES_UNROLL:${x86_64_asm}:elf:dlfcn:linux-shared:-fPIC:-m64:.so.\$(SHLIB_MAJOR).\$(SHLIB_MINOR):::64", diff --git a/TABLE b/TABLE index b697134e3f..c7ceea9d01 100644 --- a/TABLE +++ b/TABLE @@ -902,10 +902,10 @@ $bn_ops = BN_LLONG RC4_CHAR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = -$sha1_obj = sha1-ppc.o sha256-ppc.o +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o $cast_obj = $rc4_obj = $rmd160_obj = @@ -935,10 +935,10 @@ $bn_ops = BN_LLONG RC4_CHAR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = -$sha1_obj = sha1-ppc.o sha256-ppc.o +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o $cast_obj = $rc4_obj = $rmd160_obj = @@ -1001,7 +1001,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = $sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o @@ -1034,7 +1034,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = $sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o @@ -1463,10 +1463,10 @@ $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = -$sha1_obj = sha1-ppc.o sha256-ppc.o +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o $cast_obj = $rc4_obj = $rmd160_obj = @@ -1496,7 +1496,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = $sha1_obj = 
sha1-ppc.o sha256-ppc.o sha512-ppc.o @@ -2189,10 +2189,10 @@ $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = -$sha1_obj = sha1-ppc.o sha256-ppc.o +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o $cast_obj = $rc4_obj = $rmd160_obj = @@ -4400,10 +4400,10 @@ $bn_ops = BN_LLONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = -$sha1_obj = sha1-ppc.o sha256-ppc.o +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o $cast_obj = $rc4_obj = $rmd160_obj = @@ -4433,7 +4433,7 @@ $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL $cpuid_obj = ppccpuid.o ppccap.o $bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o $des_obj = -$aes_obj = aes_core.o aes_cbc.o aes-ppc.o +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o $bf_obj = $md5_obj = $sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o @@ -4455,6 +4455,39 @@ $ranlib = $arflags = $multilib = 64 +*** linux-ppc64le +$cc = gcc +$cflags = -m64 -DL_ENDIAN -DTERMIO -O3 -Wall +$unistd = +$thread_cflag = -D_REENTRANT +$sys_id = +$lflags = -ldl +$bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_RISC1 DES_UNROLL +$cpuid_obj = ppccpuid.o ppccap.o +$bn_obj = bn-ppc.o ppc-mont.o ppc64-mont.o +$des_obj = +$aes_obj = aes_core.o aes_cbc.o aes-ppc.o vpaes-ppc.o +$bf_obj = +$md5_obj = +$sha1_obj = sha1-ppc.o sha256-ppc.o sha512-ppc.o +$cast_obj = +$rc4_obj = +$rmd160_obj = +$rc5_obj = +$wp_obj = +$cmll_obj = +$modes_obj = +$engines_obj = +$perlasm_scheme = linux64le +$dso_scheme = dlfcn +$shared_target= linux-shared +$shared_cflag = -fPIC +$shared_ldflag = -m64 +$shared_extension = .so.$(SHLIB_MAJOR).$(SHLIB_MINOR) +$ranlib = +$arflags = +$multilib = + *** linux-sparcv8 $cc = gcc $cflags = -mv8 -DB_ENDIAN -DTERMIO -O3 -fomit-frame-pointer -Wall -DBN_DIV2W diff --git a/crypto/aes/Makefile b/crypto/aes/Makefile index 7f84aaa2cd..fdfda4218d 100644 --- a/crypto/aes/Makefile +++ b/crypto/aes/Makefile @@ -75,6 +75,8 @@ aest4-sparcv9.s: asm/aest4-sparcv9.pl aes-ppc.s: asm/aes-ppc.pl $(PERL) asm/aes-ppc.pl $(PERLASM_SCHEME) $@ +vpaes-ppc.s: asm/vpaes-ppc.pl + $(PERL) asm/vpaes-ppc.pl $(PERLASM_SCHEME) $@ aes-parisc.s: asm/aes-parisc.pl $(PERL) asm/aes-parisc.pl $(PERLASM_SCHEME) $@ diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl index 18138c2c84..7a99fc3d04 100644 --- a/crypto/aes/asm/aes-ppc.pl +++ b/crypto/aes/asm/aes-ppc.pl @@ -45,6 +45,8 @@ if ($flavour =~ /64/) { $PUSH ="stw"; } else { die "nonsense $flavour"; } +$LITTLE_ENDIAN = ($flavour=~/le$/) ? 
$SIZE_T : 0; + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or @@ -361,17 +363,61 @@ $code.=<<___; bne Lenc_unaligned Lenc_unaligned_ok: +___ +$code.=<<___ if (!$LITTLE_ENDIAN); lwz $s0,0($inp) lwz $s1,4($inp) lwz $s2,8($inp) lwz $s3,12($inp) +___ +$code.=<<___ if ($LITTLE_ENDIAN); + lwz $t0,0($inp) + lwz $t1,4($inp) + lwz $t2,8($inp) + lwz $t3,12($inp) + rotlwi $s0,$t0,8 + rotlwi $s1,$t1,8 + rotlwi $s2,$t2,8 + rotlwi $s3,$t3,8 + rlwimi $s0,$t0,24,0,7 + rlwimi $s1,$t1,24,0,7 + rlwimi $s2,$t2,24,0,7 + rlwimi $s3,$t3,24,0,7 + rlwimi $s0,$t0,24,16,23 + rlwimi $s1,$t1,24,16,23 + rlwimi $s2,$t2,24,16,23 + rlwimi $s3,$t3,24,16,23 +___ +$code.=<<___; bl LAES_Te bl Lppc_AES_encrypt_compact $POP $out,`$FRAME-$SIZE_T*19`($sp) +___ +$code.=<<___ if ($LITTLE_ENDIAN); + rotlwi $t0,$s0,8 + rotlwi $t1,$s1,8 + rotlwi $t2,$s2,8 + rotlwi $t3,$s3,8 + rlwimi $t0,$s0,24,0,7 + rlwimi $t1,$s1,24,0,7 + rlwimi $t2,$s2,24,0,7 + rlwimi $t3,$s3,24,0,7 + rlwimi $t0,$s0,24,16,23 + rlwimi $t1,$s1,24,16,23 + rlwimi $t2,$s2,24,16,23 + rlwimi $t3,$s3,24,16,23 + stw $t0,0($out) + stw $t1,4($out) + stw $t2,8($out) + stw $t3,12($out) +___ +$code.=<<___ if (!$LITTLE_ENDIAN); stw $s0,0($out) stw $s1,4($out) stw $s2,8($out) stw $s3,12($out) +___ +$code.=<<___; b Lenc_done Lenc_unaligned: @@ -795,17 +841,61 @@ Lenc_compact_done: bne Ldec_unaligned Ldec_unaligned_ok: +___ +$code.=<<___ if (!$LITTLE_ENDIAN); lwz $s0,0($inp) lwz $s1,4($inp) lwz $s2,8($inp) lwz $s3,12($inp) +___ +$code.=<<___ if ($LITTLE_ENDIAN); + lwz $t0,0($inp) + lwz $t1,4($inp) + lwz $t2,8($inp) + lwz $t3,12($inp) + rotlwi $s0,$t0,8 + rotlwi $s1,$t1,8 + rotlwi $s2,$t2,8 + rotlwi $s3,$t3,8 + rlwimi $s0,$t0,24,0,7 + rlwimi $s1,$t1,24,0,7 + rlwimi $s2,$t2,24,0,7 + rlwimi $s3,$t3,24,0,7 + rlwimi $s0,$t0,24,16,23 + rlwimi $s1,$t1,24,16,23 + rlwimi $s2,$t2,24,16,23 + rlwimi $s3,$t3,24,16,23 +___ +$code.=<<___; bl LAES_Td bl Lppc_AES_decrypt_compact $POP $out,`$FRAME-$SIZE_T*19`($sp) +___ +$code.=<<___ if ($LITTLE_ENDIAN); + rotlwi $t0,$s0,8 + rotlwi $t1,$s1,8 + rotlwi $t2,$s2,8 + rotlwi $t3,$s3,8 + rlwimi $t0,$s0,24,0,7 + rlwimi $t1,$s1,24,0,7 + rlwimi $t2,$s2,24,0,7 + rlwimi $t3,$s3,24,0,7 + rlwimi $t0,$s0,24,16,23 + rlwimi $t1,$s1,24,16,23 + rlwimi $t2,$s2,24,16,23 + rlwimi $t3,$s3,24,16,23 + stw $t0,0($out) + stw $t1,4($out) + stw $t2,8($out) + stw $t3,12($out) +___ +$code.=<<___ if (!$LITTLE_ENDIAN); stw $s0,0($out) stw $s1,4($out) stw $s2,8($out) stw $s3,12($out) +___ +$code.=<<___; b Ldec_done Ldec_unaligned: diff --git a/crypto/aes/asm/vpaes-ppc.pl b/crypto/aes/asm/vpaes-ppc.pl new file mode 100644 index 0000000000..f78e713f70 --- /dev/null +++ b/crypto/aes/asm/vpaes-ppc.pl @@ -0,0 +1,1507 @@ +#!/usr/bin/env perl + +###################################################################### +## Constant-time SSSE3 AES core implementation. +## version 0.1 +## +## By Mike Hamburg (Stanford University), 2009 +## Public domain. +## +## For details see http://shiftleft.org/papers/vector_aes/ and +## http://crypto.stanford.edu/vpaes/. + +# CBC encrypt/decrypt performance in cycles per byte processed with +# 128-bit key. +# +# aes-ppc.pl this +# G4e 35.5/52.1/(23.8) 11.9(*)/15.4 +# POWER6 42.7/54.3/(28.2) 63.0/92.8(**) +# POWER7 32.3/42.9/(18.4) 18.5/23.3 +# +# (*) This is ~10% worse than reported in paper. The reason is +# twofold. This module doesn't make any assumption about +# key schedule (or data for that matter) alignment and handles +# it in-line. 
Secondly it, being transliterated from +# vpaes-x86_64.pl, relies on "nested inversion" better suited +# for Intel CPUs. +# (**) Inadequate POWER6 performance is due to astronomic AltiVec +# latency, 9 cycles per simple logical operation. + +$flavour = shift; + +if ($flavour =~ /64/) { + $SIZE_T =8; + $LRSAVE =2*$SIZE_T; + $STU ="stdu"; + $POP ="ld"; + $PUSH ="std"; +} elsif ($flavour =~ /32/) { + $SIZE_T =4; + $LRSAVE =$SIZE_T; + $STU ="stwu"; + $POP ="lwz"; + $PUSH ="stw"; +} else { die "nonsense $flavour"; } + +$sp="r1"; +$FRAME=6*$SIZE_T+13*16; # 13*16 is for v20-v31 offload + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or +( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or +die "can't locate ppc-xlate.pl"; + +open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; + +$code.=<<___; +.machine "any" + +.text + +.align 7 # totally strategic alignment +_vpaes_consts: +Lk_mc_forward: # mc_forward + .long 0x01020300, 0x05060704, 0x090a0b08, 0x0d0e0f0c ?inv + .long 0x05060704, 0x090a0b08, 0x0d0e0f0c, 0x01020300 ?inv + .long 0x090a0b08, 0x0d0e0f0c, 0x01020300, 0x05060704 ?inv + .long 0x0d0e0f0c, 0x01020300, 0x05060704, 0x090a0b08 ?inv +Lk_mc_backward: # mc_backward + .long 0x03000102, 0x07040506, 0x0b08090a, 0x0f0c0d0e ?inv + .long 0x0f0c0d0e, 0x03000102, 0x07040506, 0x0b08090a ?inv + .long 0x0b08090a, 0x0f0c0d0e, 0x03000102, 0x07040506 ?inv + .long 0x07040506, 0x0b08090a, 0x0f0c0d0e, 0x03000102 ?inv +Lk_sr: # sr + .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f ?inv + .long 0x00050a0f, 0x04090e03, 0x080d0207, 0x0c01060b ?inv + .long 0x0009020b, 0x040d060f, 0x08010a03, 0x0c050e07 ?inv + .long 0x000d0a07, 0x04010e0b, 0x0805020f, 0x0c090603 ?inv + +## +## "Hot" constants +## +Lk_inv: # inv, inva + .long 0xf001080d, 0x0f06050e, 0x020c0b0a, 0x09030704 ?rev + .long 0xf0070b0f, 0x060a0401, 0x09080502, 0x0c0e0d03 ?rev +Lk_ipt: # input transform (lo, hi) + .long 0x00702a5a, 0x98e8b2c2, 0x08782252, 0x90e0baca ?rev + .long 0x004d7c31, 0x7d30014c, 0x81ccfdb0, 0xfcb180cd ?rev +Lk_sbo: # sbou, sbot + .long 0x00c7bd6f, 0x176dd2d0, 0x78a802c5, 0x7abfaa15 ?rev + .long 0x006abb5f, 0xa574e4cf, 0xfa352b41, 0xd1901e8e ?rev +Lk_sb1: # sb1u, sb1t + .long 0x0023e2fa, 0x15d41836, 0xefd92e0d, 0xc1ccf73b ?rev + .long 0x003e50cb, 0x8fe19bb1, 0x44f52a14, 0x6e7adfa5 ?rev +Lk_sb2: # sb2u, sb2t + .long 0x0029e10a, 0x4088eb69, 0x4a2382ab, 0xc863a1c2 ?rev + .long 0x0024710b, 0xc6937ae2, 0xcd2f98bc, 0x55e9b75e ?rev + +## +## Decryption stuff +## +Lk_dipt: # decryption input transform + .long 0x005f540b, 0x045b500f, 0x1a454e11, 0x1e414a15 ?rev + .long 0x00650560, 0xe683e386, 0x94f191f4, 0x72177712 ?rev +Lk_dsbo: # decryption sbox final output + .long 0x0040f97e, 0x53ea8713, 0x2d3e94d4, 0xb96daac7 ?rev + .long 0x001d4493, 0x0f56d712, 0x9c8ec5d8, 0x59814bca ?rev +Lk_dsb9: # decryption sbox output *9*u, *9*t + .long 0x00d6869a, 0x53031c85, 0xc94c994f, 0x501fd5ca ?rev + .long 0x0049d7ec, 0x89173bc0, 0x65a5fbb2, 0x9e2c5e72 ?rev +Lk_dsbd: # decryption sbox output *D*u, *D*t + .long 0x00a2b1e6, 0xdfcc577d, 0x39442a88, 0x139b6ef5 ?rev + .long 0x00cbc624, 0xf7fae23c, 0xd3efde15, 0x0d183129 ?rev +Lk_dsbb: # decryption sbox output *B*u, *B*t + .long 0x0042b496, 0x926422d0, 0x04d4f2b0, 0xf6462660 ?rev + .long 0x006759cd, 0xa69894c1, 0x6baa5532, 0x3e0cfff3 ?rev +Lk_dsbe: # decryption sbox output *E*u, *E*t + .long 0x00d0d426, 0x9692f246, 0xb0f6b464, 0x04604222 ?rev + .long 0x00c1aaff, 0xcda6550c, 0x323e5998, 0x6bf36794 ?rev + +## +## Key schedule constants +## 
+Lk_dksd: # decryption key schedule: invskew x*D + .long 0x0047e4a3, 0x5d1ab9fe, 0xf9be1d5a, 0xa4e34007 ?rev + .long 0x008336b5, 0xf477c241, 0x1e9d28ab, 0xea69dc5f ?rev +Lk_dksb: # decryption key schedule: invskew x*B + .long 0x00d55085, 0x1fca4f9a, 0x994cc91c, 0x8653d603 ?rev + .long 0x004afcb6, 0xa7ed5b11, 0xc882347e, 0x6f2593d9 ?rev +Lk_dkse: # decryption key schedule: invskew x*E + 0x63 + .long 0x00d6c91f, 0xca1c03d5, 0x86504f99, 0x4c9a8553 ?rev + .long 0xe87bdc4f, 0x059631a2, 0x8714b320, 0x6af95ecd ?rev +Lk_dks9: # decryption key schedule: invskew x*9 + .long 0x00a7d97e, 0xc86f11b6, 0xfc5b2582, 0x3493ed4a ?rev + .long 0x00331427, 0x62517645, 0xcefddae9, 0xac9fb88b ?rev + +Lk_rcon: # rcon + .long 0xb6ee9daf, 0xb991831f, 0x817d7c4d, 0x08982a70 ?asis +Lk_s63: + .long 0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b, 0x5b5b5b5b ?asis + +Lk_opt: # output transform + .long 0x0060b6d6, 0x29499fff, 0x0868bede, 0x214197f7 ?rev + .long 0x00ecbc50, 0x51bded01, 0xe00c5cb0, 0xb15d0de1 ?rev +Lk_deskew: # deskew tables: inverts the sbox's "skew" + .long 0x00e3a447, 0x40a3e407, 0x1af9be5d, 0x5ab9fe1d ?rev + .long 0x0069ea83, 0xdcb5365f, 0x771e9df4, 0xabc24128 ?rev +.align 5 +Lconsts: + mflr r0 + bcl 20,31,\$+4 + mflr r12 #vvvvv "distance between . and _vpaes_consts + addi r12,r12,-0x308 + mtlr r0 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 +.asciz "Vector Permutaion AES for AltiVec, Mike Hamburg (Stanford University)" +.align 6 +___ + +my ($inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm) = map("v$_",(26..31)); +{ +my ($inp,$out,$key) = map("r$_",(3..5)); + +my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_",(10..15)); +my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_",(16..19)); +my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_",(16..23)); + +$code.=<<___; +## +## _aes_preheat +## +## Fills register %r10 -> .aes_consts (so you can -fPIC) +## and %xmm9-%xmm15 as specified below. +## +.align 4 +_vpaes_encrypt_preheat: + mflr r8 + bl Lconsts + mtlr r8 + li r11, 0xc0 # Lk_inv + li r10, 0xd0 + li r9, 0xe0 # Lk_ipt + li r8, 0xf0 + vxor v7, v7, v7 # 0x00..00 + vspltisb v8,4 # 0x04..04 + vspltisb v9,0x0f # 0x0f..0f + lvx $invlo, r12, r11 + li r11, 0x100 + lvx $invhi, r12, r10 + li r10, 0x110 + lvx $iptlo, r12, r9 + li r9, 0x120 + lvx $ipthi, r12, r8 + li r8, 0x130 + lvx $sbou, r12, r11 + li r11, 0x140 + lvx $sbot, r12, r10 + li r10, 0x150 + lvx $sb1u, r12, r9 + lvx $sb1t, r12, r8 + lvx $sb2u, r12, r11 + lvx $sb2t, r12, r10 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## _aes_encrypt_core +## +## AES-encrypt %xmm0. 
+## +## Inputs: +## %xmm0 = input +## %xmm9-%xmm15 as in _vpaes_preheat +## (%rdx) = scheduled keys +## +## Output in %xmm0 +## Clobbers %xmm1-%xmm6, %r9, %r10, %r11, %rax +## +## +.align 5 +_vpaes_encrypt_core: + lwz r8, 240($key) # pull rounds + li r9, 16 + lvx v5, 0, $key # vmovdqu (%r9), %xmm5 # round0 key + li r11, 0x10 + lvx v6, r9, $key + addi r9, r9, 16 + ?vperm v5, v5, v6, $keyperm # align round key + addi r10, r11, 0x40 + vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 + vperm v0, $iptlo, $iptlo, v0 # vpshufb %xmm1, %xmm2, %xmm1 + vperm v1, $ipthi, $ipthi, v1 # vpshufb %xmm0, %xmm3, %xmm2 + vxor v0, v0, v5 # vpxor %xmm5, %xmm1, %xmm0 + vxor v0, v0, v1 # vpxor %xmm2, %xmm0, %xmm0 + mtctr r8 + b Lenc_entry + +.align 4 +Lenc_loop: + # middle of middle round + vperm v4, $sb1t, v7, v2 # vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u + lvx v1, r12, r11 # vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[] + addi r11, r11, 16 + vperm v0, $sb1u, v7, v3 # vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t + vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k + andi. r11, r11, 0x30 # and \$0x30, %r11 # ... mod 4 + vperm v5, $sb2t, v7, v2 # vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u + vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A + vperm v2, $sb2u, v7, v3 # vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t + lvx v4, r12, r10 # vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[] + addi r10, r11, 0x40 + vperm v3, v0, v7, v1 # vpshufb %xmm1, %xmm0, %xmm3 # 0 = B + vxor v2, v2, v5 # vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A + vperm v0, v0, v7, v4 # vpshufb %xmm4, %xmm0, %xmm0 # 3 = D + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B + vperm v4, v3, v7, v1 # vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C + vxor v0, v0, v3 # vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D + vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D + +Lenc_entry: + # top of round + vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 # 1 = i + vperm v5, $invhi, $invhi, v0 # vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k + vxor v0, v0, v1 # vpxor %xmm0, %xmm1, %xmm1 # 0 = j + vperm v3, $invlo, $invlo, v1 # vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i + vperm v4, $invlo, $invlo, v0 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j + vand v0, v0, v9 + vxor v3, v3, v5 # vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k + vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k + vperm v2, $invlo, v7, v3 # vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak + vmr v5, v6 + lvx v6, r9, $key # vmovdqu (%r9), %xmm5 + vperm v3, $invlo, v7, v4 # vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak + addi r9, r9, 16 + vxor v2, v2, v0 # vpxor %xmm1, %xmm2, %xmm2 # 2 = io + ?vperm v5, v5, v6, $keyperm # align round key + vxor v3, v3, v1 # vpxor %xmm0, %xmm3, %xmm3 # 3 = jo + bdnz Lenc_loop + + # middle of last round + addi r10, r11, 0x80 + # vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo + # vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16 + vperm v4, $sbou, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou + lvx v1, r12, r10 # vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[] + vperm v0, $sbot, v7, v3 # vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t + vxor v4, v4, v5 # vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k + vxor v0, v0, v4 # vpxor %xmm4, %xmm0, %xmm0 # 0 = A + vperm v0, v0, v7, v1 # vpshufb %xmm1, %xmm0, %xmm0 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +.globl .vpaes_encrypt +.align 5 +.vpaes_encrypt: + $STU $sp,-$FRAME($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mflr r6 + mfspr r7, 256 # save vrsave + stvx v20,r10,$sp + addi r10,r10,16 + stvx v21,r11,$sp + addi r11,r11,16 + stvx v22,r10,$sp + addi r10,r10,16 + stvx 
v23,r11,$sp + addi r11,r11,16 + stvx v24,r10,$sp + addi r10,r10,16 + stvx v25,r11,$sp + addi r11,r11,16 + stvx v26,r10,$sp + addi r10,r10,16 + stvx v27,r11,$sp + addi r11,r11,16 + stvx v28,r10,$sp + addi r10,r10,16 + stvx v29,r11,$sp + addi r11,r11,16 + stvx v30,r10,$sp + stvx v31,r11,$sp + lwz r7,`$FRAME-4`($sp) # save vrsave + li r0, -1 + $PUSH r6,`$FRAME+$LRSAVE`($sp) + mtspr 256, r0 # preserve all AltiVec registers + + bl _vpaes_encrypt_preheat + + ?lvsl $inpperm, 0, $inp # prepare for unaligned access + lvx v0, 0, $inp + addi $inp, $inp, 15 # 15 is not a typo + ?lvsr $outperm, 0, $out + ?lvsl $keyperm, 0, $key # prepare for unaligned access + vnor $outmask, v7, v7 # 0xff..ff + lvx $inptail, 0, $inp # redundant in aligned case + ?vperm $outmask, v7, $outmask, $outperm + lvx $outhead, 0, $out + ?vperm v0, v0, $inptail, $inpperm + + bl _vpaes_encrypt_core + + vperm v0, v0, v0, $outperm # rotate right/left + vsel v1, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v1, 0, $out + addi $out, $out, 15 # 15 is not a typo + ######## + + lvx v1, 0, $out # redundant in aligned case + vsel v1, $outhead, v1, $outmask + stvx v1, 0, $out + + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mtlr r6 + mtspr 256, r7 # restore vrsave + lvx v20,r10,$sp + addi r10,r10,16 + lvx v21,r11,$sp + addi r11,r11,16 + lvx v22,r10,$sp + addi r10,r10,16 + lvx v23,r11,$sp + addi r11,r11,16 + lvx v24,r10,$sp + addi r10,r10,16 + lvx v25,r11,$sp + addi r11,r11,16 + lvx v26,r10,$sp + addi r10,r10,16 + lvx v27,r11,$sp + addi r11,r11,16 + lvx v28,r10,$sp + addi r10,r10,16 + lvx v29,r11,$sp + addi r11,r11,16 + lvx v30,r10,$sp + lvx v31,r11,$sp + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,0x04,1,0x80,0,3,0 + .long 0 +.size .vpaes_encrypt,.-.vpaes_encrypt + +.align 4 +_vpaes_decrypt_preheat: + mflr r8 + bl Lconsts + mtlr r8 + li r11, 0xc0 # Lk_inv + li r10, 0xd0 + li r9, 0x160 # Ldipt + li r8, 0x170 + vxor v7, v7, v7 # 0x00..00 + vspltisb v8,4 # 0x04..04 + vspltisb v9,0x0f # 0x0f..0f + lvx $invlo, r12, r11 + li r11, 0x180 + lvx $invhi, r12, r10 + li r10, 0x190 + lvx $iptlo, r12, r9 + li r9, 0x1a0 + lvx $ipthi, r12, r8 + li r8, 0x1b0 + lvx $sbou, r12, r11 + li r11, 0x1c0 + lvx $sbot, r12, r10 + li r10, 0x1d0 + lvx $sb9u, r12, r9 + li r9, 0x1e0 + lvx $sb9t, r12, r8 + li r8, 0x1f0 + lvx $sbdu, r12, r11 + li r11, 0x200 + lvx $sbdt, r12, r10 + li r10, 0x210 + lvx $sbbu, r12, r9 + lvx $sbbt, r12, r8 + lvx $sbeu, r12, r11 + lvx $sbet, r12, r10 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## Decryption core +## +## Same API as encryption core. +## +.align 4 +_vpaes_decrypt_core: + lwz r8, 240($key) # pull rounds + li r9, 16 + lvx v5, 0, $key # vmovdqu (%r9), %xmm4 # round0 key + li r11, 0x30 + lvx v6, r9, $key + addi r9, r9, 16 + ?vperm v5, v5, v6, $keyperm # align round key + vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 + vperm v0, $iptlo, $iptlo, v0 # vpshufb %xmm1, %xmm2, %xmm2 + vperm v1, $ipthi, $ipthi, v1 # vpshufb %xmm0, %xmm1, %xmm0 + vxor v0, v0, v5 # vpxor %xmm4, %xmm2, %xmm2 + vxor v0, v0, v1 # vpxor %xmm2, %xmm0, %xmm0 + mtctr r8 + b Ldec_entry + +.align 4 +Ldec_loop: +# +# Inverse mix columns +# + lvx v0, r12, r11 # v5 and v0 are flipped + # vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u + # vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t + vperm v4, $sb9u, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u + subi r11, r11, 16 + vperm v1, $sb9t, v7, v3 # vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t + andi. 
r11, r11, 0x30 + vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 + # vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu + vxor v5, v5, v1 # vpxor %xmm1, %xmm0, %xmm0 # 0 = ch + # vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt + + vperm v4, $sbdu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu + vperm v5, v5, v7, v0 # vpshufb %xmm5, %xmm0, %xmm0 # MC ch + vperm v1, $sbdt, v7, v3 # vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt + vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch + # vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu + vxor v5, v5, v1 # vpxor %xmm1, %xmm0, %xmm0 # 0 = ch + # vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt + + vperm v4, $sbbu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu + vperm v5, v5, v7, v0 # vpshufb %xmm5, %xmm0, %xmm0 # MC ch + vperm v1, $sbbt, v7, v3 # vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt + vxor v5, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch + # vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu + vxor v5, v5, v1 # vpxor %xmm1, %xmm0, %xmm0 # 0 = ch + # vmovdqa 0x50(%r10), %xmm1 # 0 : sbet + + vperm v4, $sbeu, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu + vperm v5, v5, v7, v0 # vpshufb %xmm5, %xmm0, %xmm0 # MC ch + vperm v1, $sbet, v7, v3 # vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet + vxor v0, v5, v4 # vpxor %xmm4, %xmm0, %xmm0 # 4 = ch + vxor v0, v0, v1 # vpxor %xmm1, %xmm0, %xmm0 # 0 = ch + +Ldec_entry: + # top of round + vsrb v1, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 # 1 = i + vperm v2, $invhi, $invhi, v0 # vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k + vxor v0, v0, v1 # vpxor %xmm0, %xmm1, %xmm1 # 0 = j + vperm v3, $invlo, $invlo, v1 # vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i + vperm v4, $invlo, $invlo, v0 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j + vand v0, v0, v9 + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k + vxor v4, v4, v2 # vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k + vperm v2, $invlo, v7, v3 # vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak + vmr v5, v6 + lvx v6, r9, $key # vmovdqu (%r9), %xmm0 + vperm v3, $invlo, v7, v4 # vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak + addi r9, r9, 16 + vxor v2, v2, v0 # vpxor %xmm1, %xmm2, %xmm2 # 2 = io + ?vperm v5, v5, v6, $keyperm # align round key + vxor v3, v3, v1 # vpxor %xmm0, %xmm3, %xmm3 # 3 = jo + bdnz Ldec_loop + + # middle of last round + addi r10, r11, 0x80 + # vmovdqa 0x60(%r10), %xmm4 # 3 : sbou + vperm v4, $sbou, v7, v2 # vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou + # vmovdqa 0x70(%r10), %xmm1 # 0 : sbot + lvx v2, r12, r10 # vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160 + vperm v1, $sbot, v7, v3 # vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t + vxor v4, v4, v5 # vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k + vxor v0, v1, v4 # vpxor %xmm4, %xmm1, %xmm0 # 0 = A + vperm v0, v0, v7, v2 # vpshufb %xmm2, %xmm0, %xmm0 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +.globl .vpaes_decrypt +.align 5 +.vpaes_decrypt: + $STU $sp,-$FRAME($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mflr r6 + mfspr r7, 256 # save vrsave + stvx v20,r10,$sp + addi r10,r10,16 + stvx v21,r11,$sp + addi r11,r11,16 + stvx v22,r10,$sp + addi r10,r10,16 + stvx v23,r11,$sp + addi r11,r11,16 + stvx v24,r10,$sp + addi r10,r10,16 + stvx v25,r11,$sp + addi r11,r11,16 + stvx v26,r10,$sp + addi r10,r10,16 + stvx v27,r11,$sp + addi r11,r11,16 + stvx v28,r10,$sp + addi r10,r10,16 + stvx v29,r11,$sp + addi r11,r11,16 + stvx v30,r10,$sp + stvx v31,r11,$sp + lwz r7,`$FRAME-4`($sp) # save vrsave + li r0, -1 + $PUSH r6,`$FRAME+$LRSAVE`($sp) + mtspr 256, r0 # preserve all AltiVec registers + + bl _vpaes_decrypt_preheat + + ?lvsl $inpperm, 0, $inp # prepare for unaligned access + lvx v0, 0, $inp + addi $inp, 
$inp, 15 # 15 is not a typo + ?lvsr $outperm, 0, $out + ?lvsl $keyperm, 0, $key + vnor $outmask, v7, v7 # 0xff..ff + lvx $inptail, 0, $inp # redundant in aligned case + ?vperm $outmask, v7, $outmask, $outperm + lvx $outhead, 0, $out + ?vperm v0, v0, $inptail, $inpperm + + bl _vpaes_decrypt_core + + vperm v0, v0, v0, $outperm # rotate right/left + vsel v1, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v1, 0, $out + addi $out, $out, 15 # 15 is not a typo + ######## + + lvx v1, 0, $out # redundant in aligned case + vsel v1, $outhead, v1, $outmask + stvx v1, 0, $out + + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mtlr r6 + mtspr 256, r7 # restore vrsave + lvx v20,r10,$sp + addi r10,r10,16 + lvx v21,r11,$sp + addi r11,r11,16 + lvx v22,r10,$sp + addi r10,r10,16 + lvx v23,r11,$sp + addi r11,r11,16 + lvx v24,r10,$sp + addi r10,r10,16 + lvx v25,r11,$sp + addi r11,r11,16 + lvx v26,r10,$sp + addi r10,r10,16 + lvx v27,r11,$sp + addi r11,r11,16 + lvx v28,r10,$sp + addi r10,r10,16 + lvx v29,r11,$sp + addi r11,r11,16 + lvx v30,r10,$sp + lvx v31,r11,$sp + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,0x04,1,0x80,0,3,0 + .long 0 +.size .vpaes_decrypt,.-.vpaes_decrypt + +.globl .vpaes_cbc_encrypt +.align 5 +.vpaes_cbc_encrypt: + $STU $sp,-`($FRAME+2*$SIZE_T)`($sp) + mflr r0 + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mfspr r12, 256 + stvx v20,r10,$sp + addi r10,r10,16 + stvx v21,r11,$sp + addi r11,r11,16 + stvx v22,r10,$sp + addi r10,r10,16 + stvx v23,r11,$sp + addi r11,r11,16 + stvx v24,r10,$sp + addi r10,r10,16 + stvx v25,r11,$sp + addi r11,r11,16 + stvx v26,r10,$sp + addi r10,r10,16 + stvx v27,r11,$sp + addi r11,r11,16 + stvx v28,r10,$sp + addi r10,r10,16 + stvx v29,r11,$sp + addi r11,r11,16 + stvx v30,r10,$sp + stvx v31,r11,$sp + lwz r12,`$FRAME-4`($sp) # save vrsave + $PUSH r30,`$FRAME+$SIZE_T*0`($sp) + $PUSH r31,`$FRAME+$SIZE_T*1`($sp) + li r9, 16 + $PUSH r0, `$FRAME+$SIZE_T*2+$LRSAVE`($sp) + + sub. r30, r5, r9 # copy length-16 + mr r5, r6 # copy pointer to key + mr r31, r7 # copy pointer to iv + blt Lcbc_abort + cmpwi r8, 0 # test direction + li r6, -1 + mr r7, r12 # copy vrsave + mtspr 256, r6 # preserve all AltiVec registers + + lvx v24, 0, r31 # load [potentially unaligned] iv + li r9, 15 + ?lvsl $inpperm, 0, r31 + lvx v25, r9, r31 + ?vperm v24, v24, v25, $inpperm + + neg r8, $inp # prepare for unaligned access + vxor v7, v7, v7 + ?lvsl $keyperm, 0, $key + ?lvsr $outperm, 0, $out + ?lvsr $inpperm, 0, r8 # -$inp + vnor $outmask, v7, v7 # 0xff..ff + lvx $inptail, 0, $inp + ?vperm $outmask, v7, $outmask, $outperm + addi $inp, $inp, 15 # 15 is not a typo + lvx $outhead, 0, $out + + beq Lcbc_decrypt + + bl _vpaes_encrypt_preheat + li r0, 16 + +Lcbc_enc_loop: + vmr v0, $inptail + lvx $inptail, 0, $inp + addi $inp, $inp, 16 + ?vperm v0, v0, $inptail, $inpperm + vxor v0, v0, v24 # ^= iv + + bl _vpaes_encrypt_core + + vmr v24, v0 # put aside iv + sub. r30, r30, r0 # len -= 16 + vperm v0, v0, v0, $outperm # rotate right/left + vsel v1, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v1, 0, $out + addi $out, $out, 16 + bge Lcbc_enc_loop + + b Lcbc_done + +.align 5 +Lcbc_decrypt: + bl _vpaes_decrypt_preheat + li r0, 16 + +Lcbc_dec_loop: + vmr v0, $inptail + lvx $inptail, 0, $inp + addi $inp, $inp, 16 + ?vperm v0, v0, $inptail, $inpperm + vmr v25, v0 # put aside input + + bl _vpaes_decrypt_core + + vxor v0, v0, v24 # ^= iv + vmr v24, v25 + sub. 
r30, r30, r0 # len -= 16 + vperm v0, v0, v0, $outperm # rotate right/left + vsel v1, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v1, 0, $out + addi $out, $out, 16 + bge Lcbc_dec_loop + +Lcbc_done: + addi $out, $out, -1 + lvx v1, 0, $out # redundant in aligned case + vsel v1, $outhead, v1, $outmask + stvx v1, 0, $out + + neg r8, r31 # write [potentially unaligned] iv + ?lvsl $outperm, 0, r8 + li r6, 15 + vnor $outmask, v7, v7 # 0xff..ff + ?vperm $outmask, v7, $outmask, $outperm + lvx $outhead, 0, r31 + vperm v24, v24, v24, $outperm # rotate right/left + vsel v0, $outhead, v24, $outmask + lvx v1, r6, r31 + stvx v0, 0, r31 + vsel v1, v24, v1, $outmask + stvx v1, r6, r31 + + mtspr 256, r7 # restore vrsave + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + lvx v20,r10,$sp + addi r10,r10,16 + lvx v21,r11,$sp + addi r11,r11,16 + lvx v22,r10,$sp + addi r10,r10,16 + lvx v23,r11,$sp + addi r11,r11,16 + lvx v24,r10,$sp + addi r10,r10,16 + lvx v25,r11,$sp + addi r11,r11,16 + lvx v26,r10,$sp + addi r10,r10,16 + lvx v27,r11,$sp + addi r11,r11,16 + lvx v28,r10,$sp + addi r10,r10,16 + lvx v29,r11,$sp + addi r11,r11,16 + lvx v30,r10,$sp + lvx v31,r11,$sp +Lcbc_abort: + $POP r0, `$FRAME+$SIZE_T*2+$LRSAVE`($sp) + $POP r30,`$FRAME+$SIZE_T*0`($sp) + $POP r31,`$FRAME+$SIZE_T*1`($sp) + mtlr r0 + addi $sp,$sp,`$FRAME+$SIZE_T*2` + blr + .long 0 + .byte 0,12,0x04,1,0x80,2,6,0 + .long 0 +.size .vpaes_cbc_encrypt,.-.vpaes_cbc_encrypt +___ +} +{ +my ($inp,$bits,$out)=map("r$_",(3..5)); +my $dir="cr1"; +my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_",(10..13,24)); + +$code.=<<___; +######################################################## +## ## +## AES key schedule ## +## ## +######################################################## +.align 4 +_vpaes_key_preheat: + mflr r8 + bl Lconsts + mtlr r8 + li r11, 0xc0 # Lk_inv + li r10, 0xd0 + li r9, 0xe0 # L_ipt + li r8, 0xf0 + + vspltisb v8,4 # 0x04..04 + vxor v9,v9,v9 # 0x00..00 + lvx $invlo, r12, r11 # Lk_inv + li r11, 0x120 + lvx $invhi, r12, r10 + li r10, 0x130 + lvx $iptlo, r12, r9 # Lk_ipt + li r9, 0x220 + lvx $ipthi, r12, r8 + li r8, 0x230 + + lvx v14, r12, r11 # Lk_sb1 + li r11, 0x240 + lvx v15, r12, r10 + li r10, 0x250 + + lvx v16, r12, r9 # Lk_dksd + li r9, 0x260 + lvx v17, r12, r8 + li r8, 0x270 + lvx v18, r12, r11 # Lk_dksb + li r11, 0x280 + lvx v19, r12, r10 + li r10, 0x290 + lvx v20, r12, r9 # Lk_dkse + li r9, 0x2a0 + lvx v21, r12, r8 + li r8, 0x2b0 + lvx v22, r12, r11 # Lk_dks9 + lvx v23, r12, r10 + + lvx v24, r12, r9 # Lk_rcon + lvx v25, 0, r12 # Lk_mc_forward[0] + lvx v26, r12, r8 # Lks63 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +.align 4 +_vpaes_schedule_core: + mflr r7 + + bl _vpaes_key_preheat # load the tables + + #lvx v0, 0, $inp # vmovdqu (%rdi), %xmm0 # load key (unaligned) + neg r8, $inp # prepare for unaligned access + lvx v0, 0, $inp + addi $inp, $inp, 15 # 15 is not typo + ?lvsr $inpperm, 0, r8 # -$inp + lvx v6, 0, $inp # v6 serves as inptail + addi $inp, $inp, 8 + ?vperm v0, v0, v6, $inpperm + + # input transform + vmr v3, v0 # vmovdqa %xmm0, %xmm3 + bl _vpaes_schedule_transform + vmr v7, v0 # vmovdqa %xmm0, %xmm7 + + bne $dir, Lschedule_am_decrypting + + # encrypting, output zeroth round key after transform + li r8, 0x30 # mov \$0x30,%r8d + addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10 + + ?lvsr $outperm, 0, $out # prepare for unaligned access + vnor $outmask, v9, v9 # 0xff..ff + lvx $outhead, 0, $out + ?vperm $outmask, v9, $outmask, $outperm + + #stvx v0, 0, $out # vmovdqu %xmm0, (%rdx) + vperm v1, v0, v0, $outperm # rotate right/left 
+ vsel v2, $outhead, v1, $outmask + vmr $outhead, v1 + stvx v2, 0, $out + b Lschedule_go + +Lschedule_am_decrypting: + srwi r8, $bits, 1 # shr \$1,%r8d + andi. r8, r8, 32 # and \$32,%r8d + xori r8, r8, 32 # xor \$32,%r8d # nbits==192?0:32 + addi r10, r12, 0x80 # lea .Lk_sr(%rip),%r10 + # decrypting, output zeroth round key after shiftrows + lvx v1, r8, r10 # vmovdqa (%r8,%r10), %xmm1 + vperm v4, v3, v3, v1 # vpshufb %xmm1, %xmm3, %xmm3 + + neg r0, $out # prepare for unaligned access + ?lvsl $outperm, 0, r0 + addi $out, $out, 15 # 15 is not typo + vnor $outmask, v9, v9 # 0xff..ff + lvx $outhead, 0, $out + ?vperm $outmask, $outmask, v9, $outperm + + #stvx v4, 0, $out # vmovdqu %xmm3, (%rdx) + vperm v4, v4, v4, $outperm # rotate right/left + vsel v2, $outhead, v4, $outmask + vmr $outhead, v4 + stvx v2, 0, $out + xori r8, r8, 0x30 # xor \$0x30, %r8 + +Lschedule_go: + cmplwi $bits, 192 # cmp \$192, %esi + bgt Lschedule_256 + beq Lschedule_192 + # 128: fall though + +## +## .schedule_128 +## +## 128-bit specific part of key schedule. +## +## This schedule is really simple, because all its parts +## are accomplished by the subroutines. +## +Lschedule_128: + li r0, 10 # mov \$10, %esi + mtctr r0 + +Loop_schedule_128: + bl _vpaes_schedule_round + bdz Lschedule_mangle_last # dec %esi + bl _vpaes_schedule_mangle # write output + b Loop_schedule_128 + +## +## .aes_schedule_192 +## +## 192-bit specific part of key schedule. +## +## The main body of this schedule is the same as the 128-bit +## schedule, but with more smearing. The long, high side is +## stored in %xmm7 as before, and the short, low side is in +## the high bits of %xmm6. +## +## This schedule is somewhat nastier, however, because each +## round produces 192 bits of key material, or 1.5 round keys. +## Therefore, on each cycle we do 2 rounds and produce 3 round +## keys. +## +.align 4 +Lschedule_192: + li r0, 4 # mov \$4, %esi + lvx v0, 0, $inp + ?vperm v0, v6, v0, $inpperm + ?vsldoi v0, v3, v0, 8 # vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) + bl _vpaes_schedule_transform # input transform + ?vsldoi v6, v0, v9, 8 + ?vsldoi v6, v9, v6, 8 # clobber "low" side with zeros + mtctr r0 + +Loop_schedule_192: + bl _vpaes_schedule_round + ?vsldoi v0, v6, v0, 8 # vpalignr \$8,%xmm6,%xmm0,%xmm0 + bl _vpaes_schedule_mangle # save key n + bl _vpaes_schedule_192_smear + bl _vpaes_schedule_mangle # save key n+1 + bl _vpaes_schedule_round + bdz Lschedule_mangle_last # dec %esi + bl _vpaes_schedule_mangle # save key n+2 + bl _vpaes_schedule_192_smear + b Loop_schedule_192 + +## +## .aes_schedule_256 +## +## 256-bit specific part of key schedule. +## +## The structure here is very similar to the 128-bit +## schedule, but with an additional "low side" in +## %xmm6. The low side's rounds are the same as the +## high side's, except no rcon and no rotation. +## +.align 4 +Lschedule_256: + li r0, 7 # mov \$7, %esi + addi $inp, $inp, 8 + lvx v0, 0, $inp # vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) + ?vperm v0, v6, v0, $inpperm + bl _vpaes_schedule_transform # input transform + mtctr r0 + +Loop_schedule_256: + bl _vpaes_schedule_mangle # output low result + vmr v6, v0 # vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6 + + # high round + bl _vpaes_schedule_round + bdz Lschedule_mangle_last # dec %esi + bl _vpaes_schedule_mangle + + # low round. 
swap xmm7 and xmm6 + ?vspltw v0, v0, 3 # vpshufd \$0xFF, %xmm0, %xmm0 + vmr v5, v7 # vmovdqa %xmm7, %xmm5 + vmr v7, v6 # vmovdqa %xmm6, %xmm7 + bl _vpaes_schedule_low_round + vmr v7, v5 # vmovdqa %xmm5, %xmm7 + + b Loop_schedule_256 +## +## .aes_schedule_mangle_last +## +## Mangler for last round of key schedule +## Mangles %xmm0 +## when encrypting, outputs out(%xmm0) ^ 63 +## when decrypting, outputs unskew(%xmm0) +## +## Always called right before return... jumps to cleanup and exits +## +.align 4 +Lschedule_mangle_last: + # schedule last round key from xmm0 + li r11, 0x2e0 # lea .Lk_deskew(%rip),%r11 + li r9, 0x2f0 + bne $dir, Lschedule_mangle_last_dec + + # encrypting + lvx v1, r8, r10 # vmovdqa (%r8,%r10),%xmm1 + li r11, 0x2c0 # lea .Lk_opt(%rip), %r11 # prepare to output transform + li r9, 0x2d0 # prepare to output transform + vperm v0, v0, v0, v1 # vpshufb %xmm1, %xmm0, %xmm0 # output permute + + lvx $iptlo, r11, r12 # reload $ipt + lvx $ipthi, r9, r12 + addi $out, $out, 16 # add \$16, %rdx + vxor v0, v0, v26 # vpxor .Lk_s63(%rip), %xmm0, %xmm0 + bl _vpaes_schedule_transform # output transform + + #stvx v0, r0, $out # vmovdqu %xmm0, (%rdx) # save last key + vperm v0, v0, v0, $outperm # rotate right/left + vsel v2, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v2, 0, $out + + addi $out, $out, 15 # 15 is not typo + lvx v1, 0, $out # redundant in aligned case + vsel v1, $outhead, v1, $outmask + stvx v1, 0, $out + b Lschedule_mangle_done + +.align 4 +Lschedule_mangle_last_dec: + lvx $iptlo, r11, r12 # reload $ipt + lvx $ipthi, r9, r12 + addi $out, $out, -16 # add \$-16, %rdx + vxor v0, v0, v26 # vpxor .Lk_s63(%rip), %xmm0, %xmm0 + bl _vpaes_schedule_transform # output transform + + #stvx v0, r0, $out # vmovdqu %xmm0, (%rdx) # save last key + vperm v0, v0, v0, $outperm # rotate right/left + vsel v2, $outhead, v0, $outmask + vmr $outhead, v0 + stvx v2, 0, $out + + addi $out, $out, -15 # -15 is not typo + lvx v1, 0, $out # redundant in aligned case + vsel v1, $outhead, v1, $outmask + stvx v1, 0, $out + +Lschedule_mangle_done: + mtlr r7 + # cleanup + vxor v0, v0, v0 # vpxor %xmm0, %xmm0, %xmm0 + vxor v1, v1, v1 # vpxor %xmm1, %xmm1, %xmm1 + vxor v2, v2, v2 # vpxor %xmm2, %xmm2, %xmm2 + vxor v3, v3, v3 # vpxor %xmm3, %xmm3, %xmm3 + vxor v4, v4, v4 # vpxor %xmm4, %xmm4, %xmm4 + vxor v5, v5, v5 # vpxor %xmm5, %xmm5, %xmm5 + vxor v6, v6, v6 # vpxor %xmm6, %xmm6, %xmm6 + vxor v7, v7, v7 # vpxor %xmm7, %xmm7, %xmm7 + + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## .aes_schedule_192_smear +## +## Smear the short, low side in the 192-bit key schedule. +## +## Inputs: +## %xmm7: high side, b a x y +## %xmm6: low side, d c 0 0 +## %xmm13: 0 +## +## Outputs: +## %xmm6: b+c+d b+c 0 0 +## %xmm0: b+c+d b+c b a +## +.align 4 +_vpaes_schedule_192_smear: + ?vspltw v0, v7, 3 + ?vsldoi v1, v9, v6, 12 # vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0 + ?vsldoi v0, v7, v0, 8 # vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a + vxor v6, v6, v1 # vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0 + vxor v6, v6, v0 # vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a + vmr v0, v6 + ?vsldoi v6, v6, v9, 8 + ?vsldoi v6, v9, v6, 8 # clobber low side with zeros + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## .aes_schedule_round +## +## Runs one main round of the key schedule on %xmm0, %xmm7 +## +## Specifically, runs subbytes on the high dword of %xmm0 +## then rotates it by one byte and xors into the low dword of +## %xmm7. +## +## Adds rcon from low byte of %xmm8, then rotates %xmm8 for +## next rcon. 
+## +## Smears the dwords of %xmm7 by xoring the low into the +## second low, result into third, result into highest. +## +## Returns results in %xmm7 = %xmm0. +## Clobbers %xmm1-%xmm4, %r11. +## +.align 4 +_vpaes_schedule_round: + # extract rcon from xmm8 + #vxor v4, v4, v4 # vpxor %xmm4, %xmm4, %xmm4 + ?vsldoi v1, $rcon, v9, 15 # vpalignr \$15, %xmm8, %xmm4, %xmm1 + ?vsldoi $rcon, $rcon, $rcon, 15 # vpalignr \$15, %xmm8, %xmm8, %xmm8 + vxor v7, v7, v1 # vpxor %xmm1, %xmm7, %xmm7 + + # rotate + ?vspltw v0, v0, 3 # vpshufd \$0xFF, %xmm0, %xmm0 + ?vsldoi v0, v0, v0, 1 # vpalignr \$1, %xmm0, %xmm0, %xmm0 + + # fall through... + + # low round: same as high round, but no rotation and no rcon. +_vpaes_schedule_low_round: + # smear xmm7 + ?vsldoi v1, v9, v7, 12 # vpslldq \$4, %xmm7, %xmm1 + vxor v7, v7, v1 # vpxor %xmm1, %xmm7, %xmm7 + vspltisb v1, 0x0f # 0x0f..0f + ?vsldoi v4, v9, v7, 8 # vpslldq \$8, %xmm7, %xmm4 + + # subbytes + vand v1, v1, v0 # vpand %xmm9, %xmm0, %xmm1 # 0 = k + vsrb v0, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 # 1 = i + vxor v7, v7, v4 # vpxor %xmm4, %xmm7, %xmm7 + vperm v2, $invhi, v9, v1 # vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k + vxor v1, v1, v0 # vpxor %xmm0, %xmm1, %xmm1 # 0 = j + vperm v3, $invlo, v9, v0 # vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k + vperm v4, $invlo, v9, v1 # vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j + vxor v7, v7, v26 # vpxor .Lk_s63(%rip), %xmm7, %xmm7 + vperm v3, $invlo, v9, v3 # vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak + vxor v4, v4, v2 # vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k + vperm v2, $invlo, v9, v4 # vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak + vxor v3, v3, v1 # vpxor %xmm1, %xmm3, %xmm3 # 2 = io + vxor v2, v2, v0 # vpxor %xmm0, %xmm2, %xmm2 # 3 = jo + vperm v4, v15, v9, v3 # vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou + vperm v1, v14, v9, v2 # vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t + vxor v1, v1, v4 # vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output + + # add in smeared stuff + vxor v0, v1, v7 # vpxor %xmm7, %xmm1, %xmm0 + vxor v7, v1, v7 # vmovdqa %xmm0, %xmm7 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## .aes_schedule_transform +## +## Linear-transform %xmm0 according to tables at (%r11) +## +## Requires that %xmm9 = 0x0F0F... as in preheat +## Output in %xmm0 +## Clobbers %xmm2 +## +.align 4 +_vpaes_schedule_transform: + #vand v1, v0, v9 # vpand %xmm9, %xmm0, %xmm1 + vsrb v2, v0, v8 # vpsrlb \$4, %xmm0, %xmm0 + # vmovdqa (%r11), %xmm2 # lo + vperm v0, $iptlo, $iptlo, v0 # vpshufb %xmm1, %xmm2, %xmm2 + # vmovdqa 16(%r11), %xmm1 # hi + vperm v2, $ipthi, $ipthi, v2 # vpshufb %xmm0, %xmm1, %xmm0 + vxor v0, v0, v2 # vpxor %xmm2, %xmm0, %xmm0 + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +## +## .aes_schedule_mangle +## +## Mangle xmm0 from (basis-transformed) standard version +## to our version. 
+## +## On encrypt, +## xor with 0x63 +## multiply by circulant 0,1,1,1 +## apply shiftrows transform +## +## On decrypt, +## xor with 0x63 +## multiply by "inverse mixcolumns" circulant E,B,D,9 +## deskew +## apply shiftrows transform +## +## +## Writes out to (%rdx), and increments or decrements it +## Keeps track of round number mod 4 in %r8 +## Preserves xmm0 +## Clobbers xmm1-xmm5 +## +.align 4 +_vpaes_schedule_mangle: + #vmr v4, v0 # vmovdqa %xmm0, %xmm4 # save xmm0 for later + # vmovdqa .Lk_mc_forward(%rip),%xmm5 + bne $dir, Lschedule_mangle_dec + + # encrypting + vxor v4, v0, v26 # vpxor .Lk_s63(%rip), %xmm0, %xmm4 + addi $out, $out, 16 # add \$16, %rdx + vperm v4, v4, v4, v25 # vpshufb %xmm5, %xmm4, %xmm4 + vperm v1, v4, v4, v25 # vpshufb %xmm5, %xmm4, %xmm1 + vperm v3, v1, v1, v25 # vpshufb %xmm5, %xmm1, %xmm3 + vxor v4, v4, v1 # vpxor %xmm1, %xmm4, %xmm4 + lvx v1, r8, r10 # vmovdqa (%r8,%r10), %xmm1 + vxor v3, v3, v4 # vpxor %xmm4, %xmm3, %xmm3 + + vperm v3, v3, v3, v1 # vpshufb %xmm1, %xmm3, %xmm3 + addi r8, r8, -16 # add \$-16, %r8 + andi. r8, r8, 0x30 # and \$0x30, %r8 + + #stvx v3, 0, $out # vmovdqu %xmm3, (%rdx) + vperm v1, v3, v3, $outperm # rotate right/left + vsel v2, $outhead, v1, $outmask + vmr $outhead, v1 + stvx v2, 0, $out + blr + +.align 4 +Lschedule_mangle_dec: + # inverse mix columns + # lea .Lk_dksd(%rip),%r11 + vsrb v1, v0, v8 # vpsrlb \$4, %xmm4, %xmm1 # 1 = hi + #and v4, v0, v9 # vpand %xmm9, %xmm4, %xmm4 # 4 = lo + + # vmovdqa 0x00(%r11), %xmm2 + vperm v2, v16, v16, v0 # vpshufb %xmm4, %xmm2, %xmm2 + # vmovdqa 0x10(%r11), %xmm3 + vperm v3, v17, v17, v1 # vpshufb %xmm1, %xmm3, %xmm3 + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 + vperm v3, v3, v9, v25 # vpshufb %xmm5, %xmm3, %xmm3 + + # vmovdqa 0x20(%r11), %xmm2 + vperm v2, v18, v18, v0 # vpshufb %xmm4, %xmm2, %xmm2 + vxor v2, v2, v3 # vpxor %xmm3, %xmm2, %xmm2 + # vmovdqa 0x30(%r11), %xmm3 + vperm v3, v19, v19, v1 # vpshufb %xmm1, %xmm3, %xmm3 + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 + vperm v3, v3, v9, v25 # vpshufb %xmm5, %xmm3, %xmm3 + + # vmovdqa 0x40(%r11), %xmm2 + vperm v2, v20, v20, v0 # vpshufb %xmm4, %xmm2, %xmm2 + vxor v2, v2, v3 # vpxor %xmm3, %xmm2, %xmm2 + # vmovdqa 0x50(%r11), %xmm3 + vperm v3, v21, v21, v1 # vpshufb %xmm1, %xmm3, %xmm3 + vxor v3, v3, v2 # vpxor %xmm2, %xmm3, %xmm3 + + # vmovdqa 0x60(%r11), %xmm2 + vperm v2, v22, v22, v0 # vpshufb %xmm4, %xmm2, %xmm2 + vperm v3, v3, v9, v25 # vpshufb %xmm5, %xmm3, %xmm3 + # vmovdqa 0x70(%r11), %xmm4 + vperm v4, v23, v23, v1 # vpshufb %xmm1, %xmm4, %xmm4 + lvx v1, r8, r10 # vmovdqa (%r8,%r10), %xmm1 + vxor v2, v2, v3 # vpxor %xmm3, %xmm2, %xmm2 + vxor v3, v4, v2 # vpxor %xmm2, %xmm4, %xmm3 + + addi $out, $out, -16 # add \$-16, %rdx + + vperm v3, v3, v3, v1 # vpshufb %xmm1, %xmm3, %xmm3 + addi r8, r8, -16 # add \$-16, %r8 + andi. 
r8, r8, 0x30 # and \$0x30, %r8 + + #stvx v3, 0, $out # vmovdqu %xmm3, (%rdx) + vperm v1, v3, v3, $outperm # rotate right/left + vsel v2, $outhead, v1, $outmask + vmr $outhead, v1 + stvx v2, 0, $out + blr + .long 0 + .byte 0,12,0x14,0,0,0,0,0 + +.globl .vpaes_set_encrypt_key +.align 5 +.vpaes_set_encrypt_key: + $STU $sp,-$FRAME($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mflr r0 + mfspr r6, 256 # save vrsave + stvx v20,r10,$sp + addi r10,r10,16 + stvx v21,r11,$sp + addi r11,r11,16 + stvx v22,r10,$sp + addi r10,r10,16 + stvx v23,r11,$sp + addi r11,r11,16 + stvx v24,r10,$sp + addi r10,r10,16 + stvx v25,r11,$sp + addi r11,r11,16 + stvx v26,r10,$sp + addi r10,r10,16 + stvx v27,r11,$sp + addi r11,r11,16 + stvx v28,r10,$sp + addi r10,r10,16 + stvx v29,r11,$sp + addi r11,r11,16 + stvx v30,r10,$sp + stvx v31,r11,$sp + lwz r6,`$FRAME-4`($sp) # save vrsave + li r7, -1 + $PUSH r0, `$FRAME+$LRSAVE`($sp) + mtspr 256, r7 # preserve all AltiVec registers + + srwi r9, $bits, 5 # shr \$5,%eax + addi r9, r9, 6 # add \$5,%eax + stw r9, 240($out) # mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; + + cmplw $dir, $bits, $bits # set encrypt direction + li r8, 0x30 # mov \$0x30,%r8d + bl _vpaes_schedule_core + + $POP r0, `$FRAME+$LRSAVE`($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mtspr 256, r6 # restore vrsave + mtlr r0 + xor r3, r3, r3 + lvx v20,r10,$sp + addi r10,r10,16 + lvx v21,r11,$sp + addi r11,r11,16 + lvx v22,r10,$sp + addi r10,r10,16 + lvx v23,r11,$sp + addi r11,r11,16 + lvx v24,r10,$sp + addi r10,r10,16 + lvx v25,r11,$sp + addi r11,r11,16 + lvx v26,r10,$sp + addi r10,r10,16 + lvx v27,r11,$sp + addi r11,r11,16 + lvx v28,r10,$sp + addi r10,r10,16 + lvx v29,r11,$sp + addi r11,r11,16 + lvx v30,r10,$sp + lvx v31,r11,$sp + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,0x04,1,0x80,3,0 + .long 0 +.size .vpaes_set_encrypt_key,.-.vpaes_set_encrypt_key + +.globl .vpaes_set_decrypt_key +.align 4 +.vpaes_set_decrypt_key: + $STU $sp,-$FRAME($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mflr r0 + mfspr r6, 256 # save vrsave + stvx v20,r10,$sp + addi r10,r10,16 + stvx v21,r11,$sp + addi r11,r11,16 + stvx v22,r10,$sp + addi r10,r10,16 + stvx v23,r11,$sp + addi r11,r11,16 + stvx v24,r10,$sp + addi r10,r10,16 + stvx v25,r11,$sp + addi r11,r11,16 + stvx v26,r10,$sp + addi r10,r10,16 + stvx v27,r11,$sp + addi r11,r11,16 + stvx v28,r10,$sp + addi r10,r10,16 + stvx v29,r11,$sp + addi r11,r11,16 + stvx v30,r10,$sp + stvx v31,r11,$sp + lwz r6,`$FRAME-4`($sp) # save vrsave + li r7, -1 + $PUSH r0, `$FRAME+$LRSAVE`($sp) + mtspr 256, r7 # preserve all AltiVec registers + + srwi r9, $bits, 5 # shr \$5,%eax + addi r9, r9, 6 # add \$5,%eax + stw r9, 240($out) # mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5; + + slwi r9, r9, 4 # shl \$4,%eax + add $out, $out, r9 # lea (%rdx,%rax),%rdx + + cmplwi $dir, $bits, 0 # set decrypt direction + srwi r8, $bits, 1 # shr \$1,%r8d + andi. 
r8, r8, 32 # and \$32,%r8d + xori r8, r8, 32 # xor \$32,%r8d # nbits==192?0:32 + bl _vpaes_schedule_core + + $POP r0, `$FRAME+$LRSAVE`($sp) + li r10,`15+6*$SIZE_T` + li r11,`31+6*$SIZE_T` + mtspr 256, r6 # restore vrsave + mtlr r0 + xor r3, r3, r3 + lvx v20,r10,$sp + addi r10,r10,16 + lvx v21,r11,$sp + addi r11,r11,16 + lvx v22,r10,$sp + addi r10,r10,16 + lvx v23,r11,$sp + addi r11,r11,16 + lvx v24,r10,$sp + addi r10,r10,16 + lvx v25,r11,$sp + addi r11,r11,16 + lvx v26,r10,$sp + addi r10,r10,16 + lvx v27,r11,$sp + addi r11,r11,16 + lvx v28,r10,$sp + addi r10,r10,16 + lvx v29,r11,$sp + addi r11,r11,16 + lvx v30,r10,$sp + lvx v31,r11,$sp + addi $sp,$sp,$FRAME + blr + .long 0 + .byte 0,12,0x04,1,0x80,3,0 + .long 0 +.size .vpaes_set_decrypt_key,.-.vpaes_set_decrypt_key +___ +} + +my $consts=1; +foreach (split("\n",$code)) { + s/\`([^\`]*)\`/eval $1/geo; + + # constants table endian-specific conversion + if ($consts && m/\.long\s+(.+)\s+(\?[a-z]*)$/o) { + my $conv=$2; + my @bytes=(); + + # convert to endian-agnostic format + foreach (split(/,\s+/,$1)) { + my $l = /^0/?oct:int; + push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff; + } + + # little-endian conversion + if ($flavour =~ /le$/o) { + SWITCH: for($conv) { + /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; + /\?rev/ && do { @bytes=reverse(@bytes); last; }; + } + } + + #emit + print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n"; + next; + } + $consts=0 if (m/Lconsts:/o); # end of table + + # instructions prefixed with '?' are endian-specific and need + # to be adjusted accordingly... + if ($flavour =~ /le$/o) { # little-endian + s/\?lvsr/lvsl/o or + s/\?lvsl/lvsr/o or + s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or + s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or + s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o; + } else { # big-endian + s/\?([a-z]+)/$1/o; + } + + print $_,"\n"; +} + +close STDOUT; diff --git a/crypto/evp/e_aes.c b/crypto/evp/e_aes.c index 30d44ef2a2..77e4bead71 100644 --- a/crypto/evp/e_aes.c +++ b/crypto/evp/e_aes.c @@ -157,6 +157,11 @@ void AES_xts_decrypt(const char *inp,char *out,size_t len, const unsigned char iv[16]); #endif +#if defined(VPAES_ASM) && (defined(__powerpc__) || defined(__ppc__) || defined(_ARCH_PPC)) +extern unsigned int OPENSSL_ppccap_P; +#define VPAES_CAPABLE (OPENSSL_ppccap_P&(1<<1)) +#endif + #if defined(AES_ASM) && !defined(I386_ONLY) && ( \ ((defined(__i386) || defined(__i386__) || \ defined(_M_IX86)) && defined(OPENSSL_IA32_SSE2))|| \ diff --git a/crypto/perlasm/ppc-xlate.pl b/crypto/perlasm/ppc-xlate.pl index 3ed7bd9665..a67eef57d5 100755 --- a/crypto/perlasm/ppc-xlate.pl +++ b/crypto/perlasm/ppc-xlate.pl @@ -27,7 +27,8 @@ my $globl = sub { /osx/ && do { $name = "_$name"; last; }; - /linux.*32/ && do { $ret .= ".globl $name\n"; + /linux.*(32|64le)/ + && do { $ret .= ".globl $name\n"; $ret .= ".type $name,\@function"; last; }; @@ -49,7 +50,9 @@ my $globl = sub { $ret; }; my $text = sub { - ($flavour =~ /aix/) ? ".csect" : ".text"; + my $ret = ($flavour =~ /aix/) ? 
".csect\t.text[PR],7" : ".text"; + $ret = ".abiversion 2\n".$ret if ($flavour =~ /linux.*64le/); + $ret; }; my $machine = sub { my $junk = shift; @@ -64,8 +67,8 @@ my $size = sub { if ($flavour =~ /linux/) { shift; my $name = shift; $name =~ s|^[\.\_]||; - my $ret = ".size $name,.-".($flavour=~/64/?".":"").$name; - $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64/); + my $ret = ".size $name,.-".($flavour=~/64$/?".":"").$name; + $ret .= "\n.size .$name,.-.$name" if ($flavour=~/64$/); $ret; } else @@ -79,6 +82,25 @@ my $asciz = sub { else { ""; } }; +my $quad = sub { + shift; + my @ret; + my ($hi,$lo); + for (@_) { + if (/^0x([0-9a-f]*?)([0-9a-f]{1,8})$/io) + { $hi=$1?"0x$1":"0"; $lo="0x$2"; } + elsif (/^([0-9]+)$/o) + { $hi=$1>>32; $lo=$1&0xffffffff; } # error-prone with 32-bit perl + else + { $hi=undef; $lo=$_; } + + if (defined($hi)) + { push(@ret,$flavour=~/le$/o?".long\t$lo,$hi":".long\t$hi,$lo"); } + else + { push(@ret,".quad $lo"); } + } + join("\n",@ret); +}; ################################################################ # simplified mnemonics not handled by at least one assembler @@ -124,6 +146,10 @@ my $extrdi = sub { $b = ($b+$n)&63; $n = 64-$n; " rldicl $ra,$rs,$b,$n"; }; +my $vmr = sub { + my ($f,$vx,$vy) = @_; + " vor $vx,$vy,$vy"; +}; while($line=<>) { @@ -140,7 +166,10 @@ while($line=<>) { { $line =~ s|(^[\.\w]+)\:\s*||; my $label = $1; - printf "%s:",($GLOBALS{$label} or $label) if ($label); + if ($label) { + printf "%s:",($GLOBALS{$label} or $label); + printf "\n.localentry\t$GLOBALS{$label},0" if ($GLOBALS{$label} && $flavour =~ /linux.*64le/); + } } { @@ -149,7 +178,7 @@ while($line=<>) { my $mnemonic = $2; my $f = $3; my $opcode = eval("\$$mnemonic"); - $line =~ s|\bc?[rf]([0-9]+)\b|$1|g if ($c ne "." and $flavour !~ /osx/); + $line =~ s/\b(c?[rf]|v|vs)([0-9]+)\b/$2/g if ($c ne "." and $flavour !~ /osx/); if (ref($opcode) eq 'CODE') { $line = &$opcode($f,split(',',$line)); } elsif ($mnemonic) { $line = $c.$mnemonic.$f."\t".$line; } } diff --git a/crypto/sha/asm/sha1-ppc.pl b/crypto/sha/asm/sha1-ppc.pl index 8aa5a37865..24a5d065d9 100755 --- a/crypto/sha/asm/sha1-ppc.pl +++ b/crypto/sha/asm/sha1-ppc.pl @@ -9,8 +9,7 @@ # I let hardware handle unaligned input(*), except on page boundaries # (see below for details). Otherwise straightforward implementation -# with X vector in register bank. The module is big-endian [which is -# not big deal as there're no little-endian targets left around]. +# with X vector in register bank. # # (*) this means that this module is inappropriate for PPC403? Does # anybody know if pre-POWER3 can sustain unaligned load? @@ -38,6 +37,10 @@ if ($flavour =~ /64/) { $PUSH ="stw"; } else { die "nonsense $flavour"; } +# Define endianess based on flavour +# i.e.: linux64le +$LITTLE_ENDIAN = ($flavour=~/le$/) ? 
$SIZE_T : 0; + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or @@ -68,14 +71,28 @@ $T ="r12"; @X=("r16","r17","r18","r19","r20","r21","r22","r23", "r24","r25","r26","r27","r28","r29","r30","r31"); +sub loadbe { +my ($dst, $src, $temp_reg) = @_; +$code.=<<___ if (!$LITTLE_ENDIAN); + lwz $dst,$src +___ +$code.=<<___ if ($LITTLE_ENDIAN); + lwz $temp_reg,$src + rotlwi $dst,$temp_reg,8 + rlwimi $dst,$temp_reg,24,0,7 + rlwimi $dst,$temp_reg,24,16,23 +___ +} + sub BODY_00_19 { my ($i,$a,$b,$c,$d,$e,$f)=@_; my $j=$i+1; -$code.=<<___ if ($i==0); - lwz @X[$i],`$i*4`($inp) -___ + + # Since the last value of $f is discarded, we can use + # it as a temp reg to swap byte-order when needed. + loadbe("@X[$i]","`$i*4`($inp)",$f) if ($i==0); + loadbe("@X[$j]","`$j*4`($inp)",$f) if ($i<15); $code.=<<___ if ($i<15); - lwz @X[$j],`$j*4`($inp) add $f,$K,$e rotlwi $e,$a,5 add $f,$f,@X[$i] diff --git a/crypto/sha/asm/sha512-ppc.pl b/crypto/sha/asm/sha512-ppc.pl index d934903787..5c3ac2c095 100755 --- a/crypto/sha/asm/sha512-ppc.pl +++ b/crypto/sha/asm/sha512-ppc.pl @@ -9,8 +9,7 @@ # I let hardware handle unaligned input, except on page boundaries # (see below for details). Otherwise straightforward implementation -# with X vector in register bank. The module is big-endian [which is -# not big deal as there're no little-endian targets left around]. +# with X vector in register bank. # sha256 | sha512 # -m64 -m32 | -m64 -m32 @@ -56,6 +55,8 @@ if ($flavour =~ /64/) { $PUSH="stw"; } else { die "nonsense $flavour"; } +$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0; + $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or @@ -229,7 +230,7 @@ ___ } else { for ($i=16;$i<32;$i++) { $code.=<<___; - lwz r$i,`4*($i-16)`($ctx) + lwz r$i,`$LITTLE_ENDIAN^(4*($i-16))`($ctx) ___ } } @@ -353,15 +354,32 @@ Lsha2_block_private: $LD $t1,0($Tbl) ___ for($i=0;$i<16;$i++) { -$code.=<<___ if ($SZ==4); +$code.=<<___ if ($SZ==4 && !$LITTLE_ENDIAN); lwz @X[$i],`$i*$SZ`($inp) ___ +$code.=<<___ if ($SZ==4 && $LITTLE_ENDIAN); + lwz $a0,`$i*$SZ`($inp) + rotlwi @X[$i],$a0,8 + rlwimi @X[$i],$a0,24,0,7 + rlwimi @X[$i],$a0,24,16,23 +___ # 64-bit loads are split to 2x32-bit ones, as CPU can't handle # unaligned 64-bit loads, only 32-bit ones... 
-$code.=<<___ if ($SZ==8); +$code.=<<___ if ($SZ==8 && !$LITTLE_ENDIAN); lwz $t0,`$i*$SZ`($inp) lwz @X[$i],`$i*$SZ+4`($inp) insrdi @X[$i],$t0,32,0 +___ +$code.=<<___ if ($SZ==8 && $LITTLE_ENDIAN); + lwz $a0,`$i*$SZ`($inp) + lwz $a1,`$i*$SZ+4`($inp) + rotlwi $t0,$a0,8 + rotlwi @X[$i],$a1,8 + rlwimi $t0,$a0,24,0,7 + rlwimi @X[$i],$a1,24,0,7 + rlwimi $t0,$a0,24,16,23 + rlwimi @X[$i],$a1,24,16,23 + insrdi @X[$i],$t0,32,0 ___ &ROUND_00_15($i,@V); unshift(@V,pop(@V)); @@ -445,9 +463,9 @@ my ($i, $ahi,$alo,$bhi,$blo,$chi,$clo,$dhi,$dlo, $ehi,$elo,$fhi,$flo,$ghi,$glo,$hhi,$hlo)=@_; $code.=<<___; - lwz $t2,`$SZ*($i%16)+4`($Tbl) + lwz $t2,`$SZ*($i%16)+($LITTLE_ENDIAN^4)`($Tbl) xor $a0,$flo,$glo - lwz $t3,`$SZ*($i%16)+0`($Tbl) + lwz $t3,`$SZ*($i%16)+($LITTLE_ENDIAN^0)`($Tbl) xor $a1,$fhi,$ghi addc $hlo,$hlo,$t0 ; h+=x[i] stw $t0,`$XOFF+0+$SZ*($i%16)`($sp) ; save x[i] @@ -510,10 +528,20 @@ $code.=<<___ if ($i>=15); lwz $t0,`$XOFF+0+$SZ*(($i+2)%16)`($sp) lwz $t1,`$XOFF+4+$SZ*(($i+2)%16)`($sp) ___ -$code.=<<___ if ($i<15); +$code.=<<___ if ($i<15 && !$LITTLE_ENDIAN); lwz $t1,`$SZ*($i+1)+0`($inp) lwz $t0,`$SZ*($i+1)+4`($inp) ___ +$code.=<<___ if ($i<15 && $LITTLE_ENDIAN); + lwz $a2,`$SZ*($i+1)+0`($inp) + lwz $a3,`$SZ*($i+1)+4`($inp) + rotlwi $t1,$a2,8 + rotlwi $t0,$a3,8 + rlwimi $t1,$a2,24,0,7 + rlwimi $t0,$a3,24,0,7 + rlwimi $t1,$a2,24,16,23 + rlwimi $t0,$a3,24,16,23 +___ $code.=<<___; xor $s0,$s0,$t2 ; Sigma0(a) xor $s1,$s1,$t3 @@ -579,11 +607,25 @@ ___ $code.=<<___; .align 4 Lsha2_block_private: +___ +$code.=<<___ if (!$LITTLE_ENDIAN); lwz $t1,0($inp) xor $a2,@V[3],@V[5] ; B^C, magic seed lwz $t0,4($inp) xor $a3,@V[2],@V[4] ___ +$code.=<<___ if ($LITTLE_ENDIAN); + lwz $a1,0($inp) + xor $a2,@V[3],@V[5] ; B^C, magic seed + lwz $a0,4($inp) + xor $a3,@V[2],@V[4] + rotlwi $t1,$a1,8 + rotlwi $t0,$a0,8 + rlwimi $t1,$a1,24,0,7 + rlwimi $t0,$a0,24,0,7 + rlwimi $t1,$a1,24,16,23 + rlwimi $t0,$a0,24,16,23 +___ for($i=0;$i<16;$i++) { &ROUND_00_15_ppc32($i,@V); unshift(@V,pop(@V)); unshift(@V,pop(@V)); @@ -609,54 +651,54 @@ $code.=<<___; $POP $num,`$FRAME-$SIZE_T*24`($sp) ; end pointer subi $Tbl,$Tbl,`($rounds-16)*$SZ` ; rewind Tbl - lwz $t0,0($ctx) - lwz $t1,4($ctx) - lwz $t2,8($ctx) - lwz $t3,12($ctx) - lwz $a0,16($ctx) - lwz $a1,20($ctx) - lwz $a2,24($ctx) + lwz $t0,`$LITTLE_ENDIAN^0`($ctx) + lwz $t1,`$LITTLE_ENDIAN^4`($ctx) + lwz $t2,`$LITTLE_ENDIAN^8`($ctx) + lwz $t3,`$LITTLE_ENDIAN^12`($ctx) + lwz $a0,`$LITTLE_ENDIAN^16`($ctx) + lwz $a1,`$LITTLE_ENDIAN^20`($ctx) + lwz $a2,`$LITTLE_ENDIAN^24`($ctx) addc @V[1],@V[1],$t1 - lwz $a3,28($ctx) + lwz $a3,`$LITTLE_ENDIAN^28`($ctx) adde @V[0],@V[0],$t0 - lwz $t0,32($ctx) + lwz $t0,`$LITTLE_ENDIAN^32`($ctx) addc @V[3],@V[3],$t3 - lwz $t1,36($ctx) + lwz $t1,`$LITTLE_ENDIAN^36`($ctx) adde @V[2],@V[2],$t2 - lwz $t2,40($ctx) + lwz $t2,`$LITTLE_ENDIAN^40`($ctx) addc @V[5],@V[5],$a1 - lwz $t3,44($ctx) + lwz $t3,`$LITTLE_ENDIAN^44`($ctx) adde @V[4],@V[4],$a0 - lwz $a0,48($ctx) + lwz $a0,`$LITTLE_ENDIAN^48`($ctx) addc @V[7],@V[7],$a3 - lwz $a1,52($ctx) + lwz $a1,`$LITTLE_ENDIAN^52`($ctx) adde @V[6],@V[6],$a2 - lwz $a2,56($ctx) + lwz $a2,`$LITTLE_ENDIAN^56`($ctx) addc @V[9],@V[9],$t1 - lwz $a3,60($ctx) + lwz $a3,`$LITTLE_ENDIAN^60`($ctx) adde @V[8],@V[8],$t0 - stw @V[0],0($ctx) - stw @V[1],4($ctx) + stw @V[0],`$LITTLE_ENDIAN^0`($ctx) + stw @V[1],`$LITTLE_ENDIAN^4`($ctx) addc @V[11],@V[11],$t3 - stw @V[2],8($ctx) - stw @V[3],12($ctx) + stw @V[2],`$LITTLE_ENDIAN^8`($ctx) + stw @V[3],`$LITTLE_ENDIAN^12`($ctx) adde @V[10],@V[10],$t2 - stw @V[4],16($ctx) - stw @V[5],20($ctx) + stw 
@V[4],`$LITTLE_ENDIAN^16`($ctx) + stw @V[5],`$LITTLE_ENDIAN^20`($ctx) addc @V[13],@V[13],$a1 - stw @V[6],24($ctx) - stw @V[7],28($ctx) + stw @V[6],`$LITTLE_ENDIAN^24`($ctx) + stw @V[7],`$LITTLE_ENDIAN^28`($ctx) adde @V[12],@V[12],$a0 - stw @V[8],32($ctx) - stw @V[9],36($ctx) + stw @V[8],`$LITTLE_ENDIAN^32`($ctx) + stw @V[9],`$LITTLE_ENDIAN^36`($ctx) addc @V[15],@V[15],$a3 - stw @V[10],40($ctx) - stw @V[11],44($ctx) + stw @V[10],`$LITTLE_ENDIAN^40`($ctx) + stw @V[11],`$LITTLE_ENDIAN^44`($ctx) adde @V[14],@V[14],$a2 - stw @V[12],48($ctx) - stw @V[13],52($ctx) - stw @V[14],56($ctx) - stw @V[15],60($ctx) + stw @V[12],`$LITTLE_ENDIAN^48`($ctx) + stw @V[13],`$LITTLE_ENDIAN^52`($ctx) + stw @V[14],`$LITTLE_ENDIAN^56`($ctx) + stw @V[15],`$LITTLE_ENDIAN^60`($ctx) addi $inp,$inp,`16*$SZ` ; advance inp $PUSH $inp,`$FRAME-$SIZE_T*23`($sp) @@ -685,46 +727,46 @@ LPICmeup: .space `64-9*4` ___ $code.=<<___ if ($SZ==8); - .long 0x428a2f98,0xd728ae22,0x71374491,0x23ef65cd - .long 0xb5c0fbcf,0xec4d3b2f,0xe9b5dba5,0x8189dbbc - .long 0x3956c25b,0xf348b538,0x59f111f1,0xb605d019 - .long 0x923f82a4,0xaf194f9b,0xab1c5ed5,0xda6d8118 - .long 0xd807aa98,0xa3030242,0x12835b01,0x45706fbe - .long 0x243185be,0x4ee4b28c,0x550c7dc3,0xd5ffb4e2 - .long 0x72be5d74,0xf27b896f,0x80deb1fe,0x3b1696b1 - .long 0x9bdc06a7,0x25c71235,0xc19bf174,0xcf692694 - .long 0xe49b69c1,0x9ef14ad2,0xefbe4786,0x384f25e3 - .long 0x0fc19dc6,0x8b8cd5b5,0x240ca1cc,0x77ac9c65 - .long 0x2de92c6f,0x592b0275,0x4a7484aa,0x6ea6e483 - .long 0x5cb0a9dc,0xbd41fbd4,0x76f988da,0x831153b5 - .long 0x983e5152,0xee66dfab,0xa831c66d,0x2db43210 - .long 0xb00327c8,0x98fb213f,0xbf597fc7,0xbeef0ee4 - .long 0xc6e00bf3,0x3da88fc2,0xd5a79147,0x930aa725 - .long 0x06ca6351,0xe003826f,0x14292967,0x0a0e6e70 - .long 0x27b70a85,0x46d22ffc,0x2e1b2138,0x5c26c926 - .long 0x4d2c6dfc,0x5ac42aed,0x53380d13,0x9d95b3df - .long 0x650a7354,0x8baf63de,0x766a0abb,0x3c77b2a8 - .long 0x81c2c92e,0x47edaee6,0x92722c85,0x1482353b - .long 0xa2bfe8a1,0x4cf10364,0xa81a664b,0xbc423001 - .long 0xc24b8b70,0xd0f89791,0xc76c51a3,0x0654be30 - .long 0xd192e819,0xd6ef5218,0xd6990624,0x5565a910 - .long 0xf40e3585,0x5771202a,0x106aa070,0x32bbd1b8 - .long 0x19a4c116,0xb8d2d0c8,0x1e376c08,0x5141ab53 - .long 0x2748774c,0xdf8eeb99,0x34b0bcb5,0xe19b48a8 - .long 0x391c0cb3,0xc5c95a63,0x4ed8aa4a,0xe3418acb - .long 0x5b9cca4f,0x7763e373,0x682e6ff3,0xd6b2b8a3 - .long 0x748f82ee,0x5defb2fc,0x78a5636f,0x43172f60 - .long 0x84c87814,0xa1f0ab72,0x8cc70208,0x1a6439ec - .long 0x90befffa,0x23631e28,0xa4506ceb,0xde82bde9 - .long 0xbef9a3f7,0xb2c67915,0xc67178f2,0xe372532b - .long 0xca273ece,0xea26619c,0xd186b8c7,0x21c0c207 - .long 0xeada7dd6,0xcde0eb1e,0xf57d4f7f,0xee6ed178 - .long 0x06f067aa,0x72176fba,0x0a637dc5,0xa2c898a6 - .long 0x113f9804,0xbef90dae,0x1b710b35,0x131c471b - .long 0x28db77f5,0x23047d84,0x32caab7b,0x40c72493 - .long 0x3c9ebe0a,0x15c9bebc,0x431d67c4,0x9c100d4c - .long 0x4cc5d4be,0xcb3e42b6,0x597f299c,0xfc657e2a - .long 0x5fcb6fab,0x3ad6faec,0x6c44198c,0x4a475817 + .quad 0x428a2f98d728ae22,0x7137449123ef65cd + .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc + .quad 0x3956c25bf348b538,0x59f111f1b605d019 + .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 + .quad 0xd807aa98a3030242,0x12835b0145706fbe + .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 + .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 + .quad 0x9bdc06a725c71235,0xc19bf174cf692694 + .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 + .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 + .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 + .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 + 
.quad 0x983e5152ee66dfab,0xa831c66d2db43210 + .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 + .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 + .quad 0x06ca6351e003826f,0x142929670a0e6e70 + .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 + .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df + .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 + .quad 0x81c2c92e47edaee6,0x92722c851482353b + .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 + .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 + .quad 0xd192e819d6ef5218,0xd69906245565a910 + .quad 0xf40e35855771202a,0x106aa07032bbd1b8 + .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 + .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 + .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb + .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 + .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 + .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec + .quad 0x90befffa23631e28,0xa4506cebde82bde9 + .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b + .quad 0xca273eceea26619c,0xd186b8c721c0c207 + .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 + .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 + .quad 0x113f9804bef90dae,0x1b710b35131c471b + .quad 0x28db77f523047d84,0x32caab7b40c72493 + .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c + .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a + .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 ___ $code.=<<___ if ($SZ==4); .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 -- 2.34.1
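
Notes on the techniques used above.

The new `.quad` handler in ppc-xlate.pl lets the sha512 constant table be written once as 64-bit `.quad` values: at translation time each constant is split into two 32-bit `.long` halves, with the low word emitted first on little-endian flavours and the high word first otherwise, so one table source serves both byte orders (and assemblers without a native 64-bit directive). A minimal standalone Perl sketch of that splitting; the `emit_quad` name and the hard-coded `$flavour` are illustrative only, not part of the patch:

    #!/usr/bin/env perl
    # Split a 64-bit hex constant into two .long words, low word first on
    # little-endian targets, mirroring the .quad handler in ppc-xlate.pl.
    use strict;
    use warnings;

    my $flavour = "linux64le";    # assumed flavour string for this demo

    sub emit_quad {
        my ($val) = @_;
        $val =~ /^0x([0-9a-f]*?)([0-9a-f]{1,8})$/i or die "expected a hex constant";
        my $hi = $1 ? "0x$1" : "0";
        my $lo = "0x$2";
        return $flavour =~ /le$/ ? ".long\t$lo,$hi" : ".long\t$hi,$lo";
    }

    print emit_quad("0x428a2f98d728ae22"), "\n";
    # linux64le => .long  0xd728ae22,0x428a2f98
    # linux64   => .long  0x428a2f98,0xd728ae22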
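The little-endian load paths in sha1-ppc.pl and sha512-ppc.pl read each word with a plain lwz and then byte-reverse it in a register: the sequence `rotlwi d,s,8; rlwimi d,s,24,0,7; rlwimi d,s,24,16,23` is exactly a 32-bit byte swap. A small Perl sketch that models the two instructions (the helper names rotl32/rlwimi32 are invented for illustration) and shows the result equals a straight byte reversal:

    #!/usr/bin/env perl
    # Model rotlwi/rlwimi and show the three-instruction sequence is bswap32.
    use strict;
    use warnings;

    sub rotl32 { my ($x, $n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }

    # rlwimi ra,rs,sh,mb,me: rotate rs left by sh, then insert it into ra
    # under the mask mb..me (IBM bit numbering: bit 0 is the MSB).
    sub rlwimi32 {
        my ($ra, $rs, $sh, $mb, $me) = @_;
        my $mask = 0;
        $mask |= 1 << (31 - $_) for ($mb .. $me);
        (($ra & ~$mask) | (rotl32($rs, $sh) & $mask)) & 0xffffffff;
    }

    my $src = 0x01020304;                      # what lwz returns on the LE CPU
    my $dst = rotl32($src, 8);                 # rotlwi dst,src,8
    $dst = rlwimi32($dst, $src, 24, 0, 7);     # rlwimi dst,src,24,0,7
    $dst = rlwimi32($dst, $src, 24, 16, 23);   # rlwimi dst,src,24,16,23

    printf "0x%08x\n", $dst;                   # prints 0x04030201 = bswap32(src)

A byte-reversing load (lwbrx) does exist, but only in indexed form without an immediate displacement, which is presumably why the module keeps displacement-form lwz and swaps in registers instead.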
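Finally, the `$LITTLE_ENDIAN^offset` expressions in the 32-bit sha512 path cost no extra instructions: `$LITTLE_ENDIAN` is 0 on big-endian builds and 4 (one 32-bit word) on little-endian ones, and XOR-ing it into the byte offset at script-generation time swaps which half of each 64-bit hash word a given lwz/stw touches, so the register that is meant to hold the high half still does on either byte order. A tiny sketch of that offset adjustment (variable names are illustrative):

    #!/usr/bin/env perl
    # Byte offsets of the high and low 32-bit halves of the i-th 64-bit hash
    # word, as produced by the $LITTLE_ENDIAN^... expressions above.
    use strict;
    use warnings;

    my $LITTLE_ENDIAN = 4;    # 4 on little-endian 32-bit builds, 0 on big-endian

    for my $i (0 .. 1) {
        my $hi_off = $LITTLE_ENDIAN ^ (8 * $i);        # high half of h[$i]
        my $lo_off = $LITTLE_ENDIAN ^ (8 * $i + 4);    # low half of h[$i]
        print "h[$i]: hi at offset $hi_off, lo at offset $lo_off\n";
    }
    # big-endian    (0): hi at 0, lo at 4;  hi at 8,  lo at 12
    # little-endian (4): hi at 4, lo at 0;  hi at 12, lo at 8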