From: Andy Polyakov
Date: Mon, 9 Feb 2009 15:42:04 +0000 (+0000)
Subject: s390x assembler pack update.
X-Git-Tag: OpenSSL_0_9_8k^2~45
X-Git-Url: https://git.openssl.org/gitweb/?p=openssl.git;a=commitdiff_plain;h=8626230a0227b15c0e0542f5a65f802ee32772b6

s390x assembler pack update.
---

diff --git a/Configure b/Configure
index 2f6d0a5476..31f06dcb4c 100755
--- a/Configure
+++ b/Configure
@@ -131,7 +131,7 @@ my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-
 my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::void";
 my $alpha_asm="alphacpuid.o:bn_asm.o alpha-mont.o::::::::::::void";
 my $mips3_asm=":bn-mips3.o::::::::::::void";
-my $s390x_asm=":bn-s390x.o::aes_cbc.o aes-s390x.o:::sha1-s390x.o sha256-s390x.o sha512-s390x.o:::::::void";
+my $s390x_asm="s390xcpuid.o:bn-s390x.o s390x-mont.o::aes-s390x.o:::sha1-s390x.o sha256-s390x.o sha512-s390x.o:::::::void";
 my $armv4_asm=":bn_asm.o armv4-mont.o::aes_cbc.o aes-armv4.o:::sha1-armv4-large.o sha256-armv4.o sha512-armv4.o:::::::void";
 my $ppc32_asm="ppccpuid.o:bn-ppc.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o::::::";
 my $ppc64_asm="ppccpuid.o:bn-ppc.o ppc-mont.o::aes_core.o aes_cbc.o aes-ppc.o:::sha1-ppc.o sha256-ppc.o sha512-ppc.o::::::";
diff --git a/TABLE b/TABLE
index 4521bfcc99..e3caccaa01 100644
--- a/TABLE
+++ b/TABLE
@@ -3542,10 +3542,10 @@ $thread_cflag = -D_REENTRANT
 $sys_id =
 $lflags = -ldl
 $bn_ops = SIXTY_FOUR_BIT_LONG RC4_CHAR RC4_CHUNK DES_INT DES_UNROLL
-$cpuid_obj =
-$bn_obj = bn-s390x.o
+$cpuid_obj = s390xcpuid.o
+$bn_obj = bn-s390x.o s390x-mont.o
 $des_obj =
-$aes_obj = aes_cbc.o aes-s390x.o
+$aes_obj = aes-s390x.o
 $bf_obj =
 $md5_obj =
 $sha1_obj = sha1-s390x.o sha256-s390x.o sha512-s390x.o
diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl
index 573333c642..6ee2097a15 100644
--- a/crypto/aes/asm/aes-s390x.pl
+++ b/crypto/aes/asm/aes-s390x.pl
@@ -31,12 +31,31 @@
 # Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
 # for 128-bit keys, if hardware support is detected.
+# January 2009.
+#
+# Add support for hardware AES192/256 and reschedule instructions to
+# minimize/avoid Address Generation Interlock hazard and to favour
+# dual-issue z10 pipeline. This gave ~25% improvement on z10. The gain
+# should be larger on earlier CPUs, because on the dual-issue z10 it is
+# impossible to eliminate the interlock condition: the critical path is
+# not long enough. Yet z10 spends ~24 cycles per byte processed with a
+# 128-bit key.
+#
+# Unlike the previous version, hardware support detection takes place
+# only at the moment of key schedule setup, which is denoted in
+# key->rounds. This is done because deferred key setup can't be made
+# MT-safe for key lengths longer than 128 bits.
+#
+# Add AES_cbc_encrypt, which gives a substantial performance improvement,
+# measured at ~6.6x. It's less than the previously mentioned 8x because
+# the software implementation was optimized.
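
[Editorial note: the hardware paths in this patch are all driven by a CIPHER MESSAGE function code. AES_set_encrypt_key converts the key length to the KM/KMC code (128/192/256-bit keys map to codes 18/19/20), stores it in key->rounds, and the encrypt/decrypt/CBC routines test the corresponding bit of the query status block before using the instruction. A minimal C sketch of that mapping and bit test follows; the helper names are illustrative and not part of the patch.]

/* Map an AES key length in bits to the KM/KMC function code used by the
 * patch: 128 -> 18 (AES-128), 192 -> 19, 256 -> 20.  Mirrors the
 * "lhi %r5,-128; ar %r5,$bits; srl %r5,6; ar %r5,%r0" sequence. */
static unsigned int aes_km_code(unsigned int bits)
{
    return 18 + ((bits - 128) >> 6);
}

/* The query form of KM/KMC (function code 0) stores a 16-byte status
 * block; bit i, numbered from the most significant bit of byte 0 as is
 * usual on z/Architecture, is set when function code i is available. */
static int km_code_available(const unsigned char status[16], unsigned int fc)
{
    return (status[fc / 8] >> (7 - (fc % 8))) & 1;
}
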
+ $softonly=0; # allow hardware support -$t1="%r0"; -$t2="%r1"; -$t3="%r2"; $inp="%r2"; -$out="%r3"; $mask="%r3"; $bits="%r3"; +$t0="%r0"; $mask="%r0"; +$t1="%r1"; +$t2="%r2"; $inp="%r2"; +$t3="%r3"; $out="%r3"; $bits="%r3"; $key="%r4"; $i1="%r5"; $i2="%r6"; @@ -59,7 +78,7 @@ $code=<<___; .text .type AES_Te,\@object -.align 64 +.align 256 AES_Te: ___ &_data_word( @@ -165,6 +184,7 @@ $code.=<<___; .long 0x01000000, 0x02000000, 0x04000000, 0x08000000 .long 0x10000000, 0x20000000, 0x40000000, 0x80000000 .long 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0 +.align 256 .size AES_Te,.-AES_Te # void AES_encrypt(const unsigned char *inp, unsigned char *out, @@ -172,49 +192,32 @@ $code.=<<___; .globl AES_encrypt .type AES_encrypt,\@function AES_encrypt: - stg $ra,112($sp) ___ $code.=<<___ if (!$softonly); - lghi %r0,10 - c %r0,240($key) - jne .Lesoft - lghi %r0,0 # query capability vector - la %r1,16($sp) - .long 0xb92e0042 # km %r4,%r2 - lg %r0,16($sp) - tmhl %r0,`0x8000>>2` - jz .Lesoft128 - lghi %r0,`0x00|0x12` # encrypt AES-128 + l %r0,240($key) + lhi %r1,16 + clr %r0,%r1 + jl .Lesoft + la %r1,0($key) #la %r2,0($inp) la %r4,0($out) lghi %r3,16 # single block length .long 0xb92e0042 # km %r4,%r2 - bcr 8,%r14 # return if done - la $out,0(%r4) # restore arguments - la $key,0(%r1) -.Lesoft128: - lghi %r0,0 - c %r0,236($key) - je .Lesoft - stmg $inp,$key,16($sp) - la $inp,0($key) - lghi $bits,128 - bras $ra,.Lekey_internal # postponed key schedule setup - lmg $inp,$key,16($sp) + brc 1,.-4 # can this happen? + br %r14 +.align 64 .Lesoft: ___ $code.=<<___; - stmg %r3,%r13,24($sp) - - larl $tbl,AES_Te + stmg %r3,$ra,24($sp) llgf $s0,0($inp) llgf $s1,4($inp) llgf $s2,8($inp) llgf $s3,12($inp) - llill $mask,`0xff<<3` + larl $tbl,AES_Te bras $ra,_s390x_AES_encrypt lg $out,24($sp) @@ -230,26 +233,25 @@ $code.=<<___; .type _s390x_AES_encrypt,\@function .align 16 _s390x_AES_encrypt: + stg $ra,152($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) x $s3,12($key) l $rounds,240($key) + llill $mask,`0xff<<3` aghi $rounds,-1 - + j .Lenc_loop +.align 16 .Lenc_loop: - sllg $i1,$s0,`0+3` - srlg $i2,$s0,`8-3` - srlg $i3,$s0,`16-3` + sllg $t1,$s0,`0+3` + srlg $t2,$s0,`8-3` + srlg $t3,$s0,`16-3` srl $s0,`24-3` nr $s0,$mask - ngr $i1,$mask - nr $i2,$mask - nr $i3,$mask - l $s0,0($s0,$tbl) # Te0[s0>>24] - l $t1,1($i1,$tbl) # Te3[s0>>0] - l $t2,2($i2,$tbl) # Te2[s0>>8] - l $t3,3($i3,$tbl) # Te1[s0>>16] + ngr $t1,$mask + nr $t2,$mask + nr $t3,$mask srlg $i1,$s1,`16-3` # i0 sllg $i2,$s1,`0+3` @@ -259,72 +261,84 @@ _s390x_AES_encrypt: nr $s1,$mask ngr $i2,$mask nr $i3,$mask + + l $s0,0($s0,$tbl) # Te0[s0>>24] + l $t1,1($t1,$tbl) # Te3[s0>>0] + l $t2,2($t2,$tbl) # Te2[s0>>8] + l $t3,3($t3,$tbl) # Te1[s0>>16] + x $s0,3($i1,$tbl) # Te1[s1>>16] l $s1,0($s1,$tbl) # Te0[s1>>24] x $t2,1($i2,$tbl) # Te3[s1>>0] x $t3,2($i3,$tbl) # Te2[s1>>8] - xr $s1,$t1 srlg $i1,$s2,`8-3` # i0 srlg $i2,$s2,`16-3` # i1 - sllg $i3,$s2,`0+3` - srl $s2,`24-3` nr $i1,$mask nr $i2,$mask + sllg $i3,$s2,`0+3` + srl $s2,`24-3` nr $s2,$mask ngr $i3,$mask + + xr $s1,$t1 + srlg $ra,$s3,`8-3` # i1 + sllg $t1,$s3,`0+3` # i0 + nr $ra,$mask + la $key,16($key) + ngr $t1,$mask + x $s0,2($i1,$tbl) # Te2[s2>>8] x $s1,3($i2,$tbl) # Te1[s2>>16] l $s2,0($s2,$tbl) # Te0[s2>>24] x $t3,1($i3,$tbl) # Te3[s2>>0] - xr $s2,$t2 - sllg $i1,$s3,`0+3` # i0 - srlg $i2,$s3,`8-3` # i1 srlg $i3,$s3,`16-3` # i2 + xr $s2,$t2 srl $s3,`24-3` - ngr $i1,$mask - nr $i2,$mask nr $i3,$mask nr $s3,$mask - x $s0,1($i1,$tbl) # Te3[s3>>0] - x $s1,2($i2,$tbl) # Te2[s3>>8] - x $s2,3($i3,$tbl) # Te1[s3>>16] - l 
$s3,0($s3,$tbl) # Te0[s3>>24] - xr $s3,$t3 - la $key,16($key) x $s0,0($key) x $s1,4($key) x $s2,8($key) - x $s3,12($key) + x $t3,12($key) + + x $s0,1($t1,$tbl) # Te3[s3>>0] + x $s1,2($ra,$tbl) # Te2[s3>>8] + x $s2,3($i3,$tbl) # Te1[s3>>16] + l $s3,0($s3,$tbl) # Te0[s3>>24] + xr $s3,$t3 brct $rounds,.Lenc_loop + .align 16 - sllg $i1,$s0,`0+3` - srlg $i2,$s0,`8-3` - srlg $i3,$s0,`16-3` + sllg $t1,$s0,`0+3` + srlg $t2,$s0,`8-3` + ngr $t1,$mask + srlg $t3,$s0,`16-3` srl $s0,`24-3` nr $s0,$mask - ngr $i1,$mask - nr $i2,$mask - nr $i3,$mask - llgc $s0,2($s0,$tbl) # Te4[s0>>24] - llgc $t1,2($i1,$tbl) # Te4[s0>>0] - llgc $t2,2($i2,$tbl) # Te4[s0>>8] - llgc $t3,2($i3,$tbl) # Te4[s0>>16] - sll $s0,24 - sll $t2,8 - sll $t3,16 + nr $t2,$mask + nr $t3,$mask srlg $i1,$s1,`16-3` # i0 sllg $i2,$s1,`0+3` + ngr $i2,$mask srlg $i3,$s1,`8-3` srl $s1,`24-3` nr $i1,$mask nr $s1,$mask - ngr $i2,$mask nr $i3,$mask + + llgc $s0,2($s0,$tbl) # Te4[s0>>24] + llgc $t1,2($t1,$tbl) # Te4[s0>>0] + sll $s0,24 + llgc $t2,2($t2,$tbl) # Te4[s0>>8] + llgc $t3,2($t3,$tbl) # Te4[s0>>16] + sll $t2,8 + sll $t3,16 + llgc $i1,2($i1,$tbl) # Te4[s1>>16] llgc $s1,2($s1,$tbl) # Te4[s1>>24] llgc $i2,2($i2,$tbl) # Te4[s1>>0] @@ -339,34 +353,40 @@ _s390x_AES_encrypt: srlg $i1,$s2,`8-3` # i0 srlg $i2,$s2,`16-3` # i1 - sllg $i3,$s2,`0+3` - srl $s2,`24-3` nr $i1,$mask nr $i2,$mask - nr $s2,$mask + sllg $i3,$s2,`0+3` + srl $s2,`24-3` ngr $i3,$mask + nr $s2,$mask + + sllg $t1,$s3,`0+3` # i0 + srlg $ra,$s3,`8-3` # i1 + ngr $t1,$mask + llgc $i1,2($i1,$tbl) # Te4[s2>>8] llgc $i2,2($i2,$tbl) # Te4[s2>>16] + sll $i1,8 llgc $s2,2($s2,$tbl) # Te4[s2>>24] llgc $i3,2($i3,$tbl) # Te4[s2>>0] - sll $i1,8 sll $i2,16 + nr $ra,$mask sll $s2,24 or $s0,$i1 or $s1,$i2 or $s2,$t2 or $t3,$i3 - sllg $i1,$s3,`0+3` # i0 - srlg $i2,$s3,`8-3` # i1 srlg $i3,$s3,`16-3` # i2 srl $s3,`24-3` - ngr $i1,$mask - nr $i2,$mask nr $i3,$mask nr $s3,$mask - llgc $i1,2($i1,$tbl) # Te4[s3>>0] - llgc $i2,2($i2,$tbl) # Te4[s3>>8] + + l $t0,16($key) + l $t2,20($key) + + llgc $i1,2($t1,$tbl) # Te4[s3>>0] + llgc $i2,2($ra,$tbl) # Te4[s3>>8] llgc $i3,2($i3,$tbl) # Te4[s3>>16] llgc $s3,2($s3,$tbl) # Te4[s3>>24] sll $i2,8 @@ -377,8 +397,9 @@ _s390x_AES_encrypt: or $s2,$i3 or $s3,$t3 - x $s0,16($key) - x $s1,20($key) + lg $ra,152($sp) + xr $s0,$t0 + xr $s1,$t2 x $s2,24($key) x $s3,28($key) @@ -388,7 +409,7 @@ ___ $code.=<<___; .type AES_Td,\@object -.align 64 +.align 256 AES_Td: ___ &_data_word( @@ -497,50 +518,32 @@ $code.=<<___; .globl AES_decrypt .type AES_decrypt,\@function AES_decrypt: - stg $ra,112($sp) ___ $code.=<<___ if (!$softonly); - lghi %r0,10 - c %r0,240($key) - jne .Ldsoft - lghi %r0,0 # query capability vector - la %r1,16($sp) - .long 0xb92e0042 # km %r4,%r2 - lg %r0,16($sp) - tmhl %r0,`0x8000>>2` - jz .Ldsoft128 - lghi %r0,`0x80|0x12` # decrypt AES-128 - la %r1,160($key) + l %r0,240($key) + lhi %r1,16 + clr %r0,%r1 + jl .Ldsoft + + la %r1,0($key) #la %r2,0($inp) la %r4,0($out) lghi %r3,16 # single block length .long 0xb92e0042 # km %r4,%r2 - bcr 8,%r14 # return if done - la $out,0(%r4) # restore arguments - lghi $key,-160 - la $key,0($key,%r1) -.Ldsoft128: - lghi %r0,0 - c %r0,236($key) - je .Ldsoft - stmg $inp,$key,16($sp) - la $inp,160($key) - lghi $bits,128 - bras $ra,.Ldkey_internal # postponed key schedule setup - lmg $inp,$key,16($sp) + brc 1,.-4 # can this happen? 
+ br %r14 +.align 64 .Ldsoft: ___ $code.=<<___; - stmg %r3,%r13,24($sp) - - larl $tbl,AES_Td + stmg %r3,$ra,24($sp) llgf $s0,0($inp) llgf $s1,4($inp) llgf $s2,8($inp) llgf $s3,12($inp) - llill $mask,`0xff<<3` + larl $tbl,AES_Td bras $ra,_s390x_AES_decrypt lg $out,24($sp) @@ -556,26 +559,25 @@ $code.=<<___; .type _s390x_AES_decrypt,\@function .align 16 _s390x_AES_decrypt: + stg $ra,152($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) x $s3,12($key) l $rounds,240($key) + llill $mask,`0xff<<3` aghi $rounds,-1 - + j .Ldec_loop +.align 16 .Ldec_loop: - srlg $i1,$s0,`16-3` - srlg $i2,$s0,`8-3` - sllg $i3,$s0,`0+3` + srlg $t1,$s0,`16-3` + srlg $t2,$s0,`8-3` + sllg $t3,$s0,`0+3` srl $s0,`24-3` nr $s0,$mask - nr $i1,$mask - nr $i2,$mask - ngr $i3,$mask - l $s0,0($s0,$tbl) # Td0[s0>>24] - l $t1,3($i1,$tbl) # Td1[s0>>16] - l $t2,2($i2,$tbl) # Td2[s0>>8] - l $t3,1($i3,$tbl) # Td3[s0>>0] + nr $t1,$mask + nr $t2,$mask + ngr $t3,$mask sllg $i1,$s1,`0+3` # i0 srlg $i2,$s1,`16-3` @@ -585,11 +587,16 @@ _s390x_AES_decrypt: nr $s1,$mask nr $i2,$mask nr $i3,$mask + + l $s0,0($s0,$tbl) # Td0[s0>>24] + l $t1,3($t1,$tbl) # Td1[s0>>16] + l $t2,2($t2,$tbl) # Td2[s0>>8] + l $t3,1($t3,$tbl) # Td3[s0>>0] + x $s0,1($i1,$tbl) # Td3[s1>>0] l $s1,0($s1,$tbl) # Td0[s1>>24] x $t2,3($i2,$tbl) # Td1[s1>>16] x $t3,2($i3,$tbl) # Td2[s1>>8] - xr $s1,$t1 srlg $i1,$s2,`8-3` # i0 sllg $i2,$s2,`0+3` # i1 @@ -599,69 +606,72 @@ _s390x_AES_decrypt: ngr $i2,$mask nr $s2,$mask nr $i3,$mask + + xr $s1,$t1 + srlg $ra,$s3,`8-3` # i1 + srlg $t1,$s3,`16-3` # i0 + nr $ra,$mask + la $key,16($key) + nr $t1,$mask + x $s0,2($i1,$tbl) # Td2[s2>>8] x $s1,1($i2,$tbl) # Td3[s2>>0] l $s2,0($s2,$tbl) # Td0[s2>>24] x $t3,3($i3,$tbl) # Td1[s2>>16] - xr $s2,$t2 - srlg $i1,$s3,`16-3` # i0 - srlg $i2,$s3,`8-3` # i1 sllg $i3,$s3,`0+3` # i2 srl $s3,`24-3` - nr $i1,$mask - nr $i2,$mask ngr $i3,$mask nr $s3,$mask - x $s0,3($i1,$tbl) # Td1[s3>>16] - x $s1,2($i2,$tbl) # Td2[s3>>8] - x $s2,1($i3,$tbl) # Td3[s3>>0] - l $s3,0($s3,$tbl) # Td0[s3>>24] - xr $s3,$t3 - la $key,16($key) + xr $s2,$t2 x $s0,0($key) x $s1,4($key) x $s2,8($key) - x $s3,12($key) + x $t3,12($key) + + x $s0,3($t1,$tbl) # Td1[s3>>16] + x $s1,2($ra,$tbl) # Td2[s3>>8] + x $s2,1($i3,$tbl) # Td3[s3>>0] + l $s3,0($s3,$tbl) # Td0[s3>>24] + xr $s3,$t3 brct $rounds,.Ldec_loop + .align 16 l $t1,`2048+0`($tbl) # prefetch Td4 - l $t2,`2048+32`($tbl) - l $t3,`2048+64`($tbl) - l $i1,`2048+96`($tbl) - l $i2,`2048+128`($tbl) - l $i3,`2048+160`($tbl) - l $t1,`2048+192`($tbl) - l $t2,`2048+224`($tbl) + l $t2,`2048+64`($tbl) + l $t3,`2048+128`($tbl) + l $i1,`2048+192`($tbl) llill $mask,0xff srlg $i3,$s0,24 # i0 - srlg $i1,$s0,16 - srlg $i2,$s0,8 + srlg $t1,$s0,16 + srlg $t2,$s0,8 nr $s0,$mask # i3 - nr $i1,$mask + nr $t1,$mask + + srlg $i1,$s1,24 + nr $t2,$mask + srlg $i2,$s1,16 + srlg $ra,$s1,8 + nr $s1,$mask # i0 nr $i2,$mask + nr $ra,$mask + llgc $i3,2048($i3,$tbl) # Td4[s0>>24] - llgc $t1,2048($i1,$tbl) # Td4[s0>>16] - llgc $t2,2048($i2,$tbl) # Td4[s0>>8] + llgc $t1,2048($t1,$tbl) # Td4[s0>>16] + llgc $t2,2048($t2,$tbl) # Td4[s0>>8] + sll $t1,16 llgc $t3,2048($s0,$tbl) # Td4[s0>>0] sllg $s0,$i3,24 - sll $t1,16 sll $t2,8 - srlg $i1,$s1,24 - srlg $i2,$s1,16 - srlg $i3,$s1,8 - nr $s1,$mask # i0 - nr $i2,$mask - nr $i3,$mask llgc $s1,2048($s1,$tbl) # Td4[s1>>0] llgc $i1,2048($i1,$tbl) # Td4[s1>>24] llgc $i2,2048($i2,$tbl) # Td4[s1>>16] - llgc $i3,2048($i3,$tbl) # Td4[s1>>8] sll $i1,24 + llgc $i3,2048($ra,$tbl) # Td4[s1>>8] sll $i2,16 sll $i3,8 or $s0,$s1 @@ -681,9 +691,8 @@ _s390x_AES_decrypt: llgc $i3,2048($i3,$tbl) 
# Td4[s2>>16] sll $i1,8 sll $i2,24 - sll $i3,16 or $s0,$i1 - or $s1,$t1 + sll $i3,16 or $t2,$i2 or $t3,$i3 @@ -693,11 +702,17 @@ _s390x_AES_decrypt: nr $s3,$mask # i2 nr $i1,$mask nr $i2,$mask + + lg $ra,152($sp) + or $s1,$t1 + l $t0,16($key) + l $t1,20($key) + llgc $i1,2048($i1,$tbl) # Td4[s3>>16] llgc $i2,2048($i2,$tbl) # Td4[s3>>8] + sll $i1,16 llgc $s2,2048($s3,$tbl) # Td4[s3>>0] llgc $s3,2048($i3,$tbl) # Td4[s3>>24] - sll $i1,16 sll $i2,8 sll $s3,24 or $s0,$i1 @@ -705,54 +720,72 @@ _s390x_AES_decrypt: or $s2,$t2 or $s3,$t3 - x $s0,16($key) - x $s1,20($key) + xr $s0,$t0 + xr $s1,$t1 x $s2,24($key) x $s3,28($key) br $ra .size _s390x_AES_decrypt,.-_s390x_AES_decrypt +___ +$code.=<<___; # void AES_set_encrypt_key(const unsigned char *in, int bits, # AES_KEY *key) { .globl AES_set_encrypt_key .type AES_set_encrypt_key,\@function .align 16 AES_set_encrypt_key: - lghi $t1,0 - clgr $inp,$t1 + lghi $t0,0 + clgr $inp,$t0 je .Lminus1 - clgr $key,$t1 + clgr $key,$t0 je .Lminus1 - lghi $t1,128 - clr $bits,$t1 - je .Lproceed128 - lghi $t1,192 - clr $bits,$t1 - je .Lekey_internal - lghi $t1,256 - clr $bits,$t1 - je .Lekey_internal + lghi $t0,128 + clr $bits,$t0 + je .Lproceed + lghi $t0,192 + clr $bits,$t0 + je .Lproceed + lghi $t0,256 + clr $bits,$t0 + je .Lproceed lghi %r2,-2 br %r14 -.align 4 -.Lproceed128: +.align 16 +.Lproceed: ___ $code.=<<___ if (!$softonly); + # convert bits to km code, [128,192,256]->[18,19,20] + lhi %r5,-128 + lhi %r0,18 + ar %r5,$bits + srl %r5,6 + ar %r5,%r0 + lghi %r0,0 # query capability vector la %r1,16($sp) - .long 0xb92e0042 # km %r4,%r2 - lg %r0,16($sp) - tmhl %r0,`0x8000>>2` + .long 0xb92f0042 # kmc %r4,%r2 + + llihh %r1,0x8000 + srlg %r1,%r1,0(%r5) + ng %r1,16($sp) jz .Lekey_internal - lmg $t1,$t2,0($inp) # just copy 128 bits... - stmg $t1,$t2,0($key) - lghi $t1,10 - st $t1,236($key) # ... postpone key setup - st $t1,240($key) + lmg %r0,%r1,0($inp) # just copy 128 bits... 
+ stmg %r0,%r1,0($key) + lhi %r0,192 + cr $bits,%r0 + jl 1f + lg %r1,16($inp) + stg %r1,16($key) + je 1f + lg %r1,24($inp) + stg %r1,24($key) +1: st $bits,236($key) # save bits + st %r5,240($key) # save km code lghi %r2,0 br %r14 ___ @@ -771,18 +804,15 @@ $code.=<<___; st $s1,4($key) st $s2,8($key) st $s3,12($key) - lghi $t1,128 - cr $bits,$t1 + lghi $t0,128 + cr $bits,$t0 jne .Lnot128 llill $mask,0xff lghi $t3,0 # i=0 lghi $rounds,10 - st $t3,236($key) # mark as set up st $rounds,240($key) -.align 8 -.L128_loop: llgfr $t2,$s3 # temp=rk[3] srlg $i1,$s3,8 srlg $i2,$s3,16 @@ -790,6 +820,9 @@ $code.=<<___; nr $t2,$mask nr $i1,$mask nr $i2,$mask + +.align 16 +.L128_loop: la $t2,0($t2,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) @@ -803,6 +836,15 @@ $code.=<<___; xr $s1,$s0 # rk[5]=rk[1]^rk[4] xr $s2,$s1 # rk[6]=rk[2]^rk[5] xr $s3,$s2 # rk[7]=rk[3]^rk[6] + + llgfr $t2,$s3 # temp=rk[3] + srlg $i1,$s3,8 + srlg $i2,$s3,16 + nr $t2,$mask + nr $i1,$mask + srlg $i3,$s3,24 + nr $i2,$mask + st $s0,16($key) st $s1,20($key) st $s2,24($key) @@ -814,14 +856,14 @@ $code.=<<___; lmg %r6,%r13,48($sp) br $ra -.align 4 +.align 16 .Lnot128: - llgf $t1,16($inp) - llgf $t2,20($inp) - st $t1,16($key) - st $t2,20($key) - lghi $t1,192 - cr $bits,$t1 + llgf $t0,16($inp) + llgf $t1,20($inp) + st $t0,16($key) + st $t1,20($key) + lghi $t0,192 + cr $bits,$t0 jne .Lnot192 llill $mask,0xff @@ -830,27 +872,29 @@ $code.=<<___; st $rounds,240($key) lghi $rounds,8 -.align 8 -.L192_loop: - srlg $i1,$t2,8 - srlg $i2,$t2,16 - srlg $i3,$t2,24 - nr $t2,$mask + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + +.align 16 +.L192_loop: + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - icm $t2,2,0($t2) # Te4[rk[5]>>0]<<8 - icm $t2,4,0($i1) # Te4[rk[5]>>8]<<16 - icm $t2,8,0($i2) # Te4[rk[5]>>16]<<24 - icm $t2,1,0($i3) # Te4[rk[5]>>24] - x $t2,256($t3,$tbl) # rcon[i] - xr $s0,$t2 # rk[6]=rk[0]^... + icm $t1,2,0($t1) # Te4[rk[5]>>0]<<8 + icm $t1,4,0($i1) # Te4[rk[5]>>8]<<16 + icm $t1,8,0($i2) # Te4[rk[5]>>16]<<24 + icm $t1,1,0($i3) # Te4[rk[5]>>24] + x $t1,256($t3,$tbl) # rcon[i] + xr $s0,$t1 # rk[6]=rk[0]^... 
xr $s1,$s0 # rk[7]=rk[1]^rk[6] xr $s2,$s1 # rk[8]=rk[2]^rk[7] xr $s3,$s2 # rk[9]=rk[3]^rk[8] + st $s0,24($key) st $s1,28($key) st $s2,32($key) @@ -859,47 +903,57 @@ $code.=<<___; lghi %r2,0 lmg %r6,%r13,48($sp) br $ra -.align 4 + +.align 16 .L192_continue: - lgr $t2,$s3 - x $t2,16($key) # rk[10]=rk[4]^rk[9] - st $t2,40($key) - x $t2,20($key) # rk[11]=rk[5]^rk[10] - st $t2,44($key) + lgr $t1,$s3 + x $t1,16($key) # rk[10]=rk[4]^rk[9] + st $t1,40($key) + x $t1,20($key) # rk[11]=rk[5]^rk[10] + st $t1,44($key) + + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask + nr $i1,$mask + nr $i2,$mask + la $key,24($key) # key+=6 la $t3,4($t3) # i++ j .L192_loop -.align 4 +.align 16 .Lnot192: - llgf $t1,24($inp) - llgf $t2,28($inp) - st $t1,24($key) - st $t2,28($key) + llgf $t0,24($inp) + llgf $t1,28($inp) + st $t0,24($key) + st $t1,28($key) llill $mask,0xff lghi $t3,0 # i=0 lghi $rounds,14 st $rounds,240($key) lghi $rounds,7 -.align 8 -.L256_loop: - srlg $i1,$t2,8 - srlg $i2,$t2,16 - srlg $i3,$t2,24 - nr $t2,$mask + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + +.align 16 +.L256_loop: + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - icm $t2,2,0($t2) # Te4[rk[7]>>0]<<8 - icm $t2,4,0($i1) # Te4[rk[7]>>8]<<16 - icm $t2,8,0($i2) # Te4[rk[7]>>16]<<24 - icm $t2,1,0($i3) # Te4[rk[7]>>24] - x $t2,256($t3,$tbl) # rcon[i] - xr $s0,$t2 # rk[8]=rk[0]^... + icm $t1,2,0($t1) # Te4[rk[7]>>0]<<8 + icm $t1,4,0($i1) # Te4[rk[7]>>8]<<16 + icm $t1,8,0($i2) # Te4[rk[7]>>16]<<24 + icm $t1,1,0($i3) # Te4[rk[7]>>24] + x $t1,256($t3,$tbl) # rcon[i] + xr $s0,$t1 # rk[8]=rk[0]^... xr $s1,$s0 # rk[9]=rk[1]^rk[8] xr $s2,$s1 # rk[10]=rk[2]^rk[9] xr $s3,$s2 # rk[11]=rk[3]^rk[10] @@ -911,39 +965,47 @@ $code.=<<___; lghi %r2,0 lmg %r6,%r13,48($sp) br $ra -.align 4 + +.align 16 .L256_continue: - lgr $t2,$s3 # temp=rk[11] + lgr $t1,$s3 # temp=rk[11] srlg $i1,$s3,8 srlg $i2,$s3,16 srlg $i3,$s3,24 - nr $t2,$mask + nr $t1,$mask nr $i1,$mask nr $i2,$mask - la $t2,0($t2,$tbl) + la $t1,0($t1,$tbl) la $i1,0($i1,$tbl) la $i2,0($i2,$tbl) la $i3,0($i3,$tbl) - llgc $t2,0($t2) # Te4[rk[11]>>0] - icm $t2,2,0($i1) # Te4[rk[11]>>8]<<8 - icm $t2,4,0($i2) # Te4[rk[11]>>16]<<16 - icm $t2,8,0($i3) # Te4[rk[11]>>24]<<24 - x $t2,16($key) # rk[12]=rk[4]^... - st $t2,48($key) - x $t2,20($key) # rk[13]=rk[5]^rk[12] - st $t2,52($key) - x $t2,24($key) # rk[14]=rk[6]^rk[13] - st $t2,56($key) - x $t2,28($key) # rk[15]=rk[7]^rk[14] - st $t2,60($key) + llgc $t1,0($t1) # Te4[rk[11]>>0] + icm $t1,2,0($i1) # Te4[rk[11]>>8]<<8 + icm $t1,4,0($i2) # Te4[rk[11]>>16]<<16 + icm $t1,8,0($i3) # Te4[rk[11]>>24]<<24 + x $t1,16($key) # rk[12]=rk[4]^... 
+ st $t1,48($key) + x $t1,20($key) # rk[13]=rk[5]^rk[12] + st $t1,52($key) + x $t1,24($key) # rk[14]=rk[6]^rk[13] + st $t1,56($key) + x $t1,28($key) # rk[15]=rk[7]^rk[14] + st $t1,60($key) + + srlg $i1,$t1,8 + srlg $i2,$t1,16 + srlg $i3,$t1,24 + nr $t1,$mask + nr $i1,$mask + nr $i2,$mask la $key,32($key) # key+=8 la $t3,4($t3) # i++ j .L256_loop -.align 4 + .Lminus1: lghi %r2,-1 - br %r14 + br $ra .size AES_set_encrypt_key,.-AES_set_encrypt_key # void AES_set_decrypt_key(const unsigned char *in, int bits, @@ -961,16 +1023,12 @@ AES_set_decrypt_key: bnzr $ra ___ $code.=<<___ if (!$softonly); - lghi $t1,10 - c $t1,240($key) - jne .Lgo - lghi $t1,0 - c $t1,236($key) - je .Lgo - - lmg $t1,$t2,0($key) # just copy 128 bits otherwise - stmg $t1,$t2,160($key) - lghi %r2,0 + l $t0,240($key) + lhi $t1,16 + cr $t0,$t1 + jl .Lgo + oill $t0,0x80 # set "decrypt" bit + st $t0,240($key) br $ra .align 16 @@ -988,14 +1046,15 @@ $code.=<<___; sllg $i2,$rounds,4 la $i2,0($i2,$key) srl $rounds,1 + lghi $t1,-16 -.align 8 +.align 16 .Linv: lmg $s0,$s1,0($i1) lmg $s2,$s3,0($i2) stmg $s0,$s1,0($i2) stmg $s2,$s3,0($i1) - aghi $i1,16 - aghi $i2,-16 + la $i1,16($i1) + la $i2,0($t1,$i2) brct $rounds,.Linv ___ $mask80=$i1; @@ -1006,13 +1065,13 @@ $code.=<<___; aghi $rounds,-1 sll $rounds,2 # (rounds-1)*4 llilh $mask80,0x8080 - oill $mask80,0x8080 llilh $mask1b,0x1b1b - oill $mask1b,0x1b1b llilh $maskfe,0xfefe + oill $mask80,0x8080 + oill $mask1b,0x1b1b oill $maskfe,0xfefe -.align 8 +.align 16 .Lmix: l $s0,16($key) # tp1 lr $s1,$s0 ngr $s1,$mask80 @@ -1044,16 +1103,15 @@ $code.=<<___; xr $s1,$s0 # tp2^tp1 xr $s2,$s0 # tp4^tp1 rll $s0,$s0,24 # = ROTATE(tp1,8) + xr $s2,$s3 # ^=tp8 xr $s0,$s1 # ^=tp2^tp1 - xr $s0,$s2 # ^=tp4^tp1 - xr $s0,$s3 # ^= tp8[^(tp4^tp1)^(tp2^tp1)=tp4^tp2] xr $s1,$s3 # tp2^tp1^tp8 + xr $s0,$s2 # ^=tp4^tp1^tp8 rll $s1,$s1,8 - xr $s0,$s1 # ^= ROTATE(tp8^tp2^tp1,24) - xr $s2,$s3 # tp4^tp1^tp8 rll $s2,$s2,16 - xr $s0,$s2 # ^= ROTATE(tp8^tp4^tp1,16) + xr $s0,$s1 # ^= ROTATE(tp8^tp2^tp1,24) rll $s3,$s3,24 + xr $s0,$s2 # ^= ROTATE(tp8^tp4^tp1,16) xr $s0,$s3 # ^= ROTATE(tp8,8) st $s0,16($key) @@ -1064,6 +1122,210 @@ $code.=<<___; lghi %r2,0 br $ra .size AES_set_decrypt_key,.-AES_set_decrypt_key +___ + +#void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, +# size_t length, const AES_KEY *key, +# unsigned char *ivec, const int enc) +{ +my $inp="%r2"; +my $out="%r4"; # length and out are swapped +my $len="%r3"; +my $key="%r5"; +my $ivp="%r6"; + +$code.=<<___; +.globl AES_cbc_encrypt +.type AES_cbc_encrypt,\@function +.align 16 +AES_cbc_encrypt: + xgr %r3,%r4 # flip %r3 and %r4, out and len + xgr %r4,%r3 + xgr %r3,%r4 +___ +$code.=<<___ if (!$softonly); + lhi %r0,16 + cl %r0,240($key) + jh .Lcbc_software + + lg %r0,0($ivp) # copy ivec + lg %r1,8($ivp) + stmg %r0,%r1,16($sp) + lmg %r0,%r1,0($key) # copy key, cover 256 bit + stmg %r0,%r1,32($sp) + lmg %r0,%r1,16($key) + stmg %r0,%r1,48($sp) + l %r0,240($key) # load kmc code + lghi $key,15 # res=len%16, len-=res; + ngr $key,$len + slgr $len,$key + la %r1,16($sp) # parameter block - ivec || key + jz .Lkmc_truncated + .long 0xb92f0042 # kmc %r4,%r2 + brc 1,.-4 # pay attention to "partial completion" + ltr $key,$key + jnz .Lkmc_truncated +.Lkmc_done: + lmg %r0,%r1,16($sp) # copy ivec to caller + stg %r0,0($ivp) + stg %r1,8($ivp) + br $ra +.align 16 +.Lkmc_truncated: + ahi $key,-1 # it's the way it's encoded in mvc + tmll %r0,0x80 + jnz .Lkmc_truncated_dec + lghi %r1,0 + stg %r1,128($sp) + stg %r1,136($sp) + bras %r1,1f + mvc 128(1,$sp),0($inp) +1: ex 
$key,0(%r1) + la %r1,16($sp) # restore parameter block + la $inp,128($sp) + lghi $len,16 + .long 0xb92f0042 # kmc %r4,%r2 + j .Lkmc_done +.align 16 +.Lkmc_truncated_dec: + stg $out,64($sp) + la $out,128($sp) + lghi $len,16 + .long 0xb92f0042 # kmc %r4,%r2 + lg $out,64($sp) + bras %r1,2f + mvc 0(1,$out),128($sp) +2: ex $key,0(%r1) + j .Lkmc_done +.align 16 +.Lcbc_software: +___ +$code.=<<___; + stmg $key,$ra,40($sp) + lhi %r0,0 + cl %r0,164($sp) + je .Lcbc_decrypt + + larl $tbl,AES_Te + + llgf $s0,0($ivp) + llgf $s1,4($ivp) + llgf $s2,8($ivp) + llgf $s3,12($ivp) + + lghi $t0,16 + slgr $len,$t0 + brc 4,.Lcbc_enc_tail # if borrow +.Lcbc_enc_loop: + stmg $inp,$out,16($sp) + x $s0,0($inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + lgr %r4,$key + + bras $ra,_s390x_AES_encrypt + + lmg $inp,$key,16($sp) + st $s0,0($out) + st $s1,4($out) + st $s2,8($out) + st $s3,12($out) + + la $inp,16($inp) + la $out,16($out) + lghi $t0,16 + ltgr $len,$len + jz .Lcbc_enc_done + slgr $len,$t0 + brc 4,.Lcbc_enc_tail # if borrow + j .Lcbc_enc_loop +.align 16 +.Lcbc_enc_done: + lg $ivp,48($sp) + st $s0,0($ivp) + st $s1,4($ivp) + st $s2,8($ivp) + st $s3,12($ivp) + + lmg %r7,$ra,56($sp) + br $ra + +.align 16 +.Lcbc_enc_tail: + aghi $len,15 + lghi $t0,0 + stg $t0,128($sp) + stg $t0,136($sp) + bras $t1,3f + mvc 128(1,$sp),0($inp) +3: ex $len,0($t1) + lghi $len,0 + la $inp,128($sp) + j .Lcbc_enc_loop + +.align 16 +.Lcbc_decrypt: + larl $tbl,AES_Td + + lg $t0,0($ivp) + lg $t1,8($ivp) + stmg $t0,$t1,128($sp) + +.Lcbc_dec_loop: + stmg $inp,$out,16($sp) + llgf $s0,0($inp) + llgf $s1,4($inp) + llgf $s2,8($inp) + llgf $s3,12($inp) + lgr %r4,$key + + bras $ra,_s390x_AES_decrypt + + lmg $inp,$key,16($sp) + sllg $s0,$s0,32 + sllg $s2,$s2,32 + lr $s0,$s1 + lr $s2,$s3 + + lg $t0,0($inp) + lg $t1,8($inp) + xg $s0,128($sp) + xg $s2,136($sp) + lghi $s1,16 + slgr $len,$s1 + brc 4,.Lcbc_dec_tail # if borrow + brc 2,.Lcbc_dec_done # if zero + stg $s0,0($out) + stg $s2,8($out) + stmg $t0,$t1,128($sp) + + la $inp,16($inp) + la $out,16($out) + j .Lcbc_dec_loop + +.Lcbc_dec_done: + stg $s0,0($out) + stg $s2,8($out) +.Lcbc_dec_exit: + lmg $ivp,$ra,48($sp) + stmg $t0,$t1,0($ivp) + + br $ra + +.align 16 +.Lcbc_dec_tail: + aghi $len,15 + stg $s0,128($sp) + stg $s2,136($sp) + bras $s1,4f + mvc 0(1,$out),128($sp) +4: ex $len,0($s1) + j .Lcbc_dec_exit +.size AES_cbc_encrypt,.-AES_cbc_encrypt +___ +} +$code.=<<___; .string "AES for s390x, CRYPTOGAMS by " ___ diff --git a/crypto/bn/asm/s390x-mont.pl b/crypto/bn/asm/s390x-mont.pl index d5505f93c3..b575eb70d1 100644 --- a/crypto/bn/asm/s390x-mont.pl +++ b/crypto/bn/asm/s390x-mont.pl @@ -27,6 +27,11 @@ # module performance by implementing dedicated squaring code-path and # possibly by unrolling loops... +# January 2009. +# +# Reschedule to minimize/avoid Address Generation Interlock hazard, +# make inner loops counter-based. 
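
[Editorial note: bn_mul_mont keeps the classic word-serial (CIOS-style) Montgomery multiplication; the rescheduling below changes only addressing and loop control. For orientation, here is a textbook C sketch of that algorithm, not the OpenSSL code, assuming a 64-bit gcc/clang target with unsigned __int128.]

#include <stdint.h>
#include <string.h>

typedef unsigned __int128 u128;

/* rp[] = ap[]*bp[]*R^-1 mod np[], with R = 2^(64*num) and
 * n0 = -np[0]^-1 mod 2^64.  tp[] is scratch of num+2 words and plays the
 * role of the on-stack tp[] plus "upmost overflow bit" in the assembly. */
static void mont_mul(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                     const uint64_t *np, uint64_t n0, int num, uint64_t *tp)
{
    memset(tp, 0, (num + 2) * sizeof(uint64_t));

    for (int i = 0; i < num; i++) {
        uint64_t carry = 0, m;
        u128 s;

        for (int j = 0; j < num; j++) {           /* tp += ap*bp[i] */
            s = (u128)ap[j] * bp[i] + tp[j] + carry;
            tp[j] = (uint64_t)s;
            carry = (uint64_t)(s >> 64);
        }
        s = (u128)tp[num] + carry;
        tp[num] = (uint64_t)s;
        tp[num + 1] = (uint64_t)(s >> 64);

        m = tp[0] * n0;                           /* "mn0" in the assembly */
        s = (u128)np[0] * m + tp[0];
        carry = (uint64_t)(s >> 64);
        for (int j = 1; j < num; j++) {           /* tp = (tp + np*m)/2^64 */
            s = (u128)np[j] * m + tp[j] + carry;
            tp[j - 1] = (uint64_t)s;
            carry = (uint64_t)(s >> 64);
        }
        s = (u128)tp[num] + carry;
        tp[num - 1] = (uint64_t)s;
        tp[num] = tp[num + 1] + (uint64_t)(s >> 64);
    }

    /* conditional final subtraction, the .Lsub/.Lcopy tail */
    uint64_t borrow = 0;
    for (int j = 0; j < num; j++) {
        u128 d = (u128)tp[j] - np[j] - borrow;
        rp[j] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    if (tp[num] == 0 && borrow)                   /* tp < np: keep tp */
        memcpy(rp, tp, num * sizeof(uint64_t));
}

[The "counter-based" remark refers to the inner loops: the rewritten code drives them with brct on a separate count register instead of testing the byte index for zero.]
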
+ $mn0="%r0"; $num="%r1"; @@ -47,7 +52,7 @@ $nhi="%r10"; $nlo="%r11"; $AHI="%r12"; $NHI="%r13"; -$fp="%r14"; +$count="%r14"; $sp="%r15"; $code.=<<___; @@ -57,44 +62,46 @@ $code.=<<___; bn_mul_mont: lgf $num,164($sp) # pull $num sla $num,3 # $num to enumerate bytes - la $rp,0($num,$rp) # pointers to point at the vectors' ends - la $ap,0($num,$ap) la $bp,0($num,$bp) - la $np,0($num,$np) stmg %r2,%r15,16($sp) cghi $num,16 # lghi %r2,0 # blr %r14 # if($num<16) return 0; + cghi $num,128 # + bhr %r14 # if($num>128) return 0; - lcgr $num,$num # -$num + lghi $rp,-160-8 # leave room for carry bit + lcgr $j,$num # -$num lgr %r0,$sp - lgr $fp,$sp - aghi $fp,-160-8 # leave room for carry bit - la $sp,0($num,$fp) # alloca - stg %r0,0($sp) - aghi $fp,160-8 # $fp to point at tp[$num-1] + la $rp,0($rp,$sp) + la $sp,0($j,$rp) # alloca + stg %r0,0($sp) # back chain - la $bp,0($num,$bp) # restore $bp + sra $num,3 # restore $num + la $bp,0($j,$bp) # restore $bp + ahi $num,-1 # adjust $num for inner loop lg $n0,0($n0) # pull n0 lg $bi,0($bp) - lg $alo,0($num,$ap) + lg $alo,0($ap) mlgr $ahi,$bi # ap[0]*bp[0] lgr $AHI,$ahi lgr $mn0,$alo # "tp[0]"*n0 msgr $mn0,$n0 - lg $nlo,0($num,$np)# + lg $nlo,0($np) # mlgr $nhi,$mn0 # np[0]*m1 algr $nlo,$alo # +="tp[0]" lghi $NHI,0 alcgr $NHI,$nhi - lgr $j,$num - aghi $j,8 # j=1 + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 .L1st: lg $alo,0($j,$ap) mlgr $ahi,$bi # ap[j]*bp[0] @@ -110,43 +117,45 @@ bn_mul_mont: algr $nlo,$alo alcgr $NHI,$nhi - stg $nlo,0($j,$fp) # tp[j-1]= - aghi $j,8 # j++ - jnz .L1st + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.L1st algr $NHI,$AHI lghi $AHI,0 alcgr $AHI,$AHI # upmost overflow bit - stg $NHI,0($fp) - stg $AHI,8($fp) + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) la $bp,8($bp) # bp++ .Louter: lg $bi,0($bp) # bp[i] - lg $alo,0($num,$ap) + lg $alo,0($ap) mlgr $ahi,$bi # ap[0]*bp[i] - alg $alo,8($num,$fp)# +=tp[0] + alg $alo,160($sp) # +=tp[0] lghi $AHI,0 alcgr $AHI,$ahi lgr $mn0,$alo - msgr $mn0,$n0 # tp[0]*n0 + msgr $mn0,$n0 # tp[0]*n0 - lg $nlo,0($num,$np)# np[0] + lg $nlo,0($np) # np[0] mlgr $nhi,$mn0 # np[0]*m1 algr $nlo,$alo # +="tp[0]" lghi $NHI,0 alcgr $NHI,$nhi - lgr $j,$num - aghi $j,8 # j=1 + la $j,8(%r0) # j=1 + lr $count,$num + +.align 16 .Linner: lg $alo,0($j,$ap) mlgr $ahi,$bi # ap[j]*bp[i] algr $alo,$AHI lghi $AHI,0 alcgr $ahi,$AHI - alg $alo,8($j,$fp) # +=tp[j] + alg $alo,160($j,$sp)# +=tp[j] alcgr $AHI,$ahi lg $nlo,0($j,$np) @@ -157,34 +166,29 @@ bn_mul_mont: algr $nlo,$alo # +="tp[j]" alcgr $NHI,$nhi - stg $nlo,0($j,$fp) # tp[j-1]= - aghi $j,8 # j++ - jnz .Linner + stg $nlo,160-8($j,$sp) # tp[j-1]= + la $j,8($j) # j++ + brct $count,.Linner algr $NHI,$AHI lghi $AHI,0 alcgr $AHI,$AHI - alg $NHI,8($fp) # accumulate previous upmost overflow bit + alg $NHI,160($j,$sp)# accumulate previous upmost overflow bit lghi $ahi,0 alcgr $AHI,$ahi # new upmost overflow bit - stg $NHI,0($fp) - stg $AHI,8($fp) + stg $NHI,160-8($j,$sp) + stg $AHI,160($j,$sp) la $bp,8($bp) # bp++ - clg $bp,16+32($fp) # compare to &bp[num] + clg $bp,160+8+32($j,$sp) # compare to &bp[num] jne .Louter -___ - -undef $bi; -$count=$bp; undef $bp; -$code.=<<___; - lg $rp,16+16($fp) # reincarnate rp - la $ap,8($fp) - lgr $j,$num + lg $rp,160+8+16($j,$sp) # reincarnate rp + la $ap,160($sp) + ahi $num,1 # restore $num, incidentally clears "borrow" - lcgr $count,$num - sra $count,3 # incidentally clears "borrow" + la $j,0(%r0) + lr $count,$num .Lsub: lg $alo,0($j,$ap) slbg $alo,0($j,$np) stg $alo,0($j,$rp) @@ -198,15 +202,17 @@ $code.=<<___; 
xgr $np,$AHI ngr $np,$rp ogr $ap,$np # ap=borrow?tp:rp - lgr $j,$num + la $j,0(%r0) + lgr $count,$num .Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh - stg $j,8($j,$fp) # zap tp + stg $j,160($j,$sp) # zap tp stg $alo,0($j,$rp) - aghi $j,8 - jnz .Lcopy + la $j,8($j) + brct $count,.Lcopy - lmg %r6,%r15,16+48($fp) + la %r1,160+8+48($j,$sp) + lmg %r6,%r15,0(%r1) lghi %r2,1 # signal "processed" br %r14 .size bn_mul_mont,.-bn_mul_mont diff --git a/crypto/s390xcpuid.S b/crypto/s390xcpuid.S new file mode 100644 index 0000000000..f411be6595 --- /dev/null +++ b/crypto/s390xcpuid.S @@ -0,0 +1,83 @@ +.text + +.globl OPENSSL_s390x_facilities +.type OPENSSL_s390x_facilities,@function +.align 16 +OPENSSL_s390x_facilities: + lghi %r0,0 + .long 0xb2b0f010 # stfle 16(%r15) + lg %r2,16(%r15) + br %r14 +.size OPENSSL_s390x_facilities,.-OPENSSL_s390x_facilities + +.globl OPENSSL_rdtsc +.type OPENSSL_rdtsc,@function +.align 16 +OPENSSL_rdtsc: + stck 16(%r15) + lg %r2,16(%r15) + br %r14 +.size OPENSSL_rdtsc,.-OPENSSL_rdtsc + +.globl OPENSSL_atomic_add +.type OPENSSL_atomic_add,@function +.align 16 +OPENSSL_atomic_add: + l %r1,0(%r2) +.Lspin: lr %r0,%r1 + ar %r0,%r3 + cs %r1,%r0,0(%r2) + brc 4,.Lspin + lgfr %r2,%r0 # OpenSSL expects the new value + br %r14 +.size OPENSSL_atomic_add,.-OPENSSL_atomic_add + +.globl OPENSSL_wipe_cpu +.type OPENSSL_wipe_cpu,@function +.align 16 +OPENSSL_wipe_cpu: + xgr %r0,%r0 + xgr %r1,%r1 + lgr %r2,%r15 + xgr %r3,%r3 + xgr %r4,%r4 + lzdr %f0 + lzdr %f1 + lzdr %f2 + lzdr %f3 + lzdr %f4 + lzdr %f5 + lzdr %f6 + lzdr %f7 + br %r14 +.size OPENSSL_wipe_cpu,.-OPENSSL_wipe_cpu + +.globl OPENSSL_cleanse +.type OPENSSL_cleanse,@function +.align 16 +OPENSSL_cleanse: + lghi %r4,15 + lghi %r0,0 + clgr %r3,%r4 + jh .Lot +.Little: + stc %r0,0(%r2) + la %r2,1(%r2) + brctg %r3,.Little + br %r14 +.align 4 +.Lot: tmll %r2,7 + jz .Laligned + stc %r0,0(%r2) + la %r2,1(%r2) + brctg %r3,.Lot +.Laligned: + srlg %r4,%r3,3 +.Loop: stg %r0,0(%r2) + la %r2,8(%r2) + brctg %r4,.Loop + lghi %r4,7 + ngr %r3,%r4 + jnz .Little + br %r14 +.size OPENSSL_cleanse,.-OPENSSL_cleanse diff --git a/crypto/sha/asm/sha1-s390x.pl b/crypto/sha/asm/sha1-s390x.pl index 5c36436d45..e22e86fa14 100644 --- a/crypto/sha/asm/sha1-s390x.pl +++ b/crypto/sha/asm/sha1-s390x.pl @@ -15,14 +15,20 @@ # twist is that SHA1 hardware support is detected and utilized. In # which case performance can reach further >4.5x for larger chunks. +# January 2009. +# +# Optimize Xupdate for amount of memory references and reschedule +# instructions to favour dual-issue z10 pipeline. On z10 hardware is +# "only" ~2.3x faster than software. 
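
[Editorial note: Xupdate is the standard SHA-1 message-schedule recurrence, kept in a 16-word circular buffer on the stack; the rewritten code produces two adjacent schedule words per call, packed into one 64-bit register, which is why it is "vectorized and executed every 2nd cycle". A plain C sketch of the recurrence itself, standard SHA-1 and nothing s390x-specific:]

#include <stdint.h>

static uint32_t rotl32(uint32_t x, int n)
{
    return (x << n) | (x >> (32 - n));
}

/* SHA-1 message schedule: expand the 16 input words to 80.  The full
 * 80-word array is used here for clarity; the assembly keeps only a
 * 16-word circular buffer and updates it in place, two words at a time. */
static void sha1_expand(uint32_t W[80])
{
    for (int i = 16; i < 80; i++)
        W[i] = rotl32(W[i - 3] ^ W[i - 8] ^ W[i - 14] ^ W[i - 16], 1);
}
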
+ $kimdfunc=1; # magic function code for kimd instruction $output=shift; open STDOUT,">$output"; -$t0="%r0"; -$t1="%r1"; -$ctx="%r2"; +$K_00_39="%r0"; $K=$K_00_39; +$K_40_79="%r1"; +$ctx="%r2"; $prefetch="%r2"; $inp="%r3"; $len="%r4"; @@ -31,119 +37,107 @@ $B="%r6"; $C="%r7"; $D="%r8"; $E="%r9"; @V=($A,$B,$C,$D,$E); -$K_00_19="%r10"; -$K_20_39="%r11"; -$K_40_59="%r12"; -$K_60_79="%r13"; -$Xi="%r14"; +$t0="%r10"; +$t1="%r11"; +@X=("%r12","%r13","%r14"); $sp="%r15"; $frame=160+16*4; -sub BODY_00_15 { -my ($i,$a,$b,$c,$d,$e)=@_; -my $xi=($i&1)?$Xi:$t1; - -$code.=<<___ if ($i<16 && !($i&1)); - lg $Xi,`$i*4`($inp) -___ -$code.=<<___; - alr $e,$K_00_19 ### $i - rll $t0,$a,5 - alr $e,$t0 - lr $t0,$d - xr $t0,$c - nr $t0,$b - xr $t0,$d - alr $e,$t0 - rll $b,$b,30 -___ -$code.=<<___ if ($i<16 && !($i&1)); - srlg $xi,$Xi,32 - stg $Xi,`160+$i*4`($sp) -___ -$code.=<<___; - alr $e,$xi -___ -} - sub Xupdate { my $i=shift; +$code.=<<___ if ($i==15); + lg $prefetch,160($sp) ### Xupdate(16) warm-up + lr $X[0],$X[2] +___ return if ($i&1); # Xupdate is vectorized and executed every 2nd cycle -$code.=<<___; - lg $Xi,`160+4*($i%16)`($sp) ### Xupdate($i) - xg $Xi,`160+4*(($i+2)%16)`($sp) - xg $Xi,`160+4*(($i+8)%16)`($sp) +$code.=<<___ if ($i<16); + lg $X[0],`$i*4`($inp) ### Xload($i) + rllg $X[1],$X[0],32 ___ -if ((($i+13)%16)==15) { -$code.=<<___; - llgf $t0,`160+4*15`($sp) - x $Xi,`160+0`($sp) - sllg $t0,$t0,32 - xgr $Xi,$t0 +$code.=<<___ if ($i>=16); + xgr $X[0],$prefetch ### Xupdate($i) + lg $prefetch,`160+4*(($i+2)%16)`($sp) + xg $X[0],`160+4*(($i+8)%16)`($sp) + xgr $X[0],$prefetch + rll $X[0],$X[0],1 + rllg $X[1],$X[0],32 + rll $X[1],$X[1],1 + rllg $X[0],$X[1],32 + lr $X[2],$X[1] # feedback ___ -} else { -$code.=<<___; - xg $Xi,`160+4*(($i+13)%16)`($sp) +$code.=<<___ if ($i<=70); + stg $X[0],`160+4*($i%16)`($sp) ___ +unshift(@X,pop(@X)); } + +sub BODY_00_19 { +my ($i,$a,$b,$c,$d,$e)=@_; +my $xi=$X[1]; + + &Xupdate($i); $code.=<<___; - rll $Xi,$Xi,1 - rllg $t1,$Xi,32 - rll $t1,$t1,1 - rllg $Xi,$t1,32 - stg $Xi,`160+4*($i%16)`($sp) + alr $e,$K ### $i + rll $t1,$a,5 + lr $t0,$d + xr $t0,$c + alr $e,$t1 + nr $t0,$b + alr $e,$xi + xr $t0,$d + rll $b,$b,30 + alr $e,$t0 ___ } -sub BODY_16_19 { - &Xupdate(@_[0]); - &BODY_00_15(@_); -} - sub BODY_20_39 { my ($i,$a,$b,$c,$d,$e)=@_; -my $xi=($i&1)?$Xi:$t1; -my $K_XX_XX=($i<40)?$K_20_39:$K_60_79; +my $xi=$X[1]; &Xupdate($i); $code.=<<___; - alr $e,$K_XX_XX ### $i - rll $t0,$a,5 - alr $e,$t0 + alr $e,$K ### $i + rll $t1,$a,5 lr $t0,$b + alr $e,$t1 xr $t0,$c + alr $e,$xi xr $t0,$d - alr $e,$t0 rll $b,$b,30 - alr $e,$xi + alr $e,$t0 ___ } sub BODY_40_59 { my ($i,$a,$b,$c,$d,$e)=@_; -my $xi=($i&1)?$Xi:$t1; +my $xi=$X[1]; &Xupdate($i); $code.=<<___; - alr $e,$K_40_59 ### $i - rll $t0,$a,5 - alr $e,$t0 + alr $e,$K ### $i + rll $t1,$a,5 lr $t0,$b + alr $e,$t1 or $t0,$c - nr $t0,$d - alr $e,$xi lr $t1,$b + nr $t0,$d nr $t1,$c + alr $e,$xi or $t0,$t1 - alr $e,$t0 rll $b,$b,30 + alr $e,$t0 ___ } $code.=<<___; .text +.align 64 +.type Ktable,\@object +Ktable: .long 0x5a827999,0x6ed9eba1,0x8f1bbcdc,0xca62c1d6 + .skip 48 #.long 0,0,0,0,0,0,0,0,0,0,0,0 +.size Ktable,.-Ktable .globl sha1_block_data_order .type sha1_block_data_order,\@function sha1_block_data_order: @@ -165,37 +159,43 @@ $code.=<<___ if ($kimdfunc); .Lsoftware: ___ $code.=<<___; + lghi %r1,-$frame + stg $ctx,16($sp) stmg %r6,%r15,48($sp) lgr %r0,$sp - aghi $sp,-$frame + la $sp,0(%r1,$sp) stg %r0,0($sp) - sllg $len,$len,6 - la $len,0($inp,$len) - + larl $t0,Ktable llgf $A,0($ctx) llgf $B,4($ctx) llgf $C,8($ctx) llgf 
$D,12($ctx) llgf $E,16($ctx) - llilh $K_00_19,0x5a82 - oill $K_00_19,0x7999 - llilh $K_20_39,0x6ed9 - oill $K_20_39,0xeba1 - llilh $K_40_59,0x8f1b - oill $K_40_59,0xbcdc - llilh $K_60_79,0xca62 - oill $K_60_79,0xc1d6 + lg $K_00_39,0($t0) + lg $K_40_79,8($t0) + .Lloop: + rllg $K_00_39,$K_00_39,32 +___ +for ($i=0;$i<20;$i++) { &BODY_00_19($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + rllg $K_00_39,$K_00_39,32 ___ -for ($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); } -for (;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); } for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; $K=$K_40_79; + rllg $K_40_79,$K_40_79,32 +___ for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); } +$code.=<<___; + rllg $K_40_79,$K_40_79,32 +___ for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); } $code.=<<___; + lg $ctx,`$frame+16`($sp) + la $inp,64($inp) al $A,0($ctx) al $B,4($ctx) al $C,8($ctx) @@ -206,9 +206,7 @@ $code.=<<___; st $C,8($ctx) st $D,12($ctx) st $E,16($ctx) - la $inp,64($inp) - clgr $inp,$len - jne .Lloop + brct $len,.Lloop lmg %r6,%r15,`$frame+48`($sp) br %r14 diff --git a/crypto/sha/asm/sha512-s390x.pl b/crypto/sha/asm/sha512-s390x.pl index d2aceec68c..e4f1812c68 100644 --- a/crypto/sha/asm/sha512-s390x.pl +++ b/crypto/sha/asm/sha512-s390x.pl @@ -20,9 +20,15 @@ # # sha512_block_data_order is ~70% faster than gcc 3.3 generated code. +# January 2009. +# +# Add support for hardware SHA512 and reschedule instructions to +# favour dual-issue z10 pipeline. Hardware SHA256/512 is ~4.7x faster +# than software. + $t0="%r0"; $t1="%r1"; -$ctx="%r2"; +$ctx="%r2"; $t2="%r2"; $inp="%r3"; $len="%r4"; # used as index in inner loop @@ -54,7 +60,7 @@ if ($output =~ /512/) { @sigma0=(56,63, 7); @sigma1=( 3,45, 6); $rounds=80; - $kimdfunc=0; # 0 means unknown/unsupported/unimplemented + $kimdfunc=3; # 0 means unknown/unsupported/unimplemented/disabled } else { $label="256"; $SZ=4; @@ -83,32 +89,32 @@ ___ $code.=<<___; $ROT $t0,$e,$Sigma1[0] $ROT $t1,$e,$Sigma1[1] + lgr $t2,$f xgr $t0,$t1 $ROT $t1,$t1,`$Sigma1[2]-$Sigma1[1]` - xgr $t0,$t1 # Sigma1(e) + xgr $t2,$g $ST $T1,`160+$SZ*($i%16)`($sp) + xgr $t0,$t1 # Sigma1(e) + la $T1,0($T1,$h) # T1+=h + ngr $t2,$e + lgr $t1,$a algr $T1,$t0 # T1+=Sigma1(e) - algr $T1,$h # T1+=h - $ADD $T1,`$i*$SZ`($len,$tbl) # T1+=K[i] - lgr $t0,$f - xgr $t0,$g - ngr $t0,$e - xgr $t0,$g # Ch(e,f,g) - algr $T1,$t0 # T1+=Ch(e,f,g) $ROT $h,$a,$Sigma0[0] + xgr $t2,$g # Ch(e,f,g) + $ADD $T1,`$i*$SZ`($len,$tbl) # T1+=K[i] $ROT $t0,$a,$Sigma0[1] + algr $T1,$t2 # T1+=Ch(e,f,g) + ogr $t1,$b xgr $h,$t0 + lgr $t2,$a + ngr $t1,$c $ROT $t0,$t0,`$Sigma0[2]-$Sigma0[1]` xgr $h,$t0 # h=Sigma0(a) - lgr $t0,$a - ogr $t0,$b - ngr $t0,$c - lgr $t1,$a - ngr $t1,$b - ogr $t0,$t1 # Maj(a,b,c) - algr $h,$t0 # h+=Maj(a,b,c) - algr $d,$T1 # d+=T1 + ngr $t2,$b algr $h,$T1 # h+=T1 + ogr $t2,$t1 # Maj(a,b,c) + la $d,0($d,$T1) # d+=T1 + algr $h,$t2 # h+=Maj(a,b,c) ___ } @@ -120,15 +126,15 @@ $code.=<<___; $LD $t1,`160+$SZ*(($i+14)%16)`($sp) $ROT $t0,$T1,$sigma0[0] $SHR $T1,$sigma0[2] + $ROT $t2,$t0,`$sigma0[1]-$sigma0[0]` xgr $T1,$t0 - $ROT $t0,$t0,`$sigma0[1]-$sigma0[0]` - xgr $T1,$t0 # sigma0(X[i+1]) $ROT $t0,$t1,$sigma1[0] - $ADD $T1,`160+$SZ*($i%16)`($sp) # +=X[i] + xgr $T1,$t2 # sigma0(X[i+1]) $SHR $t1,$sigma1[2] + $ADD $T1,`160+$SZ*($i%16)`($sp) # +=X[i] xgr $t1,$t0 - $ADD $T1,`160+$SZ*(($i+9)%16)`($sp) # +=X[i+9] $ROT $t0,$t0,`$sigma1[1]-$sigma1[0]` + $ADD $T1,`160+$SZ*(($i+9)%16)`($sp) # +=X[i+9] xgr $t1,$t0 # sigma1(X[i+14]) algr $T1,$t1 # 
+=sigma1(X[i+14]) ___ @@ -225,15 +231,14 @@ $code.=<<___ if ($kimdfunc); ___ $code.=<<___; sllg $len,$len,`log(16*$SZ)/log(2)` - la $len,0($inp,$len) - stmg $len,%r15,32($sp) + lghi %r1,-$frame + agr $len,$inp + stmg $ctx,%r15,16($sp) lgr %r0,$sp - aghi $sp,-$frame + la $sp,0(%r1,$sp) stg %r0,0($sp) - bras $tbl,.Lpic -.Lpic: aghi $tbl,$Table-.Lpic - + larl $tbl,$Table $LD $A,`0*$SZ`($ctx) $LD $B,`1*$SZ`($ctx) $LD $C,`2*$SZ`($ctx) @@ -255,6 +260,8 @@ $code.=<<___; clgr $len,$t0 jne .Lrounds_16_xx + lg $ctx,`$frame+16`($sp) + la $inp,`16*$SZ`($inp) $ADD $A,`0*$SZ`($ctx) $ADD $B,`1*$SZ`($ctx) $ADD $C,`2*$SZ`($ctx) @@ -271,7 +278,6 @@ $code.=<<___; $ST $F,`5*$SZ`($ctx) $ST $G,`6*$SZ`($ctx) $ST $H,`7*$SZ`($ctx) - la $inp,`16*$SZ`($inp) clg $inp,`$frame+32`($sp) jne .Lloop
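
[Editorial note: the .Lrounds_16_xx code above computes the standard SHA-2 message-schedule step, T1 = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2]), over a 16-entry circular buffer; the left-rotate amounts in @sigma0/@sigma1 are the rotate-left equivalents of the FIPS 180-2 right rotates. A C sketch of the 64-bit variant for comparison — the standard algorithm, not the generated assembly:]

#include <stdint.h>

static uint64_t rotr64(uint64_t x, int n)
{
    return (x >> n) | (x << (64 - n));
}

/* FIPS 180-2 small sigmas for SHA-512; rotl 63/56 and rotl 3/45 in the
 * Perl source correspond to rotr 1/8 and rotr 61/19 here. */
static uint64_t sigma0(uint64_t x) { return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7); }
static uint64_t sigma1(uint64_t x) { return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6); }

/* Message-schedule update over a 16-word circular buffer, as kept on the
 * stack by the assembly: for i >= 16, W[i % 16] is replaced by W[i]. */
static uint64_t sha512_next_w(uint64_t W[16], unsigned int i)
{
    uint64_t w = sigma1(W[(i + 14) % 16]) + W[(i + 9) % 16]
               + sigma0(W[(i + 1) % 16]) + W[i % 16];
    W[i % 16] = w;
    return w;
}
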