sha256-armv4.pl: 4% performance improvement.
diff --git a/crypto/sha/asm/sha256-armv4.pl b/crypto/sha/asm/sha256-armv4.pl
index 853ca31e48aa9ad188e6e4b8ab01bd5fba67749b..2a0a98947e09455208867e3aa4bcd2e0935d66b4 100644
--- a/crypto/sha/asm/sha256-armv4.pl
+++ b/crypto/sha/asm/sha256-armv4.pl
 
 # Performance is ~2x better than gcc 3.4 generated code and in "abso-
 # lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
-# byte.
+# byte [on single-issue Xscale PXA250 core].
 
-$output=shift;
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~16.4 cycles per processed byte.
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
 open STDOUT,">$output";
 
 $ctx="r0";     $t0="r0";
-$inp="r1";
+$inp="r1";     $t4="r1";
 $len="r2";     $t1="r2";
-$T1="r3";
+$T1="r3";      $t3="r3";
 $A="r4";
 $B="r5";
 $C="r6";
@@ -41,63 +51,98 @@ sub BODY_00_15 {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___ if ($i<16);
-       ldrb    $T1,[$inp,#3]                   @ $i
+#if __ARM_ARCH__>=7
+       @ ldr   $t1,[$inp],#4                   @ $i
+# if $i==15
+       str     $inp,[sp,#17*4]                 @ make room for $t4
+# endif
+       mov     $t0,$e,ror#$Sigma1[0]
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
+       rev     $t1,$t1
+       eor     $t0,$t0,$e,ror#$Sigma1[1]
+#else
+       @ ldrb  $t1,[$inp,#3]                   @ $i
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
        ldrb    $t2,[$inp,#2]
-       ldrb    $t1,[$inp,#1]
-       ldrb    $t0,[$inp],#4
-       orr     $T1,$T1,$t2,lsl#8
-       orr     $T1,$T1,$t1,lsl#16
-       orr     $T1,$T1,$t0,lsl#24
-       `"str   $inp,[sp,#17*4]"        if ($i==15)`
+       ldrb    $t0,[$inp,#1]
+       orr     $t1,$t1,$t2,lsl#8
+       ldrb    $t2,[$inp],#4
+       orr     $t1,$t1,$t0,lsl#16
+# if $i==15
+       str     $inp,[sp,#17*4]                 @ make room for $t4
+# endif
+       mov     $t0,$e,ror#$Sigma1[0]
+       orr     $t1,$t1,$t2,lsl#24
+       eor     $t0,$t0,$e,ror#$Sigma1[1]
+#endif
 ___
 $code.=<<___;
        ldr     $t2,[$Ktbl],#4                  @ *K256++
-       str     $T1,[sp,#`$i%16`*4]
-       mov     $t0,$e,ror#$Sigma1[0]
-       eor     $t0,$t0,$e,ror#$Sigma1[1]
-       eor     $t0,$t0,$e,ror#$Sigma1[2]       @ Sigma1(e)
-       add     $T1,$T1,$t0
+       add     $h,$h,$t1                       @ h+=X[i]
+       str     $t1,[sp,#`$i%16`*4]
        eor     $t1,$f,$g
+       eor     $t0,$t0,$e,ror#$Sigma1[2]       @ Sigma1(e)
        and     $t1,$t1,$e
+       add     $h,$h,$t0                       @ h+=Sigma1(e)
        eor     $t1,$t1,$g                      @ Ch(e,f,g)
-       add     $T1,$T1,$t1
-       add     $T1,$T1,$h
-       add     $T1,$T1,$t2
-       mov     $h,$a,ror#$Sigma0[0]
-       eor     $h,$h,$a,ror#$Sigma0[1]
-       eor     $h,$h,$a,ror#$Sigma0[2]         @ Sigma0(a)
-       orr     $t0,$a,$b
-       and     $t0,$t0,$c
-       and     $t1,$a,$b
-       orr     $t0,$t0,$t1                     @ Maj(a,b,c)
-       add     $h,$h,$t0
-       add     $d,$d,$T1
-       add     $h,$h,$T1
+       add     $h,$h,$t2                       @ h+=K256[i]
+       mov     $t0,$a,ror#$Sigma0[0]
+       add     $h,$h,$t1                       @ h+=Ch(e,f,g)
+#if $i==31
+       and     $t2,$t2,#0xff
+       cmp     $t2,#0xf2                       @ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+       ldr     $t1,[$inp],#4                   @ prefetch
+# else
+       ldrb    $t1,[$inp,#3]
+# endif
+       eor     $t2,$a,$b                       @ a^b, b^c in next round
+#else
+       ldr     $t1,[sp,#`($i+2)%16`*4]         @ from future BODY_16_xx
+       eor     $t2,$a,$b                       @ a^b, b^c in next round
+       ldr     $t4,[sp,#`($i+15)%16`*4]        @ from future BODY_16_xx
+#endif
+       eor     $t0,$t0,$a,ror#$Sigma0[1]
+       and     $t3,$t3,$t2                     @ (b^c)&=(a^b)
+       add     $d,$d,$h                        @ d+=h
+       eor     $t0,$t0,$a,ror#$Sigma0[2]       @ Sigma0(a)
+       eor     $t3,$t3,$b                      @ Maj(a,b,c)
+       add     $h,$h,$t0                       @ h+=Sigma0(a)
+       @ add   $h,$h,$t3                       @ h+=Maj(a,b,c)
 ___
+       ($t2,$t3)=($t3,$t2);
 }
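
For readers tracing the scheduling, here is an illustrative plain-Perl model of the single compression round that BODY_00_15 emits. It is a sketch, not part of the commit: the ror32 helper and sha256_round name are invented for this note, and the rotation counts mirror the script's @Sigma0=(2,13,22) and @Sigma1=(6,11,25). Note that the assembly above defers the final h+=Maj(a,b,c) into the following round (the "from the past" comments and the ($t2,$t3) swap), whereas the sketch applies it immediately.

use strict; use warnings;

sub ror32 { my ($x,$n)=@_; (($x>>$n) | ($x<<(32-$n))) & 0xffffffff }

# One SHA-256 round: takes schedule word $X, round constant $K and the eight
# working variables, returns them rotated by one position.
sub sha256_round {
    my ($X,$K,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
    my $Sigma1 = ror32($e,6) ^ ror32($e,11) ^ ror32($e,25);
    my $Sigma0 = ror32($a,2) ^ ror32($a,13) ^ ror32($a,22);
    my $Ch  = (($f ^ $g) & $e) ^ $g;           # same eor/and/eor form as above
    my $Maj = (($b ^ $c) & ($a ^ $b)) ^ $b;    # same eor/and/eor form as above
    my $T1  = ($h + $Sigma1 + $Ch + $K + $X) & 0xffffffff;
    return ((($T1 + $Sigma0 + $Maj) & 0xffffffff),   # new a
            $a, $b, $c,
            (($d + $T1) & 0xffffffff),               # new e
            $e, $f, $g);
}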
 
 sub BODY_16_XX {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___;
-       ldr     $t1,[sp,#`($i+1)%16`*4] @ $i
-       ldr     $t2,[sp,#`($i+14)%16`*4]
-       ldr     $T1,[sp,#`($i+0)%16`*4]
-       ldr     $inp,[sp,#`($i+9)%16`*4]
+       @ ldr   $t1,[sp,#`($i+1)%16`*4]         @ $i
+       @ ldr   $t4,[sp,#`($i+14)%16`*4]
        mov     $t0,$t1,ror#$sigma0[0]
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
+       mov     $t2,$t4,ror#$sigma1[0]
        eor     $t0,$t0,$t1,ror#$sigma0[1]
+       eor     $t2,$t2,$t4,ror#$sigma1[1]
        eor     $t0,$t0,$t1,lsr#$sigma0[2]      @ sigma0(X[i+1])
-       mov     $t1,$t2,ror#$sigma1[0]
-       eor     $t1,$t1,$t2,ror#$sigma1[1]
-       eor     $t1,$t1,$t2,lsr#$sigma1[2]      @ sigma1(X[i+14])
-       add     $T1,$T1,$t0
-       add     $T1,$T1,$t1
-       add     $T1,$T1,$inp
+       ldr     $t1,[sp,#`($i+0)%16`*4]
+       eor     $t2,$t2,$t4,lsr#$sigma1[2]      @ sigma1(X[i+14])
+       ldr     $t4,[sp,#`($i+9)%16`*4]
+
+       add     $t2,$t2,$t0
+       mov     $t0,$e,ror#$Sigma1[0]           @ from BODY_00_15
+       add     $t1,$t1,$t2
+       eor     $t0,$t0,$e,ror#$Sigma1[1]       @ from BODY_00_15
+       add     $t1,$t1,$t4                     @ X[i]
 ___
        &BODY_00_15(@_);
 }
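
Likewise, a minimal sketch (again illustrative, not from the commit, and reusing the ror32 helper from the previous note) of the message-schedule expansion that BODY_16_XX folds into the round: the word at index $i%16 of the 16-word circular buffer kept on the stack is updated in place, with rotation counts matching the script's @sigma0=(7,18,3) and @sigma1=(17,19,10).

sub sigma0 { my $x=shift; ror32($x,7)  ^ ror32($x,18) ^ ($x>>3)  }
sub sigma1 { my $x=shift; ror32($x,17) ^ ror32($x,19) ^ ($x>>10) }

# For round $i>=16, indices ($i+1)%16, ($i+14)%16 and ($i+9)%16 of the
# circular buffer hold X[i-15], X[i-2] and X[i-7] respectively.
sub schedule_word {
    my ($i, $X) = @_;                  # $X is a ref to the 16-word buffer
    $X->[$i%16] = ($X->[$i%16]
                   + sigma0($X->[($i+1)%16])
                   + sigma1($X->[($i+14)%16])
                   + $X->[($i+9)%16]) & 0xffffffff;
    return $X->[$i%16];
}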
 
 $code=<<___;
+#include "arm_arch.h"
+
 .text
 .code  32
 
@@ -127,51 +172,62 @@ K256:
 sha256_block_data_order:
        sub     r3,pc,#8                @ sha256_block_data_order
        add     $len,$inp,$len,lsl#6    @ len to point at the end of inp
-       stmdb   sp!,{$ctx,$inp,$len,r4-r12,lr}
+       stmdb   sp!,{$ctx,$inp,$len,r4-r11,lr}
        ldmia   $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
        sub     $Ktbl,r3,#256           @ K256
        sub     sp,sp,#16*4             @ alloca(X[16])
 .Loop:
+# if __ARM_ARCH__>=7
+       ldr     $t1,[$inp],#4
+# else
+       ldrb    $t1,[$inp,#3]
+# endif
+       eor     $t3,$B,$C               @ magic
+       eor     $t2,$t2,$t2
 ___
 for($i=0;$i<16;$i++)   { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
 $code.=".Lrounds_16_xx:\n";
 for (;$i<32;$i++)      { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
-       and     $t2,$t2,#0xff
-       cmp     $t2,#0xf2
+       ldreq   $t3,[sp,#16*4]          @ pull ctx
        bne     .Lrounds_16_xx
 
-       ldr     $T1,[sp,#16*4]          @ pull ctx
-       ldr     $t0,[$T1,#0]
-       ldr     $t1,[$T1,#4]
-       ldr     $t2,[$T1,#8]
+       add     $A,$A,$t2               @ h+=Maj(a,b,c) from the past
+       ldr     $t0,[$t3,#0]
+       ldr     $t1,[$t3,#4]
+       ldr     $t2,[$t3,#8]
        add     $A,$A,$t0
-       ldr     $t0,[$T1,#12]
+       ldr     $t0,[$t3,#12]
        add     $B,$B,$t1
-       ldr     $t1,[$T1,#16]
+       ldr     $t1,[$t3,#16]
        add     $C,$C,$t2
-       ldr     $t2,[$T1,#20]
+       ldr     $t2,[$t3,#20]
        add     $D,$D,$t0
-       ldr     $t0,[$T1,#24]
+       ldr     $t0,[$t3,#24]
        add     $E,$E,$t1
-       ldr     $t1,[$T1,#28]
+       ldr     $t1,[$t3,#28]
        add     $F,$F,$t2
        ldr     $inp,[sp,#17*4]         @ pull inp
        ldr     $t2,[sp,#18*4]          @ pull inp+len
        add     $G,$G,$t0
        add     $H,$H,$t1
-       stmia   $T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+       stmia   $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
        cmp     $inp,$t2
        sub     $Ktbl,$Ktbl,#256        @ rewind Ktbl
        bne     .Loop
 
        add     sp,sp,#`16+3`*4 @ destroy frame
-       ldmia   sp!,{r4-r12,lr}
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r11,pc}
+#else
+       ldmia   sp!,{r4-r11,lr}
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
+#endif
 .size   sha256_block_data_order,.-sha256_block_data_order
 .asciz  "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
 ___
 
 $code =~ s/\`([^\`]*)\`/eval $1/gem;