ARM assembly pack: SHA update from master.
author     Andy Polyakov <appro@openssl.org>
           Mon, 9 Dec 2013 22:53:42 +0000 (23:53 +0100)
committer  Andy Polyakov <appro@openssl.org>
           Mon, 9 Dec 2013 22:53:42 +0000 (23:53 +0100)
crypto/sha/asm/sha256-armv4.pl
crypto/sha/asm/sha512-armv4.pl

index 9c84e8d93c301a35b544b112cb87a90b7ddbd573..2a0a98947e09455208867e3aa4bcd2e0935d66b4 100644
 # February 2011.
 #
 # Profiler-assisted and platform-specific optimization resulted in 16%
-# improvement on Cortex A8 core and ~17 cycles per processed byte.
+# improvement on Cortex A8 core and ~16.4 cycles per processed byte.
 
 while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
 open STDOUT,">$output";
 
 $ctx="r0";     $t0="r0";
-$inp="r1";     $t3="r1";
+$inp="r1";     $t4="r1";
 $len="r2";     $t1="r2";
-$T1="r3";
+$T1="r3";      $t3="r3";
 $A="r4";
 $B="r5";
 $C="r6";
@@ -52,71 +52,90 @@ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___ if ($i<16);
 #if __ARM_ARCH__>=7
-       ldr     $T1,[$inp],#4
+       @ ldr   $t1,[$inp],#4                   @ $i
+# if $i==15
+       str     $inp,[sp,#17*4]                 @ make room for $t4
+# endif
+       mov     $t0,$e,ror#$Sigma1[0]
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
+       rev     $t1,$t1
+       eor     $t0,$t0,$e,ror#$Sigma1[1]
 #else
-       ldrb    $T1,[$inp,#3]                   @ $i
+       @ ldrb  $t1,[$inp,#3]                   @ $i
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
        ldrb    $t2,[$inp,#2]
-       ldrb    $t1,[$inp,#1]
-       ldrb    $t0,[$inp],#4
-       orr     $T1,$T1,$t2,lsl#8
-       orr     $T1,$T1,$t1,lsl#16
-       orr     $T1,$T1,$t0,lsl#24
+       ldrb    $t0,[$inp,#1]
+       orr     $t1,$t1,$t2,lsl#8
+       ldrb    $t2,[$inp],#4
+       orr     $t1,$t1,$t0,lsl#16
+# if $i==15
+       str     $inp,[sp,#17*4]                 @ make room for $t4
+# endif
+       mov     $t0,$e,ror#$Sigma1[0]
+       orr     $t1,$t1,$t2,lsl#24
+       eor     $t0,$t0,$e,ror#$Sigma1[1]
 #endif
 ___
 $code.=<<___;
-       mov     $t0,$e,ror#$Sigma1[0]
        ldr     $t2,[$Ktbl],#4                  @ *K256++
-       eor     $t0,$t0,$e,ror#$Sigma1[1]
+       add     $h,$h,$t1                       @ h+=X[i]
+       str     $t1,[sp,#`$i%16`*4]
        eor     $t1,$f,$g
-#if $i>=16
-       add     $T1,$T1,$t3                     @ from BODY_16_xx
-#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
-       rev     $T1,$T1
-#endif
-#if $i==15
-       str     $inp,[sp,#17*4]                 @ leave room for $t3
-#endif
        eor     $t0,$t0,$e,ror#$Sigma1[2]       @ Sigma1(e)
        and     $t1,$t1,$e
-       str     $T1,[sp,#`$i%16`*4]
-       add     $T1,$T1,$t0
+       add     $h,$h,$t0                       @ h+=Sigma1(e)
        eor     $t1,$t1,$g                      @ Ch(e,f,g)
-       add     $T1,$T1,$h
-       mov     $h,$a,ror#$Sigma0[0]
-       add     $T1,$T1,$t1
-       eor     $h,$h,$a,ror#$Sigma0[1]
-       add     $T1,$T1,$t2
-       eor     $h,$h,$a,ror#$Sigma0[2]         @ Sigma0(a)
-#if $i>=15
-       ldr     $t3,[sp,#`($i+2)%16`*4]         @ from BODY_16_xx
+       add     $h,$h,$t2                       @ h+=K256[i]
+       mov     $t0,$a,ror#$Sigma0[0]
+       add     $h,$h,$t1                       @ h+=Ch(e,f,g)
+#if $i==31
+       and     $t2,$t2,#0xff
+       cmp     $t2,#0xf2                       @ done?
 #endif
-       orr     $t0,$a,$b
-       and     $t1,$a,$b
-       and     $t0,$t0,$c
-       add     $h,$h,$T1
-       orr     $t0,$t0,$t1                     @ Maj(a,b,c)
-       add     $d,$d,$T1
-       add     $h,$h,$t0
+#if $i<15
+# if __ARM_ARCH__>=7
+       ldr     $t1,[$inp],#4                   @ prefetch
+# else
+       ldrb    $t1,[$inp,#3]
+# endif
+       eor     $t2,$a,$b                       @ a^b, b^c in next round
+#else
+       ldr     $t1,[sp,#`($i+2)%16`*4]         @ from future BODY_16_xx
+       eor     $t2,$a,$b                       @ a^b, b^c in next round
+       ldr     $t4,[sp,#`($i+15)%16`*4]        @ from future BODY_16_xx
+#endif
+       eor     $t0,$t0,$a,ror#$Sigma0[1]
+       and     $t3,$t3,$t2                     @ (b^c)&=(a^b)
+       add     $d,$d,$h                        @ d+=h
+       eor     $t0,$t0,$a,ror#$Sigma0[2]       @ Sigma0(a)
+       eor     $t3,$t3,$b                      @ Maj(a,b,c)
+       add     $h,$h,$t0                       @ h+=Sigma0(a)
+       @ add   $h,$h,$t3                       @ h+=Maj(a,b,c)
 ___
+       ($t2,$t3)=($t3,$t2);
 }
 
 sub BODY_16_XX {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___;
-       @ ldr   $t3,[sp,#`($i+1)%16`*4]         @ $i
-       ldr     $t2,[sp,#`($i+14)%16`*4]
-       mov     $t0,$t3,ror#$sigma0[0]
-       ldr     $T1,[sp,#`($i+0)%16`*4]
-       eor     $t0,$t0,$t3,ror#$sigma0[1]
-       ldr     $t1,[sp,#`($i+9)%16`*4]
-       eor     $t0,$t0,$t3,lsr#$sigma0[2]      @ sigma0(X[i+1])
-       mov     $t3,$t2,ror#$sigma1[0]
-       add     $T1,$T1,$t0
-       eor     $t3,$t3,$t2,ror#$sigma1[1]
-       add     $T1,$T1,$t1
-       eor     $t3,$t3,$t2,lsr#$sigma1[2]      @ sigma1(X[i+14])
-       @ add   $T1,$T1,$t3
+       @ ldr   $t1,[sp,#`($i+1)%16`*4]         @ $i
+       @ ldr   $t4,[sp,#`($i+14)%16`*4]
+       mov     $t0,$t1,ror#$sigma0[0]
+       add     $a,$a,$t2                       @ h+=Maj(a,b,c) from the past
+       mov     $t2,$t4,ror#$sigma1[0]
+       eor     $t0,$t0,$t1,ror#$sigma0[1]
+       eor     $t2,$t2,$t4,ror#$sigma1[1]
+       eor     $t0,$t0,$t1,lsr#$sigma0[2]      @ sigma0(X[i+1])
+       ldr     $t1,[sp,#`($i+0)%16`*4]
+       eor     $t2,$t2,$t4,lsr#$sigma1[2]      @ sigma1(X[i+14])
+       ldr     $t4,[sp,#`($i+9)%16`*4]
+
+       add     $t2,$t2,$t0
+       mov     $t0,$e,ror#$Sigma1[0]           @ from BODY_00_15
+       add     $t1,$t1,$t2
+       eor     $t0,$t0,$e,ror#$Sigma1[1]       @ from BODY_00_15
+       add     $t1,$t1,$t4                     @ X[i]
 ___
        &BODY_00_15(@_);
 }
@@ -158,35 +177,41 @@ sha256_block_data_order:
        sub     $Ktbl,r3,#256           @ K256
        sub     sp,sp,#16*4             @ alloca(X[16])
 .Loop:
+# if __ARM_ARCH__>=7
+       ldr     $t1,[$inp],#4
+# else
+       ldrb    $t1,[$inp,#3]
+# endif
+       eor     $t3,$B,$C               @ magic
+       eor     $t2,$t2,$t2
 ___
 for($i=0;$i<16;$i++)   { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
 $code.=".Lrounds_16_xx:\n";
 for (;$i<32;$i++)      { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
-       and     $t2,$t2,#0xff
-       cmp     $t2,#0xf2
+       ldreq   $t3,[sp,#16*4]          @ pull ctx
        bne     .Lrounds_16_xx
 
-       ldr     $T1,[sp,#16*4]          @ pull ctx
-       ldr     $t0,[$T1,#0]
-       ldr     $t1,[$T1,#4]
-       ldr     $t2,[$T1,#8]
+       add     $A,$A,$t2               @ h+=Maj(a,b,c) from the past
+       ldr     $t0,[$t3,#0]
+       ldr     $t1,[$t3,#4]
+       ldr     $t2,[$t3,#8]
        add     $A,$A,$t0
-       ldr     $t0,[$T1,#12]
+       ldr     $t0,[$t3,#12]
        add     $B,$B,$t1
-       ldr     $t1,[$T1,#16]
+       ldr     $t1,[$t3,#16]
        add     $C,$C,$t2
-       ldr     $t2,[$T1,#20]
+       ldr     $t2,[$t3,#20]
        add     $D,$D,$t0
-       ldr     $t0,[$T1,#24]
+       ldr     $t0,[$t3,#24]
        add     $E,$E,$t1
-       ldr     $t1,[$T1,#28]
+       ldr     $t1,[$t3,#28]
        add     $F,$F,$t2
        ldr     $inp,[sp,#17*4]         @ pull inp
        ldr     $t2,[sp,#18*4]          @ pull inp+len
        add     $G,$G,$t0
        add     $H,$H,$t1
-       stmia   $T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+       stmia   $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
        cmp     $inp,$t2
        sub     $Ktbl,$Ktbl,#256        @ rewind Ktbl
        bne     .Loop
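
The rewritten integer rounds above lean on an identity the in-line comments only name: Maj(a,b,c) is computed as ((a^b)&(b^c))^b, so each round's a^b (the "a^b, b^c in next round" eor) doubles as the next round's b^c, and the finished Maj term is folded into h one round late ("h+=Maj(a,b,c) from the past"), with `eor $t3,$B,$C` seeding the chain in .Loop and the extra `add $A,$A,$t2` after .Lrounds_16_xx settling the last deferred term. A minimal Perl sketch of the identity itself, for illustration only (the variable names are mine, not the script's):

#!/usr/bin/env perl
# Verify that the round code's ((a^b) & (b^c)) ^ b matches the
# textbook Maj(a,b,c) = (a&b) ^ (a&c) ^ (b&c) on random 32-bit words.
use strict;
use warnings;

for (1 .. 1000) {
    my ($a, $b, $c) = map { int(rand(0xffffffff)) } 1 .. 3;
    my $maj  = ($a & $b) ^ ($a & $c) ^ ($b & $c);
    my $fast = (($a ^ $b) & ($b ^ $c)) ^ $b;
    die "mismatch\n" unless $maj == $fast;
}
print "Maj identity holds\n";

The payoff is that Maj shrinks to an eor/and/eor triplet per round in place of the old orr/and/and/orr sequence, and its addition into h hides in the next round's schedule.
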
index 7faf37b1479029e152ec99200b4c55b551850131..d3065794b343890487187ab32e71234712ce155c 100644
@@ -1,7 +1,7 @@
 #!/usr/bin/env perl
 
 # ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
 # project. The module is, however, dual licensed under OpenSSL and
 # CRYPTOGAMS licenses depending on where you obtain it. For further
 # details see http://www.openssl.org/~appro/cryptogams/.
 # March 2011.
 #
 # Add NEON implementation. On Cortex A8 it was measured to process
-# one byte in 25.5 cycles or 47% faster than integer-only code.
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is a disappointing result.
+# Technical writers asserted that the 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed, and for NEON-only sequences IPC(*) was found to
+# be limited to 1 :-( 0.33 and 0.66 were measured for sequences with
+# ILPs(*) of 1 and 2 respectively. This in turn means that you can
+# find yourself striving, as I did here, just to achieve an IPC
+# on par with the one delivered by Cortex A8 [for reference, it's
+# 0.5 for an ILP of 1, and 1 for higher ILPs].
+#
+# (*) ILP, instruction-level parallelism, is how many instructions
+#     *can* execute at the same time; IPC, instructions per cycle,
+#     is how many instructions actually execute.
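
One way to read those figures, offered purely as a back-of-the-envelope model and not something the comment itself states: if the S4 issues at most one NEON instruction per cycle and each result carries an effective latency of about three cycles, a single dependency chain (ILP of 1) sustains roughly 1/3 ≈ 0.33 IPC, two interleaved chains roughly 2/3 ≈ 0.66, and only three or more chains would reach the single-issue ceiling of 1; the Cortex A8 figures of 0.5 and 1 fit the same model with a two-cycle effective latency. A throwaway Perl sketch of that model (the latency values are my assumption):

# Toy issue model, an assumption for illustration only: single NEON issue
# per cycle and an effective result latency of $lat cycles give
# IPC = min($ilp / $lat, 1) for $ilp independent dependency chains.
sub ipc { my ($ilp, $lat) = @_; my $v = $ilp / $lat; $v < 1 ? $v : 1 }
printf "S4 model (latency 3): ILP 1 -> %.2f, ILP 2 -> %.2f\n", ipc(1, 3), ipc(2, 3);
printf "A8 model (latency 2): ILP 1 -> %.2f, ILP 2 -> %.2f\n", ipc(1, 2), ipc(2, 2);
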
 
 # Byte order [in]dependence. =========================================
 #
@@ -457,40 +474,40 @@ $code.=<<___ if ($i<16 || $i&1);
        vld1.64         {@X[$i%16]},[$inp]!     @ handles unaligned
 #endif
        vshr.u64        $t1,$e,#@Sigma1[1]
+#if $i>0
+        vadd.i64       $a,$Maj                 @ h+=Maj from the past
+#endif
        vshr.u64        $t2,$e,#@Sigma1[2]
 ___
 $code.=<<___;
        vld1.64         {$K},[$Ktbl,:64]!       @ K[i++]
        vsli.64         $t0,$e,#`64-@Sigma1[0]`
        vsli.64         $t1,$e,#`64-@Sigma1[1]`
+       vmov            $Ch,$e
        vsli.64         $t2,$e,#`64-@Sigma1[2]`
 #if $i<16 && defined(__ARMEL__)
        vrev64.8        @X[$i],@X[$i]
 #endif
-       vadd.i64        $T1,$K,$h
-       veor            $Ch,$f,$g
-       veor            $t0,$t1
-       vand            $Ch,$e
-       veor            $t0,$t2                 @ Sigma1(e)
-       veor            $Ch,$g                  @ Ch(e,f,g)
-       vadd.i64        $T1,$t0
+       veor            $t1,$t0
+       vbsl            $Ch,$f,$g               @ Ch(e,f,g)
        vshr.u64        $t0,$a,#@Sigma0[0]
-       vadd.i64        $T1,$Ch
+       veor            $t2,$t1                 @ Sigma1(e)
+       vadd.i64        $T1,$Ch,$h
        vshr.u64        $t1,$a,#@Sigma0[1]
-       vshr.u64        $t2,$a,#@Sigma0[2]
        vsli.64         $t0,$a,#`64-@Sigma0[0]`
+       vadd.i64        $T1,$t2
+       vshr.u64        $t2,$a,#@Sigma0[2]
+       vadd.i64        $K,@X[$i%16]
        vsli.64         $t1,$a,#`64-@Sigma0[1]`
+       veor            $Maj,$a,$b
        vsli.64         $t2,$a,#`64-@Sigma0[2]`
-       vadd.i64        $T1,@X[$i%16]
-       vorr            $Maj,$a,$c
-       vand            $Ch,$a,$c
        veor            $h,$t0,$t1
-       vand            $Maj,$b
+       vadd.i64        $T1,$K
+       vbsl            $Maj,$c,$b              @ Maj(a,b,c)
        veor            $h,$t2                  @ Sigma0(a)
-       vorr            $Maj,$Ch                @ Maj(a,b,c)
-       vadd.i64        $h,$T1
        vadd.i64        $d,$T1
-       vadd.i64        $h,$Maj
+       vadd.i64        $Maj,$T1
+       @ vadd.i64      $h,$Maj
 ___
 }
 
@@ -508,6 +525,7 @@ $i /= 2;
 $code.=<<___;
        vshr.u64        $t0,@X[($i+7)%8],#@sigma1[0]
        vshr.u64        $t1,@X[($i+7)%8],#@sigma1[1]
+        vadd.i64       @_[0],d30                       @ h+=Maj from the past
        vshr.u64        $s1,@X[($i+7)%8],#@sigma1[2]
        vsli.64         $t0,@X[($i+7)%8],#`64-@sigma1[0]`
        vext.8          $s0,@X[$i%8],@X[($i+1)%8],#8    @ X[i+1]
@@ -554,6 +572,7 @@ for(;$i<32;$i++)    { &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
        bne             .L16_79_neon
 
+        vadd.i64       $A,d30          @ h+=Maj from the past
        vldmia          $ctx,{d24-d31}  @ load context to temp
        vadd.i64        q8,q12          @ vectorized accumulate
        vadd.i64        q9,q13
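
The NEON path above makes the same deferral of the Maj addition (the "h+=Maj from the past" vadd.i64 lines) and additionally collapses Ch and Maj into single vbsl bit-selects: with e preloaded as the select mask, vbsl picks f where e has a 1 bit and g where it has a 0 bit, which is exactly Ch(e,f,g); with a^b as the mask it picks c where a and b disagree and b where they agree, which is exactly Maj(a,b,c). A short Perl check of both identities, for illustration only (vbsl() below is a scalar stand-in for the NEON instruction, not anything in the script; 32-bit words suffice because the identities are bitwise, sha512 merely applies them to 64-bit lanes):

#!/usr/bin/env perl
# Scalar model of NEON vbsl: result = (mask & x) | (~mask & y).
use strict;
use warnings;

sub vbsl { my ($mask, $x, $y) = @_; ($mask & $x) | (~$mask & $y) }

for (1 .. 1000) {
    my ($a, $b, $c, $e, $f, $g) = map { int(rand(0xffffffff)) } 1 .. 6;
    my $ch  = ($e & $f) ^ (~$e & $g);               # textbook Ch(e,f,g)
    my $maj = ($a & $b) ^ ($a & $c) ^ ($b & $c);    # textbook Maj(a,b,c)
    die "Ch mismatch\n"  unless vbsl($e,      $f, $g) == $ch;
    die "Maj mismatch\n" unless vbsl($a ^ $b, $c, $b) == $maj;
}
print "vbsl forms of Ch and Maj check out\n";
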