SHA1 assembler show-off: minor performance updates and new modules for
[openssl.git] / crypto / sha / asm / sha1-ia64.pl
index a5d81979451ac7ed462446f0e48baab3a6713b1f..db28f0805a11c568e468d2cb7d8a46280c70237c 100644
@@ -15,7 +15,7 @@
 # is >50% better than HP C and >2x better than gcc.
 
 $code=<<___;
-.ident  \"sha1-ia64.s, version 1.1\"
+.ident  \"sha1-ia64.s, version 1.3\"
 .ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
 .explicit
 
@@ -26,14 +26,10 @@ if ($^O eq "hpux") {
     $ADDP="addp4";
     for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
 } else { $ADDP="add"; }
-for (@ARGV) {  $big_endian=1 if (/\-DB_ENDIAN/);
-               $big_endian=0 if (/\-DL_ENDIAN/);   }
-if (!defined($big_endian))
-           {   $big_endian=(unpack('L',pack('N',1))==1);   }
 
 #$human=1;
 if ($human) {  # useful for visual code auditing...
-       ($A,$B,$C,$D,$E,$T)   = ("A","B","C","D","E","T");
+       ($A,$B,$C,$D,$E)   = ("A","B","C","D","E");
        ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
            (   "K_00_19","K_20_39","K_40_59","K_60_79" );
@@ -41,80 +37,75 @@ if ($human) {       # useful for visual code auditing...
                "X8", "X9","X10","X11","X12","X13","X14","X15"  );
 }
 else {
-       ($A,$B,$C,$D,$E,$T)   = ("loc0","loc1","loc2","loc3","loc4","loc5");
-       ($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
+       ($A,$B,$C,$D,$E)   =    ("loc0","loc1","loc2","loc3","loc4");
+       ($h0,$h1,$h2,$h3,$h4) = ("loc5","loc6","loc7","loc8","loc9");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
-           (   "r14", "r15", "loc11", "loc12"  );
+           (   "r14", "r15", "loc10", "loc11"  );
        @X= (   "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
                "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"  );
 }
 
 sub BODY_00_15 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f,$unaligned)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=@X[$j%16];
 
-if ($unaligned) {
-       $code.=<<___;
-{ .mmi;        ld1     tmp0=[inp],2                // MSB
-       ld1     tmp1=[tmp3],2           };;
-{ .mmi;        ld1     tmp2=[inp],2
-       ld1     $X[$i&0xf]=[tmp3],2         // LSB
-       dep     tmp1=tmp0,tmp1,8,8      };;
-{ .mii;        cmp.ne  p16,p0=r0,r0                // no misaligned prefetch
-       dep     $X[$i&0xf]=tmp2,$X[$i&0xf],8,8;;
-       dep     $X[$i&0xf]=tmp1,$X[$i&0xf],16,16        };;
-{ .mmi;        nop.m   0
-___
-       }
-elsif ($i<15) {
-       $code.=<<___;
-{ .mmi;        ld4     $X[($i+1)&0xf]=[inp],4  // prefetch
+$code.=<<___ if ($i==0);
+{ .mmi;        ld1     $X[$i]=[inp],2              // MSB
+       ld1     tmp2=[tmp3],2           };;
+{ .mmi;        ld1     tmp0=[inp],2
+       ld1     tmp4=[tmp3],2               // LSB
+       dep     $X[$i]=$X[$i],tmp2,8,8  };;
 ___
-       }
-else   {
-       $code.=<<___;
-{ .mmi;        nop.m   0
-___
-       }
 if ($i<15) {
        $code.=<<___;
-       and     tmp0=$c,$b
-       dep.z   tmp5=$a,5,27            }   // a<<5
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp4=$e,$K_00_19        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
+{ .mmi;        ld1     $Xn=[inp],2                 // forward Xload
+       nop.m   0x0
+       dep     tmp1=tmp0,tmp4,8,8      };;
+{ .mmi;        ld1     tmp2=[tmp3],2               // forward Xload
+       and     tmp4=$c,$b
+       dep     $X[$i]=$X[$i],tmp1,16,16} //;;
+{ .mmi;        add     $e=$e,$K_00_19              // e+=K_00_19
+       andcm   tmp1=$d,$b
+       dep.z   tmp5=$a,5,27            };; // a<<5
+{ .mmi;        add     $e=$e,$X[$i]                // e+=Xload
+       or      tmp4=tmp4,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            };; // a>>27
-{ .mib;        add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
+{ .mmi;        ld1     tmp0=[inp],2                // forward Xload
+       add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
-{ .mib;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
+{ .mmi;        ld1     tmp4=[tmp3],2               // forward Xload
+       or      tmp5=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       mux2    $X[$i&0xf]=$X[$i&0xf],0x44
-       nop.i   0                       };;
+{ .mii;        add     $e=$e,tmp5                  // e+=ROTATE(a,5)
+       dep     $Xn=$Xn,tmp2,8,8            // forward Xload
+       mux2    $X[$i]=$X[$i],0x44      } //;;
 
 ___
        }
 else   {
        $code.=<<___;
-       and     tmp0=$c,$b
-       dep.z   tmp5=$a,5,27            }   // a<<5 ;;?
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp4=$e,$K_00_19        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
+{ .mii;        and     tmp3=$c,$b
+       dep     tmp1=tmp0,tmp4,8,8;;
+       dep     $X[$i]=$X[$i],tmp1,16,16} //;;
+{ .mmi;        add     $e=$e,$K_00_19              // e+=K_00_19
+       andcm   tmp1=$d,$b
+       dep.z   tmp5=$a,5,27            };; // a<<5
+{ .mmi;        add     $e=$e,$X[$i]                // e+=Xupdate
+       or      tmp4=tmp3,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
+{ .mmi;        xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] // forward Xupdate
        nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
+       xor     $Xn=$Xn,tmp3                // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi; or     tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
-       mux2    $X[$i&0xf]=$X[$i&0xf],0x44  };;
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
+       mux2    $X[$i]=$X[$i],0x44      };;
 
 ___
        }
@@ -122,27 +113,28 @@ ___
 
 sub BODY_16_19 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=@X[$j%16];
 
 $code.=<<___;
-{ .mmi;        mov     $X[$i&0xf]=$f               // Xupdate
-       and     tmp0=$c,$b
+{ .mib;        add     $e=$e,$K_00_19              // e+=K_00_19
        dep.z   tmp5=$a,5,27            }   // a<<5
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp4=$e,$K_00_19        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=$f,tmp4                  // f+=e+K_00_19
+{ .mib;        andcm   tmp1=$d,$b
+       and     tmp0=$c,$b              };;
+{ .mmi;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
+{ .mmi;        xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16]       // forward Xupdate
        nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_00_19(b,c,d)
+       xor     $Xn=$Xn,tmp3                // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;
 
 ___
@@ -150,51 +142,47 @@ ___
 
 sub BODY_20_39 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
+my     ($i,$a,$b,$c,$d,$e,$Konst)=@_;
        $Konst = $K_20_39 if (!defined($Konst));
+my     $j=$i+1;
+my     $Xn=@X[$j%16];
 
 if ($i<79) {
 $code.=<<___;
-{ .mib;        mov     $X[$i&0xf]=$f               // Xupdate
+{ .mib;        add     $e=$e,$Konst                // e+=K_XX_XX
        dep.z   tmp5=$a,5,27            }   // a<<5
 { .mib;        xor     tmp0=$c,$b
-       add     tmp4=$e,$Konst          };;
-{ .mmi;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
-       add     $f=$f,tmp4                  // f+=e+K_20_39
+       xor     $Xn=$Xn,$X[($j+2)%16]   };; // forward Xupdate
+{ .mib;        add     $e=$e,$X[$i%16]             // e+=Xupdate
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
-       nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
+       xor     $Xn=$Xn,$X[($j+8)%16]   };; // forward Xupdate
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
+       xor     $Xn=$Xn,$X[($j+13)%16]      // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;
 
 ___
 }
 else {
 $code.=<<___;
-{ .mib;        mov     $X[$i&0xf]=$f               // Xupdate
+{ .mib;        add     $e=$e,$Konst                // e+=K_60_79
        dep.z   tmp5=$a,5,27            }   // a<<5
 { .mib;        xor     tmp0=$c,$b
-       add     tmp4=$e,$Konst          };;
-{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
-       extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mib;        add     $f=$f,tmp4                  // f+=e+K_20_39
        add     $h1=$h1,$a              };; // wrap up
-{ .mmi;
-(p16)  ld4.s   $X[0]=[inp],4               // non-faulting prefetch
-       add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
-       shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30) ;;?
-{ .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
+{ .mib;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       extr.u  tmp1=$a,27,5            }   // a>>27
+{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
        add     $h3=$h3,$c              };; // wrap up
-{ .mib;        add     tmp3=1,inp                  // used in unaligned codepath
-       add     $f=$f,tmp1              }   // f+=ROTATE(a,5)
-{ .mib;        add     $h2=$h2,$b                  // wrap up
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
+       or      tmp1=tmp1,tmp5              // ROTATE(a,5)
+       shrp    $b=tmp6,tmp6,2          };; // b=ROTATE(b,30) ;;?
+{ .mmi;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       add     tmp3=1,inp                  // used in unaligned codepath
        add     $h4=$h4,$d              };; // wrap up
 
 ___
@@ -203,29 +191,29 @@ ___
 
 sub BODY_40_59 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=@X[$j%16];
 
 $code.=<<___;
-{ .mmi;        mov     $X[$i&0xf]=$f               // Xupdate
-       and     tmp0=$c,$b
+{ .mib;        add     $e=$e,$K_40_59              // e+=K_40_59
        dep.z   tmp5=$a,5,27            }   // a<<5
-{ .mmi;        and     tmp1=$d,$b
-       add     tmp4=$e,$K_40_59        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // (b&c)|(b&d)
-       add     $f=$f,tmp4                  // f+=e+K_40_59
+{ .mib;        and     tmp1=$c,$d
+       xor     tmp0=$c,$d              };;
+{ .mmi;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       add     tmp5=tmp5,tmp1              // a<<5+(c&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        and     tmp4=$c,$d
-       xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
-       };;
-{ .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        and     tmp0=tmp0,$b
+       xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] };;   // forward Xupdate
+{ .mmi;        add     $e=$e,tmp0                  // e+=b&(c^d)
+       add     tmp5=tmp5,tmp1              // ROTATE(a,5)+(c&d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
-{ .mmi;        or      tmp0=tmp0,tmp4              // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
+{ .mmi;        xor     $Xn=$Xn,tmp3                // forward Xupdate
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp0                  // f+=F_40_59(b,c,d)
-       shrp    $e=tmp2,tmp2,31;;           // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
-       add     $f=$f,tmp1              };; // f+=ROTATE(a,5)
+{ .mii;        add     $e=$e,tmp5                  // e+=ROTATE(a,5)+(c&d)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
+       nop.i   0x0                     };;
 
 ___
 }
@@ -241,13 +229,13 @@ tmp3=r11;
 ctx=r32;       // in0
 inp=r33;       // in1
 
-// void sha1_block_asm_data_order(SHA_CTX *c,const void *p,size_t num);
-.global        sha1_block_asm_data_order#
-.proc  sha1_block_asm_data_order#
+// void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num);
+.global        sha1_block_data_order#
+.proc  sha1_block_data_order#
 .align 32
-sha1_block_asm_data_order:
+sha1_block_data_order:
        .prologue
-{ .mmi;        alloc   tmp1=ar.pfs,3,15,0,0
+{ .mmi;        alloc   tmp1=ar.pfs,3,14,0,0
        $ADDP   tmp0=4,ctx
        .save   ar.lc,r3
        mov     r3=ar.lc                }
@@ -255,8 +243,8 @@ sha1_block_asm_data_order:
        $ADDP   inp=0,inp
        mov     r2=pr                   };;
 tmp4=in2;
-tmp5=loc13;
-tmp6=loc14;
+tmp5=loc12;
+tmp6=loc13;
        .body
 { .mlx;        ld4     $h0=[ctx],8
        movl    $K_00_19=0x5a827999     }
@@ -283,20 +271,20 @@ tmp6=loc14;
 
 ___
 
-{ my $i,@V=($A,$B,$C,$D,$E,$T);
+{ my $i,@V=($A,$B,$C,$D,$E);
 
-       for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V,1); unshift(@V,pop(@V)); }
+       for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<20;$i++)        { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<40;$i++)        { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<60;$i++)        { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<80;$i++)        { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }
 
-       (($V[5] eq $D) and ($V[0] eq $E)) or die;       # double-check
+       (($V[0] eq $A) and ($V[4] eq $E)) or die;       # double-check
 }
 
 $code.=<<___;
-{ .mmb;        add     $h0=$h0,$E
-       nop.m   0
+{ .mmb;        add     $h0=$h0,$A
+       add     $h2=$h2,$C
        br.ctop.dptk.many       .Ldtop  };;
 .Ldend:
 { .mmi;        add     tmp0=4,ctx
@@ -308,7 +296,9 @@ $code.=<<___;
 { .mib;        st4     [ctx]=$h4,-16
        mov     pr=r2,0x1ffff
        br.ret.sptk.many        b0      };;
-.endp  sha1_block_asm_data_order#
+.endp  sha1_block_data_order#
+stringz        "SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>"
 ___
 
+$output=shift and open STDOUT,">$output";
 print $code;
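
For reference, the rounds generated above implement the standard SHA-1 step and message schedule: each round computes e += K + X[i%16] + F(b,c,d) + ROTATE(a,5) followed by b = ROTATE(b,30), with the five working variables renamed between rounds (the @V rotation), and the 80-word schedule lives in a 16-entry circular buffer, seeded from the 16 big-endian input words and thereafter updated as X[(i+1)%16] = ROTATE(X[(i+1)%16] ^ X[(i+3)%16] ^ X[(i+9)%16] ^ X[(i+14)%16], 1) (the "forward Xupdate" comments). The F_40_59 rounds also rely on the identity (b&c)|(b&d)|(c&d) == (b&(c^d)) + (c&d), which holds because the two terms never share a set bit. A minimal Perl sketch of these primitives, for illustration only (the helper subs below are assumptions, not part of sha1-ia64.pl):

# Illustrative sketch of the SHA-1 primitives the generated IA-64 code implements.
sub ROTATE  { my ($x,$n) = @_; (($x << $n) | ($x >> (32-$n))) & 0xffffffff }
sub F_00_19 { my ($b,$c,$d) = @_; ($b & $c) | (~$b & $d) }            # Ch
sub F_20_39 { my ($b,$c,$d) = @_; $b ^ $c ^ $d }                      # Parity
sub F_40_59 { my ($b,$c,$d) = @_; ($b & $c) | ($b & $d) | ($c & $d) } # Maj
# One round i (the caller renames a..e between rounds, as the @V rotation does):
#   $X[($i+1)%16] = ROTATE($X[($i+1)%16] ^ $X[($i+3)%16] ^ $X[($i+9)%16] ^ $X[($i+14)%16], 1);
#   $e = ($e + $K + $X[$i%16] + F($b,$c,$d) + ROTATE($a,5)) & 0xffffffff;
#   $b = ROTATE($b,30);

The IA-64 code folds (c&d) into the a<<5 term (tmp5, "a<<5+(c&d)") precisely because the identity above lets the OR inside the majority function be replaced by an ADD.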