SHA1 assembler show off: minor performance updates and new modules for
[openssl.git] / crypto / sha / asm / sha1-ia64.pl
index 51c4f47ecbdc215d6457c8d3c1ac3d16e9db9733..db28f0805a11c568e468d2cb7d8a46280c70237c 100644 (file)
@@ -15,7 +15,7 @@
 # is >50% better than HP C and >2x better than gcc.
 
 $code=<<___;
-.ident  \"sha1-ia64.s, version 1.2\"
+.ident  \"sha1-ia64.s, version 1.3\"
 .ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
 .explicit
 
@@ -26,14 +26,10 @@ if ($^O eq "hpux") {
     $ADDP="addp4";
     for (@ARGV) { $ADDP="add" if (/(\+DD|\-mlp)64/); }
 } else { $ADDP="add"; }
-for (@ARGV) {  $big_endian=1 if (/\-DB_ENDIAN/);
-               $big_endian=0 if (/\-DL_ENDIAN/);   }
-if (!defined($big_endian))
-           {   $big_endian=(unpack('L',pack('N',1))==1);   }
 
 #$human=1;
 if ($human) {  # useful for visual code auditing...
-       ($A,$B,$C,$D,$E,$T)   = ("A","B","C","D","E","T");
+       ($A,$B,$C,$D,$E)   = ("A","B","C","D","E");
        ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
            (   "K_00_19","K_20_39","K_40_59","K_60_79" );
@@ -41,47 +37,50 @@ if ($human) {       # useful for visual code auditing...
                "X8", "X9","X10","X11","X12","X13","X14","X15"  );
 }
 else {
-       ($A,$B,$C,$D,$E,$T)   = ("loc0","loc1","loc2","loc3","loc4","loc5");
-       ($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
+       ($A,$B,$C,$D,$E)   =    ("loc0","loc1","loc2","loc3","loc4");
+       ($h0,$h1,$h2,$h3,$h4) = ("loc5","loc6","loc7","loc8","loc9");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
-           (   "r14", "r15", "loc11", "loc12"  );
+           (   "r14", "r15", "loc10", "loc11"  );
        @X= (   "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
                "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"  );
 }
 
 sub BODY_00_15 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=$X[$j%16];
 
 $code.=<<___ if ($i==0);
-{ .mmi;        ld1     $X[$i&0xf]=[inp],2          // MSB
+{ .mmi;        ld1     $X[$i]=[inp],2              // MSB
        ld1     tmp2=[tmp3],2           };;
 { .mmi;        ld1     tmp0=[inp],2
        ld1     tmp4=[tmp3],2               // LSB
-       dep     $X[$i&0xf]=$X[$i&0xf],tmp2,8,8  };;
+       dep     $X[$i]=$X[$i],tmp2,8,8  };;
 ___
 if ($i<15) {
        $code.=<<___;
-{ .mmi;        ld1     $X[($i+1)&0xf]=[inp],2      // +1
+{ .mmi;        ld1     $Xn=[inp],2                 // forward Xload
+       nop.m   0x0
        dep     tmp1=tmp0,tmp4,8,8      };;
-{ .mmi;        ld1     tmp2=[tmp3],2               // +1
+{ .mmi;        ld1     tmp2=[tmp3],2               // forward Xload
        and     tmp4=$c,$b
-       dep     $X[$i&0xf]=$X[$i&0xf],tmp1,16,16        } //;;
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp0=$e,$K_00_19
+       dep     $X[$i]=$X[$i],tmp1,16,16} //;;
+{ .mmi;        add     $e=$e,$K_00_19              // e+=K_00_19
+       andcm   tmp1=$d,$b
        dep.z   tmp5=$a,5,27            };; // a<<5
-{ .mmi;        or      tmp4=tmp4,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=tmp0,$X[$i&0xf]          // f=xi+e+K_00_19
+{ .mmi;        add     $e=$e,$X[$i]                // e+=Xload
+       or      tmp4=tmp4,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            };; // a>>27
-{ .mmi;        ld1     tmp0=[inp],2                // +1
-       add     $f=$f,tmp4                  // f+=F_00_19(b,c,d)
+{ .mmi;        ld1     tmp0=[inp],2                // forward Xload
+       add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
-{ .mmi;        ld1     tmp4=[tmp3],2               // +1
+{ .mmi;        ld1     tmp4=[tmp3],2               // forward Xload
        or      tmp5=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp5                  // f+=ROTATE(a,5)
-       dep     $X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8  // +1
-       mux2    $X[$i&0xf]=$X[$i&0xf],0x44      } //;;
+{ .mii;        add     $e=$e,tmp5                  // e+=ROTATE(a,5)
+       dep     $Xn=$Xn,tmp2,8,8            // forward Xload
+       mux2    $X[$i]=$X[$i],0x44      } //;;
 
 ___
        }
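Two things happen per early round here. First, the ld1/dep chain composes a big-endian word from four single-byte loads, which is why the endianness probe could go. Second, the loads and deposits for word i+1 are issued during round i ("forward Xload"), hiding load latency. A plain-Perl model of the merge, assuming the IA-64 semantics dep r1=r2,r3,pos,len, which deposits the low len bits of r2 into r3 at bit position pos:

    #!/usr/bin/env perl
    # Illustrative model (not part of the generator) of the ld1/dep sequence:
    # four loaded bytes become one big-endian word via two 8-bit deposits
    # and one 16-bit deposit, independent of host byte order.
    sub dep {
        my ($r2, $r3, $pos, $len) = @_;
        my $mask = ((1 << $len) - 1) << $pos;
        return ($r3 & ~$mask) | (($r2 << $pos) & $mask);
    }
    my @byte = (0x01, 0x23, 0x45, 0x67);       # input stream, MSB first
    my $x  = $byte[0];                         # ld1 X=[inp]      (MSB)
    my $t1 = dep($byte[2], $byte[3], 8, 8);    # dep tmp1=tmp0,tmp4,8,8
    $x = dep($x, $byte[1], 8, 8);              # dep X=X,tmp2,8,8
    $x = dep($x, $t1, 16, 16);                 # dep X=X,tmp1,16,16
    printf "%08x\n", $x;                       # prints 01234567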
@@ -89,24 +88,24 @@ else        {
        $code.=<<___;
 { .mii;        and     tmp3=$c,$b
        dep     tmp1=tmp0,tmp4,8,8;;
-       dep     $X[$i&0xf]=$X[$i&0xf],tmp1,16,16        } //;;
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp0=$e,$K_00_19
+       dep     $X[$i]=$X[$i],tmp1,16,16} //;;
+{ .mmi;        add     $e=$e,$K_00_19              // e+=K_00_19
+       andcm   tmp1=$d,$b
        dep.z   tmp5=$a,5,27            };; // a<<5
-{ .mmi;        or      tmp4=tmp3,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=tmp0,$X[$i&0xf]          // f=xi+e+K_00_19
+{ .mmi;        add     $e=$e,$X[$i]                // e+=Xload
+       or      tmp4=tmp3,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
+{ .mmi;        xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] // forward Xupdate
        nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp4                  // f+=F_00_19(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
+       xor     $Xn=$Xn,tmp3                // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi; or     tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
-       mux2    $X[$i&0xf]=$X[$i&0xf],0x44  };;
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
+       mux2    $X[$i]=$X[$i],0x44      };;
 
 ___
        }
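The structural change in this and the following hunks: the old code built the next round's accumulator in a separate register ($f, rotating through $T), while the new code retires $T and accumulates directly into $e as each operand becomes ready (e+=K, e+=X, e+=F, e+=ROTATE(a,5)). Addition modulo the register width is associative and commutative, so the two orders agree; a quick self-check in plain Perl, with andcm rendered as ~b&d:

    #!/usr/bin/env perl
    # Sanity check (illustrative only): the piecewise accumulation above must
    # equal the classic one-shot f = ROTATE(a,5)+F(b,c,d)+e+K+X of the old code.
    sub rotl32  { my ($x, $n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }
    sub F_00_19 { my ($b, $c, $d) = @_; (($b & $c) | (~$b & $d)) & 0xffffffff }
    my ($a, $b, $c, $d, $e, $K, $X) = map { int(rand(0xffffffff)) } 1 .. 7;
    my $f = (rotl32($a, 5) + F_00_19($b, $c, $d) + $e + $K + $X) & 0xffffffff;
    my $acc = $e;
    $acc = ($acc + $K) & 0xffffffff;                    # add e=e,K_00_19
    $acc = ($acc + $X) & 0xffffffff;                    # add e=e,Xload
    $acc = ($acc + F_00_19($b, $c, $d)) & 0xffffffff;   # add e=e,tmp4
    $acc = ($acc + rotl32($a, 5)) & 0xffffffff;         # add e=e,ROTATE(a,5)
    die "reassociation mismatch" unless $acc == $f;
    print "ok\n";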
@@ -114,27 +113,28 @@ ___
 
 sub BODY_16_19 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=$X[$j%16];
 
 $code.=<<___;
-{ .mmi;        mov     $X[$i&0xf]=$f               // Xupdate
-       and     tmp0=$c,$b
+{ .mib;        add     $e=$e,$K_00_19              // e+=K_00_19
        dep.z   tmp5=$a,5,27            }   // a<<5
-{ .mmi;        andcm   tmp1=$d,$b
-       add     tmp4=$e,$K_00_19        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
-       add     $f=$f,tmp4                  // f+=e+K_00_19
+{ .mib;        andcm   tmp1=$d,$b
+       and     tmp0=$c,$b              };;
+{ .mmi;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
+{ .mmi;        xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16]       // forward Xupdate
        nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_00_19(b,c,d)
+       xor     $Xn=$Xn,tmp3                // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;
 
 ___
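"Forward Xupdate" means round i computes the message word for round j=i+1: with the schedule kept in a 16-slot ring, W[j] = ROTATE(W[j-16]^W[j-14]^W[j-8]^W[j-3],1), and modulo 16 those offsets are exactly the j, (j+2), (j+8), (j+13) slots xored above. An illustrative Perl model of the one-ahead schedule (not part of the generator) against a flat 80-word reference:

    #!/usr/bin/env perl
    sub rotl32 { my ($x, $n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }
    my @X = map { int(rand(0xffffffff)) } 0 .. 15;   # W[0..15], the loaded block
    my @W = @X;                                      # flat 80-word reference
    push @W, rotl32($W[-3] ^ $W[-8] ^ $W[-14] ^ $W[-16], 1) for 16 .. 79;
    for my $i (15 .. 78) {       # during round i, produce the word for j=i+1
        my $j  = $i + 1;
        my $Xn = $X[$j % 16] ^ $X[($j + 2) % 16]
               ^ $X[($j + 8) % 16] ^ $X[($j + 13) % 16];
        $X[$j % 16] = rotl32($Xn, 1);                # ready before round j starts
        die "schedule mismatch at $j" unless $X[$j % 16] == $W[$j];
    }
    print "ok\n";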
@@ -142,49 +142,47 @@ ___
 
 sub BODY_20_39 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
+my     ($i,$a,$b,$c,$d,$e,$Konst)=@_;
        $Konst = $K_20_39 if (!defined($Konst));
+my     $j=$i+1;
+my     $Xn=$X[$j%16];
 
 if ($i<79) {
 $code.=<<___;
-{ .mib;        mov     $X[$i&0xf]=$f               // Xupdate
+{ .mib;        add     $e=$e,$Konst                // e+=K_XX_XX
        dep.z   tmp5=$a,5,27            }   // a<<5
 { .mib;        xor     tmp0=$c,$b
-       add     tmp4=$e,$Konst          };;
-{ .mmi;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
-       add     $f=$f,tmp4                  // f+=e+K_20_39
+       xor     $Xn=$Xn,$X[($j+2)%16]   };; // forward Xupdate
+{ .mib;        add     $e=$e,$X[$i%16]             // e+=Xupdate
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
-       nop.i   0                       };;
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
+       xor     $Xn=$Xn,$X[($j+8)%16]   };; // forward Xupdate
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
+       xor     $Xn=$Xn,$X[($j+13)%16]      // forward Xupdate
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
 { .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp1                  // f+=ROTATE(a,5)
-       shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
+{ .mii;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;
 
 ___
 }
 else {
 $code.=<<___;
-{ .mib;        mov     $X[$i&0xf]=$f               // Xupdate
+{ .mib;        add     $e=$e,$Konst                // e+=K_60_79
        dep.z   tmp5=$a,5,27            }   // a<<5
 { .mib;        xor     tmp0=$c,$b
-       add     tmp4=$e,$Konst          };;
-{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
-       extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mib;        add     $f=$f,tmp4                  // f+=e+K_20_39
        add     $h1=$h1,$a              };; // wrap up
-{ .mmi;        add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
-       shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30) ;;?
-{ .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
+{ .mib;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       extr.u  tmp1=$a,27,5            }   // a>>27
+{ .mib;        xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
        add     $h3=$h3,$c              };; // wrap up
-{ .mib;        add     tmp3=1,inp                  // used in unaligned codepath
-       add     $f=$f,tmp1              }   // f+=ROTATE(a,5)
-{ .mib;        add     $h2=$h2,$b                  // wrap up
+{ .mmi;        add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
+       or      tmp1=tmp1,tmp5              // ROTATE(a,5)
+       shrp    $b=tmp6,tmp6,2          };; // b=ROTATE(b,30) ;;?
+{ .mmi;        add     $e=$e,tmp1                  // e+=ROTATE(a,5)
+       add     tmp3=1,inp                  // used in unaligned codepath
        add     $h4=$h4,$d              };; // wrap up
 
 ___
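Note $Konst: it defaults to $K_20_39, and the e+=K_60_79 comment in the i==79 path reflects that BODY_60_79 reuses this body with the other constant; rounds 20..39 and 60..79 differ only in K, both using the parity function b^c^d. A hedged Perl sketch of the shared round under that reading (the sample message words are arbitrary):

    #!/usr/bin/env perl
    sub rotl32 { my ($x, $n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }
    sub parity_round {                              # rounds 20..39 and 60..79
        my ($a, $b, $c, $d, $e, $x, $K) = @_;
        $e = ($e + $K)             & 0xffffffff;    # add e=e,$Konst
        $e = ($e + $x)             & 0xffffffff;    # add e=e,$X[i%16]
        $e = ($e + ($b ^ $c ^ $d)) & 0xffffffff;    # F_20_39(b,c,d)=b^c^d
        $e = ($e + rotl32($a, 5))  & 0xffffffff;    # e+=ROTATE(a,5)
        return ($e, $a, rotl32($b, 30), $c, $d);    # and b=ROTATE(b,30)
    }
    my @v = map { int(rand(0xffffffff)) } 1 .. 5;
    @v = parity_round(@v, 0x12345678, 0x6ed9eba1);  # with K_20_39
    @v = parity_round(@v, 0x9abcdef0, 0xca62c1d6);  # with K_60_79
    printf "%08x %08x %08x %08x %08x\n", @v;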
@@ -193,29 +191,29 @@ ___
 
 sub BODY_40_59 {
 local  *code=shift;
-local  ($i,$a,$b,$c,$d,$e,$f)=@_;
+my     ($i,$a,$b,$c,$d,$e)=@_;
+my     $j=$i+1;
+my     $Xn=$X[$j%16];
 
 $code.=<<___;
-{ .mmi;        mov     $X[$i&0xf]=$f               // Xupdate
-       and     tmp0=$c,$b
+{ .mib;        add     $e=$e,$K_40_59              // e+=K_40_59
        dep.z   tmp5=$a,5,27            }   // a<<5
-{ .mmi;        and     tmp1=$d,$b
-       add     tmp4=$e,$K_40_59        };;
-{ .mmi;        or      tmp0=tmp0,tmp1              // (b&c)|(b&d)
-       add     $f=$f,tmp4                  // f+=e+K_40_59
+{ .mib;        and     tmp1=$c,$d
+       xor     tmp0=$c,$d              };;
+{ .mmi;        add     $e=$e,$X[$i%16]             // e+=Xupdate
+       add     tmp5=tmp5,tmp1              // a<<5+(c&d)
        extr.u  tmp1=$a,27,5            }   // a>>27
-{ .mmi;        and     tmp4=$c,$d
-       xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
-       xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
-       };;
-{ .mmi;        or      tmp1=tmp1,tmp5              // ROTATE(a,5)
-       xor     tmp2=tmp2,tmp3              // +1
+{ .mmi;        and     tmp0=tmp0,$b
+       xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
+       xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] };;   // forward Xupdate
+{ .mmi;        add     $e=$e,tmp0                  // e+=b&(c^d)
+       add     tmp5=tmp5,tmp1              // ROTATE(a,5)+(c&d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
-{ .mmi;        or      tmp0=tmp0,tmp4              // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
+{ .mmi;        xor     $Xn=$Xn,tmp3                // forward Xupdate
        mux2    tmp6=$a,0x44            };; // see b in next iteration
-{ .mii;        add     $f=$f,tmp0                  // f+=F_40_59(b,c,d)
-       shrp    $e=tmp2,tmp2,31;;           // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
-       add     $f=$f,tmp1              };; // f+=ROTATE(a,5)
+{ .mii;        add     $e=$e,tmp5                  // e+=ROTATE(a,5)+(c&d)
+       shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
+       nop.i   0x0                     };;
 
 ___
 }
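The payoff in BODY_40_59: the majority function (b&c)|(b&d)|(c&d) is recomputed as (c&d)+(b&(c^d)). Per bit, c&d and b&(c^d) can never both be set, since one needs c==d and the other c!=d, so the sum never carries and equals the OR-of-ANDs form. That lets (c&d) ride along with the a<<5 term in tmp5 while b&(c^d) is added to e separately. A randomized check of the identity:

    #!/usr/bin/env perl
    for (1 .. 1000) {
        my ($b, $c, $d) = map { int(rand(0xffffffff)) } 1 .. 3;
        my $maj   = ($b & $c) | ($b & $d) | ($c & $d);  # classic F_40_59
        my $split = ($c & $d) + ($b & ($c ^ $d));       # carry-free: bits disjoint
        die "identity broken" unless $maj == $split;
    }
    print "ok\n";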
@@ -237,7 +235,7 @@ inp=r33;    // in1
 .align 32
 sha1_block_data_order:
        .prologue
-{ .mmi;        alloc   tmp1=ar.pfs,3,15,0,0
+{ .mmi;        alloc   tmp1=ar.pfs,3,14,0,0
        $ADDP   tmp0=4,ctx
        .save   ar.lc,r3
        mov     r3=ar.lc                }
@@ -245,8 +243,8 @@ sha1_block_data_order:
        $ADDP   inp=0,inp
        mov     r2=pr                   };;
 tmp4=in2;
-tmp5=loc13;
-tmp6=loc14;
+tmp5=loc12;
+tmp6=loc13;
        .body
 { .mlx;        ld4     $h0=[ctx],8
        movl    $K_00_19=0x5a827999     }
@@ -273,7 +271,7 @@ tmp6=loc14;
 
 ___
 
-{ my $i,@V=($A,$B,$C,$D,$E,$T);
+{ my $i; my @V=($A,$B,$C,$D,$E);
 
        for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<20;$i++)        { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
@@ -281,12 +279,12 @@ ___
        for(;$i<60;$i++)        { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<80;$i++)        { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }
 
-       (($V[5] eq $D) and ($V[0] eq $E)) or die;       # double-check
+       (($V[0] eq $A) and ($V[4] eq $E)) or die;       # double-check
 }
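The rewritten double-check matches the new five-variable rotation: unshift(@V,pop(@V)) shifts the name list one slot per round, and after 80 rounds (80%5==0 full cycles) every name is back in its starting slot, so $V[0] must be $A and $V[4] must be $E. In isolation:

    #!/usr/bin/env perl
    my @V = qw(A B C D E);
    unshift(@V, pop(@V)) for 1 .. 80;   # one rotation per round, as in the loop
    die "rotation check failed" unless $V[0] eq 'A' and $V[4] eq 'E';
    print "@V\n";                       # A B C D E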
 
 $code.=<<___;
-{ .mmb;        add     $h0=$h0,$E
-       nop.m   0
+{ .mmb;        add     $h0=$h0,$A
+       add     $h2=$h2,$C
        br.ctop.dptk.many       .Ldtop  };;
 .Ldend:
 { .mmi;        add     tmp0=4,ctx