#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# The eternal question: what's wrong with compiler-generated code? The
# trick is that it's possible to reduce the number of shifts required
# to perform rotations by maintaining a copy of the 32-bit value in the
# upper bits of a 64-bit register. Just follow the mux2 and shrp
# instructions... Performance under a big-endian OS such as HP-UX is
# 179MBps*1GHz, which is >50% better than HP C and >2x better than gcc.
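#
# To illustrate the trick, here is a sketch (not part of the generated
# module): once mux2 with the 0x44 halfword-select pattern has
# replicated the low 32 bits of a register into its upper half, a
# 32-bit rotation collapses into a single shift-right-pair, e.g.
#
#	mux2	t=a,0x44	// t = a[31:0]:a[31:0]
#	shrp	r=t,t,2		// r[31:0] = ROTATE(a,30)
#
# i.e. one instruction instead of the usual shift, shift and or.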

$code=<<___;
.ident	\"sha1-ia64.s, version 1.2\"
.ident	\"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
.explicit

___


if ($^O eq "hpux") {
    $ADDP="addp4";
    for (@ARGV) { $ADDP="add" if (/(\+DD|\-mlp)64/); }
} else { $ADDP="add"; }
for (@ARGV) {	$big_endian=1 if (/\-DB_ENDIAN/);
		$big_endian=0 if (/\-DL_ENDIAN/);	}
if (!defined($big_endian))
	    {	$big_endian=(unpack('L',pack('N',1))==1);	}
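# (pack('N',1) is explicitly big-endian, so unpacking it as a native
# 'L' yields 1 only on a big-endian host; this probes the build host's
# byte order when neither -DB_ENDIAN nor -DL_ENDIAN was given.)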

#$human=1;
if ($human) {	# useful for visual code auditing...
	($A,$B,$C,$D,$E,$T)   = ("A","B","C","D","E","T");
	($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"K_00_19","K_20_39","K_40_59","K_60_79"	);
	@X= (	"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
		"X8", "X9","X10","X11","X12","X13","X14","X15"	);
}
else {
	($A,$B,$C,$D,$E,$T)   = ("loc0","loc1","loc2","loc3","loc4","loc5");
	($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"r14", "r15", "loc11", "loc12"	);
	@X= (	"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
		"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"	);
}

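# BODY_00_15: rounds 0..15. The input block is fetched one byte at a
# time through two ld1 streams (inp and tmp3=inp+1, stride 2) and
# assembled big-endian with dep, so unaligned input costs nothing
# extra; the round function is F_00_19(b,c,d)=(b&c)|(~b&d).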
sub BODY_00_15 {
local	*code=shift;
local	($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___ if ($i==0);
{ .mmi;	ld1	$X[$i&0xf]=[inp],2	    // MSB
	ld1	tmp2=[tmp3],2		};;
{ .mmi;	ld1	tmp0=[inp],2
	ld1	tmp4=[tmp3],2		    // LSB
	dep	$X[$i&0xf]=$X[$i&0xf],tmp2,8,8	};;
___
if ($i<15) {
	$code.=<<___;
{ .mmi;	ld1	$X[($i+1)&0xf]=[inp],2	    // +1
	dep	tmp1=tmp0,tmp4,8,8	};;
{ .mmi;	ld1	tmp2=[tmp3],2		    // +1
	and	tmp4=$c,$b
	dep	$X[$i&0xf]=$X[$i&0xf],tmp1,16,16	} //;;
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp0=$e,$K_00_19
	dep.z	tmp5=$a,5,27		};; // a<<5
{ .mmi;	or	tmp4=tmp4,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp0,$X[$i&0xf]	    // f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5		};; // a>>27
{ .mmi;	ld1	tmp0=[inp],2		    // +1
	add	$f=$f,tmp4		    // f+=F_00_19(b,c,d)
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	ld1	tmp4=[tmp3],2		    // +1
	or	tmp5=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp5		    // f+=ROTATE(a,5)
	dep	$X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8	// +1
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44	} //;;

___
	}
else	{
	$code.=<<___;
{ .mii;	and	tmp3=$c,$b
	dep	tmp1=tmp0,tmp4,8,8;;
	dep	$X[$i&0xf]=$X[$i&0xf],tmp1,16,16	} //;;
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp0=$e,$K_00_19
	dep.z	tmp5=$a,5,27		};; // a<<5
{ .mmi;	or	tmp4=tmp3,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp0,$X[$i&0xf]	    // f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
	nop.i	0			};;
{ .mmi;	add	$f=$f,tmp4		    // f+=F_00_19(b,c,d)
	xor	tmp2=tmp2,tmp3		    // +1
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44  };;

___
	}
}

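# BODY_16_19: rounds 16..19, still with F_00_19. From here on the
# schedule word for round i+1 is computed one round ahead (the "+1"
# comments) and lands in $e, which becomes the next round's $f when the
# working variables are rotated.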
sub BODY_16_19 {
local	*code=shift;
local	($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___;
{ .mmi;	mov	$X[$i&0xf]=$f		    // Xupdate
	and	tmp0=$c,$b
	dep.z	tmp5=$a,5,27		}   // a<<5
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp4=$e,$K_00_19	};;
{ .mmi;	or	tmp0=tmp0,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=$f,tmp4		    // f+=e+K_00_19
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
	nop.i	0			};;
{ .mmi;	add	$f=$f,tmp0		    // f+=F_00_19(b,c,d)
	xor	tmp2=tmp2,tmp3		    // +1
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	nop.i	0			};;

___
}

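# BODY_20_39: rounds 20..39 with F_20_39(b,c,d)=b^c^d. Via BODY_60_79
# below it also serves rounds 60..79 (same function, $K_60_79); the
# final round ($i==79) folds the $h1..$h4 accumulator updates in, while
# $h0 is updated at the loop branch.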
sub BODY_20_39 {
local	*code=shift;
local	($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
	$Konst = $K_20_39 if (!defined($Konst));

if ($i<79) {
$code.=<<___;
{ .mib;	mov	$X[$i&0xf]=$f		    // Xupdate
	dep.z	tmp5=$a,5,27		}   // a<<5
{ .mib;	xor	tmp0=$c,$b
	add	tmp4=$e,$Konst		};;
{ .mmi;	xor	tmp0=tmp0,$d		    // F_20_39(b,c,d)=b^c^d
	add	$f=$f,tmp4		    // f+=e+K_20_39
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
	nop.i	0			};;
{ .mmi;	add	$f=$f,tmp0		    // f+=F_20_39(b,c,d)
	xor	tmp2=tmp2,tmp3		    // +1
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	nop.i	0			};;

___
}
else {
$code.=<<___;
{ .mib;	mov	$X[$i&0xf]=$f		    // Xupdate
	dep.z	tmp5=$a,5,27		}   // a<<5
{ .mib;	xor	tmp0=$c,$b
	add	tmp4=$e,$Konst		};;
{ .mib;	xor	tmp0=tmp0,$d		    // F_20_39(b,c,d)=b^c^d
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mib;	add	$f=$f,tmp4		    // f+=e+K_20_39
	add	$h1=$h1,$a		};; // wrap up
{ .mmi;	add	$f=$f,tmp0		    // f+=F_20_39(b,c,d)
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30) ;;?
{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	add	$h3=$h3,$c		};; // wrap up
{ .mib;	add	tmp3=1,inp		    // used in unaligned codepath
	add	$f=$f,tmp1		}   // f+=ROTATE(a,5)
{ .mib;	add	$h2=$h2,$b		    // wrap up
	add	$h4=$h4,$d		};; // wrap up

___
}
}

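# BODY_40_59: rounds 40..59 with the majority function
# F_40_59(b,c,d)=(b&c)|(b&d)|(c&d), built up as ((b&c)|(b&d))|(c&d) to
# fit the bundle slots.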
sub BODY_40_59 {
local	*code=shift;
local	($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___;
{ .mmi;	mov	$X[$i&0xf]=$f		    // Xupdate
	and	tmp0=$c,$b
	dep.z	tmp5=$a,5,27		}   // a<<5
{ .mmi;	and	tmp1=$d,$b
	add	tmp4=$e,$K_40_59	};;
{ .mmi;	or	tmp0=tmp0,tmp1		    // (b&c)|(b&d)
	add	$f=$f,tmp4		    // f+=e+K_40_59
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mmi;	and	tmp4=$c,$d
	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
	};;
{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	xor	tmp2=tmp2,tmp3		    // +1
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	or	tmp0=tmp0,tmp4		    // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp0		    // f+=F_40_59(b,c,d)
	shrp	$e=tmp2,tmp2,31;;	    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	add	$f=$f,tmp1		};; // f+=ROTATE(a,5)

___
}
sub BODY_60_79	{ &BODY_20_39(@_,$K_60_79); }

$code.=<<___;
.text

tmp0=r8;
tmp1=r9;
tmp2=r10;
tmp3=r11;
ctx=r32;	// in0
inp=r33;	// in1

// void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num);
.global	sha1_block_data_order#
.proc	sha1_block_data_order#
.align	32
sha1_block_data_order:
	.prologue
{ .mmi;	alloc	tmp1=ar.pfs,3,15,0,0
	$ADDP	tmp0=4,ctx
	.save	ar.lc,r3
	mov	r3=ar.lc		}
{ .mmi;	$ADDP	ctx=0,ctx
	$ADDP	inp=0,inp
	mov	r2=pr			};;
tmp4=in2;
tmp5=loc13;
tmp6=loc14;
	.body
{ .mlx;	ld4	$h0=[ctx],8
	movl	$K_00_19=0x5a827999	}
{ .mlx;	ld4	$h1=[tmp0],8
	movl	$K_20_39=0x6ed9eba1	};;
{ .mlx;	ld4	$h2=[ctx],8
	movl	$K_40_59=0x8f1bbcdc	}
{ .mlx;	ld4	$h3=[tmp0]
	movl	$K_60_79=0xca62c1d6	};;
{ .mmi;	ld4	$h4=[ctx],-16
	add	in2=-1,in2		    // adjust num for ar.lc
	mov	ar.ec=1			};;
{ .mmi;	nop.m	0
	add	tmp3=1,inp
	mov	ar.lc=in2		};; // brp.loop.imp: too far

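// The 80 rounds emitted below are fully unrolled; the loop around them
// runs once per 64-byte block, counted down by ar.lc/br.ctop (ar.ec=1,
// so no software-pipelined stages are in flight).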
.Ldtop:
{ .mmi;	mov	$A=$h0
	mov	$B=$h1
	mux2	tmp6=$h1,0x44		}
{ .mmi;	mov	$C=$h2
	mov	$D=$h3
	mov	$E=$h4			};;

___

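# Assemble the 80 unrolled rounds. @V is rotated after every round so
# each BODY_* sub always sees the working variables in (a,b,c,d,e,f)
# order; the pop/unshift pair emulates the a..e renaming of the
# reference SHA-1 round.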
{ my $i; my @V=($A,$B,$C,$D,$E,$T);

	for($i=0;$i<16;$i++)	{ &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
}

$code.=<<___;
{ .mmb;	add	$h0=$h0,$E
	nop.m	0
	br.ctop.dptk.many	.Ldtop	};;
.Ldend:
{ .mmi;	add	tmp0=4,ctx
	mov	ar.lc=r3		};;
{ .mmi;	st4	[ctx]=$h0,8
	st4	[tmp0]=$h1,8		};;
{ .mmi;	st4	[ctx]=$h2,8
	st4	[tmp0]=$h3		};;
{ .mib;	st4	[ctx]=$h4,-16
	mov	pr=r2,0x1ffff
	br.ret.sptk.many	b0	};;
.endp	sha1_block_data_order#
stringz	"SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>"
___

print $code;