#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# The eternal question is: what's wrong with compiler-generated code?
# The trick is that the number of shifts required to perform rotations
# can be reduced by maintaining a copy of the 32-bit value in the
# upper bits of a 64-bit register. Just follow the mux2 and shrp
# instructions... Performance under a big-endian OS such as HP-UX is
# 179MBps*1GHz, which is >50% better than HP C and >2x better than gcc.
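#
# The trick in a nutshell: mux2 with pattern 0x44 replicates the low
# 32 bits of a register into both halves, e.g. mux2 tmp6=b,0x44
# yields b:b; a single funnel shift of that pair, shrp b=tmp6,tmp6,2,
# then leaves ROTATE(b,30) in the low 32 bits, where a plain 32-bit
# rotate would cost shift+shift+or.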

$code=<<___;
.ident  \"sha1-ia64.s, version 1.1\"
.ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
.explicit

___


if ($^O eq "hpux") {
    $ADDP="addp4";
    for (@ARGV) { $ADDP="add" if (/\+DD64/ || /\-mlp64/); }
} else { $ADDP="add"; }
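# addp4 zero-extends a 32-bit pointer under HP-UX's ILP32 data model;
# under a 64-bit ABI (+DD64 or -mlp64) a plain add suffices.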
for (@ARGV) {   $big_endian=1 if (/\-DB_ENDIAN/);
                $big_endian=0 if (/\-DL_ENDIAN/);   }
if (!defined($big_endian))
            {   $big_endian=(unpack('L',pack('N',1))==1);   }
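# (pack('N',1) yields big-endian 0x00000001; the native 'L' template
# reads it back as 1 only on a big-endian host)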

#$human=1;
if ($human) {   # useful for visual code auditing...
        ($A,$B,$C,$D,$E,$T)   = ("A","B","C","D","E","T");
        ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
            (   "K_00_19","K_20_39","K_40_59","K_60_79" );
        @X= (   "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
                "X8", "X9","X10","X11","X12","X13","X14","X15"  );
}
else {
        ($A,$B,$C,$D,$E,$T)   = ("loc0","loc1","loc2","loc3","loc4","loc5");
        ($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
        ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
            (   "r14", "r15", "loc11", "loc12"  );
        @X= (   "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
                "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"  );
}

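# In the register mapping above the 16-word schedule @X lives in
# r16-r31 and the state in the loc* frame, so rounds 16-79 touch no
# memory at all. Each BODY_* sub below emits one fully unrolled SHA-1
# round; in pseudo-code the bundles compute (F and K per round range):
#
#       f = X[i%16] + e + K + F(b,c,d) + ROTATE(a,5)
#       b = ROTATE(b,30)
#       e = ROTATE(X[(i+1)%16]^X[(i+3)%16]^X[(i+9)%16]^X[(i+14)%16],1)
#
# where e, from round 15 onwards, is preloaded with the next round's
# message word (the "+1" comments). The caller then rotates the
# variable list, unshift(@V,pop(@V)), so that e lands in the next
# round's f slot and the same registers play different roles from
# round to round.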
sub BODY_00_15 {
local   *code=shift;
local   ($i,$a,$b,$c,$d,$e,$f,$unaligned)=@_;

if ($unaligned) {
        $code.=<<___;
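// unaligned input: gather the big-endian word one byte at a time and
// splice the bytes back together with dep (deposit)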
{ .mmi; ld1     tmp0=[inp],2                // MSB
        ld1     tmp1=[tmp3],2           };;
{ .mmi; ld1     tmp2=[inp],2
        ld1     $X[$i&0xf]=[tmp3],2         // LSB
        dep     tmp1=tmp0,tmp1,8,8      };;
{ .mii; cmp.ne  p16,p0=r0,r0                // no misaligned prefetch
        dep     $X[$i&0xf]=tmp2,$X[$i&0xf],8,8;;
        dep     $X[$i&0xf]=tmp1,$X[$i&0xf],16,16        };;
{ .mmi; nop.m   0
___
        }
elsif ($i<15) {
        $code.=<<___;
{ .mmi; ld4     $X[($i+1)&0xf]=[inp],4  // prefetch
___
        }
else    {
        $code.=<<___;
{ .mmi; nop.m   0
___
        }
if ($i<15) {
        $code.=<<___;
        and     tmp0=$c,$b
        dep.z   tmp5=$a,5,27            }   // a<<5
{ .mmi; andcm   tmp1=$d,$b
        add     tmp4=$e,$K_00_19        };;
{ .mmi; or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
        extr.u  tmp1=$a,27,5            };; // a>>27
{ .mib; add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
{ .mib; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
{ .mii; add     $f=$f,tmp1                  // f+=ROTATE(a,5)
        mux2    $X[$i&0xf]=$X[$i&0xf],0x44
        nop.i   0                       };;

___
        }
else    {
        $code.=<<___;
        and     tmp0=$c,$b
        dep.z   tmp5=$a,5,27            }   // a<<5 ;;?
{ .mmi; andcm   tmp1=$d,$b
        add     tmp4=$e,$K_00_19        };;
{ .mmi; or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        add     $f=tmp4,$X[$i&0xf]          // f=xi+e+K_00_19
        extr.u  tmp1=$a,27,5            }   // a>>27
{ .mmi; xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
        xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
        nop.i   0                       };;
{ .mmi; add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
        xor     tmp2=tmp2,tmp3              // +1
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
{ .mii; add     $f=$f,tmp1                  // f+=ROTATE(a,5)
        shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
        mux2    $X[$i&0xf]=$X[$i&0xf],0x44  };;

___
        }
}

sub BODY_16_19 {
local   *code=shift;
local   ($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___;
{ .mmi; mov     $X[$i&0xf]=$f               // Xupdate
        and     tmp0=$c,$b
        dep.z   tmp5=$a,5,27            }   // a<<5
{ .mmi; andcm   tmp1=$d,$b
        add     tmp4=$e,$K_00_19        };;
{ .mmi; or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
        add     $f=$f,tmp4                  // f+=e+K_00_19
        extr.u  tmp1=$a,27,5            }   // a>>27
{ .mmi; xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
        xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
        nop.i   0                       };;
{ .mmi; add     $f=$f,tmp0                  // f+=F_00_19(b,c,d)
        xor     tmp2=tmp2,tmp3              // +1
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
{ .mii; add     $f=$f,tmp1                  // f+=ROTATE(a,5)
        shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;

___
}

sub BODY_20_39 {
local   *code=shift;
local   ($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
        $Konst = $K_20_39 if (!defined($Konst));

if ($i<79) {
$code.=<<___;
{ .mib; mov     $X[$i&0xf]=$f               // Xupdate
        dep.z   tmp5=$a,5,27            }   // a<<5
{ .mib; xor     tmp0=$c,$b
        add     tmp4=$e,$Konst          };;
{ .mmi; xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
        add     $f=$f,tmp4                  // f+=e+K_20_39
        extr.u  tmp1=$a,27,5            }   // a>>27
{ .mmi; xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
        xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
        nop.i   0                       };;
{ .mmi; add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
        xor     tmp2=tmp2,tmp3              // +1
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
{ .mii; add     $f=$f,tmp1                  // f+=ROTATE(a,5)
        shrp    $e=tmp2,tmp2,31             // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
        nop.i   0                       };;

___
}
else {
$code.=<<___;
{ .mib; mov     $X[$i&0xf]=$f               // Xupdate
        dep.z   tmp5=$a,5,27            }   // a<<5
{ .mib; xor     tmp0=$c,$b
        add     tmp4=$e,$Konst          };;
{ .mib; xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
        extr.u  tmp1=$a,27,5            }   // a>>27
{ .mib; add     $f=$f,tmp4                  // f+=e+K_20_39
        add     $h1=$h1,$a              };; // wrap up
{ .mmi; add     $f=$f,tmp0                  // f+=F_20_39(b,c,d)
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30) ;;?
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        add     $h3=$h3,$c              };; // wrap up
{ .mib; add     tmp3=1,inp                  // used in unaligned codepath
        add     $f=$f,tmp1              }   // f+=ROTATE(a,5)
{ .mib; add     $h2=$h2,$b                  // wrap up
        add     $h4=$h4,$d              };; // wrap up

___
}
}

sub BODY_40_59 {
local   *code=shift;
local   ($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___;
{ .mmi; mov     $X[$i&0xf]=$f               // Xupdate
        and     tmp0=$c,$b
        dep.z   tmp5=$a,5,27            }   // a<<5
{ .mmi; and     tmp1=$d,$b
        add     tmp4=$e,$K_40_59        };;
{ .mmi; or      tmp0=tmp0,tmp1              // (b&c)|(b&d)
        add     $f=$f,tmp4                  // f+=e+K_40_59
        extr.u  tmp1=$a,27,5            }   // a>>27
{ .mmi; and     tmp4=$c,$d
        xor     tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]  // +1
        xor     tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
        };;
{ .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
        xor     tmp2=tmp2,tmp3              // +1
        shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
{ .mmi; or      tmp0=tmp0,tmp4              // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
        mux2    tmp6=$a,0x44            };; // see b in next iteration
{ .mii; add     $f=$f,tmp0                  // f+=F_40_59(b,c,d)
        shrp    $e=tmp2,tmp2,31;;           // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
        add     $f=$f,tmp1              };; // f+=ROTATE(a,5)

___
}
sub BODY_60_79  { &BODY_20_39(@_,$K_60_79); }

$code.=<<___;
.text

tmp0=r8;
tmp1=r9;
tmp2=r10;
tmp3=r11;
ctx=r32;        // in0
inp=r33;        // in1

// void sha1_block_asm_data_order(SHA_CTX *c,const void *p,size_t num);
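// in0..in2 (r32..r34) carry the three arguments per the IA-64 calling
// convention; num (in2) seeds the ar.lc loop counter in the prologue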
.global sha1_block_asm_data_order#
.proc   sha1_block_asm_data_order#
.align  32
sha1_block_asm_data_order:
        .prologue
{ .mmi; alloc   tmp1=ar.pfs,3,15,0,0
        $ADDP   tmp0=4,ctx
        .save   ar.lc,r3
        mov     r3=ar.lc                }
{ .mmi; $ADDP   ctx=0,ctx
        $ADDP   inp=0,inp
        mov     r2=pr                   };;
tmp4=in2;
tmp5=loc13;
tmp6=loc14;
        .body
{ .mlx; ld4     $h0=[ctx],8
        movl    $K_00_19=0x5a827999     }
{ .mlx; ld4     $h1=[tmp0],8
        movl    $K_20_39=0x6ed9eba1     };;
{ .mlx; ld4     $h2=[ctx],8
        movl    $K_40_59=0x8f1bbcdc     }
{ .mlx; ld4     $h3=[tmp0]
        movl    $K_60_79=0xca62c1d6     };;
{ .mmi; ld4     $h4=[ctx],-16
        add     in2=-1,in2                  // adjust num for ar.lc
        mov     ar.ec=1                 };;
{ .mmi; nop.m   0
        add     tmp3=1,inp
        mov     ar.lc=in2               };; // brp.loop.imp: too far
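// ar.lc now holds num-1; the br.ctop at the bottom of .Ldtop
// decrements it and branches back, once per 64-byte input block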

.Ldtop:
{ .mmi; mov     $A=$h0
        mov     $B=$h1
        mux2    tmp6=$h1,0x44           }
{ .mmi; mov     $C=$h2
        mov     $D=$h3
        mov     $E=$h4                  };;

___

{ my $i; my @V=($A,$B,$C,$D,$E,$T);

        for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V,1); unshift(@V,pop(@V)); }
        for(;$i<20;$i++)        { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<40;$i++)        { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<60;$i++)        { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
        for(;$i<80;$i++)        { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }

        (($V[5] eq $D) and ($V[0] eq $E)) or die;       # double-check
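        # 80 rotations of the six-entry list amount to a net rotation
        # by 80 mod 6 = 2, leaving E and D in slots 0 and 5.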
}

$code.=<<___;
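// the final BODY_60_79 round already added four of the five state
// words into h1..h4 ("wrap up" above); only h0 remains here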
{ .mmb; add     $h0=$h0,$E
        nop.m   0
        br.ctop.dptk.many       .Ldtop  };;
.Ldend:
{ .mmi; add     tmp0=4,ctx
        mov     ar.lc=r3                };;
{ .mmi; st4     [ctx]=$h0,8
        st4     [tmp0]=$h1,8            };;
{ .mmi; st4     [ctx]=$h2,8
        st4     [tmp0]=$h3              };;
{ .mib; st4     [ctx]=$h4,-16
        mov     pr=r2,0x1ffff
        br.ret.sptk.many        b0      };;
.endp   sha1_block_asm_data_order#
___

print $code;