Unified - adapt the generation of sha assembler to use GENERATE
[openssl.git] / crypto / sha / asm / sha1-ia64.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # Eternal question is what's wrong with compiler generated code? The
11 # trick is that it's possible to reduce the number of shifts required
12 # to perform rotations by maintaining copy of 32-bit value in upper
13 # bits of 64-bit register. Just follow mux2 and shrp instructions...
14 # Performance under big-endian OS such as HP-UX is 179MBps*1GHz, which
15 # is >50% better than HP C and >2x better than gcc.
16
17 $output = pop;
18
19 $code=<<___;
20 .ident  \"sha1-ia64.s, version 1.3\"
21 .ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
22 .explicit
23
24 ___
25
26
27 if ($^O eq "hpux") {
28     $ADDP="addp4";
29     for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
30 } else { $ADDP="add"; }
31
32 #$human=1;
33 if ($human) {   # useful for visual code auditing...
34         ($A,$B,$C,$D,$E)   = ("A","B","C","D","E");
35         ($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
36         ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
37             (   "K_00_19","K_20_39","K_40_59","K_60_79" );
38         @X= (   "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
39                 "X8", "X9","X10","X11","X12","X13","X14","X15"  );
40 }
41 else {
42         ($A,$B,$C,$D,$E)   =    ("loc0","loc1","loc2","loc3","loc4");
43         ($h0,$h1,$h2,$h3,$h4) = ("loc5","loc6","loc7","loc8","loc9");
44         ($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
45             (   "r14", "r15", "loc10", "loc11"  );
46         @X= (   "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
47                 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"  );
48 }
49
50 sub BODY_00_15 {
51 local   *code=shift;
52 my      ($i,$a,$b,$c,$d,$e)=@_;
53 my      $j=$i+1;
54 my      $Xn=@X[$j%16];
55
56 $code.=<<___ if ($i==0);
57 { .mmi; ld1     $X[$i]=[inp],2              // MSB
58         ld1     tmp2=[tmp3],2           };;
59 { .mmi; ld1     tmp0=[inp],2
60         ld1     tmp4=[tmp3],2               // LSB
61         dep     $X[$i]=$X[$i],tmp2,8,8  };;
62 ___
63 if ($i<15) {
64         $code.=<<___;
65 { .mmi; ld1     $Xn=[inp],2                 // forward Xload
66         nop.m   0x0
67         dep     tmp1=tmp0,tmp4,8,8      };;
68 { .mmi; ld1     tmp2=[tmp3],2               // forward Xload
69         and     tmp4=$c,$b
70         dep     $X[$i]=$X[$i],tmp1,16,16} //;;
71 { .mmi; add     $e=$e,$K_00_19              // e+=K_00_19
72         andcm   tmp1=$d,$b
73         dep.z   tmp5=$a,5,27            };; // a<<5
74 { .mmi; add     $e=$e,$X[$i]                // e+=Xload
75         or      tmp4=tmp4,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
76         extr.u  tmp1=$a,27,5            };; // a>>27
77 { .mmi; ld1     tmp0=[inp],2                // forward Xload
78         add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
79         shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
80 { .mmi; ld1     tmp4=[tmp3],2               // forward Xload
81         or      tmp5=tmp1,tmp5              // ROTATE(a,5)
82         mux2    tmp6=$a,0x44            };; // see b in next iteration
83 { .mii; add     $e=$e,tmp5                  // e+=ROTATE(a,5)
84         dep     $Xn=$Xn,tmp2,8,8            // forward Xload
85         mux2    $X[$i]=$X[$i],0x44      } //;;
86
87 ___
88         }
89 else    {
90         $code.=<<___;
91 { .mii; and     tmp3=$c,$b
92         dep     tmp1=tmp0,tmp4,8,8;;
93         dep     $X[$i]=$X[$i],tmp1,16,16} //;;
94 { .mmi; add     $e=$e,$K_00_19              // e+=K_00_19
95         andcm   tmp1=$d,$b
96         dep.z   tmp5=$a,5,27            };; // a<<5
97 { .mmi; add     $e=$e,$X[$i]                // e+=Xupdate
98         or      tmp4=tmp3,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
99         extr.u  tmp1=$a,27,5            }   // a>>27
100 { .mmi; xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
101         xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] // forward Xupdate
102         nop.i   0                       };;
103 { .mmi; add     $e=$e,tmp4                  // e+=F_00_19(b,c,d)
104         xor     $Xn=$Xn,tmp3                // forward Xupdate
105         shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
106 { .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
107         mux2    tmp6=$a,0x44            };; // see b in next iteration
108 { .mii; add     $e=$e,tmp1                  // e+=ROTATE(a,5)
109         shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
110         mux2    $X[$i]=$X[$i],0x44      };;
111
112 ___
113         }
114 }
115
116 sub BODY_16_19 {
117 local   *code=shift;
118 my      ($i,$a,$b,$c,$d,$e)=@_;
119 my      $j=$i+1;
120 my      $Xn=@X[$j%16];
121
122 $code.=<<___;
123 { .mib; add     $e=$e,$K_00_19              // e+=K_00_19
124         dep.z   tmp5=$a,5,27            }   // a<<5
125 { .mib; andcm   tmp1=$d,$b
126         and     tmp0=$c,$b              };;
127 { .mmi; add     $e=$e,$X[$i%16]             // e+=Xupdate
128         or      tmp0=tmp0,tmp1              // F_00_19(b,c,d)=(b&c)|(~b&d)
129         extr.u  tmp1=$a,27,5            }   // a>>27
130 { .mmi; xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
131         xor     tmp3=$X[($j+8)%16],$X[($j+13)%16]       // forward Xupdate
132         nop.i   0                       };;
133 { .mmi; add     $e=$e,tmp0                  // f+=F_00_19(b,c,d)
134         xor     $Xn=$Xn,tmp3                // forward Xupdate
135         shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
136 { .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
137         mux2    tmp6=$a,0x44            };; // see b in next iteration
138 { .mii; add     $e=$e,tmp1                  // e+=ROTATE(a,5)
139         shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
140         nop.i   0                       };;
141
142 ___
143 }
144
145 sub BODY_20_39 {
146 local   *code=shift;
147 my      ($i,$a,$b,$c,$d,$e,$Konst)=@_;
148         $Konst = $K_20_39 if (!defined($Konst));
149 my      $j=$i+1;
150 my      $Xn=@X[$j%16];
151
152 if ($i<79) {
153 $code.=<<___;
154 { .mib; add     $e=$e,$Konst                // e+=K_XX_XX
155         dep.z   tmp5=$a,5,27            }   // a<<5
156 { .mib; xor     tmp0=$c,$b
157         xor     $Xn=$Xn,$X[($j+2)%16]   };; // forward Xupdate
158 { .mib; add     $e=$e,$X[$i%16]             // e+=Xupdate
159         extr.u  tmp1=$a,27,5            }   // a>>27
160 { .mib; xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
161         xor     $Xn=$Xn,$X[($j+8)%16]   };; // forward Xupdate
162 { .mmi; add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
163         xor     $Xn=$Xn,$X[($j+13)%16]      // forward Xupdate
164         shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
165 { .mmi; or      tmp1=tmp1,tmp5              // ROTATE(a,5)
166         mux2    tmp6=$a,0x44            };; // see b in next iteration
167 { .mii; add     $e=$e,tmp1                  // e+=ROTATE(a,5)
168         shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
169         nop.i   0                       };;
170
171 ___
172 }
173 else {
174 $code.=<<___;
175 { .mib; add     $e=$e,$Konst                // e+=K_60_79
176         dep.z   tmp5=$a,5,27            }   // a<<5
177 { .mib; xor     tmp0=$c,$b
178         add     $h1=$h1,$a              };; // wrap up
179 { .mib; add     $e=$e,$X[$i%16]             // e+=Xupdate
180         extr.u  tmp1=$a,27,5            }   // a>>27
181 { .mib; xor     tmp0=tmp0,$d                // F_20_39(b,c,d)=b^c^d
182         add     $h3=$h3,$c              };; // wrap up
183 { .mmi; add     $e=$e,tmp0                  // e+=F_20_39(b,c,d)
184         or      tmp1=tmp1,tmp5              // ROTATE(a,5)
185         shrp    $b=tmp6,tmp6,2          };; // b=ROTATE(b,30) ;;?
186 { .mmi; add     $e=$e,tmp1                  // e+=ROTATE(a,5)
187         add     tmp3=1,inp                  // used in unaligned codepath
188         add     $h4=$h4,$d              };; // wrap up
189
190 ___
191 }
192 }
193
194 sub BODY_40_59 {
195 local   *code=shift;
196 my      ($i,$a,$b,$c,$d,$e)=@_;
197 my      $j=$i+1;
198 my      $Xn=@X[$j%16];
199
200 $code.=<<___;
201 { .mib; add     $e=$e,$K_40_59              // e+=K_40_59
202         dep.z   tmp5=$a,5,27            }   // a<<5
203 { .mib; and     tmp1=$c,$d
204         xor     tmp0=$c,$d              };;
205 { .mmi; add     $e=$e,$X[$i%16]             // e+=Xupdate
206         add     tmp5=tmp5,tmp1              // a<<5+(c&d)
207         extr.u  tmp1=$a,27,5            }   // a>>27
208 { .mmi; and     tmp0=tmp0,$b
209         xor     $Xn=$Xn,$X[($j+2)%16]       // forward Xupdate
210         xor     tmp3=$X[($j+8)%16],$X[($j+13)%16] };;   // forward Xupdate
211 { .mmi; add     $e=$e,tmp0                  // e+=b&(c^d)
212         add     tmp5=tmp5,tmp1              // ROTATE(a,5)+(c&d)
213         shrp    $b=tmp6,tmp6,2          }   // b=ROTATE(b,30)
214 { .mmi; xor     $Xn=$Xn,tmp3
215         mux2    tmp6=$a,0x44            };; // see b in next iteration
216 { .mii; add     $e=$e,tmp5                  // e+=ROTATE(a,5)+(c&d)
217         shrp    $Xn=$Xn,$Xn,31              // ROTATE(x[0]^x[2]^x[8]^x[13],1)
218         nop.i   0x0                     };;
219
220 ___
221 }
222 sub BODY_60_79  { &BODY_20_39(@_,$K_60_79); }
223
224 $code.=<<___;
225 .text
226
227 tmp0=r8;
228 tmp1=r9;
229 tmp2=r10;
230 tmp3=r11;
231 ctx=r32;        // in0
232 inp=r33;        // in1
233
234 // void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num);
235 .global sha1_block_data_order#
236 .proc   sha1_block_data_order#
237 .align  32
238 sha1_block_data_order:
239         .prologue
240 { .mmi; alloc   tmp1=ar.pfs,3,14,0,0
241         $ADDP   tmp0=4,ctx
242         .save   ar.lc,r3
243         mov     r3=ar.lc                }
244 { .mmi; $ADDP   ctx=0,ctx
245         $ADDP   inp=0,inp
246         mov     r2=pr                   };;
247 tmp4=in2;
248 tmp5=loc12;
249 tmp6=loc13;
250         .body
251 { .mlx; ld4     $h0=[ctx],8
252         movl    $K_00_19=0x5a827999     }
253 { .mlx; ld4     $h1=[tmp0],8
254         movl    $K_20_39=0x6ed9eba1     };;
255 { .mlx; ld4     $h2=[ctx],8
256         movl    $K_40_59=0x8f1bbcdc     }
257 { .mlx; ld4     $h3=[tmp0]
258         movl    $K_60_79=0xca62c1d6     };;
259 { .mmi; ld4     $h4=[ctx],-16
260         add     in2=-1,in2                  // adjust num for ar.lc
261         mov     ar.ec=1                 };;
262 { .mmi; nop.m   0
263         add     tmp3=1,inp
264         mov     ar.lc=in2               };; // brp.loop.imp: too far
265
266 .Ldtop:
267 { .mmi; mov     $A=$h0
268         mov     $B=$h1
269         mux2    tmp6=$h1,0x44           }
270 { .mmi; mov     $C=$h2
271         mov     $D=$h3
272         mov     $E=$h4                  };;
273
274 ___
275
276 { my $i;
277   my @V=($A,$B,$C,$D,$E);
278
279         for($i=0;$i<16;$i++)    { &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
280         for(;$i<20;$i++)        { &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
281         for(;$i<40;$i++)        { &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
282         for(;$i<60;$i++)        { &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
283         for(;$i<80;$i++)        { &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }
284
285         (($V[0] eq $A) and ($V[4] eq $E)) or die;       # double-check
286 }
287
288 $code.=<<___;
289 { .mmb; add     $h0=$h0,$A
290         add     $h2=$h2,$C
291         br.ctop.dptk.many       .Ldtop  };;
292 .Ldend:
293 { .mmi; add     tmp0=4,ctx
294         mov     ar.lc=r3                };;
295 { .mmi; st4     [ctx]=$h0,8
296         st4     [tmp0]=$h1,8            };;
297 { .mmi; st4     [ctx]=$h2,8
298         st4     [tmp0]=$h3              };;
299 { .mib; st4     [ctx]=$h4,-16
300         mov     pr=r2,0x1ffff
301         br.ret.sptk.many        b0      };;
302 .endp   sha1_block_data_order#
303 stringz "SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>"
304 ___
305
306 open STDOUT,">$output" if $output;
307 print $code;