# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The eternal question is: what's wrong with compiler-generated code?
# The trick is that it's possible to reduce the number of shifts
# required to perform rotations by maintaining a copy of the 32-bit
# value in the upper bits of a 64-bit register. Just follow the mux2
# and shrp instructions... Performance under a big-endian OS such as
# HP-UX is 179MBps*1GHz, which is >50% better than HP C and >2x
# better than gcc.
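#
# A minimal sketch of the trick in plain Perl (illustrative only, not
# used by the generated code; assumes 64-bit perl integers): replicating
# the 32-bit value into both halves of a 64-bit word is what mux2 with
# pattern 0x44 does, after which any rotation is a single shift (what
# shrp does) instead of the usual two shifts and an or.

sub rot32_demo {
	my ($x,$n) = @_;			# rotate 32-bit $x left by $n
	my $t = ($x<<32)|$x;			# mux2	t=x,0x44
	return ($t>>(32-$n))&0xffffffff;	# shrp	r=t,t,32-n
}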
.ident	\"sha1-ia64.s, version 1.1\"
.ident	\"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
if ($^O eq "hpux") {
	$ADDP="addp4";
	for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
} else { $ADDP="add"; }
for (@ARGV) {	$big_endian=1 if (/\-DB_ENDIAN/);
		$big_endian=0 if (/\-DL_ENDIAN/); }
if (!defined($big_endian))
	{ $big_endian=(unpack('L',pack('N',1))==1); }
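# The probe works because pack('N',1) always yields the big-endian byte
# string "\0\0\0\1", which unpack('L') reads back in native order: the
# result compares equal to 1 only on a big-endian host.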
if ($human) {	# useful for visual code auditing...
	($A,$B,$C,$D,$E,$T) = ("A","B","C","D","E","T");
	($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"K_00_19","K_20_39","K_40_59","K_60_79"	);
	@X= (	"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
		"X8", "X9","X10","X11","X12","X13","X14","X15"	);
} else {
	($A,$B,$C,$D,$E,$T) = ("loc0","loc1","loc2","loc3","loc4","loc5");
	($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"r14", "r15", "loc11", "loc12"	);
	@X= (	"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
		"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"	);
}
local	($i,$a,$b,$c,$d,$e,$f,$unaligned)=@_;

{ .mmi;	ld1	tmp0=[inp],2		// MSB
	ld1	tmp1=[tmp3],2 };;
{ .mmi;	ld1	tmp2=[inp],2
	ld1	$X[$i&0xf]=[tmp3],2	// LSB
	dep	tmp1=tmp0,tmp1,8,8 };;
{ .mii;	cmp.ne	p16,p0=r0,r0		// no misaligned prefetch
	dep	$X[$i&0xf]=tmp2,$X[$i&0xf],8,8;;
	dep	$X[$i&0xf]=tmp1,$X[$i&0xf],16,16 };;
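// Note: the ld1 byte loads above cannot fault on misaligned input; the
// dep merges then assemble the big-endian 32-bit word X[i] from the
// four individual bytes.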
{ .mmi;	ld4	$X[($i+1)&0xf]=[inp],4	// prefetch
	dep.z	tmp5=$a,5,27 }		// a<<5
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp4=$e,$K_00_19 };;
{ .mmi;	or	tmp0=tmp0,tmp1		// F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp4,$X[$i&0xf]	// f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5 };;	// a>>27
{ .mib;	add	$f=$f,tmp0		// f+=F_00_19(b,c,d)
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30)
{ .mib;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	mux2	tmp6=$a,0x44 };;	// see b in next iteration
{ .mii;	add	$f=$f,tmp1		// f+=ROTATE(a,5)
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44

	dep.z	tmp5=$a,5,27 }		// a<<5 ;;?
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp4=$e,$K_00_19 };;
{ .mmi;	or	tmp0=tmp0,tmp1		// F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp4,$X[$i&0xf]	// f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5 }		// a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
{ .mmi;	add	$f=$f,tmp0		// f+=F_00_19(b,c,d)
	xor	tmp2=tmp2,tmp3		// +1
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	mux2	tmp6=$a,0x44 };;	// see b in next iteration
{ .mii;	add	$f=$f,tmp1		// f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		// f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44 };;
local	($i,$a,$b,$c,$d,$e,$f)=@_;

{ .mmi;	mov	$X[$i&0xf]=$f		// Xupdate
	and	tmp0=$c,$b
	dep.z	tmp5=$a,5,27 }		// a<<5
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp4=$e,$K_00_19 };;
{ .mmi;	or	tmp0=tmp0,tmp1		// F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=$f,tmp4		// f+=e+K_00_19
	extr.u	tmp1=$a,27,5 }		// a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
{ .mmi;	add	$f=$f,tmp0		// f+=F_00_19(b,c,d)
	xor	tmp2=tmp2,tmp3		// +1
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	mux2	tmp6=$a,0x44 };;	// see b in next iteration
{ .mii;	add	$f=$f,tmp1		// f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		// f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
local	($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
$Konst = $K_20_39 if (!defined($Konst));

{ .mib;	mov	$X[$i&0xf]=$f		// Xupdate
	dep.z	tmp5=$a,5,27 }		// a<<5
{ .mib;	xor	tmp0=$c,$b
	add	tmp4=$e,$Konst };;
{ .mmi;	xor	tmp0=tmp0,$d		// F_20_39(b,c,d)=b^c^d
	add	$f=$f,tmp4		// f+=e+K_20_39
	extr.u	tmp1=$a,27,5 }		// a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
{ .mmi;	add	$f=$f,tmp0		// f+=F_20_39(b,c,d)
	xor	tmp2=tmp2,tmp3		// +1
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30)
{ .mmi;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	mux2	tmp6=$a,0x44 };;	// see b in next iteration
{ .mii;	add	$f=$f,tmp1		// f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		// f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
{ .mib;	mov	$X[$i&0xf]=$f		// Xupdate
	dep.z	tmp5=$a,5,27 }		// a<<5
{ .mib;	xor	tmp0=$c,$b
	add	tmp4=$e,$Konst };;
{ .mib;	xor	tmp0=tmp0,$d		// F_20_39(b,c,d)=b^c^d
	extr.u	tmp1=$a,27,5 }		// a>>27
{ .mib;	add	$f=$f,tmp4		// f+=e+K_20_39
	add	$h1=$h1,$a };;		// wrap up
{ .mmi;	add	$f=$f,tmp0		// f+=F_20_39(b,c,d)
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30) ;;?
{ .mmi;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	add	$h3=$h3,$c };;		// wrap up
{ .mib;	add	tmp3=1,inp		// used in unaligned codepath
	add	$f=$f,tmp1 }		// f+=ROTATE(a,5)
{ .mib;	add	$h2=$h2,$b		// wrap up
	add	$h4=$h4,$d };;		// wrap up
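// Note: the "wrap up" adds above fold the h1..h4 chaining updates into
// otherwise idle slots of the final rounds; h0 is updated just before
// the loop branch below.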
local	($i,$a,$b,$c,$d,$e,$f)=@_;

{ .mmi;	mov	$X[$i&0xf]=$f		// Xupdate
	and	tmp0=$c,$b
	dep.z	tmp5=$a,5,27 }		// a<<5
{ .mmi;	and	tmp1=$d,$b
	add	tmp4=$e,$K_40_59 };;
{ .mmi;	or	tmp0=tmp0,tmp1		// (b&c)|(b&d)
	add	$f=$f,tmp4		// f+=e+K_40_59
	extr.u	tmp1=$a,27,5 }		// a>>27
{ .mmi;	and	tmp4=$c,$d
	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
{ .mmi;	or	tmp1=tmp1,tmp5		// ROTATE(a,5)
	xor	tmp2=tmp2,tmp3		// +1
	shrp	$b=tmp6,tmp6,2 }	// b=ROTATE(b,30)
{ .mmi;	or	tmp0=tmp0,tmp4		// F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
	mux2	tmp6=$a,0x44 };;	// see b in next iteration
{ .mii;	add	$f=$f,tmp0		// f+=F_40_59(b,c,d)
	shrp	$e=tmp2,tmp2,31;;	// f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	add	$f=$f,tmp1 };;		// f+=ROTATE(a,5)
sub BODY_60_79	{ &BODY_20_39(@_,$K_60_79); }
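# For reference, a plain-Perl model of the round functions implemented
# by the bodies above (a sketch for illustration only; never invoked by
# the generator):
sub F_ref {
	my ($i,$b,$c,$d) = @_;
	return ($b&$c)|(~$b&$d)		if ($i<20);		# F_00_19
	return ($b&$c)|($b&$d)|($c&$d)	if ($i>=40 && $i<60);	# F_40_59
	return $b^$c^$d;					# F_20_39, F_60_79
}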
// void sha1_block_asm_data_order(SHA_CTX *c,const void *p,size_t num);
.global	sha1_block_asm_data_order#
.proc	sha1_block_asm_data_order#
sha1_block_asm_data_order:
{ .mmi;	alloc	tmp1=ar.pfs,3,15,0,0
{ .mmi;	$ADDP	ctx=0,ctx
{ .mlx;	ld4	$h0=[ctx],8
	movl	$K_00_19=0x5a827999 }
{ .mlx;	ld4	$h1=[tmp0],8
	movl	$K_20_39=0x6ed9eba1 };;
{ .mlx;	ld4	$h2=[ctx],8
	movl	$K_40_59=0x8f1bbcdc }
{ .mlx;	ld4	$h3=[tmp0]
	movl	$K_60_79=0xca62c1d6 };;
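// The movl immediates above are the standard SHA-1 round constants,
// floor(2^30*sqrt(n)) for n=2,3,5,10 respectively.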
{ .mmi;	ld4	$h4=[ctx],-16
	add	in2=-1,in2		// adjust num for ar.lc
	mov	ar.lc=in2 };;		// brp.loop.imp: too far
{ my $i; my @V=($A,$B,$C,$D,$E,$T);

	for($i=0;$i<16;$i++)	{ &BODY_00_15(\$code,$i,@V,1); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
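	# Each body computes the new working value into $f, and the
	# unshift(pop(@V)) rotation makes that $f the next round's $a,
	# so no register moves are needed; the die above verifies that
	# after 80 rounds the tuple lines up as the epilogue expects.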
{ .mmb;	add	$h0=$h0,$E
	br.ctop.dptk.many	.Ldtop };;
{ .mmi;	add	tmp0=4,ctx
{ .mmi;	st4	[ctx]=$h0,8
{ .mmi;	st4	[ctx]=$h2,8
{ .mib;	st4	[ctx]=$h4,-16
	br.ret.sptk.many	b0 };;
.endp	sha1_block_asm_data_order#