3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # SHA1 block procedure for MIPS.
12 # Performance improvement is 30% on unaligned input. The "secret" is
13 # to deploy lwl/lwr pair to load unaligned input. One could have
14 # vectorized Xupdate on MIPSIII/IV, but the goal was to code MIPS32-
15 # compatible subroutine. There is room for minor optimization on
16 # little-endian platforms...
18 # The code is somewhat IRIX-centric, i.e. is likely to require minor
19 # adaptations for other OSes...
# Determine the target byte order.  An explicit -DB_ENDIAN / -DL_ENDIAN
# flag on the command line wins; otherwise fall back to probing the
# machine running this script: pack 1 in big-endian order ('N') and
# unpack it in native order ('L') -- the values agree only on a
# big-endian host.
21 for (@ARGV) { $big_endian=1 if (/\-DB_ENDIAN/);
22 $big_endian=0 if (/\-DL_ENDIAN/); }
23 if (!defined($big_endian))
24 { $big_endian=(unpack('L',pack('N',1))==1); }
26 # offsets of the Most and Least Significant Bytes
# The 16-word SHA-1 message schedule X[0..15] is kept entirely in MIPS
# registers $8..$23 (one 32-bit word per register).
30 @X=( "\$8", "\$9", "\$10", "\$11", "\$12", "\$13", "\$14", "\$15",
31 "\$16", "\$17", "\$18", "\$19", "\$20", "\$21", "\$22", "\$23");
# Working variable E lives in $24; @V bundles the five SHA-1 chaining
# variables A..E that the round bodies rotate through.
# NOTE(review): $A..$D are assigned on lines elided from this excerpt --
# confirm their register choices against the full file.
39 $E="\$24"; @V=($A,$B,$C,$D,$E);
48 my ($i,$a,$b,$c,$d,$e)=@_;
50 $code.=<<___ if (!$big_endian);
51 srl $t0,@X[$i],24 # byte swap($i)
53 andi $t2,@X[$i],0xFF00
62 lwl @X[$j],$j*4+$MSB($inp)
65 lwr @X[$j],$j*4+$LSB($inp)
81 my ($i,$a,$b,$c,$d,$e)=@_;
84 $code.=<<___ if (!$big_endian && $i==15);
85 srl $t0,@X[$i],24 # byte swap($i)
87 andi $t2,@X[$i],0xFF00
96 xor @X[$j%16],@X[($j+2)%16]
101 xor @X[$j%16],@X[($j+8)%16]
104 xor @X[$j%16],@X[($j+13)%16]
108 addu @X[$j%16],@X[$j%16]
119 my ($i,$a,$b,$c,$d,$e)=@_;
121 $code.=<<___ if ($i<79);
122 xor @X[$j%16],@X[($j+2)%16]
127 xor @X[$j%16],@X[($j+8)%16]
130 xor @X[$j%16],@X[($j+13)%16]
134 addu @X[$j%16],@X[$j%16]
141 $code.=<<___ if ($i==79);
163 my ($i,$a,$b,$c,$d,$e)=@_;
165 $code.=<<___ if ($i<79);
166 xor @X[$j%16],@X[($j+2)%16]
171 xor @X[$j%16],@X[($j+8)%16]
174 xor @X[$j%16],@X[($j+13)%16]
179 addu @X[$j%16],@X[$j%16]
198 .globl sha1_block_data_order
199 .ent sha1_block_data_order
200 sha1_block_data_order:
201 .frame sp,$FRAMESIZE*SZREG,zero
202 .mask 0xd0ff0000,-$FRAMESIZE*SZREG
204 PTR_SUB sp,$FRAMESIZE*SZREG
205 REG_S \$31,($FRAMESIZE-1)*SZREG(sp)
206 REG_S \$30,($FRAMESIZE-2)*SZREG(sp)
207 REG_S \$28,($FRAMESIZE-3)*SZREG(sp)
208 REG_S \$23,($FRAMESIZE-4)*SZREG(sp)
209 REG_S \$22,($FRAMESIZE-5)*SZREG(sp)
210 REG_S \$21,($FRAMESIZE-6)*SZREG(sp)
211 REG_S \$20,($FRAMESIZE-7)*SZREG(sp)
212 REG_S \$19,($FRAMESIZE-8)*SZREG(sp)
213 REG_S \$18,($FRAMESIZE-9)*SZREG(sp)
214 REG_S \$17,($FRAMESIZE-10)*SZREG(sp)
215 REG_S \$16,($FRAMESIZE-11)*SZREG(sp)
229 ori $K,0x7999 # K_00_19
231 for ($i=0;$i<15;$i++) { &BODY_00_14($i,@V); unshift(@V,pop(@V)); }
232 for (;$i<20;$i++) { &BODY_15_19($i,@V); unshift(@V,pop(@V)); }
235 ori $K,0xeba1 # K_20_39
237 for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
240 ori $K,0xbcdc # K_40_59
242 for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
245 ori $K,0xc1d6 # K_60_79
247 for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
265 REG_L \$31,($FRAMESIZE-1)*SZREG(sp)
266 REG_L \$30,($FRAMESIZE-2)*SZREG(sp)
267 REG_L \$28,($FRAMESIZE-3)*SZREG(sp)
268 REG_L \$23,($FRAMESIZE-4)*SZREG(sp)
269 REG_L \$22,($FRAMESIZE-5)*SZREG(sp)
270 REG_L \$21,($FRAMESIZE-6)*SZREG(sp)
271 REG_L \$20,($FRAMESIZE-7)*SZREG(sp)
272 REG_L \$19,($FRAMESIZE-8)*SZREG(sp)
273 REG_L \$18,($FRAMESIZE-9)*SZREG(sp)
274 REG_L \$17,($FRAMESIZE-10)*SZREG(sp)
275 REG_L \$16,($FRAMESIZE-11)*SZREG(sp)
277 PTR_ADD sp,$FRAMESIZE*SZREG
278 .end sha1_block_data_order