3 # ====================================================================
4 # [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
11 # functions were re-implemented to address P4 performance issue [see
12 # commentary below], and in 2006 the rest was rewritten in order to
13 # gain freedom to liberate licensing terms.
15 # January, September 2004.
17 # It was noted that Intel IA-32 C compiler generates code which
18 # performs ~30% *faster* on P4 CPU than original *hand-coded*
19 # SHA1 assembler implementation. To address this problem (and
20 # prove that humans are still better than machines:-), the
21 # original code was overhauled, which resulted in following
22 # performance changes:
24 # compared with original compared with Intel cc
25 # assembler impl. generated code
30 # As you can see Pentium came out as loser:-( Yet I reckoned that
31 # improvement on P4 outweighs the loss and incorporated this
32 # re-tuned code into 0.9.7 and later.
33 # ----------------------------------------------------------------
34 # <appro@fy.chalmers.se>
38 # George Spelvin has pointed out that F_40_59(b,c,d) can be rewritten as
39 # '(c&d) + (b&(c^d))', which allows one to accumulate partial results
40 # and lighten "pressure" on scratch registers. This resulted in
41 # >12% performance improvement on contemporary AMD cores (with no
42 # degradation on other CPUs:-). Also, the code was revised to maximize
43 # "distance" between instructions producing input to 'lea' instruction
44 # and the 'lea' instruction itself, which is essential for Intel Atom
47 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
48 push(@INC,"${dir}","${dir}../../perlasm");
51 &asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
61 @V=($A,$B,$C,$D,$E,$T);
65 local($n,$a,$b,$c,$d,$e,$f)=@_;
69 &mov($f,$c); # f to hold F_00_19(b,c,d)
70 if ($n==0) { &mov($tmp1,$a); }
71 else { &mov($a,$tmp1); }
72 &rotl($tmp1,5); # tmp1=ROTATE(a,5)
74 &add($tmp1,$e); # tmp1+=e;
75 &mov($e,&swtmp($n%16)); # e becomes volatile and is loaded
76 # with xi, also note that e becomes
79 &rotr($b,2); # b=ROTATE(b,30)
80 &xor($f,$d); # f holds F_00_19(b,c,d)
81 &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi
83 if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
84 &add($f,$tmp1); } # f+=tmp1
85 else { &add($tmp1,$f); } # f becomes a in next round
90 local($n,$a,$b,$c,$d,$e,$f)=@_;
94 &mov($tmp1,$c); # tmp1 to hold F_00_19(b,c,d)
95 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
97 &xor($f,&swtmp(($n+8)%16));
99 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
100 &rotl($f,1); # f=ROTATE(f,1)
101 &xor($tmp1,$d); # tmp1=F_00_19(b,c,d)
102 &add($e,$tmp1); # e+=F_00_19(b,c,d)
104 &rotr($b,2); # b=ROTATE(b,30)
105 &mov(&swtmp($n%16),$f); # xi=f
106 &rotl($tmp1,5); # ROTATE(a,5)
107 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
108 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
109 &add($f,$tmp1); # f+=ROTATE(a,5)
114 local($n,$a,$b,$c,$d,$e,$f)=@_;
115 local $K=($n<40)?0x6ed9eba1:0xca62c1d6;
117 &comment("20_39 $n");
119 &mov($tmp1,$b); # tmp1 to hold F_20_39(b,c,d)
120 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
122 &xor($f,&swtmp(($n+8)%16));
123 &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d)
124 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
125 &rotl($f,1); # f=ROTATE(f,1)
126 &add($e,$tmp1); # e+=F_20_39(b,c,d)
127 &rotr($b,2); # b=ROTATE(b,30)
129 &rotl($tmp1,5); # ROTATE(a,5)
130 &mov(&swtmp($n%16),$f) if($n<77);# xi=f
131 &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY
132 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
133 &add($f,$tmp1); # f+=ROTATE(a,5)
138 local($n,$a,$b,$c,$d,$e,$f)=@_;
140 &comment("40_59 $n");
142 &mov($tmp1,$c); # tmp1 to hold F_40_59(b,c,d)
143 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
145 &xor($f,&swtmp(($n+8)%16));
147 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
148 &rotl($f,1); # f=ROTATE(f,1)
149 &add($tmp1,$e); # b&(c^d)+=e
150 &rotr($b,2); # b=ROTATE(b,30)
151 &mov($e,$a); # e becomes volatile
152 &rotl($e,5); # ROTATE(a,5)
153 &mov(&swtmp($n%16),$f); # xi=f
154 &lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
156 &add($f,$e); # f+=ROTATE(a,5)
158 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
159 &add($f,$tmp1); # f+=c&d
162 &function_begin("sha1_block_data_order");
163 &mov($tmp1,&wparam(0)); # SHA_CTX *c
164 &mov($T,&wparam(1)); # const void *input
165 &mov($A,&wparam(2)); # size_t num
166 &stack_push(16); # allocate X[16]
169 &mov(&wparam(2),$A); # pointer beyond the end of input
170 &mov($E,&DWP(16,$tmp1));# pre-load E
172 &set_label("loop",16);
174 # copy input chunk to X, but reversing byte order!
175 for ($i=0; $i<16; $i+=4)
177 &mov($A,&DWP(4*($i+0),$T));
178 &mov($B,&DWP(4*($i+1),$T));
179 &mov($C,&DWP(4*($i+2),$T));
180 &mov($D,&DWP(4*($i+3),$T));
185 &mov(&swtmp($i+0),$A);
186 &mov(&swtmp($i+1),$B);
187 &mov(&swtmp($i+2),$C);
188 &mov(&swtmp($i+3),$D);
190 &mov(&wparam(1),$T); # redundant in 1st spin
192 &mov($A,&DWP(0,$tmp1)); # load SHA_CTX
193 &mov($B,&DWP(4,$tmp1));
194 &mov($C,&DWP(8,$tmp1));
195 &mov($D,&DWP(12,$tmp1));
198 for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
199 for(;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
200 for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
201 for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
202 for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
204 (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check
206 &mov($tmp1,&wparam(0)); # re-load SHA_CTX*
207 &mov($D,&wparam(1)); # D is last "T" and is discarded
209 &add($E,&DWP(0,$tmp1)); # E is last "A"...
210 &add($T,&DWP(4,$tmp1));
211 &add($A,&DWP(8,$tmp1));
212 &add($B,&DWP(12,$tmp1));
213 &add($C,&DWP(16,$tmp1));
215 &mov(&DWP(0,$tmp1),$E); # update SHA_CTX
216 &add($D,64); # advance input pointer
217 &mov(&DWP(4,$tmp1),$T);
218 &cmp($D,&wparam(2)); # have we reached the end yet?
219 &mov(&DWP(8,$tmp1),$A);
220 &mov($E,$C); # C is last "E" which needs to be "pre-loaded"
221 &mov(&DWP(12,$tmp1),$B);
222 &mov($T,$D); # input pointer
223 &mov(&DWP(16,$tmp1),$C);
227 &function_end("sha1_block_data_order");
228 &asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");