# crypto/*/Makefile: unify "catch-all" assembler make rules and harmonize
# [openssl.git] / crypto / sha / asm / sha1-586.pl
1 #!/usr/bin/env perl
2
3 # ====================================================================
4 # [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9
10 # "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
11 # functions were re-implemented to address P4 performance issue [see
12 # commentary below], and in 2006 the rest was rewritten in order to
13 # gain freedom to liberate licensing terms.
14
15 # January, September 2004.
16 #
17 # It was noted that Intel IA-32 C compiler generates code which
18 # performs ~30% *faster* on P4 CPU than original *hand-coded*
19 # SHA1 assembler implementation. To address this problem (and
20 # prove that humans are still better than machines:-), the
21 # original code was overhauled, which resulted in following
22 # performance changes:
23 #
24 #               compared with original  compared with Intel cc
25 #               assembler impl.         generated code
26 # Pentium       -16%                    +48%
27 # PIII/AMD      +8%                     +16%
28 # P4            +85%(!)                 +45%
29 #
30 # As you can see Pentium came out as loser:-( Yet I reckoned that
31 # improvement on P4 outweighs the loss and incorporated this
32 # re-tuned code into 0.9.7 and later.
33 # ----------------------------------------------------------------
34 #                                       <appro@fy.chalmers.se>
35
36 # August 2009.
37 #
38 # George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
39 # '(c&d) + (b&(c^d))', which allows to accumulate partial results
40 # and lighten "pressure" on scratch registers. This resulted in
41 # >12% performance improvement on contemporary AMD cores (with no
42 # degradation on other CPUs:-). Also, the code was revised to maximize
43 # "distance" between instructions producing input to 'lea' instruction
44 # and the 'lea' instruction itself, which is essential for Intel Atom
45 # core.
46
47 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
48 push(@INC,"${dir}","${dir}../../perlasm");
49 require "x86asm.pl";
50
51 &asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
52
53 $A="eax";
54 $B="ebx";
55 $C="ecx";
56 $D="edx";
57 $E="edi";
58 $T="esi";
59 $tmp1="ebp";
60
61 @V=($A,$B,$C,$D,$E,$T);
62
63 sub BODY_00_15
64         {
65         local($n,$a,$b,$c,$d,$e,$f)=@_;
66
67         &comment("00_15 $n");
68
69         &mov($f,$c);                    # f to hold F_00_19(b,c,d)
70          if ($n==0)  { &mov($tmp1,$a); }
71          else        { &mov($a,$tmp1); }
72         &rotl($tmp1,5);                 # tmp1=ROTATE(a,5)
73          &xor($f,$d);
74         &add($tmp1,$e);                 # tmp1+=e;
75          &mov($e,&swtmp($n%16));        # e becomes volatile and is loaded
76                                         # with xi, also note that e becomes
77                                         # f in next round...
78         &and($f,$b);
79         &rotr($b,2);                    # b=ROTATE(b,30)
80          &xor($f,$d);                   # f holds F_00_19(b,c,d)
81         &lea($tmp1,&DWP(0x5a827999,$tmp1,$e));  # tmp1+=K_00_19+xi
82
83         if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
84                       &add($f,$tmp1); } # f+=tmp1
85         else        { &add($tmp1,$f); } # f becomes a in next round
86         }
87
88 sub BODY_16_19
89         {
90         local($n,$a,$b,$c,$d,$e,$f)=@_;
91
92         &comment("16_19 $n");
93
94         &mov($tmp1,$c);                 # tmp1 to hold F_00_19(b,c,d)
95          &xor($f,&swtmp(($n+2)%16));    # f to hold Xupdate(xi,xa,xb,xc,xd)
96         &xor($tmp1,$d);
97          &xor($f,&swtmp(($n+8)%16));
98         &and($tmp1,$b);
99          &xor($f,&swtmp(($n+13)%16));   # f holds xa^xb^xc^xd
100         &rotl($f,1);                    # f=ROTATE(f,1)
101          &xor($tmp1,$d);                # tmp1=F_00_19(b,c,d)
102         &add($e,$tmp1);                 # e+=F_00_19(b,c,d)
103          &mov($tmp1,$a);
104         &rotr($b,2);                    # b=ROTATE(b,30)
105          &mov(&swtmp($n%16),$f);        # xi=f
106         &rotl($tmp1,5);                 # ROTATE(a,5)
107          &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
108         &mov($e,&swtmp(($n+1)%16));     # pre-fetch f for next round
109          &add($f,$tmp1);                # f+=ROTATE(a,5)
110         }
111
112 sub BODY_20_39
113         {
114         local($n,$a,$b,$c,$d,$e,$f)=@_;
115         local $K=($n<40)?0x6ed9eba1:0xca62c1d6;
116
117         &comment("20_39 $n");
118
119         &mov($tmp1,$b);                 # tmp1 to hold F_20_39(b,c,d)
120          &xor($f,&swtmp(($n+2)%16));    # f to hold Xupdate(xi,xa,xb,xc,xd)
121         &xor($tmp1,$c);
122          &xor($f,&swtmp(($n+8)%16));
123         &xor($tmp1,$d);                 # tmp1 holds F_20_39(b,c,d)
124          &xor($f,&swtmp(($n+13)%16));   # f holds xa^xb^xc^xd
125         &rotl($f,1);                    # f=ROTATE(f,1)
126          &add($e,$tmp1);                # e+=F_20_39(b,c,d)
127         &rotr($b,2);                    # b=ROTATE(b,30)
128          &mov($tmp1,$a);
129         &rotl($tmp1,5);                 # ROTATE(a,5)
130          &mov(&swtmp($n%16),$f) if($n<77);# xi=f
131         &lea($f,&DWP($K,$f,$e));        # f+=e+K_XX_YY
132          &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
133         &add($f,$tmp1);                 # f+=ROTATE(a,5)
134         }
135
136 sub BODY_40_59
137         {
138         local($n,$a,$b,$c,$d,$e,$f)=@_;
139
140         &comment("40_59 $n");
141
142         &mov($tmp1,$c);                 # tmp1 to hold F_40_59(b,c,d)
143          &xor($f,&swtmp(($n+2)%16));    # f to hold Xupdate(xi,xa,xb,xc,xd)
144         &xor($tmp1,$d);
145          &xor($f,&swtmp(($n+8)%16));
146         &and($tmp1,$b);
147          &xor($f,&swtmp(($n+13)%16));   # f holds xa^xb^xc^xd
148         &rotl($f,1);                    # f=ROTATE(f,1)
149          &add($tmp1,$e);                # b&(c^d)+=e
150         &rotr($b,2);                    # b=ROTATE(b,30)
151          &mov($e,$a);                   # e becomes volatile
152         &rotl($e,5);                    # ROTATE(a,5)
153          &mov(&swtmp($n%16),$f);        # xi=f
154         &lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
155          &mov($tmp1,$c);
156         &add($f,$e);                    # f+=ROTATE(a,5)
157          &and($tmp1,$d);
158         &mov($e,&swtmp(($n+1)%16));     # pre-fetch f for next round
159          &add($f,$tmp1);                # f+=c&d
160         }
161
162 &function_begin("sha1_block_data_order");
163         &mov($tmp1,&wparam(0)); # SHA_CTX *c
164         &mov($T,&wparam(1));    # const void *input
165         &mov($A,&wparam(2));    # size_t num
166         &stack_push(16);        # allocate X[16]
167         &shl($A,6);
168         &add($A,$T);
169         &mov(&wparam(2),$A);    # pointer beyond the end of input
170         &mov($E,&DWP(16,$tmp1));# pre-load E
171
172         &set_label("loop",16);
173
174         # copy input chunk to X, but reversing byte order!
175         for ($i=0; $i<16; $i+=4)
176                 {
177                 &mov($A,&DWP(4*($i+0),$T));
178                 &mov($B,&DWP(4*($i+1),$T));
179                 &mov($C,&DWP(4*($i+2),$T));
180                 &mov($D,&DWP(4*($i+3),$T));
181                 &bswap($A);
182                 &bswap($B);
183                 &bswap($C);
184                 &bswap($D);
185                 &mov(&swtmp($i+0),$A);
186                 &mov(&swtmp($i+1),$B);
187                 &mov(&swtmp($i+2),$C);
188                 &mov(&swtmp($i+3),$D);
189                 }
190         &mov(&wparam(1),$T);    # redundant in 1st spin
191
192         &mov($A,&DWP(0,$tmp1)); # load SHA_CTX
193         &mov($B,&DWP(4,$tmp1));
194         &mov($C,&DWP(8,$tmp1));
195         &mov($D,&DWP(12,$tmp1));
196         # E is pre-loaded
197
198         for($i=0;$i<16;$i++)    { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
199         for(;$i<20;$i++)        { &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
200         for(;$i<40;$i++)        { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
201         for(;$i<60;$i++)        { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
202         for(;$i<80;$i++)        { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
203
204         (($V[5] eq $D) and ($V[0] eq $E)) or die;       # double-check
205
206         &mov($tmp1,&wparam(0)); # re-load SHA_CTX*
207         &mov($D,&wparam(1));    # D is last "T" and is discarded
208
209         &add($E,&DWP(0,$tmp1)); # E is last "A"...
210         &add($T,&DWP(4,$tmp1));
211         &add($A,&DWP(8,$tmp1));
212         &add($B,&DWP(12,$tmp1));
213         &add($C,&DWP(16,$tmp1));
214
215         &mov(&DWP(0,$tmp1),$E); # update SHA_CTX
216          &add($D,64);           # advance input pointer
217         &mov(&DWP(4,$tmp1),$T);
218          &cmp($D,&wparam(2));   # have we reached the end yet?
219         &mov(&DWP(8,$tmp1),$A);
220          &mov($E,$C);           # C is last "E" which needs to be "pre-loaded"
221         &mov(&DWP(12,$tmp1),$B);
222          &mov($T,$D);           # input pointer
223         &mov(&DWP(16,$tmp1),$C);
224         &jb(&label("loop"));
225
226         &stack_pop(16);
227 &function_end("sha1_block_data_order");
228 &asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
229
230 &asm_finish();