Unified - adapt the generation of sha assembler to use GENERATE
[openssl.git] / crypto / sha / asm / sha512-586.pl
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # SHA512 block transform for x86. September 2007.
11 #
12 # May 2013.
13 #
14 # Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
15 #
16 # Performance in clock cycles per processed byte (less is better):
17 #
18 #               gcc     icc     x86 asm SIMD(*) x86_64(**)
19 # Pentium       100     97      61      -       -
20 # PIII          75      77      56      -       -
21 # P4            116     95      82      34.6    30.8
22 # AMD K8        54      55      36      20.7    9.57
23 # Core2         66      57      40      15.9    9.97
24 # Westmere      70      -       38      12.2    9.58
25 # Sandy Bridge  58      -       35      11.9    11.2
26 # Ivy Bridge    50      -       33      11.5    8.17
27 # Haswell       46      -       29      11.3    7.66
28 # Bulldozer     121     -       50      14.0    13.5
29 # VIA Nano      91      -       52      33      14.7
30 # Atom          126     -       68      48(***) 14.7
31 # Silvermont    97      -       58      42(***) 17.5
32 #
33 # (*)   whichever best applicable.
34 # (**)  x86_64 assembler performance is presented for reference
35 #       purposes, the results are for integer-only code.
36 # (***) paddq is incredibly slow on Atom.
37 #
38 # IALU code-path is optimized for elder Pentiums. On vanilla Pentium
39 # performance improvement over compiler generated code reaches ~60%,
40 # while on PIII - ~35%. On newer µ-archs improvement varies from 15%
41 # to 50%, but it's less important as they are expected to execute SSE2
42 # code-path, which is commonly ~2-3x faster [than compiler generated
43 # code]. SSE2 code-path is as fast as original sha512-sse2.pl, even
44 # though it does not use 128-bit operations. The latter means that
45 # SSE2-aware kernel is no longer required to execute the code. Another
46 # difference is that new code optimizes amount of writes, but at the
47 # cost of increased data cache "footprint" by 1/2KB.
48
# Locate this script's own directory so the perlasm helper modules can
# be found both in the source tree and from a build directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

# The last command-line argument names the output file.  All assembler
# emitted by the perlasm back-end goes to STDOUT, so redirect it there.
# Use the checked 3-argument open (the 2-argument, unchecked form would
# silently mis-parse mode characters in $output and hide I/O failures).
$output=pop;
open STDOUT,'>',$output or die "sha512-586.pl: can't open $output: $!";

&asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");

# SSE2/SSSE3 code paths are generated only when the build passes
# -DOPENSSL_IA32_SSE2 on the command line.
$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

# Capability vector is only referenced by the SIMD paths.
&external_label("OPENSSL_ia32cap_P") if ($sse2);
62
# Memory operands shared by the code generators below.
#
# Pure-x86 path: the eight 64-bit working variables A..H (plus the
# scratch/rotating slot T at the frame base) are kept as lo/hi 32-bit
# dword pairs at fixed offsets from %esp.
63 $Tlo=&DWP(0,"esp");     $Thi=&DWP(4,"esp");
64 $Alo=&DWP(8,"esp");     $Ahi=&DWP(8+4,"esp");
65 $Blo=&DWP(16,"esp");    $Bhi=&DWP(16+4,"esp");
66 $Clo=&DWP(24,"esp");    $Chi=&DWP(24+4,"esp");
67 $Dlo=&DWP(32,"esp");    $Dhi=&DWP(32+4,"esp");
68 $Elo=&DWP(40,"esp");    $Ehi=&DWP(40+4,"esp");
69 $Flo=&DWP(48,"esp");    $Fhi=&DWP(48+4,"esp");
70 $Glo=&DWP(56,"esp");    $Ghi=&DWP(56+4,"esp");
71 $Hlo=&DWP(64,"esp");    $Hhi=&DWP(64+4,"esp");
# %ebp holds the running pointer into the K512 round-constant table.
72 $K512="ebp";
73
# SSE2 path: the same variables as whole qwords in the stack frame,
# used to spill/reload values that do not fit in the 8 MMX registers.
74 $Asse2=&QWP(0,"esp");
75 $Bsse2=&QWP(8,"esp");
76 $Csse2=&QWP(16,"esp");
77 $Dsse2=&QWP(24,"esp");
78 $Esse2=&QWP(32,"esp");
79 $Fsse2=&QWP(40,"esp");
80 $Gsse2=&QWP(48,"esp");
81 $Hsse2=&QWP(56,"esp");
82
# MMX register assignments for the SSE2/SSSE3 round bodies.
83 $A="mm0";       # B-D and
84 $E="mm4";       # F-H are commonly loaded to respectively mm1-mm3 and
85                 # mm5-mm7, but it's done on on-demand basis...
86 $BxC="mm2";     # ... except for B^C
87
# Emit one round of the SSE2 (MMX) code path.
#
# $phase selects the software-pipelining variant:
#   undef/0 - rounds 0..14: f and g are (re)loaded here, no prefetch
#             of the next X[i];
#   1       - round 15: last round before the 16..79 loop;
#   2       - rounds 16..79: the next X[i] is prefetched and
#             h += Sigma0(a) is folded in, after which the $A/$BxC
#             register aliases are swapped (register rotation).
#
# Clobbers mm1/mm3/mm5/mm6/mm7, advances $K512 by 8 and opens a new
# stack slot (sub esp,8) for the sliding X[] window.
#
# Fixes vs. previous revision: the "save X[i]" line was missing its
# statement separator and only worked because the following "&movq"
# happened to parse as a bitwise AND of two call results; also QWP is
# now invoked as &QWP, consistent with the rest of the file.  Emitted
# instruction stream is unchanged.
sub BODY_00_15_sse2 {
    my $phase=shift;

	#&movq	("mm5",$Fsse2);			# load f
	#&movq	("mm6",$Gsse2);			# load g

	&movq	("mm1",$E);			# %mm1 is sliding right
	 &pxor	("mm5","mm6");			# f^=g
	&psrlq	("mm1",14);
	 &movq	($Esse2,$E);			# modulo-scheduled save e
	 &pand	("mm5",$E);			# f&=e
	&psllq	($E,23);			# $E is sliding left
	 &movq	($A,"mm3")			if ($phase<2);
	 &movq	(&QWP(8*9,"esp"),"mm7");	# save X[i]
	&movq	("mm3","mm1");			# %mm3 is T1
	 &psrlq	("mm1",4);
	 &pxor	("mm5","mm6");			# Ch(e,f,g)
	&pxor	("mm3",$E);
	 &psllq	($E,23);
	&pxor	("mm3","mm1");
	 &movq	($Asse2,$A);			# modulo-scheduled save a
	 &paddq	("mm7","mm5");			# X[i]+=Ch(e,f,g)
	&pxor	("mm3",$E);
	 &psrlq	("mm1",23);
	 &paddq	("mm7",$Hsse2);			# X[i]+=h
	&pxor	("mm3","mm1");
	 &psllq	($E,4);
	 &paddq	("mm7",&QWP(0,$K512));		# X[i]+=K512[i]
	&pxor	("mm3",$E);			# T1=Sigma1_512(e)

	 &movq	($E,$Dsse2);			# e = load d, e in next round
	&paddq	("mm3","mm7");			# T1+=X[i]
	 &movq	("mm5",$A);			# %mm5 is sliding right
	 &psrlq	("mm5",28);
	&paddq	($E,"mm3");			# d += T1
	 &movq	("mm6",$A);			# %mm6 is sliding left
	 &movq	("mm7","mm5");
	 &psllq	("mm6",25);
	&movq	("mm1",$Bsse2);			# load b
	 &psrlq	("mm5",6);
	 &pxor	("mm7","mm6");
	&sub	("esp",8);
	 &psllq	("mm6",5);
	 &pxor	("mm7","mm5");
	&pxor	($A,"mm1");			# a^b, b^c in next round
	 &psrlq	("mm5",5);
	 &pxor	("mm7","mm6");
	&pand	($BxC,$A);			# (b^c)&(a^b)
	 &psllq	("mm6",6);
	 &pxor	("mm7","mm5");
	&pxor	($BxC,"mm1");			# [h=]Maj(a,b,c)
	 &pxor	("mm6","mm7");			# Sigma0_512(a)
	 &movq	("mm7",&QWP(8*(9+16-1),"esp"))	if ($phase!=0);	# pre-fetch
	 &movq	("mm5",$Fsse2)			if ($phase==0);	# load f

    if ($phase>1) {
	&paddq	($BxC,"mm6");			# h+=Sigma0(a)
	 &add	($K512,8);
	#&paddq	($BxC,"mm3");			# h+=T1

	($A,$BxC) = ($BxC,$A);			# rotate registers
    } else {
	&paddq	("mm3",$BxC);			# T1+=Maj(a,b,c)
	 &movq	($BxC,$A);
	 &add	($K512,8);
	&paddq	("mm3","mm6");			# T1+=Sigma0(a)
	 &movq	("mm6",$Gsse2)			if ($phase==0);	# load g
	#&movq	($A,"mm3");			# h=T1
    }
}
158
# Emit one round of the pure-x86 (no SSE2) SHA-512 code path.
#
# The 64-bit working variables live as lo/hi dword pairs on the stack
# ($Alo/$Ahi .. $Hlo/$Hhi, scratch $Tlo/$Thi); every 64-bit rotate is
# decomposed into shr/shl/xor on the two halves, and every 64-bit add
# into add/adc.  Emits: T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + X[i];
# d += T1; then T1 += Sigma0(a) + Maj(a,b,c) and T1 is stored at the
# frame base, where the closing "sub esp,8" shifts the variable window
# so that the just-stored T1 becomes the new a.  Also advances $K512
# by one constant.
# NOTE(review): the final byte load of *K into dl feeds the callers'
# loop-termination tests (cmp dl,0x94 / 0x17) -- presumably matching
# known LSBs in the K512 table; verify against the (unseen) table.
159 sub BODY_00_15_x86 {
160         #define Sigma1(x)       (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
161         #       LO              lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
162         #       HI              hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
163         &mov    ("ecx",$Elo);
164         &mov    ("edx",$Ehi);
165         &mov    ("esi","ecx");
166
167         &shr    ("ecx",9);      # lo>>9
168         &mov    ("edi","edx");
169         &shr    ("edx",9);      # hi>>9
170         &mov    ("ebx","ecx");
171         &shl    ("esi",14);     # lo<<14
172         &mov    ("eax","edx");
173         &shl    ("edi",14);     # hi<<14
174         &xor    ("ebx","esi");
175
176         &shr    ("ecx",14-9);   # lo>>14
177         &xor    ("eax","edi");
178         &shr    ("edx",14-9);   # hi>>14
179         &xor    ("eax","ecx");
180         &shl    ("esi",18-14);  # lo<<18
181         &xor    ("ebx","edx");
182         &shl    ("edi",18-14);  # hi<<18
183         &xor    ("ebx","esi");
184
185         &shr    ("ecx",18-14);  # lo>>18
186         &xor    ("eax","edi");
187         &shr    ("edx",18-14);  # hi>>18
188         &xor    ("eax","ecx");
189         &shl    ("esi",23-18);  # lo<<23
190         &xor    ("ebx","edx");
191         &shl    ("edi",23-18);  # hi<<23
192         &xor    ("eax","esi");
193         &xor    ("ebx","edi");                  # T1 = Sigma1(e)
194
195         &mov    ("ecx",$Flo);
196         &mov    ("edx",$Fhi);
197         &mov    ("esi",$Glo);
198         &mov    ("edi",$Ghi);
199          &add   ("eax",$Hlo);
200          &adc   ("ebx",$Hhi);                   # T1 += h
201         &xor    ("ecx","esi");
202         &xor    ("edx","edi");
203         &and    ("ecx",$Elo);
204         &and    ("edx",$Ehi);
205          &add   ("eax",&DWP(8*(9+15)+0,"esp"));
206          &adc   ("ebx",&DWP(8*(9+15)+4,"esp")); # T1 += X[0]
207         &xor    ("ecx","esi");
208         &xor    ("edx","edi");                  # Ch(e,f,g) = ((f^g)&e)^g
209
210         &mov    ("esi",&DWP(0,$K512));
211         &mov    ("edi",&DWP(4,$K512));          # K[i]
212         &add    ("eax","ecx");
213         &adc    ("ebx","edx");                  # T1 += Ch(e,f,g)
214         &mov    ("ecx",$Dlo);
215         &mov    ("edx",$Dhi);
216         &add    ("eax","esi");
217         &adc    ("ebx","edi");                  # T1 += K[i]
218         &mov    ($Tlo,"eax");
219         &mov    ($Thi,"ebx");                   # put T1 away
220         &add    ("eax","ecx");
221         &adc    ("ebx","edx");                  # d += T1
222
223         #define Sigma0(x)       (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
224         #       LO              lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
225         #       HI              hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
226         &mov    ("ecx",$Alo);
227         &mov    ("edx",$Ahi);
228         &mov    ($Dlo,"eax");
229         &mov    ($Dhi,"ebx");
230         &mov    ("esi","ecx");
231
232         &shr    ("ecx",2);      # lo>>2
233         &mov    ("edi","edx");
234         &shr    ("edx",2);      # hi>>2
235         &mov    ("ebx","ecx");
236         &shl    ("esi",4);      # lo<<4
237         &mov    ("eax","edx");
238         &shl    ("edi",4);      # hi<<4
239         &xor    ("ebx","esi");
240
241         &shr    ("ecx",7-2);    # lo>>7
242         &xor    ("eax","edi");
243         &shr    ("edx",7-2);    # hi>>7
244         &xor    ("ebx","ecx");
245         &shl    ("esi",25-4);   # lo<<25
246         &xor    ("eax","edx");
247         &shl    ("edi",25-4);   # hi<<25
248         &xor    ("eax","esi");
249
250         &shr    ("ecx",28-7);   # lo>>28
251         &xor    ("ebx","edi");
252         &shr    ("edx",28-7);   # hi>>28
253         &xor    ("eax","ecx");
254         &shl    ("esi",30-25);  # lo<<30
255         &xor    ("ebx","edx");
256         &shl    ("edi",30-25);  # hi<<30
257         &xor    ("eax","esi");
258         &xor    ("ebx","edi");                  # Sigma0(a)
259
260         &mov    ("ecx",$Alo);
261         &mov    ("edx",$Ahi);
262         &mov    ("esi",$Blo);
263         &mov    ("edi",$Bhi);
264         &add    ("eax",$Tlo);
265         &adc    ("ebx",$Thi);                   # T1 = Sigma0(a)+T1
266         &or     ("ecx","esi");
267         &or     ("edx","edi");
268         &and    ("ecx",$Clo);
269         &and    ("edx",$Chi);
270         &and    ("esi",$Alo);
271         &and    ("edi",$Ahi);
272         &or     ("ecx","esi");
273         &or     ("edx","edi");                  # Maj(a,b,c) = ((a|b)&c)|(a&b)
274
275         &add    ("eax","ecx");
276         &adc    ("ebx","edx");                  # T1 += Maj(a,b,c)
277         &mov    ($Tlo,"eax");
278         &mov    ($Thi,"ebx");
279
280         &mov    (&LB("edx"),&BP(0,$K512));      # pre-fetch LSB of *K
281         &sub    ("esp",8);
282         &lea    ($K512,&DWP(8,$K512));          # K++
283 }
284
285
286 &function_begin("sha512_block_data_order");
287         &mov    ("esi",wparam(0));      # ctx
288         &mov    ("edi",wparam(1));      # inp
289         &mov    ("eax",wparam(2));      # num
290         &mov    ("ebx","esp");          # saved sp
291
292         &call   (&label("pic_point"));  # make it PIC!
293 &set_label("pic_point");
294         &blindpop($K512);
295         &lea    ($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
296
297         &sub    ("esp",16);
298         &and    ("esp",-64);
299
300         &shl    ("eax",7);
301         &add    ("eax","edi");
302         &mov    (&DWP(0,"esp"),"esi");  # ctx
303         &mov    (&DWP(4,"esp"),"edi");  # inp
304         &mov    (&DWP(8,"esp"),"eax");  # inp+num*128
305         &mov    (&DWP(12,"esp"),"ebx"); # saved sp
306
307 if ($sse2) {
308         &picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
309         &mov    ("ecx",&DWP(0,"edx"));
310         &test   ("ecx",1<<26);
311         &jz     (&label("loop_x86"));
312
313         &mov    ("edx",&DWP(4,"edx"));
314
315         # load ctx->h[0-7]
316         &movq   ($A,&QWP(0,"esi"));
317          &and   ("ecx",1<<24);          # XMM registers availability
318         &movq   ("mm1",&QWP(8,"esi"));
319          &and   ("edx",1<<9);           # SSSE3 bit
320         &movq   ($BxC,&QWP(16,"esi"));
321          &or    ("ecx","edx");
322         &movq   ("mm3",&QWP(24,"esi"));
323         &movq   ($E,&QWP(32,"esi"));
324         &movq   ("mm5",&QWP(40,"esi"));
325         &movq   ("mm6",&QWP(48,"esi"));
326         &movq   ("mm7",&QWP(56,"esi"));
327         &cmp    ("ecx",1<<24|1<<9);
328         &je     (&label("SSSE3"));
329         &sub    ("esp",8*10);
330         &jmp    (&label("loop_sse2"));
331
332 &set_label("loop_sse2",16);
333         #&movq  ($Asse2,$A);
334         &movq   ($Bsse2,"mm1");
335         &movq   ($Csse2,$BxC);
336         &movq   ($Dsse2,"mm3");
337         #&movq  ($Esse2,$E);
338         &movq   ($Fsse2,"mm5");
339         &movq   ($Gsse2,"mm6");
340         &pxor   ($BxC,"mm1");                   # magic
341         &movq   ($Hsse2,"mm7");
342         &movq   ("mm3",$A);                     # magic
343
344         &mov    ("eax",&DWP(0,"edi"));
345         &mov    ("ebx",&DWP(4,"edi"));
346         &add    ("edi",8);
347         &mov    ("edx",15);                     # counter
348         &bswap  ("eax");
349         &bswap  ("ebx");
350         &jmp    (&label("00_14_sse2"));
351
352 &set_label("00_14_sse2",16);
353         &movd   ("mm1","eax");
354         &mov    ("eax",&DWP(0,"edi"));
355         &movd   ("mm7","ebx");
356         &mov    ("ebx",&DWP(4,"edi"));
357         &add    ("edi",8);
358         &bswap  ("eax");
359         &bswap  ("ebx");
360         &punpckldq("mm7","mm1");
361
362         &BODY_00_15_sse2();
363
364         &dec    ("edx");
365         &jnz    (&label("00_14_sse2"));
366
367         &movd   ("mm1","eax");
368         &movd   ("mm7","ebx");
369         &punpckldq("mm7","mm1");
370
371         &BODY_00_15_sse2(1);
372
373         &pxor   ($A,$A);                        # A is in %mm3
374         &mov    ("edx",32);                     # counter
375         &jmp    (&label("16_79_sse2"));
376
377 &set_label("16_79_sse2",16);
378     for ($j=0;$j<2;$j++) {                      # 2x unroll
379         #&movq  ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15 
380         &movq   ("mm5",&QWP(8*(9+16-14),"esp"));
381         &movq   ("mm1","mm7");
382         &psrlq  ("mm7",1);
383          &movq  ("mm6","mm5");
384          &psrlq ("mm5",6);
385         &psllq  ("mm1",56);
386          &paddq ($A,"mm3");                     # from BODY_00_15
387          &movq  ("mm3","mm7");
388         &psrlq  ("mm7",7-1);
389          &pxor  ("mm3","mm1");
390          &psllq ("mm1",63-56);
391         &pxor   ("mm3","mm7");
392          &psrlq ("mm7",8-7);
393         &pxor   ("mm3","mm1");
394          &movq  ("mm1","mm5");
395          &psrlq ("mm5",19-6);
396         &pxor   ("mm7","mm3");                  # sigma0
397
398          &psllq ("mm6",3);
399          &pxor  ("mm1","mm5");
400         &paddq  ("mm7",&QWP(8*(9+16),"esp"));
401          &pxor  ("mm1","mm6");
402          &psrlq ("mm5",61-19);
403         &paddq  ("mm7",&QWP(8*(9+16-9),"esp"));
404          &pxor  ("mm1","mm5");
405          &psllq ("mm6",45-3);
406         &movq   ("mm5",$Fsse2);                 # load f
407          &pxor  ("mm1","mm6");                  # sigma1
408         &movq   ("mm6",$Gsse2);                 # load g
409
410         &paddq  ("mm7","mm1");                  # X[i]
411         #&movq  (&QWP(8*9,"esp"),"mm7");        # moved to BODY_00_15
412
413         &BODY_00_15_sse2(2);
414     }
415         &dec    ("edx");
416         &jnz    (&label("16_79_sse2"));
417
418         #&movq  ($A,$Asse2);
419         &paddq  ($A,"mm3");                     # from BODY_00_15
420         &movq   ("mm1",$Bsse2);
421         #&movq  ($BxC,$Csse2);
422         &movq   ("mm3",$Dsse2);
423         #&movq  ($E,$Esse2);
424         &movq   ("mm5",$Fsse2);
425         &movq   ("mm6",$Gsse2);
426         &movq   ("mm7",$Hsse2);
427
428         &pxor   ($BxC,"mm1");                   # de-magic
429         &paddq  ($A,&QWP(0,"esi"));
430         &paddq  ("mm1",&QWP(8,"esi"));
431         &paddq  ($BxC,&QWP(16,"esi"));
432         &paddq  ("mm3",&QWP(24,"esi"));
433         &paddq  ($E,&QWP(32,"esi"));
434         &paddq  ("mm5",&QWP(40,"esi"));
435         &paddq  ("mm6",&QWP(48,"esi"));
436         &paddq  ("mm7",&QWP(56,"esi"));
437
438         &mov    ("eax",8*80);
439         &movq   (&QWP(0,"esi"),$A);
440         &movq   (&QWP(8,"esi"),"mm1");
441         &movq   (&QWP(16,"esi"),$BxC);
442         &movq   (&QWP(24,"esi"),"mm3");
443         &movq   (&QWP(32,"esi"),$E);
444         &movq   (&QWP(40,"esi"),"mm5");
445         &movq   (&QWP(48,"esi"),"mm6");
446         &movq   (&QWP(56,"esi"),"mm7");
447
448         &lea    ("esp",&DWP(0,"esp","eax"));    # destroy frame
449         &sub    ($K512,"eax");                  # rewind K
450
451         &cmp    ("edi",&DWP(8*10+8,"esp"));     # are we done yet?
452         &jb     (&label("loop_sse2"));
453
454         &mov    ("esp",&DWP(8*10+12,"esp"));    # restore sp
455         &emms   ();
456 &function_end_A();
457
458 &set_label("SSSE3",32);
459 { my ($cnt,$frame)=("ecx","edx");
460   my @X=map("xmm$_",(0..7));
461   my $j;
462   my $i=0;
463
464         &lea    ($frame,&DWP(-64,"esp"));
465         &sub    ("esp",256);
466
467         # fixed stack frame layout
468         #
469         # +0    A B C D E F G H         # backing store
470         # +64   X[0]+K[i] .. X[15]+K[i] # XMM->MM xfer area
471         # +192                          # XMM off-load ring buffer
472         # +256                          # saved parameters
473
474         &movdqa         (@X[1],&QWP(80*8,$K512));               # byte swap mask
475         &movdqu         (@X[0],&QWP(0,"edi"));
476         &pshufb         (@X[0],@X[1]);
477     for ($j=0;$j<8;$j++) {
478         &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3])      if ($j>4); # off-load
479         &movdqa         (@X[3],&QWP(16*($j%8),$K512));
480         &movdqa         (@X[2],@X[1])                           if ($j<7); # perpetuate byte swap mask
481         &movdqu         (@X[1],&QWP(16*($j+1),"edi"))           if ($j<7); # next input
482         &movdqa         (@X[1],&QWP(16*(($j+1)%4),$frame))      if ($j==7);# restore @X[0]
483         &paddq          (@X[3],@X[0]);
484         &pshufb         (@X[1],@X[2])                           if ($j<7);
485         &movdqa         (&QWP(16*($j%8)-128,$frame),@X[3]);     # xfer X[i]+K[i]
486
487         push(@X,shift(@X));                                     # rotate(@X)
488     }
489         #&jmp           (&label("loop_ssse3"));
490         &nop            ();
491
492 &set_label("loop_ssse3",32);
493         &movdqa         (@X[2],&QWP(16*(($j+1)%4),$frame));     # pre-restore @X[1]
494         &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3]);     # off-load @X[3]
495         &lea            ($K512,&DWP(16*8,$K512));
496
497         #&movq  ($Asse2,$A);                    # off-load A-H
498         &movq   ($Bsse2,"mm1");
499          &mov   ("ebx","edi");
500         &movq   ($Csse2,$BxC);
501          &lea   ("edi",&DWP(128,"edi"));        # advance input
502         &movq   ($Dsse2,"mm3");
503          &cmp   ("edi","eax");
504         #&movq  ($Esse2,$E);
505         &movq   ($Fsse2,"mm5");
506          &cmovb ("ebx","edi");
507         &movq   ($Gsse2,"mm6");
508          &mov   ("ecx",4);                      # loop counter
509         &pxor   ($BxC,"mm1");                   # magic
510         &movq   ($Hsse2,"mm7");
511         &pxor   ("mm3","mm3");                  # magic
512
513         &jmp            (&label("00_47_ssse3"));
514
# Round body for the SSSE3 code path, returned as a LIST of Perl code
# strings rather than executed directly: the caller evals them one at
# a time, interleaving each MMX round instruction with the xmm message
# schedule (see the eval(shift(@insns)) loops below).  "Phase-less"
# variant of BODY_00_15_sse2: X[i]+K[i] arrives pre-computed from the
# xfer area at $frame-128, and the A..H variables round-robin through
# eight stack qword slots indexed by the closed-over counter $i
# (captured from the enclosing block), which is decremented and the
# $A/$BxC register aliases swapped by the final string.
515 sub BODY_00_15_ssse3 {          # "phase-less" copy of BODY_00_15_sse2
516         (
517         '&movq  ("mm1",$E)',                            # %mm1 is sliding right
518         '&movq  ("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
519          '&pxor ("mm5","mm6")',                         # f^=g
520         '&psrlq ("mm1",14)',
521          '&movq (&QWP(8*($i+4)%64,"esp"),$E)',          # modulo-scheduled save e
522          '&pand ("mm5",$E)',                            # f&=e
523         '&psllq ($E,23)',                               # $E is sliding left
524         '&paddq ($A,"mm3")',                            # [h+=Maj(a,b,c)]
525         '&movq  ("mm3","mm1")',                         # %mm3 is T1
526          '&psrlq("mm1",4)',
527          '&pxor ("mm5","mm6")',                         # Ch(e,f,g)
528         '&pxor  ("mm3",$E)',
529          '&psllq($E,23)',
530         '&pxor  ("mm3","mm1")',
531          '&movq (&QWP(8*$i%64,"esp"),$A)',              # modulo-scheduled save a
532          '&paddq("mm7","mm5")',                         # X[i]+=Ch(e,f,g)
533         '&pxor  ("mm3",$E)',
534          '&psrlq("mm1",23)',
535          '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))',       # X[i]+=h
536         '&pxor  ("mm3","mm1")',
537          '&psllq($E,4)',
538         '&pxor  ("mm3",$E)',                            # T1=Sigma1_512(e)
539
540          '&movq ($E,&QWP(8*($i+3)%64,"esp"))',          # e = load d, e in next round
541         '&paddq ("mm3","mm7")',                         # T1+=X[i]
542          '&movq ("mm5",$A)',                            # %mm5 is sliding right
543          '&psrlq("mm5",28)',
544         '&paddq ($E,"mm3")',                            # d += T1
545          '&movq ("mm6",$A)',                            # %mm6 is sliding left
546          '&movq ("mm7","mm5")',
547          '&psllq("mm6",25)',
548         '&movq  ("mm1",&QWP(8*($i+1)%64,"esp"))',       # load b
549          '&psrlq("mm5",6)',
550          '&pxor ("mm7","mm6")',
551          '&psllq("mm6",5)',
552          '&pxor ("mm7","mm5")',
553         '&pxor  ($A,"mm1")',                            # a^b, b^c in next round
554          '&psrlq("mm5",5)',
555          '&pxor ("mm7","mm6")',
556         '&pand  ($BxC,$A)',                             # (b^c)&(a^b)
557          '&psllq("mm6",6)',
558          '&pxor ("mm7","mm5")',
559         '&pxor  ($BxC,"mm1")',                          # [h=]Maj(a,b,c)
560          '&pxor ("mm6","mm7")',                         # Sigma0_512(a)
561          '&movq ("mm5",&QWP(8*($i+5-1)%64,"esp"))',     # pre-load f
562         '&paddq ($BxC,"mm6")',                          # h+=Sigma0(a)
563          '&movq ("mm6",&QWP(8*($i+6-1)%64,"esp"))',     # pre-load g
564
565         '($A,$BxC) = ($BxC,$A); $i--;'
566         );
567 }
568
569 &set_label("00_47_ssse3",32);
570
571     for(;$j<16;$j++) {
572         my ($t0,$t2,$t1)=@X[2..4];
573         my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
574
575         &movdqa         ($t2,@X[5]);
576         &movdqa         (@X[1],$t0);                    # restore @X[1]
577         &palignr        ($t0,@X[0],8);                  # X[1..2]
578         &movdqa         (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4]
579          &palignr       ($t2,@X[4],8);                  # X[9..10]
580
581         &movdqa         ($t1,$t0);
582         &psrlq          ($t0,7);
583          &paddq         (@X[0],$t2);                    # X[0..1] += X[9..10]
584         &movdqa         ($t2,$t1);
585         &psrlq          ($t1,1);
586         &psllq          ($t2,64-8);
587         &pxor           ($t0,$t1);
588         &psrlq          ($t1,8-1);
589         &pxor           ($t0,$t2);
590         &psllq          ($t2,8-1);
591         &pxor           ($t0,$t1);
592          &movdqa        ($t1,@X[7]);
593         &pxor           ($t0,$t2);                      # sigma0(X[1..2])
594          &movdqa        ($t2,@X[7]);
595          &psrlq         ($t1,6);
596         &paddq          (@X[0],$t0);                    # X[0..1] += sigma0(X[1..2])
597
598         &movdqa         ($t0,@X[7]);
599         &psrlq          ($t2,19);
600         &psllq          ($t0,64-61);
601         &pxor           ($t1,$t2);
602         &psrlq          ($t2,61-19);
603         &pxor           ($t1,$t0);
604         &psllq          ($t0,61-19);
605         &pxor           ($t1,$t2);
606         &movdqa         ($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
607         &pxor           ($t1,$t0);                      # sigma0(X[1..2])
608         &movdqa         ($t0,&QWP(16*($j%8),$K512));
609          eval(shift(@insns));
610         &paddq          (@X[0],$t1);                    # X[0..1] += sigma0(X[14..15])
611          eval(shift(@insns));
612          eval(shift(@insns));
613          eval(shift(@insns));
614          eval(shift(@insns));
615         &paddq          ($t0,@X[0]);
616          foreach(@insns) { eval; }
617         &movdqa         (&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
618
619         push(@X,shift(@X));                             # rotate(@X)
620     }
621         &lea            ($K512,&DWP(16*8,$K512));
622         &dec            ("ecx");
623         &jnz            (&label("00_47_ssse3"));
624
625         &movdqa         (@X[1],&QWP(0,$K512));          # byte swap mask
626         &lea            ($K512,&DWP(-80*8,$K512));      # rewind
627         &movdqu         (@X[0],&QWP(0,"ebx"));
628         &pshufb         (@X[0],@X[1]);
629
630     for ($j=0;$j<8;$j++) {      # load next or same block
631         my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
632
633         &movdqa         (&QWP(16*(($j-1)%4),$frame),@X[3])      if ($j>4); # off-load
634         &movdqa         (@X[3],&QWP(16*($j%8),$K512));
635         &movdqa         (@X[2],@X[1])                           if ($j<7); # perpetuate byte swap mask
636         &movdqu         (@X[1],&QWP(16*($j+1),"ebx"))           if ($j<7); # next input
637         &movdqa         (@X[1],&QWP(16*(($j+1)%4),$frame))      if ($j==7);# restore @X[0]
638         &paddq          (@X[3],@X[0]);
639         &pshufb         (@X[1],@X[2])                           if ($j<7);
640          foreach(@insns) { eval; }
641         &movdqa         (&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
642
643         push(@X,shift(@X));                             # rotate(@X)
644     }
645
646         #&movq  ($A,$Asse2);                    # load A-H
647         &movq   ("mm1",$Bsse2);
648         &paddq  ($A,"mm3");                     # from BODY_00_15
649         #&movq  ($BxC,$Csse2);
650         &movq   ("mm3",$Dsse2);
651         #&movq  ($E,$Esse2);
652         #&movq  ("mm5",$Fsse2);
653         #&movq  ("mm6",$Gsse2);
654         &movq   ("mm7",$Hsse2);
655
656         &pxor   ($BxC,"mm1");                   # de-magic
657         &paddq  ($A,&QWP(0,"esi"));
658         &paddq  ("mm1",&QWP(8,"esi"));
659         &paddq  ($BxC,&QWP(16,"esi"));
660         &paddq  ("mm3",&QWP(24,"esi"));
661         &paddq  ($E,&QWP(32,"esi"));
662         &paddq  ("mm5",&QWP(40,"esi"));
663         &paddq  ("mm6",&QWP(48,"esi"));
664         &paddq  ("mm7",&QWP(56,"esi"));
665
666         &movq   (&QWP(0,"esi"),$A);
667         &movq   (&QWP(8,"esi"),"mm1");
668         &movq   (&QWP(16,"esi"),$BxC);
669         &movq   (&QWP(24,"esi"),"mm3");
670         &movq   (&QWP(32,"esi"),$E);
671         &movq   (&QWP(40,"esi"),"mm5");
672         &movq   (&QWP(48,"esi"),"mm6");
673         &movq   (&QWP(56,"esi"),"mm7");
674
675         &cmp    ("edi","eax")                   # are we done yet?
676         &jb     (&label("loop_ssse3"));
677
678         &mov    ("esp",&DWP(64+12,$frame));     # restore sp
679         &emms   ();
680 }
681 &function_end_A();
682 }
683 &set_label("loop_x86",16);
684     # copy input block to stack reversing byte and qword order
685     for ($i=0;$i<8;$i++) {
686         &mov    ("eax",&DWP($i*16+0,"edi"));
687         &mov    ("ebx",&DWP($i*16+4,"edi"));
688         &mov    ("ecx",&DWP($i*16+8,"edi"));
689         &mov    ("edx",&DWP($i*16+12,"edi"));
690         &bswap  ("eax");
691         &bswap  ("ebx");
692         &bswap  ("ecx");
693         &bswap  ("edx");
694         &push   ("eax");
695         &push   ("ebx");
696         &push   ("ecx");
697         &push   ("edx");
698     }
699         &add    ("edi",128);
700         &sub    ("esp",9*8);            # place for T,A,B,C,D,E,F,G,H
701         &mov    (&DWP(8*(9+16)+4,"esp"),"edi");
702
703         # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
704         &lea    ("edi",&DWP(8,"esp"));
705         &mov    ("ecx",16);
706         &data_word(0xA5F3F689);         # rep movsd
707
708 &set_label("00_15_x86",16);
709         &BODY_00_15_x86();
710
711         &cmp    (&LB("edx"),0x94);
712         &jne    (&label("00_15_x86"));
713
714 &set_label("16_79_x86",16);
715         #define sigma0(x)       (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
716         #       LO              lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
717         #       HI              hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
718         &mov    ("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
719         &mov    ("edx",&DWP(8*(9+15+16-1)+4,"esp"));
720         &mov    ("esi","ecx");
721
722         &shr    ("ecx",1);      # lo>>1
723         &mov    ("edi","edx");
724         &shr    ("edx",1);      # hi>>1
725         &mov    ("eax","ecx");
726         &shl    ("esi",24);     # lo<<24
727         &mov    ("ebx","edx");
728         &shl    ("edi",24);     # hi<<24
729         &xor    ("ebx","esi");
730
731         &shr    ("ecx",7-1);    # lo>>7
732         &xor    ("eax","edi");
733         &shr    ("edx",7-1);    # hi>>7
734         &xor    ("eax","ecx");
735         &shl    ("esi",31-24);  # lo<<31
736         &xor    ("ebx","edx");
737         &shl    ("edi",25-24);  # hi<<25
738         &xor    ("ebx","esi");
739
740         &shr    ("ecx",8-7);    # lo>>8
741         &xor    ("eax","edi");
742         &shr    ("edx",8-7);    # hi>>8
743         &xor    ("eax","ecx");
744         &shl    ("edi",31-25);  # hi<<31
745         &xor    ("ebx","edx");
746         &xor    ("eax","edi");                  # T1 = sigma0(X[-15])
747
748         &mov    (&DWP(0,"esp"),"eax");
749         &mov    (&DWP(4,"esp"),"ebx");          # put T1 away
750
	#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
	#	LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
	#	HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
	# Same technique as sigma0 above: fold the partial 32-bit shifts of
	# sigma1(X[-2]) together with XOR, low half in eax, high half in ebx.
	&mov	("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
	&mov	("edx",&DWP(8*(9+15+16-14)+4,"esp"));
	&mov	("esi","ecx");

	&shr	("ecx",6);	# lo>>6
	&mov	("edi","edx");
	&shr	("edx",6);	# hi>>6
	&mov	("eax","ecx");
	&shl	("esi",3);	# lo<<3
	&mov	("ebx","edx");
	&shl	("edi",3);	# hi<<3
	&xor	("eax","esi");

	&shr	("ecx",19-6);	# lo>>19
	&xor	("ebx","edi");
	&shr	("edx",19-6);	# hi>>19
	&xor	("eax","ecx");
	&shl	("esi",13-3);	# lo<<13
	&xor	("ebx","edx");
	&shl	("edi",13-3);	# hi<<13
	&xor	("ebx","esi");

	&shr	("ecx",29-19);	# lo>>29
	&xor	("eax","edi");
	&shr	("edx",29-19);	# hi>>29
	&xor	("ebx","ecx");
	&shl	("edi",26-13);	# hi<<26
	&xor	("eax","edx");
	&xor	("eax","edi");			# sigma1(X[-2])

	# X[0] = sigma1(X[-2]) + X[-7] + sigma0(X[-15]) + X[-16];
	# every 64-bit addition is an add/adc pair on the hi:lo halves
	&mov	("ecx",&DWP(8*(9+15+16)+0,"esp"));
	&mov	("edx",&DWP(8*(9+15+16)+4,"esp"));
	&add	("eax",&DWP(0,"esp"));
	&adc	("ebx",&DWP(4,"esp"));		# T1 = sigma1(X[-2])+T1
	&mov	("esi",&DWP(8*(9+15+16-9)+0,"esp"));
	&mov	("edi",&DWP(8*(9+15+16-9)+4,"esp"));
	&add	("eax","ecx");
	&adc	("ebx","edx");			# T1 += X[-16]
	&add	("eax","esi");
	&adc	("ebx","edi");			# T1 += X[-7]
	&mov	(&DWP(8*(9+15)+0,"esp"),"eax");
	&mov	(&DWP(8*(9+15)+4,"esp"),"ebx");	# save X[0]

	&BODY_00_15_x86();

	# NOTE(review): the loop counter byte tested here is maintained by
	# BODY_00_15_x86 (defined above, outside this view); 0x17 appears
	# to mark the last of the 16..79 rounds -- confirm against it.
	&cmp	(&LB("edx"),0x17);
	&jne	(&label("16_79_x86"));
801
	# Block done: fold the eight 64-bit working variables (stacked at
	# esp+8) back into the hash context. Each unrolled iteration below
	# handles 16 bytes, i.e. two u64 state words, via add/adc pairs.
	&mov	("esi",&DWP(8*(9+16+80)+0,"esp"));# ctx
	&mov	("edi",&DWP(8*(9+16+80)+4,"esp"));# inp
for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"esi"));
	&mov	("ebx",&DWP($i*16+4,"esi"));
	&mov	("ecx",&DWP($i*16+8,"esi"));
	&mov	("edx",&DWP($i*16+12,"esi"));
	&add	("eax",&DWP(8+($i*16)+0,"esp"));
	&adc	("ebx",&DWP(8+($i*16)+4,"esp"));
	&mov	(&DWP($i*16+0,"esi"),"eax");
	&mov	(&DWP($i*16+4,"esi"),"ebx");
	&add	("ecx",&DWP(8+($i*16)+8,"esp"));
	&adc	("edx",&DWP(8+($i*16)+12,"esp"));
	&mov	(&DWP($i*16+8,"esi"),"ecx");
	&mov	(&DWP($i*16+12,"esi"),"edx");
}
	&add	("esp",8*(9+16+80));		# destroy frame
	&sub	($K512,8*80);			# rewind K
	# NOTE(review): esp+8/esp+12 below index the saved end-of-input
	# pointer and original sp in the prologue-built frame (set up
	# above this view) -- confirm against the function prologue.
	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop_x86"));

	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
826
# SHA-512 round-constant table K[0..79] (see FIPS 180-4). Each
# &data_word pair emits one u64, low 32 bits first (little-endian),
# e.g. the first line is K[0] = 0x428a2f98d728ae22. The table is
# followed by the 16-byte big-endian byte-swap mask used by the
# SSSE3/SIMD code path.
&set_label("K512",64);	# Yes! I keep it in the code segment!
	&data_word(0xd728ae22,0x428a2f98);	# u64
	&data_word(0x23ef65cd,0x71374491);	# u64
	&data_word(0xec4d3b2f,0xb5c0fbcf);	# u64
	&data_word(0x8189dbbc,0xe9b5dba5);	# u64
	&data_word(0xf348b538,0x3956c25b);	# u64
	&data_word(0xb605d019,0x59f111f1);	# u64
	&data_word(0xaf194f9b,0x923f82a4);	# u64
	&data_word(0xda6d8118,0xab1c5ed5);	# u64
	&data_word(0xa3030242,0xd807aa98);	# u64
	&data_word(0x45706fbe,0x12835b01);	# u64
	&data_word(0x4ee4b28c,0x243185be);	# u64
	&data_word(0xd5ffb4e2,0x550c7dc3);	# u64
	&data_word(0xf27b896f,0x72be5d74);	# u64
	&data_word(0x3b1696b1,0x80deb1fe);	# u64
	&data_word(0x25c71235,0x9bdc06a7);	# u64
	&data_word(0xcf692694,0xc19bf174);	# u64
	&data_word(0x9ef14ad2,0xe49b69c1);	# u64
	&data_word(0x384f25e3,0xefbe4786);	# u64
	&data_word(0x8b8cd5b5,0x0fc19dc6);	# u64
	&data_word(0x77ac9c65,0x240ca1cc);	# u64
	&data_word(0x592b0275,0x2de92c6f);	# u64
	&data_word(0x6ea6e483,0x4a7484aa);	# u64
	&data_word(0xbd41fbd4,0x5cb0a9dc);	# u64
	&data_word(0x831153b5,0x76f988da);	# u64
	&data_word(0xee66dfab,0x983e5152);	# u64
	&data_word(0x2db43210,0xa831c66d);	# u64
	&data_word(0x98fb213f,0xb00327c8);	# u64
	&data_word(0xbeef0ee4,0xbf597fc7);	# u64
	&data_word(0x3da88fc2,0xc6e00bf3);	# u64
	&data_word(0x930aa725,0xd5a79147);	# u64
	&data_word(0xe003826f,0x06ca6351);	# u64
	&data_word(0x0a0e6e70,0x14292967);	# u64
	&data_word(0x46d22ffc,0x27b70a85);	# u64
	&data_word(0x5c26c926,0x2e1b2138);	# u64
	&data_word(0x5ac42aed,0x4d2c6dfc);	# u64
	&data_word(0x9d95b3df,0x53380d13);	# u64
	&data_word(0x8baf63de,0x650a7354);	# u64
	&data_word(0x3c77b2a8,0x766a0abb);	# u64
	&data_word(0x47edaee6,0x81c2c92e);	# u64
	&data_word(0x1482353b,0x92722c85);	# u64
	&data_word(0x4cf10364,0xa2bfe8a1);	# u64
	&data_word(0xbc423001,0xa81a664b);	# u64
	&data_word(0xd0f89791,0xc24b8b70);	# u64
	&data_word(0x0654be30,0xc76c51a3);	# u64
	&data_word(0xd6ef5218,0xd192e819);	# u64
	&data_word(0x5565a910,0xd6990624);	# u64
	&data_word(0x5771202a,0xf40e3585);	# u64
	&data_word(0x32bbd1b8,0x106aa070);	# u64
	&data_word(0xb8d2d0c8,0x19a4c116);	# u64
	&data_word(0x5141ab53,0x1e376c08);	# u64
	&data_word(0xdf8eeb99,0x2748774c);	# u64
	&data_word(0xe19b48a8,0x34b0bcb5);	# u64
	&data_word(0xc5c95a63,0x391c0cb3);	# u64
	&data_word(0xe3418acb,0x4ed8aa4a);	# u64
	&data_word(0x7763e373,0x5b9cca4f);	# u64
	&data_word(0xd6b2b8a3,0x682e6ff3);	# u64
	&data_word(0x5defb2fc,0x748f82ee);	# u64
	&data_word(0x43172f60,0x78a5636f);	# u64
	&data_word(0xa1f0ab72,0x84c87814);	# u64
	&data_word(0x1a6439ec,0x8cc70208);	# u64
	&data_word(0x23631e28,0x90befffa);	# u64
	&data_word(0xde82bde9,0xa4506ceb);	# u64
	&data_word(0xb2c67915,0xbef9a3f7);	# u64
	&data_word(0xe372532b,0xc67178f2);	# u64
	&data_word(0xea26619c,0xca273ece);	# u64
	&data_word(0x21c0c207,0xd186b8c7);	# u64
	&data_word(0xcde0eb1e,0xeada7dd6);	# u64
	&data_word(0xee6ed178,0xf57d4f7f);	# u64
	&data_word(0x72176fba,0x06f067aa);	# u64
	&data_word(0xa2c898a6,0x0a637dc5);	# u64
	&data_word(0xbef90dae,0x113f9804);	# u64
	&data_word(0x131c471b,0x1b710b35);	# u64
	&data_word(0x23047d84,0x28db77f5);	# u64
	&data_word(0x40c72493,0x32caab7b);	# u64
	&data_word(0x15c9bebc,0x3c9ebe0a);	# u64
	&data_word(0x9c100d4c,0x431d67c4);	# u64
	&data_word(0xcb3e42b6,0x4cc5d4be);	# u64
	&data_word(0xfc657e2a,0x597f299c);	# u64
	&data_word(0x3ad6faec,0x5fcb6fab);	# u64
	&data_word(0x4a475817,0x6c44198c);	# u64

	&data_word(0x04050607,0x00010203);	# byte swap
	&data_word(0x0c0d0e0f,0x08090a0b);	# mask
# Close the assembler function, emit the identification string, and
# flush the generated assembly.
&function_end_B("sha512_block_data_order");
&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();

# The generated assembler is buffered on STDOUT, so a write error
# (full disk, broken pipe, redirected-file failure) only surfaces at
# close time; an unchecked close would silently leave a truncated .s
# file for the build to consume.
close STDOUT or die "error closing STDOUT: $!";