# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the number of writes.
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's engaged only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
# Performance in clock cycles per processed byte (less is better):
#
#			PIII	P4	AMD K8	Core2	SB	Atom	Bldzr
# gcc			36	41	27	26	25	50	36
# icc			33	38	25	23	-	-	-
# x86 asm(*)		27/24	28	19/15.5	18/15.6	12.5	30/25	16.6
# x86_64 asm(**)	-	17.5	15	15.5	17.5	23	21
#
# (*)	numbers after slash are for unrolled loop, where available;
# (**)	x86_64 assembly performance is presented for reference
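#
# For reference, the SHA-256 primitives implemented below are, per
# FIPS 180-4 (>>> is rotate right, >> is logical shift right):
#
#	Sigma0(a)  = (a>>>2)  ^ (a>>>13) ^ (a>>>22)
#	Sigma1(e)  = (e>>>6)  ^ (e>>>11) ^ (e>>>25)
#	sigma0(x)  = (x>>>7)  ^ (x>>>18) ^ (x>>3)
#	sigma1(x)  = (x>>>17) ^ (x>>>19) ^ (x>>10)
#	Ch(e,f,g)  = (e&f) ^ (~e&g)        = g ^ (e & (f^g))
#	Maj(a,b,c) = (a&b) ^ (a&c) ^ (b&c) = b ^ ((a^b) & (b^c))
#
# The last identity is the "alternative Maj" mentioned above: with b^c
# carried over from the previous round, Maj costs one AND and two XORs.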
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
$ymm=1 if ($xmm &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19); # first version supporting AVX
$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	$1>=2.03); # first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./ &&
	$1>=10); # first version supporting AVX
$unroll_after = 64*4;	# If pre-evicted from L1P cache, the first spin
			# of the fully unrolled loop was measured to run
			# about 3-4x slower. If the slowdown coefficient
			# is N and the unrolled loop is m times faster,
			# you break even at (N-1)/(m-1) blocks. That then
			# needs to be adjusted for the probability of the
			# code being evicted, code size/cache size=1/4.
			# Typical m is 1.15...
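			# [Worked example with the figures above, for
			# illustration only: N~3.5 and m=1.15 give
			# (3.5-1)/(1.15-1) ~= 17 blocks; scaled by the
			# 1/4 eviction probability that is ~4 blocks,
			# i.e. 4*64 = 64*4 bytes.]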
&mov ($T,"ecx"); # "ecx" is preloaded
&mov ("esi",&DWP(4*(9+15+16-14),"esp"));
&xor ($T,"ecx"); # T = sigma0(X[-15])
&add ($T,&DWP(4*(9+15+16),"esp")); # T += X[-16]
&add ($T,&DWP(4*(9+15+16-9),"esp")); # T += X[-7]
#&xor ("edi","esi") # sigma1(X[-2])
# &add ($T,"edi"); # T += sigma1(X[-2])
# &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
&xor ("edi","esi") if ($in_16_63); # sigma1(X[-2])
&add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
&mov ($T,&DWP(4*(9+15),"esp")) if (!$in_16_63);
&mov (&DWP(4*(9+15),"esp"),$T) if ($in_16_63); # save X[0]
&mov ($Eoff,$E); # modulo-scheduled
&add ($T,$Hoff); # T += h
&xor ("esi","edi"); # Ch(e,f,g)
&ror ($E,6); # Sigma1(e)
&add ($T,"esi"); # T += Ch(e,f,g)
&add ($T,$E); # T += Sigma1(e)
&mov ($Aoff,$A); # modulo-scheduled
&lea ("esp",&DWP(-4,"esp"));
&mov ("esi",&DWP(0,$K256));
&mov ($E,$Eoff); # e in next iteration, d in this one
&xor ($A,"edi"); # a ^= b
&ror ("ecx",2); # Sigma0(a)
&add ($T,"esi"); # T += K[i]
&mov (&DWP(0,"esp"),$A); # (b^c) in next round
&add ($E,$T); # d += T
&and ($A,&DWP(4,"esp")); # a &= (b^c)
&add ($T,"ecx"); # T += Sigma0(a)
&xor ($A,"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
&mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
&add ($A,$T); # h += T
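# Note the lea -4(%esp) above: the compact loop keeps most of a..h on
# the stack and slides the frame one word per round, so the round-to-
# round renaming of the working variables costs no register moves
# (hence "$Eoff is e in next iteration, d in this one").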
&external_label("OPENSSL_ia32cap_P") if (!$i386);
&function_begin("sha256_block_data_order");
&mov ("esi",wparam(0)); # ctx
&mov ("edi",wparam(1)); # inp
&mov ("eax",wparam(2)); # num
&mov ("ebx","esp"); # saved sp
&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
&lea ($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
&mov (&DWP(0,"esp"),"esi"); # ctx
&mov (&DWP(4,"esp"),"edi"); # inp
&mov (&DWP(8,"esp"),"eax"); # inp+num*64, end of input
&mov (&DWP(12,"esp"),"ebx"); # saved sp
&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
&mov ("ecx",&DWP(0,"edx"));
&mov ("edx",&DWP(4,"edx"));
&test ("ecx",1<<20); # check for P4
&jnz (&label("loop"));
&test ("edx",1<<11); # check for XOP
&jnz (&label("XOP")) if ($ymm);
&and ("ecx",1<<30); # mask "Intel CPU" bit
&and ("edx",1<<28); # mask AVX bit
&cmp ("ecx",1<<28|1<<30);
&je (&label("AVX")) if ($ymm);
&je (&label("loop_shrd")) if (!$ymm);
&cmp ("eax",$unroll_after);
&jae (&label("unrolled"));
&jmp (&label("loop"));
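# Dispatch summary: P4 always takes the compact ror loop; XOP-capable
# CPUs take the XOP path; Intel CPUs with AVX take the AVX path (or the
# shrd loop when the assembler can't emit AVX); everything else runs
# the fully unrolled loop for inputs of at least $unroll_after bytes
# and the compact loop otherwise.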
&set_label("loop$suffix",16);
# copy input block to stack reversing byte and dword order
for($i=0;$i<4;$i++) {
&mov ("eax",&DWP($i*16+0,"edi"));
&mov ("ebx",&DWP($i*16+4,"edi"));
&mov ("ecx",&DWP($i*16+8,"edi"));
&mov ("edx",&DWP($i*16+12,"edi"));
&lea ("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
&mov (&DWP(4*(9+16)+4,"esp"),"edi");
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($A,&DWP(0,"esi"));
&mov ("ebx",&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
&mov (&DWP(0,"esp"),"ebx"); # magic
&mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("edi",&DWP(28,"esi"));
&set_label("00_15$suffix",16);
&cmp ("esi",0xc19bf174); # K256[15], last of the first 16 rounds
&jne (&label("00_15$suffix"));
&mov ("ecx",&DWP(4*(9+15+16-1),"esp")); # preloaded in BODY_00_15(1)
&jmp (&label("16_63$suffix"));
&set_label("16_63$suffix",16);
&cmp ("esi",0xc67178f2); # K256[63], last round
&jne (&label("16_63$suffix"));
&mov ("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
# &mov ("edi",$Coff);
&add ($A,&DWP(0,"esi"));
&add ("ebx",&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$A);
&mov (&DWP(4,"esi"),"ebx");
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
&mov ("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
&add ($E,&DWP(16,"esi"));
&add ("eax",&DWP(20,"esi"));
&add ("ebx",&DWP(24,"esi"));
&add ("ecx",&DWP(28,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"eax");
&mov (&DWP(24,"esi"),"ebx");
&mov (&DWP(28,"esi"),"ecx");
&lea ("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
&sub ($K256,4*64); # rewind K
&cmp ("edi",&DWP(8,"esp")); # are we done yet?
&jb (&label("loop$suffix"));
&mov ("esp",&DWP(12,"esp")); # restore sp
if (!$i386 && !$ymm) {
# ~20% improvement on Sandy Bridge
local *ror = sub { &shrd(@_[0],@_) };
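# shrd with identical source and destination registers is equivalent
# to ror; in this loop it was measured ~20% faster than ror on Sandy
# Bridge (see above).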
&COMPACT_LOOP("_shrd");
&mov ("esp",&DWP(12,"esp")); # restore sp
&set_label("K256",64); # Yes! I keep it in the code segment!
@K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
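# (K256[i] is the first 32 bits of the fractional part of the cube root
# of the i-th prime, per FIPS 180-4. The 16 bytes that follow are the
# big-endian byte-swap mask used with vpshufb in the SIMD paths below.)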
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # byte-swap mask
if (!$i386 && $unroll_after) {
&set_label("unrolled",16);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("ebx",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic: precompute b^c for Maj
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"ebx");
&mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"ebx");
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&jmp (&label("grand_loop"));
&set_label("grand_loop",16);
# copy input block to stack reversing byte order
for($i=0;$i<5;$i++) {
&mov ("ebx",&DWP(12*$i+0,"edi"));
&mov ("ecx",&DWP(12*$i+4,"edi"));
&mov ("esi",&DWP(12*$i+8,"edi"));
&mov (&DWP(32+12*$i+0,"esp"),"ebx");
&mov (&DWP(32+12*$i+4,"esp"),"ecx");
&mov (&DWP(32+12*$i+8,"esp"),"esi");
&mov ("ebx",&DWP($i*12,"edi"));
&mov (&DWP(96+4,"esp"),"edi");
&mov (&DWP(32+12*$i,"esp"),"ebx");
my ($t1,$t2) = ("ecx","esi");
my ($a,$b,$c,$d,$e,$f,$g,$h)=(0..7); # offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
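# off() maps a logical variable (a=0 .. h=7) to its current stack slot;
# subtracting the round number rotates the window, so the per-round
# renaming of a..h costs no instructions.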
for ($i=0;$i<64;$i++) {
&mov ($T,$t1); # $t1 is preloaded
# &mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
&xor ($T,$t1); # T = sigma0(X[-15])
&add ($T,&DWP(32+4*($i&15),"esp")); # T += X[-16]
&add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
#&xor ("edi",$t2) # sigma1(X[-2])
# &add ($T,"edi"); # T += sigma1(X[-2])
# &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
&xor ("edi",$t2) if ($i>=16); # sigma1(X[-2])
&add ($T,"edi") if ($i>=16); # T += sigma1(X[-2])
&mov ("edi",&off($g));
&mov ($T,&DWP(32+4*($i&15),"esp")) if ($i<16); # X[i]
&mov (&DWP(32+4*($i&15),"esp"),$T) if ($i>=16 && $i<62); # save X[0]
&mov (&off($e),$t1); # save $E, modulo-scheduled
&add ($T,&off($h)); # T += h
&xor ("edi",$t2); # Ch(e,f,g)
&ror ($E,6); # Sigma1(e)
&add ($T,"edi"); # T += Ch(e,f,g)
&mov ("edi",&off($b));
&mov (&off($a),$AH[0]); # save $A, modulo-scheduled
&xor ($AH[0],"edi"); # a ^= b, (b^c) in next round
&and ($AH[1],$AH[0]); # (b^c) &= (a^b)
&lea ($E,&DWP(@K256[$i],$T,$E)); # T += Sigma1(e)+K[i] (lea folds the K[i] add)
&xor ($AH[1],"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
&mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
&ror ($t1,2); # Sigma0(a)
&add ($AH[1],$E); # h += T
&add ($E,&off($d)); # d += T
&add ($AH[1],$t1); # h += Sigma0(a)
&mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);
@AH = reverse(@AH); # rotate(a,h)
($t1,$t2) = ($t2,$t1); # rotate(t1,t2)
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi", &DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ebx",&DWP(24,"esp"));
&mov ("ecx",&DWP(28,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ebx",&DWP(24,"esi"));
&add ("ecx",&DWP(28,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(24,"esi"),"ebx");
&mov (&DWP(28,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ebx");
&mov (&DWP(28,"esp"),"ecx");
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_loop"));
&mov ("esp",&DWP(96+12,"esp")); # restore sp
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
&set_label("XOP",16);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"edi");
&mov ($E,&DWP(16,"esi"));
&mov ("edi",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&vmovdqa ($t3,&QWP(256,$K256)); # byte-swap mask
&jmp (&label("grand_xop"));
&set_label("grand_xop",16);
# load input, reverse byte order, add K256[0..15], save to stack
&vmovdqu (@X[0],&QWP(0,"edi"));
&vmovdqu (@X[1],&QWP(16,"edi"));
&vmovdqu (@X[2],&QWP(32,"edi"));
&vmovdqu (@X[3],&QWP(48,"edi"));
&vpshufb (@X[0],@X[0],$t3);
&mov (&DWP(96+4,"esp"),"edi");
&vpshufb (@X[1],@X[1],$t3);
&vpshufb (@X[2],@X[2],$t3);
&vpaddd ($t0,@X[0],&QWP(0,$K256));
&vpshufb (@X[3],@X[3],$t3);
&vpaddd ($t1,@X[1],&QWP(16,$K256));
&vpaddd ($t2,@X[2],&QWP(32,$K256));
&vpaddd ($t3,@X[3],&QWP(48,$K256));
&vmovdqa (&QWP(32+0,"esp"),$t0);
&vmovdqa (&QWP(32+16,"esp"),$t1);
&vmovdqa (&QWP(32+32,"esp"),$t2);
&vmovdqa (&QWP(32+48,"esp"),$t3);
&jmp (&label("xop_00_47"));
&set_label("xop_00_47",16);
my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
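# Four scalar round bodies are interleaved, one instruction at a time,
# with the XMM message-schedule code below: the vector part computes
# the next four X[] words while the scalar part retires four rounds,
# hiding SIMD latencies.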
&vpalignr ($t0,@X[1],@X[0],4); # X[1..4]
&vpalignr ($t3,@X[3],@X[2],4); # X[9..12]
&vprotd ($t1,$t0,14);
&vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12]
&vprotd ($t2,$t1,25-14);
&vpxor ($t0,$t0,$t1);
&vprotd ($t3,@X[3],13);
&vpxor ($t0,$t0,$t2); # sigma0(X[1..4])
&vpsrld ($t2,@X[3],10);
&vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4])
&vprotd ($t1,$t3,15-13);
&vpxor ($t3,$t3,$t2);
&vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
&vpsrldq ($t3,$t3,8);
&vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
&vprotd ($t3,@X[0],13);
&vpsrld ($t2,@X[0],10);
&vprotd ($t1,$t3,15-13);
&vpxor ($t3,$t3,$t2);
&vpxor ($t3,$t3,$t1); # sigma1(X[16..17])
&vpslldq ($t3,$t3,8); # 22 instructions
&vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17])
&vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (&QWP(32+16*$j,"esp"),$t2);
'&mov ("esi",&off($f));',
'&mov ("edi",&off($g));',
'&xor ("esi","edi");',
'&and ("esi","ecx");',
'&mov (&off($e),"ecx");', # save $E, modulo-scheduled
'&xor ("edi","esi");', # Ch(e,f,g)
'&ror ($E,6);', # T = Sigma1(e)
'&mov ("ecx",$AH[0]);',
'&mov ("esi",$AH[0]);',
'&add ($E,&off($h));', # T += h
'&ror ("ecx",22-13);',
'&add ($E,"edi");', # T += Ch(e,f,g)
'&mov ("edi",&off($b));',
'&xor ("ecx",$AH[0]);',
'&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
'&xor ($AH[0],"edi");', # a ^= b, (b^c) in next round
'&ror ("ecx",13-2);',
'&and ($AH[1],$AH[0]);', # (b^c) &= (a^b)
'&add ($E,&DWP(32+4*($i&15),"esp"));', # T += K[i]+X[i]
'&xor ("ecx","esi");',
'&xor ($AH[1],"edi");', # h = Maj(a,b,c) = Ch(a^b,c,b)
'&ror ("ecx",2);', # Sigma0(a)
'&add ($AH[1],$E);', # h += T
'&add ($E,&off($d));', # d += T
'&add ($AH[1],"ecx");'. # h += Sigma0(a)
'@AH = reverse(@AH); $i++;' # rotate(a,h)
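# body_00_15() above returns the round as a list of instruction strings
# rather than emitting code directly, so that XOP_00_47 (and AVX_00_47
# below) can eval the strings one at a time, interleaved with the
# vector instructions.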
for ($i=0,$j=0; $j<4; $j++) {
&XOP_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
&cmp (&DWP(16*$j,$K256),0x00010203); # K256 exhausted? (0x00010203 starts the byte-swap mask)
&jne (&label("xop_00_47"));
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi", &DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ecx",&DWP(24,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ecx",&DWP(24,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(28,"esp"));
&mov (&DWP(24,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&add ("edi",&DWP(28,"esi"));
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esi"),"edi");
&mov (&DWP(28,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&vmovdqa ($t3,&QWP(64,$K256)); # reload byte-swap mask
&sub ($K256,3*64); # rewind K
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_xop"));
&mov ("esp",&DWP(96+12,"esp")); # restore sp
&set_label("AVX",16);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"edi");
&mov ($E,&DWP(16,"esi"));
&mov ("edi",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&vmovdqa ($t3,&QWP(256,$K256)); # byte-swap mask
&jmp (&label("grand_avx"));
&set_label("grand_avx",16);
# load input, reverse byte order, add K256[0..15], save to stack
&vmovdqu (@X[0],&QWP(0,"edi"));
&vmovdqu (@X[1],&QWP(16,"edi"));
&vmovdqu (@X[2],&QWP(32,"edi"));
&vmovdqu (@X[3],&QWP(48,"edi"));
&vpshufb (@X[0],@X[0],$t3);
&mov (&DWP(96+4,"esp"),"edi");
&vpshufb (@X[1],@X[1],$t3);
&vpshufb (@X[2],@X[2],$t3);
&vpaddd ($t0,@X[0],&QWP(0,$K256));
&vpshufb (@X[3],@X[3],$t3);
&vpaddd ($t1,@X[1],&QWP(16,$K256));
&vpaddd ($t2,@X[2],&QWP(32,$K256));
&vpaddd ($t3,@X[3],&QWP(48,$K256));
&vmovdqa (&QWP(32+0,"esp"),$t0);
&vmovdqa (&QWP(32+16,"esp"),$t1);
&vmovdqa (&QWP(32+32,"esp"),$t2);
&vmovdqa (&QWP(32+48,"esp"),$t3);
&jmp (&label("avx_00_47"));
&set_label("avx_00_47",16);
'&vpalignr ($t0,@X[1],@X[0],4);', # X[1..4]
'&vpalignr ($t3,@X[3],@X[2],4);', # X[9..12]
'&vpsrld ($t2,$t0,7);',
'&vpaddd (@X[0],@X[0],$t3);', # X[0..3] += X[9..12]
'&vpsrld ($t3,$t0,3);',
'&vpslld ($t1,$t0,14);',
'&vpxor ($t0,$t3,$t2);',
'&vpsrld ($t2,$t2,18-7);',
'&vpxor ($t0,$t0,$t1);',
'&vpslld ($t1,$t1,25-14);',
'&vpxor ($t0,$t0,$t2);',
'&vpsrld ($t3,@X[3],10);',
'&vpxor ($t0,$t0,$t1);', # sigma0(X[1..4])
'&vpslld ($t2,@X[3],13);',
'&vpaddd (@X[0],@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
'&vpsrld ($t1,@X[3],17);',
'&vpxor ($t3,$t3,$t2);',
'&vpslld ($t2,$t2,15-13);',
'&vpxor ($t3,$t3,$t1);',
'&vpsrld ($t1,$t1,19-17);',
'&vpxor ($t3,$t3,$t2);',
'&vpxor ($t3,$t3,$t1);', # sigma1(X[14..15])
'&vpsrldq ($t3,$t3,8);',
'&vpaddd (@X[0],@X[0],$t3);', # X[0..1] += sigma1(X[14..15])
'&vpsrld ($t3,@X[0],10);',
'&vpslld ($t2,@X[0],13);',
'&vpsrld ($t1,@X[0],17);',
'&vpxor ($t3,$t3,$t2);',
'&vpslld ($t2,$t2,15-13);',
'&vpxor ($t3,$t3,$t1);',
'&vpsrld ($t1,$t1,19-17);',
'&vpxor ($t3,$t3,$t2);',
'&vpxor ($t3,$t3,$t1);', # sigma1(X[16..17])
'&vpslldq ($t3,$t3,8);',
'&vpaddd (@X[0],@X[0],$t3);' # X[2..3] += sigma1(X[16..17])
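# Unlike XOP's vprotd, AVX has no packed rotate instruction, so each
# rotate in sigma0/sigma1 above is emulated with a vpsrld/vpslld pair
# folded into the vpxor chain.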
local *ror = sub { &shrd(@_[0],@_) };
my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
foreach (Xupdate_AVX()) { # 35 instructions
&vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
foreach (@insns) { eval; } # remaining instructions
&vmovdqa (&QWP(32+16*$j,"esp"),$t2);
for ($i=0,$j=0; $j<4; $j++) {
&AVX_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
&cmp (&DWP(16*$j,$K256),0x00010203); # K256 exhausted? (0x00010203 starts the byte-swap mask)
&jne (&label("avx_00_47"));
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi", &DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ecx",&DWP(24,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ecx",&DWP(24,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(28,"esp"));
&mov (&DWP(24,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&add ("edi",&DWP(28,"esi"));
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esi"),"edi");
&mov (&DWP(28,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&vmovdqa ($t3,&QWP(64,$K256)); # reload byte-swap mask
&sub ($K256,3*64); # rewind K
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_avx"));
&mov ("esp",&DWP(96+12,"esp")); # restore sp
&function_end_B("sha256_block_data_order");
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");