# Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the number of writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium and ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's used only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# the May version, >60% over the original. Add AVX+shrd code path,
# >25% improvement on Sandy Bridge over the May version, 60% over
# the original.
#
# Replace the AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on the upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# Add support for Intel SHA Extensions.
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Skylake	20	-	14.9		9.50	7.70
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
# Silvermont	40	-	34/31		22.9	20.6
# Goldmont	29	-	20		16.3(***)
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;
# (***)	SHAEXT result is 4.1, strangely enough better than the 64-bit
#	one;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop and open STDOUT,">$output";
&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
			=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
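# Note on the probes above: each satisfied comparison adds one, so $avx
# ends up 0 (assembler too old for AVX), 1 (AVX encodings OK) or
# 2 (new enough for the AVX+BMI flavour as well). An illustrative Perl
# sketch of the same idiom, with a made-up version number:
#
#	my $ver = 2.22;					# e.g. GNU as 2.22
#	my $avx = ($ver>=2.19) + ($ver>=2.22);		# here: 1 + 1 == 2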
$shaext=$xmm;	### set to zero if compiling for 1.0.1

$unroll_after = 64*4;	# If pre-evicted from L1P cache first spin of
			# fully unrolled loop was measured to run about
			# 3-4x slower. If slowdown coefficient is N and
			# unrolled loop is m times faster, then you break
			# even at (N-1)/(m-1) blocks. Then it needs to be
			# adjusted for probability of code being evicted,
			# code size/cache size=1/4. Typical m is 1.15...
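# One worked reading of the comment above (my arithmetic, not from the
# original): with m = 1.15 and a raw first-spin slowdown of N = 3.5,
# the naive break-even is (3.5-1)/(1.15-1) ~= 17 blocks; weighting the
# penalty by the 1/4 eviction probability gives an effective
# N' = 1 + (3.5-1)/4 = 1.625, hence (1.625-1)/(1.15-1) ~= 4.2 blocks,
# i.e. ~256 bytes -- which is where $unroll_after = 64*4 comes from.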
$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

sub BODY_00_15() {
    my $in_16_63=shift;
	&mov	($T,"ecx");			# "ecx" is preloaded
	&mov	("esi",&DWP(4*(9+15+16-14),"esp"));

	&xor	($T,"ecx");			# T = sigma0(X[-15])
	&add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi");			# sigma1(X[-2])
	#&add	($T,"edi");			# T += sigma1(X[-2])
	#&mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	&add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	&mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	&mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&mov	($Eoff,$E);		# modulo-scheduled
	&add	($T,$Hoff);		# T += h
	&xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&add	($T,"esi");		# T += Ch(e,f,g)
	&add	($T,$E);		# T += Sigma1(e)
	&mov	($Aoff,$A);		# modulo-scheduled
	&lea	("esp",&DWP(-4,"esp"));
	&mov	("esi",&DWP(0,$K256));
	&mov	($E,$Eoff);		# e in next iteration, d in this one
	&xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)
	&add	($T,"esi");		# T += K[i]
	&mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	&and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	&xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($A,$T);		# h += T
}
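# The "alternative Maj" in the round above leans on the identity
# Maj(a,b,c) == Ch(a^b,c,b): whenever a == b the majority is simply b,
# otherwise it is c. Carrying b^c over from the previous round turns
# Maj into one AND and two XORs per round. A throwaway check of the
# identity (illustrative only, kept commented out):
#
#	for my $x (0..7) {
#		my ($a,$b,$c) = (($x>>2)&1,($x>>1)&1,$x&1);
#		my $maj = ($a&$b)^($a&$c)^($b&$c);
#		my $ch  = (($a^$b)&$c)^((($a^$b)^1)&$b);  # Ch(a^b,c,b)
#		die "Maj identity broken" if $maj != $ch;
#	}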
&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");		# inp+num*64
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
    if ($xmm) {
	&je	(&label("AVX"))		if ($avx);
	&test	("ebx",1<<9);		# check for SSSE3
	&jnz	(&label("SSSE3"));
    } else {
	&je	(&label("loop_shrd"));
    }
    if ($unroll_after) {
&set_label("no_xmm");
	&sub	("eax","edi");
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
    }
	&jmp	(&label("loop"));
}
sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
	# copy input block to stack reversing byte and dword order
	for($i=0;$i<4;$i++) {
		&mov	("eax",&DWP($i*16+0,"edi"));
		&mov	("ebx",&DWP($i*16+4,"edi"));
		&mov	("ecx",&DWP($i*16+8,"edi"));
		&bswap	("eax");
		&mov	("edx",&DWP($i*16+12,"edi"));
		&bswap	("ebx");
		&push	("eax");
		&bswap	("ecx");
		&push	("ebx");
		&bswap	("edx");
		&push	("ecx");
		&push	("edx");
	}
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));	# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	&mov	(&DWP(8,"esp"),"ebx");	# b
	&mov	(&DWP(12,"esp"),"ecx");	# c
	&xor	("ebx","ecx");
	&mov	(&DWP(16,"esp"),"edi");	# d
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ebx");	# f
	&mov	(&DWP(28,"esp"),"ecx");	# g
	&mov	(&DWP(32,"esp"),"edi");	# h
&set_label("00_15$suffix",16);
		&BODY_00_15();

	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);
		&BODY_00_15(1);

	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));
	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));	# ctx

	#&mov	("edi",$Coff);

	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");

	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));	# inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
	&sub	($K256,4*64);			# rewind K

	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop$suffix"));
}
&COMPACT_LOOP();
&mov	("esp",&DWP(12,"esp"));			# restore sp
&function_end_A();
if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));		# restore sp
	&function_end_A();
}
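# The local *ror override above is the whole "_shrd" variant: with
# both operands the same register, "shrd x,x,n" shifts x's own bits
# back in from the top, which is exactly a right rotate. The encoding
# is longer, but per the comment above it buys ~20% on Sandy Bridge,
# where shrd-by-immediate is reportedly the faster of the two.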
&set_label("K256",64);	# Yes! I keep it in the code segment!
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
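# Editorial note on the data above: K256[] holds the 64 standard
# SHA-256 round constants. The trailing 0x00010203.. quadword is a
# pshufb control mask -- each mask byte selects a source byte, and
# little-endian storage puts 03,02,01,00 in memory, so every 32-bit
# lane gets byte-reversed. The mask also doubles as an end-of-table
# sentinel: the SIMD loops below compare a K256 slot against
# 0x00010203 to detect that $K256 has run past the round constants.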
($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);		# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
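# off() implements a rotating register file in memory: the state word
# named $x lives at esp+4*(($x-$i)&7) in round $i, so instead of moving
# eight values down a slot every round, incrementing $i shifts which
# slot each name resolves to. Only a and h ever sit in real registers,
# and "@AH = reverse(@AH)" below swaps those two names each round.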
if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));
&set_label("grand_loop",16);
	# copy input block to stack reversing byte order
	for($i=0;$i<5;$i++) {
		&mov	("ebx",&DWP(12*$i+0,"edi"));
		&mov	("ecx",&DWP(12*$i+4,"edi"));
		&bswap	("ebx");
		&mov	("esi",&DWP(12*$i+8,"edi"));
		&bswap	("ecx");
		&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
		&bswap	("esi");
		&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
		&mov	(&DWP(32+12*$i+8,"esp"),"esi");
	}
	&mov	("ebx",&DWP($i*12,"edi"));
	&bswap	("ebx");
	&add	("edi",64);
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");
my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

	&mov	($T,$t1);			# $t1 is preloaded
	#&mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));

	&xor	($T,$t1);			# T = sigma0(X[-15])
	&add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2);			# sigma1(X[-2])
	#&add	($T,"edi");			# T += sigma1(X[-2])
	#&mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	&add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	&mov	("edi",&off($g));
	&mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	&mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]

	&mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&add	($T,&off($h));		# T += h
	&xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	&add	($T,"edi");		# T += Ch(e,f,g)
	&mov	("edi",&off($b));
	&mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	&xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	&lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	&mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)
	&add	($AH[1],$E);		# h += T
	&add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	&mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ebx");
	&mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}
if (!$i386 && $xmm) {{{
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
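# These helpers hand-assemble the SHA extension instructions for
# assemblers that predate the mnemonics: each one is 0f 38 <op> with a
# register-to-register ModR/M byte, destination in the reg field. A
# worked example (mine): &sha256rnds2($CDGH,$ABEF) with $CDGH=xmm1 and
# $ABEF=xmm0 emits 0f 38 cb c8, since 0xc0|(1<<3)|0 == 0xc8. Note that
# sha256rnds2 also reads xmm0 implicitly, which is why $Wi is pinned
# to xmm0 above.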
&set_label("shaext",32);
	&sub		("esp",32);

	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
	&lea		($K256,&DWP(0x80,$K256));
	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
	&palignr	($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq	($CDGH,$Wi);			# CDGH
	&jmp		(&label("loop_shaext"));
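# Why the shuffle dance: sha256rnds2 expects the state packed as
# ABEF/CDGH, while ctx->h[] read as xmm lanes comes out DCBA/HGFE.
# pshufd 0x1b reverses the four lanes, pshufd 0xb1 swaps lanes
# pairwise, and palignr/punpcklqdq then splice the halves into the
# ABEF/CDGH pair; the epilogue below applies the inverse transform
# before storing the state back.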
&set_label("loop_shaext",16);
	&movdqu		(@MSG[0],&QWP(0,$inp));
	&movdqu		(@MSG[1],&QWP(0x10,$inp));
	&movdqu		(@MSG[2],&QWP(0x20,$inp));
	&pshufb		(@MSG[0],$TMP);
	&movdqu		(@MSG[3],&QWP(0x30,$inp));
	&movdqa		(&QWP(16,"esp"),$CDGH);	# offload

	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&pshufb		(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 0-3
	&pshufd		($Wi,$Wi,0x0e);

	&movdqa		(&QWP(0,"esp"),$ABEF);	# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&pshufb		(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 4-7
	&pshufd		($Wi,$Wi,0x0e);
	&lea		($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&pshufb		(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);		# 8-11
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);

	&paddd		(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
	&paddd		($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);		# 12-15
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);

	&paddd		(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);
    for($i=4;$i<16-3;$i++) {
	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);		# 16-19...
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);

	&paddd		(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
    }
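# Each spin of this loop covers four rounds: one sha256rnds2 retires
# two rounds using the W+K pair in the low quadword of xmm0 ($Wi), and
# pshufd(...,0x0e) drops the upper pair down for the second issue.
# sha256msg1/sha256msg2, with the palignr/paddd step between them,
# advance the 16-word message schedule, and push(@MSG,shift(@MSG))
# rotates the four schedule registers.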
	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);		# 52-55
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd		(@MSG[2],$TMP);

	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);		# 56-59
	&pshufd		($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);

	&sha256rnds2	($CDGH,$ABEF);		# 60-63
	&pshufd		($Wi,$Wi,0x0e);
	&cmp		($end,$inp);
	&sha256rnds2	($ABEF,$CDGH);

	&paddd		($CDGH,&QWP(16,"esp"));
	&paddd		($ABEF,&QWP(0,"esp"));
	&jnz		(&label("loop_shaext"));
	&pshufd		($CDGH,$CDGH,0xb1);	# DCHG
	&pshufd		($TMP,$ABEF,0x1b);	# FEBA
	&pshufd		($ABEF,$ABEF,0xb1);	# BAFE
	&punpckhqdq	($ABEF,$CDGH);		# DCBA
	&palignr	($CDGH,$TMP,8);		# HGFE

	&mov		("esp",&DWP(32+12,"esp"));
	&movdqu		(&QWP(0,$ctx),$ABEF);
	&movdqu		(&QWP(16,$ctx),$CDGH);
&function_end_A();
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));
&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));
&set_label("ssse3_00_47",16);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	  eval(shift(@insns));	# @
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));	# @
	&palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));	# @
	&pshufd		($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));	# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&pshufd		($t3,$t3,0b10000000);
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&pshufd		($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&pshufd		($t3,$t3,0b00001000);
	  eval(shift(@insns));	# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));	# @
	  eval(shift(@insns));	# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));	# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	 '&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	 '&mov	("edi",&off($g));',
	'&xor	("esi","edi");',
	'&ror	($E,11-6);',
	'&and	("esi","ecx");',
	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	'&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	 '&mov	("ecx",$AH[0]);',
	'&add	($E,"edi");',		# T += Ch(e,f,g)
	 '&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	'&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	'&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	'&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	'&add	($AH[1],$E);',		# h += T
	'&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
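# body_00_15 deliberately returns the scalar round as a list of
# instruction strings instead of emitting it: SSSE3_00_47 above evals
# a few of them around every vector op, hand-interleaving the integer
# rounds with the message-schedule arithmetic (4 rounds x ~30 strings,
# the "120 instructions" noted above) so both pipes stay busy.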
    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx) {
&set_label("AVX",32);
						if ($avx>1) {
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
						}
	&lea	("esp",&DWP(-96,"esp"));

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_00_47"));
&set_label("avx_00_47",16);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	'&vpshufd	($t3,@X[3],0b11111010);',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	'&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	'&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	'&vpxor		($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor		($t2,$t2,$t3);',	# sigma1(X[14..15])
	'&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000);',# X[16..17]
	'&vpsrld	($t2,$t3,10);',
	'&vpsrlq	($t1,$t3,17);',
	'&vpxor		($t2,$t2,$t1);',
	'&vpsrlq	($t3,$t3,19);',
	'&vpxor		($t2,$t2,$t3);',	# sigma1(X[16..17])
	'&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}
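# Xupdate_AVX evaluates, four lanes at a time, the schedule recurrence
#	X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] + sigma1(X[i-2])
# where sigma0(x) = (x>>>7)^(x>>>18)^(x>>3) and
#       sigma1(x) = (x>>>17)^(x>>>19)^(x>>10).
# There is no vector rotate, so each x>>>n is assembled from a shift
# pair, e.g. x>>>7 == (x>>7)|(x<<25) -- hence the vpsrld/vpslld/vpxor
# chains. sigma1 is applied to just two lanes per pass (X[14..15],
# then the freshly computed X[16..17]) because its inputs aren't all
# known until the update is half done.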
local *ror = sub { &shrd(@_[0],@_) };

sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd	($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}
    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	'&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	'&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	'&and	($E,&off($f))',
	'&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	'&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	'&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	'&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	'&add	($E,&off($h))',		# T += h
	'&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	'&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	'&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	'&add	("ecx",$E)',		# h += T
	'&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
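# The BMI flavour of the round body: rorx is a non-destructive rotate
# that leaves the flags alone, so the Sigma1/Sigma0 rotations become
# three independent dataflow ops each, and andn forms ~e&g in one
# step, so Ch(e,f,g) = (e&f)|(~e&g) falls out of andn+and+or instead
# of the xor/and/xor sequence the non-BMI body uses. The "+10%" above
# is the author's estimate for this variant.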
&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
	#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
	#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"edi");		# magic
	&mov	(&DWP(8,"esp"),"edi");
	&mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	#&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	&mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						}	# $avx>1
						}	# $avx
}}}
&function_end_B("sha256_block_data_order");

&asm_finish();

close STDOUT or die "error closing STDOUT";