# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# SHA512 block transform for x86. September 2007.
#
# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
#
# Performance in clock cycles per processed byte (less is better):
#
#               gcc     icc     x86 asm SIMD(*) x86_64(**)
# Pentium       100     97      61      -       -
# P4            116     95      82      34.6    30.8
# AMD K8        54      55      36      20.7    9.57
# Core2         66      57      40      15.9    9.97
# Westmere      70      -       38      12.2    9.58
# Sandy Bridge  58      -       35      11.9    11.2
# Ivy Bridge    50      -       33      11.5    8.17
# Haswell       46      -       29      11.3    7.66
# Skylake       40      -       26      13.3    7.25
# Bulldozer     121     -       50      14.0    13.5
# VIA Nano      91      -       52      33      14.7
# Atom          126     -       68      48(***) 14.7
# Silvermont    97      -       58      42(***) 17.5
# Goldmont      80      -       48      19.5    12.0
#
# (*)   whichever is best applicable.
# (**)  x86_64 assembler performance is presented for reference
#       purposes; the results are for integer-only code.
# (***) paddq is incredibly slow on Atom.
#
# The IALU code-path is optimized for older Pentiums. On a vanilla
# Pentium the performance improvement over compiler-generated code
# reaches ~60%, while on PIII it is ~35%. On newer µ-archs the
# improvement varies from 15% to 50%, but it matters less there, as
# they are expected to execute the SSE2 code-path, which is commonly
# ~2-3x faster [than compiler-generated code]. The SSE2 code-path is
# as fast as the original sha512-sse2.pl, even though it does not use
# 128-bit operations. The latter means that an SSE2-aware kernel is no
# longer required to execute the code. Another difference is that the
# new code optimizes the amount of writes, but at the cost of a data
# cache "footprint" increased by 1/2KB.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

&external_label("OPENSSL_ia32cap_P") if ($sse2);
$Tlo=&DWP(0,"esp");  $Thi=&DWP(4,"esp");
$Alo=&DWP(8,"esp");  $Ahi=&DWP(8+4,"esp");
$Blo=&DWP(16,"esp"); $Bhi=&DWP(16+4,"esp");
$Clo=&DWP(24,"esp"); $Chi=&DWP(24+4,"esp");
$Dlo=&DWP(32,"esp"); $Dhi=&DWP(32+4,"esp");
$Elo=&DWP(40,"esp"); $Ehi=&DWP(40+4,"esp");
$Flo=&DWP(48,"esp"); $Fhi=&DWP(48+4,"esp");
$Glo=&DWP(56,"esp"); $Ghi=&DWP(56+4,"esp");
$Hlo=&DWP(64,"esp"); $Hhi=&DWP(64+4,"esp");
$K512="ebp";

$Asse2=&QWP(0,"esp");
$Bsse2=&QWP(8,"esp");
$Csse2=&QWP(16,"esp");
$Dsse2=&QWP(24,"esp");
$Esse2=&QWP(32,"esp");
$Fsse2=&QWP(40,"esp");
$Gsse2=&QWP(48,"esp");
$Hsse2=&QWP(56,"esp");
$A="mm0";   # B-D and
$E="mm4";   # F-H are commonly loaded to respectively mm1-mm3 and
            # mm5-mm7, but it's done on an on-demand basis...
$BxC="mm2"; # ... except for B^C
#&movq ("mm5",$Fsse2); # load f
#&movq ("mm6",$Gsse2); # load g
&movq ("mm1",$E); # %mm1 is sliding right
&pxor ("mm5","mm6"); # f^=g
&movq ($Esse2,$E); # modulo-scheduled save e
&pand ("mm5",$E); # f&=e
&psllq ($E,23); # $E is sliding left
&movq ($A,"mm3") if ($phase<2);
&movq (&QWP(8*9,"esp"),"mm7"); # save X[i]
&movq ("mm3","mm1"); # %mm3 is T1
&pxor ("mm5","mm6"); # Ch(e,f,g)
&movq ($Asse2,$A); # modulo-scheduled save a
&paddq ("mm7","mm5"); # X[i]+=Ch(e,f,g)
&paddq ("mm7",$Hsse2); # X[i]+=h
&paddq ("mm7",&QWP(0,$K512)); # X[i]+=K512[i]
&pxor ("mm3",$E); # T1=Sigma1_512(e)
&movq ($E,$Dsse2); # e = load d, e in next round
&paddq ("mm3","mm7"); # T1+=X[i]
&movq ("mm5",$A); # %mm5 is sliding right
&paddq ($E,"mm3"); # d += T1
&movq ("mm6",$A); # %mm6 is sliding left
&movq ("mm1",$Bsse2); # load b
&pxor ($A,"mm1"); # a^b, b^c in next round
&pand ($BxC,$A); # (b^c)&(a^b)
&pxor ($BxC,"mm1"); # [h=]Maj(a,b,c)
&pxor ("mm6","mm7"); # Sigma0_512(a)
&movq ("mm7",&QWP(8*(9+16-1),"esp")) if ($phase!=0); # pre-fetch
&movq ("mm5",$Fsse2) if ($phase==0); # load f
&paddq ($BxC,"mm6"); # h+=Sigma0(a)
#&paddq ($BxC,"mm3"); # h+=T1
($A,$BxC) = ($BxC,$A); # rotate registers
&paddq ("mm3",$BxC); # T1+=Maj(a,b,c)
&paddq ("mm3","mm6"); # T1+=Sigma0(a)
&movq ("mm6",$Gsse2) if ($phase==0); # load g
#&movq ($A,"mm3"); # h=T1
#define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
#   LO  lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
#   HI  hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
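#
# As a cross-check, the same construction can be written out in Perl
# (an illustrative reference only, not used by the generator; the sub
# name and its lo/hi argument convention are ours):
#
#    sub Sigma1_ref {
#        my ($lo,$hi) = @_;          # e as two 32-bit halves
#        my $m = 0xffffffff;
#        my $rlo = (($lo>>14|$hi<<18) ^ ($lo>>18|$hi<<14) ^ ($hi>>9|$lo<<23)) & $m;
#        my $rhi = (($hi>>14|$lo<<18) ^ ($hi>>18|$lo<<14) ^ ($lo>>9|$hi<<23)) & $m;
#        return ($rlo,$rhi);         # low and high words of Sigma1(e)
#    }
#
# Rotations by 14 and 18 stay below 32 bits, so each result word mixes
# one half shifted right with the other shifted left by the complement;
# the rotation by 41 (= 32+9) swaps the roles of the two halves.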
&shr ("ecx",9); # lo>>9
&shr ("edx",9); # hi>>9
&shl ("esi",14); # lo<<14
&shl ("edi",14); # hi<<14
&shr ("ecx",14-9); # lo>>14
&shr ("edx",14-9); # hi>>14
&shl ("esi",18-14); # lo<<18
&shl ("edi",18-14); # hi<<18
&shr ("ecx",18-14); # lo>>18
&shr ("edx",18-14); # hi>>18
&shl ("esi",23-18); # lo<<23
&shl ("edi",23-18); # hi<<23
&xor ("ebx","edi"); # T1 = Sigma1(e)
&adc ("ebx",$Hhi); # T1 += h
&add ("eax",&DWP(8*(9+15)+0,"esp"));
&adc ("ebx",&DWP(8*(9+15)+4,"esp")); # T1 += X[0]
&xor ("edx","edi"); # Ch(e,f,g) = ((f^g)&e)^g
&mov ("esi",&DWP(0,$K512));
&mov ("edi",&DWP(4,$K512)); # K[i]
&adc ("ebx","edx"); # T1 += Ch(e,f,g)
&adc ("ebx","edi"); # T1 += K[i]
&mov ($Thi,"ebx"); # put T1 away
&adc ("ebx","edx"); # d += T1
#define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
#   LO  lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
#   HI  hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
&shr ("ecx",2); # lo>>2
&shr ("edx",2); # hi>>2
&shl ("esi",4); # lo<<4
&shl ("edi",4); # hi<<4
&shr ("ecx",7-2); # lo>>7
&shr ("edx",7-2); # hi>>7
&shl ("esi",25-4); # lo<<25
&shl ("edi",25-4); # hi<<25
&shr ("ecx",28-7); # lo>>28
&shr ("edx",28-7); # hi>>28
&shl ("esi",30-25); # lo<<30
&shl ("edi",30-25); # hi<<30
&xor ("ebx","edi"); # Sigma0(a)
&adc ("ebx",$Thi); # T1 = Sigma0(a)+T1
&or ("edx","edi"); # Maj(a,b,c) = ((a|b)&c)|(a&b)
&adc ("ebx","edx"); # T1 += Maj(a,b,c)
&mov (&LB("edx"),&BP(0,$K512)); # pre-fetch LSB of *K
&lea ($K512,&DWP(8,$K512)); # K++

&function_begin("sha512_block_data_order");
&mov ("esi",wparam(0)); # ctx
&mov ("edi",wparam(1)); # inp
&mov ("eax",wparam(2)); # num
&mov ("ebx","esp"); # saved sp
&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
&blindpop($K512);
&lea ($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
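# (The call/pop pair is the usual IA-32 PIC idiom: "call" pushes the
# address of the pic_point label, blindpop retrieves it into $K512, and
# the lea above then converts it to the run-time address of the K512
# table by adding the assemble-time difference K512-pic_point.)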
&mov (&DWP(0,"esp"),"esi"); # ctx
&mov (&DWP(4,"esp"),"edi"); # inp
&mov (&DWP(8,"esp"),"eax"); # inp+num*128
&mov (&DWP(12,"esp"),"ebx"); # saved sp
&picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
&mov ("ecx",&DWP(0,"edx"));
&jz (&label("loop_x86"));
&mov ("edx",&DWP(4,"edx"));
&movq ($A,&QWP(0,"esi"));
&and ("ecx",1<<24); # XMM registers availability
&movq ("mm1",&QWP(8,"esi"));
&and ("edx",1<<9); # SSSE3 bit
&movq ($BxC,&QWP(16,"esi"));
&movq ("mm3",&QWP(24,"esi"));
&movq ($E,&QWP(32,"esi"));
&movq ("mm5",&QWP(40,"esi"));
&movq ("mm6",&QWP(48,"esi"));
&movq ("mm7",&QWP(56,"esi"));
&cmp ("ecx",1<<24|1<<9);
&je (&label("SSSE3"));
&jmp (&label("loop_sse2"));

&set_label("loop_sse2",16);
&movq ($Bsse2,"mm1");
&movq ($Dsse2,"mm3");
&movq ($Fsse2,"mm5");
&movq ($Gsse2,"mm6");
&pxor ($BxC,"mm1"); # magic: keep b^c in $BxC for the Maj() computation
&movq ($Hsse2,"mm7");
&movq ("mm3",$A); # magic
&mov ("eax",&DWP(0,"edi"));
&mov ("ebx",&DWP(4,"edi"));
&mov ("edx",15); # counter
&jmp (&label("00_14_sse2"));

&set_label("00_14_sse2",16);
&mov ("eax",&DWP(0,"edi"));
&mov ("ebx",&DWP(4,"edi"));
&punpckldq("mm7","mm1");
&jnz (&label("00_14_sse2"));
&punpckldq("mm7","mm1");
&pxor ($A,$A); # A is in %mm3
&mov ("edx",32); # counter
&jmp (&label("16_79_sse2"));

&set_label("16_79_sse2",16);
for ($j=0;$j<2;$j++) { # 2x unroll
#&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15
&movq ("mm5",&QWP(8*(9+16-14),"esp"));
&paddq ($A,"mm3"); # from BODY_00_15
&psllq ("mm1",63-56);
&pxor ("mm7","mm3"); # sigma0
&paddq ("mm7",&QWP(8*(9+16),"esp"));
&psrlq ("mm5",61-19);
&paddq ("mm7",&QWP(8*(9+16-9),"esp"));
&movq ("mm5",$Fsse2); # load f
&pxor ("mm1","mm6"); # sigma1
&movq ("mm6",$Gsse2); # load g
&paddq ("mm7","mm1"); # X[i]
#&movq (&QWP(8*9,"esp"),"mm7"); # moved to BODY_00_15
&jnz (&label("16_79_sse2"));

&paddq ($A,"mm3"); # from BODY_00_15
&movq ("mm1",$Bsse2);
#&movq ($BxC,$Csse2);
&movq ("mm3",$Dsse2);
&movq ("mm5",$Fsse2);
&movq ("mm6",$Gsse2);
&movq ("mm7",$Hsse2);
&pxor ($BxC,"mm1"); # de-magic: recover c from b^c
&paddq ($A,&QWP(0,"esi"));
&paddq ("mm1",&QWP(8,"esi"));
&paddq ($BxC,&QWP(16,"esi"));
&paddq ("mm3",&QWP(24,"esi"));
&paddq ($E,&QWP(32,"esi"));
&paddq ("mm5",&QWP(40,"esi"));
&paddq ("mm6",&QWP(48,"esi"));
&paddq ("mm7",&QWP(56,"esi"));
&movq (&QWP(0,"esi"),$A);
&movq (&QWP(8,"esi"),"mm1");
&movq (&QWP(16,"esi"),$BxC);
&movq (&QWP(24,"esi"),"mm3");
&movq (&QWP(32,"esi"),$E);
&movq (&QWP(40,"esi"),"mm5");
&movq (&QWP(48,"esi"),"mm6");
&movq (&QWP(56,"esi"),"mm7");
&lea ("esp",&DWP(0,"esp","eax")); # destroy frame
&sub ($K512,"eax"); # rewind K
&cmp ("edi",&DWP(8*10+8,"esp")); # are we done yet?
&jb (&label("loop_sse2"));
&mov ("esp",&DWP(8*10+12,"esp")); # restore sp

&set_label("SSSE3",32);
{ my ($cnt,$frame)=("ecx","edx");
  my @X=map("xmm$_",(0..7));
&lea ($frame,&DWP(-64,"esp"));

# fixed stack frame layout
#
# +0    A B C D E F G H          # backing store
# +64   X[0]+K[i] .. X[15]+K[i]  # XMM->MM xfer area
# +192                           # XMM off-load ring buffer
# +256                           # saved parameters

&movdqa (@X[1],&QWP(80*8,$K512)); # byte swap mask
&movdqu (@X[0],&QWP(0,"edi"));
&pshufb (@X[0],@X[1]);
for ($j=0;$j<8;$j++) {
&movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
&movdqa (@X[3],&QWP(16*($j%8),$K512));
&movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
&movdqu (@X[1],&QWP(16*($j+1),"edi")) if ($j<7); # next input
&movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7); # restore @X[0]
&paddq (@X[3],@X[0]);
&pshufb (@X[1],@X[2]) if ($j<7);
&movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i]
push(@X,shift(@X)); # rotate(@X)
#&jmp (&label("loop_ssse3"));

&set_label("loop_ssse3",32);
&movdqa (@X[2],&QWP(16*(($j+1)%4),$frame)); # pre-restore @X[1]
&movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]); # off-load @X[3]
&lea ($K512,&DWP(16*8,$K512));
#&movq ($Asse2,$A); # off-load A-H
&movq ($Bsse2,"mm1");
&lea ("edi",&DWP(128,"edi")); # advance input
&movq ($Dsse2,"mm3");
&movq ($Fsse2,"mm5");
&cmovb ("ebx","edi");
&movq ($Gsse2,"mm6");
&mov ("ecx",4); # loop counter
&pxor ($BxC,"mm1"); # magic
&movq ($Hsse2,"mm7");
&pxor ("mm3","mm3"); # magic
&jmp (&label("00_47_ssse3"));
sub BODY_00_15_ssse3 { # "phase-less" copy of BODY_00_15_sse2
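# Unlike BODY_00_15_sse2 above, this sub does not emit code when it is
# called: it returns the round body as a list of quoted Perl statements.
# The loops below collect two rounds' worth of them into @insns and
# eval them around the SSSE3 message-schedule instructions, roughly
# (see the actual call sites for the exact interleave):
#
#    my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());  # two rounds
#    foreach (@insns) { eval; }                              # emit them
#
# so the MMX integer rounds can be scheduled around the latency of the
# SIMD message schedule.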
'&movq ("mm1",$E)', # %mm1 is sliding right
'&movq ("mm7",&QWP(((-8*$i)%128)-128,$frame))', # X[i]+K[i]
'&pxor ("mm5","mm6")', # f^=g
'&movq (&QWP(8*($i+4)%64,"esp"),$E)', # modulo-scheduled save e
'&pand ("mm5",$E)', # f&=e
'&psllq ($E,23)', # $E is sliding left
'&paddq ($A,"mm3")', # [h+=Maj(a,b,c)]
'&movq ("mm3","mm1")', # %mm3 is T1
'&pxor ("mm5","mm6")', # Ch(e,f,g)
'&pxor ("mm3","mm1")',
'&movq (&QWP(8*$i%64,"esp"),$A)', # modulo-scheduled save a
'&paddq("mm7","mm5")', # X[i]+=Ch(e,f,g)
'&paddq("mm7",&QWP(8*($i+7)%64,"esp"))', # X[i]+=h
'&pxor ("mm3","mm1")',
'&pxor ("mm3",$E)', # T1=Sigma1_512(e)
'&movq ($E,&QWP(8*($i+3)%64,"esp"))', # e = load d, e in next round
'&paddq ("mm3","mm7")', # T1+=X[i]
'&movq ("mm5",$A)', # %mm5 is sliding right
'&paddq ($E,"mm3")', # d += T1
'&movq ("mm6",$A)', # %mm6 is sliding left
'&movq ("mm7","mm5")',
'&movq ("mm1",&QWP(8*($i+1)%64,"esp"))', # load b
'&pxor ("mm7","mm6")',
'&pxor ("mm7","mm5")',
'&pxor ($A,"mm1")', # a^b, b^c in next round
'&pxor ("mm7","mm6")',
'&pand ($BxC,$A)', # (b^c)&(a^b)
'&pxor ("mm7","mm5")',
'&pxor ($BxC,"mm1")', # [h=]Maj(a,b,c)
'&pxor ("mm6","mm7")', # Sigma0_512(a)
'&movq ("mm5",&QWP(8*($i+5-1)%64,"esp"))', # pre-load f
'&paddq ($BxC,"mm6")', # h+=Sigma0(a)
'&movq ("mm6",&QWP(8*($i+6-1)%64,"esp"))', # pre-load g
'($A,$BxC) = ($BxC,$A); $i--;'
&set_label("00_47_ssse3",32);
my ($t0,$t2,$t1)=@X[2..4];
my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
&movdqa (@X[1],$t0); # restore @X[1]
&palignr ($t0,@X[0],8); # X[1..2]
&movdqa (&QWP(16*($j%4),$frame),@X[4]); # off-load @X[4]
&palignr ($t2,@X[4],8); # X[9..10]
&paddq (@X[0],$t2); # X[0..1] += X[9..10]
&pxor ($t0,$t2); # sigma0(X[1..2])
&paddq (@X[0],$t0); # X[0..1] += sigma0(X[1..2])
&movdqa ($t2,&QWP(16*(($j+2)%4),$frame)); # pre-restore @X[1]
&pxor ($t1,$t0); # sigma1(X[14..15])
&movdqa ($t0,&QWP(16*($j%8),$K512));
&paddq (@X[0],$t1); # X[0..1] += sigma1(X[14..15])
foreach(@insns) { eval; }
&movdqa (&QWP(16*($j%8)-128,$frame),$t0); # xfer X[i]+K[i]
push(@X,shift(@X)); # rotate(@X)
&lea ($K512,&DWP(16*8,$K512));
&jnz (&label("00_47_ssse3"));

&movdqa (@X[1],&QWP(0,$K512)); # byte swap mask
&lea ($K512,&DWP(-80*8,$K512)); # rewind
&movdqu (@X[0],&QWP(0,"ebx"));
&pshufb (@X[0],@X[1]);

for ($j=0;$j<8;$j++) { # load next or same block
my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
&movdqa (&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4); # off-load
&movdqa (@X[3],&QWP(16*($j%8),$K512));
&movdqa (@X[2],@X[1]) if ($j<7); # perpetuate byte swap mask
&movdqu (@X[1],&QWP(16*($j+1),"ebx")) if ($j<7); # next input
&movdqa (@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7); # restore @X[0]
&paddq (@X[3],@X[0]);
&pshufb (@X[1],@X[2]) if ($j<7);
foreach(@insns) { eval; }
&movdqa (&QWP(16*($j%8)-128,$frame),@X[3]); # xfer X[i]+K[i]
push(@X,shift(@X)); # rotate(@X)

#&movq ($A,$Asse2); # load A-H
&movq ("mm1",$Bsse2);
&paddq ($A,"mm3"); # from BODY_00_15
#&movq ($BxC,$Csse2);
&movq ("mm3",$Dsse2);
#&movq ("mm5",$Fsse2);
#&movq ("mm6",$Gsse2);
&movq ("mm7",$Hsse2);
&pxor ($BxC,"mm1"); # de-magic
&paddq ($A,&QWP(0,"esi"));
&paddq ("mm1",&QWP(8,"esi"));
&paddq ($BxC,&QWP(16,"esi"));
&paddq ("mm3",&QWP(24,"esi"));
&paddq ($E,&QWP(32,"esi"));
&paddq ("mm5",&QWP(40,"esi"));
&paddq ("mm6",&QWP(48,"esi"));
&paddq ("mm7",&QWP(56,"esi"));
&movq (&QWP(0,"esi"),$A);
&movq (&QWP(8,"esi"),"mm1");
&movq (&QWP(16,"esi"),$BxC);
&movq (&QWP(24,"esi"),"mm3");
&movq (&QWP(32,"esi"),$E);
&movq (&QWP(40,"esi"),"mm5");
&movq (&QWP(48,"esi"),"mm6");
&movq (&QWP(56,"esi"),"mm7");
&cmp ("edi","eax"); # are we done yet?
&jb (&label("loop_ssse3"));

&mov ("esp",&DWP(64+12,$frame)); # restore sp

&set_label("loop_x86",16);
# copy input block to stack reversing byte and qword order
for ($i=0;$i<8;$i++) {
&mov ("eax",&DWP($i*16+0,"edi"));
&mov ("ebx",&DWP($i*16+4,"edi"));
&mov ("ecx",&DWP($i*16+8,"edi"));
&mov ("edx",&DWP($i*16+12,"edi"));
&sub ("esp",9*8); # place for T,A,B,C,D,E,F,G,H
&mov (&DWP(8*(9+16)+4,"esp"),"edi");

# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&lea ("edi",&DWP(8,"esp"));
&data_word(0xA5F3F689); # bytes 89 F6, F3 A5: mov esi,esi; rep movsd
&set_label("00_15_x86",16);
&cmp (&LB("edx"),0x94);
&jne (&label("00_15_x86"));

&set_label("16_79_x86",16);
#define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
#   LO  lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
#   HI  hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
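#
# In the same illustrative Perl notation as for Sigma1 above (reference
# only, not used by the generator):
#
#    sub sigma0_ref {
#        my ($lo,$hi) = @_;
#        my $m = 0xffffffff;
#        my $rlo = (($lo>>1|$hi<<31) ^ ($lo>>8|$hi<<24) ^ ($lo>>7|$hi<<25)) & $m;
#        my $rhi = (($hi>>1|$lo<<31) ^ ($hi>>8|$lo<<24) ^ ($hi>>7)) & $m;
#        return ($rlo,$rhi);
#    }
#
# The plain shift ((x)>>7) is what breaks the symmetry: its high word is
# just hi>>7, with no bits wrapping in from the low half.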
&mov ("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
&mov ("edx",&DWP(8*(9+15+16-1)+4,"esp"));
&shr ("ecx",1); # lo>>1
&shr ("edx",1); # hi>>1
&shl ("esi",24); # lo<<24
&shl ("edi",24); # hi<<24
&shr ("ecx",7-1); # lo>>7
&shr ("edx",7-1); # hi>>7
&shl ("esi",31-24); # lo<<31
&shl ("edi",25-24); # hi<<25
&shr ("ecx",8-7); # lo>>8
&shr ("edx",8-7); # hi>>8
&shl ("edi",31-25); # hi<<31
&xor ("eax","edi"); # T1 = sigma0(X[-15])
&mov (&DWP(0,"esp"),"eax");
&mov (&DWP(4,"esp"),"ebx"); # put T1 away

#define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
#   LO  lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
#   HI  hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
&mov ("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
&mov ("edx",&DWP(8*(9+15+16-14)+4,"esp"));
&shr ("ecx",6); # lo>>6
&shr ("edx",6); # hi>>6
&shl ("esi",3); # lo<<3
&shl ("edi",3); # hi<<3
&shr ("ecx",19-6); # lo>>19
&shr ("edx",19-6); # hi>>19
&shl ("esi",13-3); # lo<<13
&shl ("edi",13-3); # hi<<13
&shr ("ecx",29-19); # lo>>29
&shr ("edx",29-19); # hi>>29
&shl ("edi",26-13); # hi<<26
&xor ("eax","edi"); # sigma1(X[-2])
&mov ("ecx",&DWP(8*(9+15+16)+0,"esp"));
&mov ("edx",&DWP(8*(9+15+16)+4,"esp"));
&add ("eax",&DWP(0,"esp"));
&adc ("ebx",&DWP(4,"esp")); # T1 = sigma1(X[-2])+T1
&mov ("esi",&DWP(8*(9+15+16-9)+0,"esp"));
&mov ("edi",&DWP(8*(9+15+16-9)+4,"esp"));
&adc ("ebx","edx"); # T1 += X[-16]
&adc ("ebx","edi"); # T1 += X[-7]
&mov (&DWP(8*(9+15)+0,"esp"),"eax");
&mov (&DWP(8*(9+15)+4,"esp"),"ebx"); # save X[0]
&cmp (&LB("edx"),0x17);
&jne (&label("16_79_x86"));

&mov ("esi",&DWP(8*(9+16+80)+0,"esp")); # ctx
&mov ("edi",&DWP(8*(9+16+80)+4,"esp")); # inp
for($i=0;$i<4;$i++) {
&mov ("eax",&DWP($i*16+0,"esi"));
&mov ("ebx",&DWP($i*16+4,"esi"));
&mov ("ecx",&DWP($i*16+8,"esi"));
&mov ("edx",&DWP($i*16+12,"esi"));
&add ("eax",&DWP(8+($i*16)+0,"esp"));
&adc ("ebx",&DWP(8+($i*16)+4,"esp"));
&mov (&DWP($i*16+0,"esi"),"eax");
&mov (&DWP($i*16+4,"esi"),"ebx");
&add ("ecx",&DWP(8+($i*16)+8,"esp"));
&adc ("edx",&DWP(8+($i*16)+12,"esp"));
&mov (&DWP($i*16+8,"esi"),"ecx");
&mov (&DWP($i*16+12,"esi"),"edx");
&add ("esp",8*(9+16+80)); # destroy frame
&sub ($K512,8*80); # rewind K
&cmp ("edi",&DWP(8,"esp")); # are we done yet?
&jb (&label("loop_x86"));

&mov ("esp",&DWP(12,"esp")); # restore sp
&set_label("K512",64); # Yes! I keep it in the code segment!
&data_word(0xd728ae22,0x428a2f98); # u64
&data_word(0x23ef65cd,0x71374491); # u64
&data_word(0xec4d3b2f,0xb5c0fbcf); # u64
&data_word(0x8189dbbc,0xe9b5dba5); # u64
&data_word(0xf348b538,0x3956c25b); # u64
&data_word(0xb605d019,0x59f111f1); # u64
&data_word(0xaf194f9b,0x923f82a4); # u64
&data_word(0xda6d8118,0xab1c5ed5); # u64
&data_word(0xa3030242,0xd807aa98); # u64
&data_word(0x45706fbe,0x12835b01); # u64
&data_word(0x4ee4b28c,0x243185be); # u64
&data_word(0xd5ffb4e2,0x550c7dc3); # u64
&data_word(0xf27b896f,0x72be5d74); # u64
&data_word(0x3b1696b1,0x80deb1fe); # u64
&data_word(0x25c71235,0x9bdc06a7); # u64
&data_word(0xcf692694,0xc19bf174); # u64
&data_word(0x9ef14ad2,0xe49b69c1); # u64
&data_word(0x384f25e3,0xefbe4786); # u64
&data_word(0x8b8cd5b5,0x0fc19dc6); # u64
&data_word(0x77ac9c65,0x240ca1cc); # u64
&data_word(0x592b0275,0x2de92c6f); # u64
&data_word(0x6ea6e483,0x4a7484aa); # u64
&data_word(0xbd41fbd4,0x5cb0a9dc); # u64
&data_word(0x831153b5,0x76f988da); # u64
&data_word(0xee66dfab,0x983e5152); # u64
&data_word(0x2db43210,0xa831c66d); # u64
&data_word(0x98fb213f,0xb00327c8); # u64
&data_word(0xbeef0ee4,0xbf597fc7); # u64
&data_word(0x3da88fc2,0xc6e00bf3); # u64
&data_word(0x930aa725,0xd5a79147); # u64
&data_word(0xe003826f,0x06ca6351); # u64
&data_word(0x0a0e6e70,0x14292967); # u64
&data_word(0x46d22ffc,0x27b70a85); # u64
&data_word(0x5c26c926,0x2e1b2138); # u64
&data_word(0x5ac42aed,0x4d2c6dfc); # u64
&data_word(0x9d95b3df,0x53380d13); # u64
&data_word(0x8baf63de,0x650a7354); # u64
&data_word(0x3c77b2a8,0x766a0abb); # u64
&data_word(0x47edaee6,0x81c2c92e); # u64
&data_word(0x1482353b,0x92722c85); # u64
&data_word(0x4cf10364,0xa2bfe8a1); # u64
&data_word(0xbc423001,0xa81a664b); # u64
&data_word(0xd0f89791,0xc24b8b70); # u64
&data_word(0x0654be30,0xc76c51a3); # u64
&data_word(0xd6ef5218,0xd192e819); # u64
&data_word(0x5565a910,0xd6990624); # u64
&data_word(0x5771202a,0xf40e3585); # u64
&data_word(0x32bbd1b8,0x106aa070); # u64
&data_word(0xb8d2d0c8,0x19a4c116); # u64
&data_word(0x5141ab53,0x1e376c08); # u64
&data_word(0xdf8eeb99,0x2748774c); # u64
&data_word(0xe19b48a8,0x34b0bcb5); # u64
&data_word(0xc5c95a63,0x391c0cb3); # u64
&data_word(0xe3418acb,0x4ed8aa4a); # u64
&data_word(0x7763e373,0x5b9cca4f); # u64
&data_word(0xd6b2b8a3,0x682e6ff3); # u64
&data_word(0x5defb2fc,0x748f82ee); # u64
&data_word(0x43172f60,0x78a5636f); # u64
&data_word(0xa1f0ab72,0x84c87814); # u64
&data_word(0x1a6439ec,0x8cc70208); # u64
&data_word(0x23631e28,0x90befffa); # u64
&data_word(0xde82bde9,0xa4506ceb); # u64
&data_word(0xb2c67915,0xbef9a3f7); # u64
&data_word(0xe372532b,0xc67178f2); # u64
&data_word(0xea26619c,0xca273ece); # u64
&data_word(0x21c0c207,0xd186b8c7); # u64
&data_word(0xcde0eb1e,0xeada7dd6); # u64
&data_word(0xee6ed178,0xf57d4f7f); # u64
&data_word(0x72176fba,0x06f067aa); # u64
&data_word(0xa2c898a6,0x0a637dc5); # u64
&data_word(0xbef90dae,0x113f9804); # u64
&data_word(0x131c471b,0x1b710b35); # u64
&data_word(0x23047d84,0x28db77f5); # u64
&data_word(0x40c72493,0x32caab7b); # u64
&data_word(0x15c9bebc,0x3c9ebe0a); # u64
&data_word(0x9c100d4c,0x431d67c4); # u64
&data_word(0xcb3e42b6,0x4cc5d4be); # u64
&data_word(0xfc657e2a,0x597f299c); # u64
&data_word(0x3ad6faec,0x5fcb6fab); # u64
&data_word(0x4a475817,0x6c44198c); # u64

&data_word(0x04050607,0x00010203); # byte swap
&data_word(0x0c0d0e0f,0x08090a0b); # mask
&function_end_B("sha512_block_data_order");
&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");