3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. Rights for redistribution and usage in source and binary
6 # forms are granted according to the OpenSSL license.
7 # ====================================================================
9 # sha256/512_block procedure for x86_64.
11 # 40% improvement over compiler-generated code on Opteron. On EM64T
12 # sha256 was observed to run >80% faster and sha512 >40% faster. No
13 # magical tricks, just straight implementation... I really wonder why
14 # gcc [being armed with inline assembler] fails to generate code as fast.
15 # The only thing which is cool about this module is that it's the
16 # very same instruction sequence used for both SHA-256 and SHA-512. In
17 # the former case the instructions operate on 32-bit operands, while in
18 # the latter - on 64-bit ones. All I had to do was get one flavor
19 # right, and the other one passed the test right away:-)
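#
# For the record, both flavors come out of one parameterized round
# description, roughly along these lines:
#
#	if ($output =~ /512/) { $SZ=8; @Sigma0=(28,34,39); @Sigma1=(14,18,41); }
#	else                  { $SZ=4; @Sigma0=( 2,13,22); @Sigma1=( 6,11,25); }
#	for ($i=0;$i<16;$i++)  { &ROUND_00_15($i,@ROT); unshift(@ROT,pop(@ROT)); }
#	for (;$i<$rounds;$i++) { &ROUND_16_XX($i,@ROT); unshift(@ROT,pop(@ROT)); }
#
# where only the word size $SZ, the rotate constants and the round
# count differ between SHA-256 and SHA-512.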
21 # sha256_block runs in ~1005 cycles on Opteron, which gives you
22 # asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
23 # frequency in GHz. sha512_block runs in ~1275 cycles, which results
24 # in 128*1000/1275=100MBps per GHz. Is there room for improvement?
25 # Well, if you compare it to the IA-64 implementation, which maintains
26 # X[16] in the register bank[!], approaches 4 instructions per CPU clock
27 # cycle and runs in 1003 cycles, 1275 is a very good result for the
28 # 3-way issue Opteron pipeline with X[16] maintained in memory. So *if*
29 # there is a way to improve it, *then* the only way would be to try to
30 # offload X[16] updates to the SSE unit, but that would require a
31 # "deeper" loop unroll, which in turn would naturally cause size
32 # blow-up, not to mention increased complexity! And once again, only
33 # *if* it's actually possible to noticeably improve overall
34 # instruction-level parallelism, ILP, on a given CPU implementation.
36 # Special note on Intel EM64T. While the Opteron CPU exhibits a perfect
37 # performance ratio of 1.5 between 64- and 32-bit flavors [see above],
38 # [currently available] EM64T CPUs apparently are far from it. On the
39 # contrary, the 64-bit version, sha512_block, is ~30% *slower* than the
40 # 32-bit sha256_block:-( This is presumably because 64-bit shifts and
41 # rotates are not single hardware operations, but are implemented in microcode.
45 # An optimization including one of Pavel Semjanov's ideas, the
46 # alternative Maj, resulted in a >=5% improvement on most CPUs: +20% for
47 # SHA256 and unfortunately -10% SHA512 on P4 [which nobody should care about anyway].
52 # Add SIMD code paths, see below for improvement coefficients. An SSSE3
53 # code path was not attempted for SHA512, because the estimated
54 # improvement, noticeably less than 9%, is not high enough to justify
55 # the effort, at least not on pre-AVX processors. [The obvious exception
56 # is VIA Nano, but it has a SHA512 instruction that is faster and
57 # should be used instead.] For reference, the corresponding estimated
58 # upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
59 # higher coefficients are observed on VIA Nano and Bulldozer has more
60 # to do with specifics of their architecture [which is a topic for a
61 # separate discussion].
63 ######################################################################
64 # Current performance in cycles per processed byte (less is better):
66 # SHA256 SSSE3 AVX/XOP(*) SHA512 AVX/XOP(*)
68 # AMD K8 15.1 - - 9.70 -
70 # Core 2 15.5 13.9(+11%) - 10.3 -
71 # Westmere 15.1 12.5(+21%) - 9.72 -
72 # Atom 23.0 21.6(+6%) - 14.7 -
73 # VIA Nano 23.0 16.3(+41%) - 14.7 -
74 # Sandy Bridge 17.4 14.0(+24%) 11.6(+50%(**)) 11.2 8.10(+38%(**))
75 # Ivy Bridge 12.6 10.3(+22%) 10.3(+22%) 8.17 7.22(+13%)
76 # Bulldozer 21.5 13.7(+57%) 13.7(+57%(***)) 13.5 8.58(+57%)
78 # (*)	whichever is applicable;
79 # (**)	the switch from ror to shrd accounts for a fair share of the improvement;
80 # (***)	execution time is fully determined by the remaining integer-only
81 #	part, body_00_15; reducing the amount of SIMD instructions
82 #	below a certain limit makes no difference/sense; to conserve
83 #	space the SHA256 XOP code path is therefore omitted;
87 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
89 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
91 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
92 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
93 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
94 die "can't locate x86_64-xlate.pl";
96 $avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
97 =~ /GNU assembler version ([2-9]\.[0-9]+)/ && $1>=2.19);
99 $avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
100 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && $1>=2.03);
102 $avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
103 `ml64 2>&1` =~ /Version ([0-9]+)\./ && $1>=10);
106 open OUT,"| \"$^X\" $xlate $flavour $output";
109 if ($output =~ /512/) {
110 $func="sha512_block_data_order";
113 @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
114 "%r8", "%r9", "%r10","%r11");
115 ($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
122 $func="sha256_block_data_order";
125 @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
126 "%r8d","%r9d","%r10d","%r11d");
127 ($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
135 $ctx="%rdi"; # 1st arg, zapped by $a3
136 $inp="%rsi"; # 2nd arg
139 $_ctx="16*$SZ+0*8(%rsp)";
140 $_inp="16*$SZ+1*8(%rsp)";
141 $_end="16*$SZ+2*8(%rsp)";
142 $_rsp="16*$SZ+3*8(%rsp)";
143 $framesz="16*$SZ+4*8";
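# Resulting stack frame layout, for reference:
#
#	+0		X[0..15], circular message schedule, 16*$SZ bytes
#	+16*$SZ+0*8	saved ctx [1st arg; %rdi is recycled as $a3]
#	+16*$SZ+1*8	saved inp [2nd arg]
#	+16*$SZ+2*8	end pointer, inp+num*16*$SZ
#	+16*$SZ+3*8	saved original %rsp
#
# SIMD code paths on Win64 extend it with an xmm6-11 save area at
# +16*$SZ+4*8.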
147 { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
150 ror \$`$Sigma1[2]-$Sigma1[1]`,$a0
153 ror \$`$Sigma0[2]-$Sigma0[1]`,$a1
157 mov $T1,`$SZ*($i&0xf)`(%rsp)
161 ror \$`$Sigma1[1]-$Sigma1[0]`,$a0
163 xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g
165 ror \$`$Sigma0[1]-$Sigma0[0]`,$a1
167 add $a2,$T1 # T1+=Ch(e,f,g)
170 add ($Tbl),$T1 # T1+=K[round]
173 ror \$$Sigma1[0],$a0 # Sigma1(e)
174 xor $b,$a2 # a^b, b^c in next round
177 ror \$$Sigma0[0],$a1 # Sigma0(a)
179 add $a0,$T1 # T1+=Sigma1(e)
181 xor $a3,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
185 $code.=<<___ if ($i>=15);
186 mov `$SZ*(($i+2)&0xf)`(%rsp),$a0
189 lea $SZ($Tbl),$Tbl # round++
190 add $a1,$h # h+=Sigma0(a)
193 ($a2,$a3) = ($a3,$a2);
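
# In essence one round of the FIPS 180-4 compression function:
#
#	T1  = h + Sigma1(e) + Ch(e,f,g) + K[round] + X[round%16];
#	d  += T1;
#	h   = T1 + Sigma0(a) + Maj(a,b,c);
#
# with Ch(e,f,g) evaluated as ((f^g)&e)^g to save one instruction.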
197 { my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
200 #mov `$SZ*(($i+1)&0xf)`(%rsp),$a0
201 mov `$SZ*(($i+14)&0xf)`(%rsp),$a1
204 ror \$`$sigma0[1]-$sigma0[0]`,$a0
206 ror \$`$sigma1[1]-$sigma1[0]`,$a1
214 xor $a0,$T1 # sigma0(X[(i+1)&0xf])
216 add `$SZ*(($i+9)&0xf)`(%rsp),$T1
217 xor $a2,$a1 # sigma1(X[(i+14)&0xf])
219 add `$SZ*($i&0xf)`(%rsp),$T1
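	# i.e. the standard message schedule expansion
	#	X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]
	# with all sixteen X values kept in a circular buffer on the
	# stack and indexed modulo 16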
230 .extern OPENSSL_ia32cap_P
232 .type $func,\@function,4
236 $code.=<<___ if ($SZ==4 || $avx);
237 lea OPENSSL_ia32cap_P(%rip),%r11
241 $code.=<<___ if ($avx && $SZ==8);
242 test \$`1<<11`,%r11d # check for XOP
245 $code.=<<___ if ($avx);
246 and \$`1<<30`,%r10d # mask "Intel CPU" bit
247 and \$`1<<28|1<<9`,%r11d # mask AVX and SSSE3 bits
249 cmp \$`1<<28|1<<9|1<<30`,%r11d
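	# the OPENSSL_ia32cap_P words tested above cache CPUID feature
	# flags: bit 9 - SSSE3 and bit 28 - AVX from leaf-1 ECX, bit 30 -
	# the synthetic "Intel CPU" flag; bit 11 of the capability word
	# is recycled by OPENSSL_cpuid_setup for AMD XOP, leaf-1 ECX
	# bit 11 being reserved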
252 $code.=<<___ if ($SZ==4);
263 mov %rsp,%r11 # copy %rsp
264 shl \$4,%rdx # num*16
266 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
267 and \$-64,%rsp # align stack frame
268 mov $ctx,$_ctx # save ctx, 1st arg
269 mov $inp,$_inp # save inp, 2nd arg
270 mov %rdx,$_end # save end pointer, "3rd" arg
271 mov %r11,$_rsp # save copy of %rsp
287 lea $TABLE(%rip),$Tbl
290 for($i=0;$i<16;$i++) {
291 $code.=" mov $SZ*$i($inp),$T1\n";
292 $code.=" mov @ROT[4],$a0\n";
293 $code.=" mov @ROT[0],$a1\n";
294 $code.=" bswap $T1\n";
295 &ROUND_00_15($i,@ROT);
296 unshift(@ROT,pop(@ROT));
304 &ROUND_16_XX($i,@ROT);
305 unshift(@ROT,pop(@ROT));
309 cmpb \$0,`$SZ-1`($Tbl)
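	# no K constant has a zero most significant byte, so the loop
	# terminates once the table pointer has advanced past the last
	# round constant onto the byte-order data that follows the table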
313 lea 16*$SZ($inp),$inp
352 .type $TABLE,\@object
354 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
355 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
356 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
357 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
358 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
359 .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
360 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
361 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
362 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
363 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
364 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
365 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
366 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
367 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
368 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
369 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
371 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
372 .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
373 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
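# the three preceding rows are not K constants: first the byte-swap
# mask for loading big-endian input, then two pshufb masks that gather
# sigma1 results from dword lanes 0 and 2 into lanes 0-1 and 2-3
# respectively, the 0xff bytes zeroing the remaining lanes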
374 .asciz "SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
379 .type $TABLE,\@object
381 .quad 0x428a2f98d728ae22,0x7137449123ef65cd
382 .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
383 .quad 0x3956c25bf348b538,0x59f111f1b605d019
384 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
385 .quad 0xd807aa98a3030242,0x12835b0145706fbe
386 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
387 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
388 .quad 0x9bdc06a725c71235,0xc19bf174cf692694
389 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
390 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
391 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
392 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
393 .quad 0x983e5152ee66dfab,0xa831c66d2db43210
394 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
395 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
396 .quad 0x06ca6351e003826f,0x142929670a0e6e70
397 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
398 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
399 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
400 .quad 0x81c2c92e47edaee6,0x92722c851482353b
401 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
402 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
403 .quad 0xd192e819d6ef5218,0xd69906245565a910
404 .quad 0xf40e35855771202a,0x106aa07032bbd1b8
405 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
406 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
407 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
408 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
409 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
410 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
411 .quad 0x90befffa23631e28,0xa4506cebde82bde9
412 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
413 .quad 0xca273eceea26619c,0xd186b8c721c0c207
414 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
415 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
416 .quad 0x113f9804bef90dae,0x1b710b35131c471b
417 .quad 0x28db77f523047d84,0x32caab7b40c72493
418 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
419 .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
420 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
422 .quad 0x0001020304050607,0x08090a0b0c0d0e0f
423 .asciz "SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
427 ######################################################################
433 my ($a,$b,$c,$d,$e,$f,$g,$h);
435 sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
436 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
438 $arg = "\$$arg" if ($arg*1 eq $arg);
439 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
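	# e.g. &ror($a0,$Sigma1[0]) lands here and comes out as
	# "ror \$14,%r13" for SHA-512: the immediate gets its "\$"
	# prefix and the argument order is reversed to AT&T convention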
444 '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
446 '&ror ($a0,$Sigma1[2]-$Sigma1[1])',
451 '&ror ($a1,$Sigma0[2]-$Sigma0[1])',
452 '&xor ($a4,$g)', # f^g
454 '&ror ($a0,$Sigma1[1]-$Sigma1[0])',
456 '&and ($a4,$e)', # (f^g)&e
459 '&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i]
462 '&ror ($a1,$Sigma0[1]-$Sigma0[0])',
463 '&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g
464 '&xor ($a2,$b)', # a^b, b^c in next round
466 '&ror ($a0,$Sigma1[0])', # Sigma1(e)
467 '&add ($h,$a4)', # h+=Ch(e,f,g)
468 '&and ($a3,$a2)', # (b^c)&(a^b)
471 '&add ($h,$a0)', # h+=Sigma1(e)
472 '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
474 '&add ($d,$h)', # d+=h
475 '&ror ($a1,$Sigma0[0])', # Sigma0(a)
476 '&add ($h,$a3)', # h+=Maj(a,b,c)
479 '&add ($a1,$h);'. # h+=Sigma0(a)
480 '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
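
# The stream above uses Pavel Semjanov's alternative Maj,
#
#	Maj(a,b,c) = ((a^b)&(b^c))^b [= Ch(a^b,c,b)],
#
# attractive because a^b computed in one round is exactly the b^c of
# the next round [as a,b,c rotate], so one xor per round is recycled.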
484 ######################################################################
487 if ($SZ==4) { # SHA256 only
488 my @X = map("%xmm$_",(0..3));
489 my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
492 .type ${func}_ssse3,\@function,4
502 mov %rsp,%r11 # copy %rsp
503 shl \$4,%rdx # num*16
504 sub \$`$framesz+$win64*16*4`,%rsp
505 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
506 and \$-64,%rsp # align stack frame
507 mov $ctx,$_ctx # save ctx, 1st arg
508 mov $inp,$_inp # save inp, 2nd arg
509 mov %rdx,$_end # save end pointer, "3rd" arg
510 mov %r11,$_rsp # save copy of %rsp
512 $code.=<<___ if ($win64);
513 movaps %xmm6,16*$SZ+32(%rsp)
514 movaps %xmm7,16*$SZ+48(%rsp)
515 movaps %xmm8,16*$SZ+64(%rsp)
516 movaps %xmm9,16*$SZ+80(%rsp)
532 movdqa $TABLE+`$SZ*$rounds`+16(%rip),$t4
533 movdqa $TABLE+`$SZ*$rounds`+32(%rip),$t5
537 movdqa $TABLE+`$SZ*$rounds`(%rip),$t3
538 movdqu 0x00($inp),@X[0]
539 movdqu 0x10($inp),@X[1]
540 movdqu 0x20($inp),@X[2]
541 movdqu 0x30($inp),@X[3]
543 lea $TABLE(%rip),$Tbl
545 movdqa 0x00($Tbl),$t0
547 movdqa 0x10($Tbl),$t1
549 movdqa 0x20($Tbl),$t2
551 movdqa 0x30($Tbl),$t3
555 movdqa $t0,0x00(%rsp)
557 movdqa $t1,0x10(%rsp)
559 movdqa $t2,0x20(%rsp)
561 movdqa $t3,0x30(%rsp)
569 sub Xupdate_256_SSSE3 () {
571 '&movdqa ($t0,@X[1]);',
572 '&movdqa ($t3,@X[3])',
573 '&palignr ($t0,@X[0],$SZ)', # X[1..4]
574 '&palignr ($t3,@X[2],$SZ);', # X[9..12]
576 '&movdqa ($t2,$t0);',
577 '&psrld ($t0,$sigma0[2])',
578 '&paddd (@X[0],$t3);', # X[0..3] += X[9..12]
579 '&psrld ($t2,$sigma0[0])',
580 '&pshufd ($t3,@X[3],0b11111010)',# X[14..15]
581 '&pslld ($t1,8*$SZ-$sigma0[1]);'.
583 '&psrld ($t2,$sigma0[1]-$sigma0[0]);'.
585 '&pslld ($t1,$sigma0[1]-$sigma0[0]);'.
588 '&pxor ($t0,$t1);', # sigma0(X[1..4])
589 '&psrld ($t3,$sigma1[2])',
590 '&paddd (@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
591 '&psrlq ($t2,$sigma1[0])',
593 '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
595 '&pshufb ($t3,$t4)', # sigma1(X[14..15])
596 '&paddd (@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
597 '&pshufd ($t3,@X[0],0b01010000)',# X[16..17]
598 '&movdqa ($t2,$t3);',
599 '&psrld ($t3,$sigma1[2])',
600 '&psrlq ($t2,$sigma1[0])',
602 '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
604 '&movdqa ($t2,16*$j."($Tbl)")',
606 '&paddd (@X[0],$t3)' # X[2..3] += sigma1(X[16..17])
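
# SSE carries no vector rotate, so each sigma above is pieced together
# from shifts, e.g. SHA-256's sigma0(x) = (x>>>7)^(x>>>18)^(x>>3) is
# computed as
#
#	(x>>7 ^ x<<25) ^ (x>>18 ^ x<<14) ^ (x>>3)
#
# with psrld/pslld/pxor, while sigma1's rotates are emulated with
# 64-bit psrlq over dword pairs and a final pshufb to gather the
# surviving 32-bit lanes.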
610 sub SSSE3_256_00_47 () {
614 my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
617 foreach (Xupdate_256_SSSE3()) { # 36 instructions
623 } else { # squeeze extra 3% on Westmere and Atom
624 eval(shift(@insns)); #@
630 eval(shift(@insns)); #@
632 &palignr ($t0,@X[0],$SZ); # X[1..4]
633 eval(shift(@insns)); #@
635 &palignr ($t3,@X[2],$SZ); # X[9..12]
640 eval(shift(@insns)); #@
645 eval(shift(@insns)); #@
648 &psrld ($t0,$sigma0[2]);
652 &paddd (@X[0],$t3); # X[0..3] += X[9..12]
654 eval(shift(@insns)); #@
656 &psrld ($t2,$sigma0[0]);
659 eval(shift(@insns)); #@
661 &pshufd ($t3,@X[3],0b11111010); # X[14..15]
663 &pslld ($t1,8*$SZ-$sigma0[1]);
666 eval(shift(@insns)); #@
668 &psrld ($t2,$sigma0[1]-$sigma0[0]);
669 eval(shift(@insns)); #@
674 &pslld ($t1,$sigma0[1]-$sigma0[0]);
678 eval(shift(@insns)); #@
682 eval(shift(@insns)); #@
684 &pxor ($t0,$t1); # sigma0(X[1..4])
687 &psrld ($t3,$sigma1[2]);
690 &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
692 eval(shift(@insns)); #@
695 &psrlq ($t2,$sigma1[0]);
697 eval(shift(@insns)); #@
702 eval(shift(@insns)); #@
703 &psrlq ($t2,$sigma1[1]-$sigma1[0]);
705 eval(shift(@insns)); #@
711 &pshufb ($t3,$t4); # sigma1(X[14..15])
713 eval(shift(@insns)); #@
716 eval(shift(@insns)); #@
717 &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
719 &pshufd ($t3,@X[0],0b01010000); # X[16..17]
726 eval(shift(@insns)); #@
728 &psrld ($t3,$sigma1[2]);
730 &psrlq ($t2,$sigma1[0]);
732 eval(shift(@insns)); #@
737 eval(shift(@insns)); #@
739 &psrlq ($t2,$sigma1[1]-$sigma1[0]);
740 eval(shift(@insns)); #@
747 &movdqa ($t2,16*$j."($Tbl)");
748 eval(shift(@insns)); #@
752 eval(shift(@insns)); #@
756 &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
762 foreach (@insns) { eval; } # remaining instructions
763 &movdqa (16*$j."(%rsp)",$t2);
766 for ($i=0,$j=0; $j<4; $j++) {
767 &SSSE3_256_00_47($j,\&body_00_15,@X);
768 push(@X,shift(@X)); # rotate(@X)
770 &cmpb ($SZ-1+16*$SZ."($Tbl)",0);
771 &jne (".Lssse3_00_47");
773 for ($i=0; $i<16; ) {
774 foreach(body_00_15()) { eval; }
781 lea 16*$SZ($inp),$inp
804 $code.=<<___ if ($win64);
805 movaps 16*$SZ+32(%rsp),%xmm6
806 movaps 16*$SZ+48(%rsp),%xmm7
807 movaps 16*$SZ+64(%rsp),%xmm8
808 movaps 16*$SZ+80(%rsp),%xmm9
820 .size ${func}_ssse3,.-${func}_ssse3
825 ######################################################################
828 if ($SZ==8) { # SHA512 only
830 .type ${func}_xop,\@function,4
840 mov %rsp,%r11 # copy %rsp
841 shl \$4,%rdx # num*16
842 sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
843 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
844 and \$-64,%rsp # align stack frame
845 mov $ctx,$_ctx # save ctx, 1st arg
846 mov $inp,$_inp # save inp, 2nd arg
847 mov %rdx,$_end # save end pointer, "3rd" arg
848 mov %r11,$_rsp # save copy of %rsp
850 $code.=<<___ if ($win64);
851 movaps %xmm6,16*$SZ+32(%rsp)
852 movaps %xmm7,16*$SZ+48(%rsp)
853 movaps %xmm8,16*$SZ+64(%rsp)
854 movaps %xmm9,16*$SZ+80(%rsp)
856 $code.=<<___ if ($win64 && $SZ>4);
857 movaps %xmm10,16*$SZ+96(%rsp)
858 movaps %xmm11,16*$SZ+112(%rsp)
874 if ($SZ==4) { # SHA256
875 my @X = map("%xmm$_",(0..3));
876 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
881 vmovdqa $TABLE+`$SZ*$rounds`(%rip),$t3
882 vmovdqu 0x00($inp),@X[0]
883 vmovdqu 0x10($inp),@X[1]
884 vmovdqu 0x20($inp),@X[2]
885 vmovdqu 0x30($inp),@X[3]
886 vpshufb $t3,@X[0],@X[0]
887 lea $TABLE(%rip),$Tbl
888 vpshufb $t3,@X[1],@X[1]
889 vpshufb $t3,@X[2],@X[2]
890 vpaddd 0x00($Tbl),@X[0],$t0
891 vpshufb $t3,@X[3],@X[3]
892 vpaddd 0x10($Tbl),@X[1],$t1
893 vpaddd 0x20($Tbl),@X[2],$t2
894 vpaddd 0x30($Tbl),@X[3],$t3
895 vmovdqa $t0,0x00(%rsp)
897 vmovdqa $t1,0x10(%rsp)
899 vmovdqa $t2,0x20(%rsp)
901 vmovdqa $t3,0x30(%rsp)
909 sub XOP_256_00_47 () {
913 my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
915 &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4]
918 &vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12]
921 &vprotd ($t1,$t0,8*$SZ-$sigma0[1]);
924 &vpsrld ($t0,$t0,$sigma0[2]);
927 &vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12]
932 &vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]);
935 &vpxor ($t0,$t0,$t1);
940 &vprotd ($t3,@X[3],8*$SZ-$sigma1[1]);
943 &vpxor ($t0,$t0,$t2); # sigma0(X[1..4])
946 &vpsrld ($t2,@X[3],$sigma1[2]);
949 &vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4])
952 &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
955 &vpxor ($t3,$t3,$t2);
960 &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
965 &vpsrldq ($t3,$t3,8);
970 &vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
975 &vprotd ($t3,@X[0],8*$SZ-$sigma1[1]);
978 &vpsrld ($t2,@X[0],$sigma1[2]);
981 &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
984 &vpxor ($t3,$t3,$t2);
989 &vpxor ($t3,$t3,$t1); # sigma1(X[16..17])
994 &vpslldq ($t3,$t3,8); # 22 instructions
999 &vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17])
1000 eval(shift(@insns));
1001 eval(shift(@insns));
1002 eval(shift(@insns));
1003 eval(shift(@insns));
1004 &vpaddd ($t2,@X[0],16*$j."($Tbl)");
1005 foreach (@insns) { eval; } # remaining instructions
1006 &vmovdqa (16*$j."(%rsp)",$t2);
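
# XOP pays off here because vprotd/vprotq are true vector rotates:
# sigma0(x) = (x>>>7)^(x>>>18)^(x>>3) above is two vprotd, one vpsrld
# and two vpxor, instead of a shift-shift-xor emulation of every
# single rotate on plain SSE/AVX.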
1009 for ($i=0,$j=0; $j<4; $j++) {
1010 &XOP_256_00_47($j,\&body_00_15,@X);
1011 push(@X,shift(@X)); # rotate(@X)
1013 &cmpb ($SZ-1+16*$SZ."($Tbl)",0);
1014 &jne (".Lxop_00_47");
1016 for ($i=0; $i<16; ) {
1017 foreach(body_00_15()) { eval; }
1021 my @X = map("%xmm$_",(0..7));
1022 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
1027 vmovdqa $TABLE+`$SZ*$rounds`(%rip),$t3
1028 vmovdqu 0x00($inp),@X[0]
1029 lea $TABLE(%rip),$Tbl
1030 vmovdqu 0x10($inp),@X[1]
1031 vmovdqu 0x20($inp),@X[2]
1032 vpshufb $t3,@X[0],@X[0]
1033 vmovdqu 0x30($inp),@X[3]
1034 vpshufb $t3,@X[1],@X[1]
1035 vmovdqu 0x40($inp),@X[4]
1036 vpshufb $t3,@X[2],@X[2]
1037 vmovdqu 0x50($inp),@X[5]
1038 vpshufb $t3,@X[3],@X[3]
1039 vmovdqu 0x60($inp),@X[6]
1040 vpshufb $t3,@X[4],@X[4]
1041 vmovdqu 0x70($inp),@X[7]
1042 vpshufb $t3,@X[5],@X[5]
1043 vpaddq 0x00($Tbl),@X[0],$t0
1044 vpshufb $t3,@X[6],@X[6]
1045 vpaddq 0x10($Tbl),@X[1],$t1
1046 vpshufb $t3,@X[7],@X[7]
1047 vpaddq 0x20($Tbl),@X[2],$t2
1048 vpaddq 0x30($Tbl),@X[3],$t3
1049 vmovdqa $t0,0x00(%rsp)
1050 vpaddq 0x40($Tbl),@X[4],$t0
1051 vmovdqa $t1,0x10(%rsp)
1052 vpaddq 0x50($Tbl),@X[5],$t1
1053 vmovdqa $t2,0x20(%rsp)
1054 vpaddq 0x60($Tbl),@X[6],$t2
1055 vmovdqa $t3,0x30(%rsp)
1056 vpaddq 0x70($Tbl),@X[7],$t3
1057 vmovdqa $t0,0x40(%rsp)
1059 vmovdqa $t1,0x50(%rsp)
1061 vmovdqa $t2,0x60(%rsp)
1063 vmovdqa $t3,0x70(%rsp)
1071 sub XOP_512_00_47 () {
1075 my @insns = (&$body,&$body); # 52 instructions
1077 &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..2]
1078 eval(shift(@insns));
1079 eval(shift(@insns));
1080 &vpalignr ($t3,@X[5],@X[4],$SZ); # X[9..10]
1081 eval(shift(@insns));
1082 eval(shift(@insns));
1083 &vprotq ($t1,$t0,8*$SZ-$sigma0[1]);
1084 eval(shift(@insns));
1085 eval(shift(@insns));
1086 &vpsrlq ($t0,$t0,$sigma0[2]);
1087 eval(shift(@insns));
1088 eval(shift(@insns));
1089 &vpaddq (@X[0],@X[0],$t3); # X[0..1] += X[9..10]
1090 eval(shift(@insns));
1091 eval(shift(@insns));
1092 eval(shift(@insns));
1093 eval(shift(@insns));
1094 &vprotq ($t2,$t1,$sigma0[1]-$sigma0[0]);
1095 eval(shift(@insns));
1096 eval(shift(@insns));
1097 &vpxor ($t0,$t0,$t1);
1098 eval(shift(@insns));
1099 eval(shift(@insns));
1100 eval(shift(@insns));
1101 eval(shift(@insns));
1102 &vprotq ($t3,@X[7],8*$SZ-$sigma1[1]);
1103 eval(shift(@insns));
1104 eval(shift(@insns));
1105 &vpxor ($t0,$t0,$t2); # sigma0(X[1..2])
1106 eval(shift(@insns));
1107 eval(shift(@insns));
1108 &vpsrlq ($t2,@X[7],$sigma1[2]);
1109 eval(shift(@insns));
1110 eval(shift(@insns));
1111 &vpaddq (@X[0],@X[0],$t0); # X[0..1] += sigma0(X[1..2])
1112 eval(shift(@insns));
1113 eval(shift(@insns));
1114 &vprotq ($t1,$t3,$sigma1[1]-$sigma1[0]);
1115 eval(shift(@insns));
1116 eval(shift(@insns));
1117 &vpxor ($t3,$t3,$t2);
1118 eval(shift(@insns));
1119 eval(shift(@insns));
1120 eval(shift(@insns));
1121 eval(shift(@insns));
1122 &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
1123 eval(shift(@insns));
1124 eval(shift(@insns));
1125 eval(shift(@insns));
1126 eval(shift(@insns));
1127 &vpaddq (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
1128 eval(shift(@insns));
1129 eval(shift(@insns));
1130 eval(shift(@insns));
1131 eval(shift(@insns));
1132 &vpaddq ($t2,@X[0],16*$j."($Tbl)");
1133 foreach (@insns) { eval; } # remaining instructions
1134 &vmovdqa (16*$j."(%rsp)",$t2);
1137 for ($i=0,$j=0; $j<8; $j++) {
1138 &XOP_512_00_47($j,\&body_00_15,@X);
1139 push(@X,shift(@X)); # rotate(@X)
1141 &cmpb ($SZ-1+16*$SZ."($Tbl)",0);
1142 &jne (".Lxop_00_47");
1144 for ($i=0; $i<16; ) {
1145 foreach(body_00_15()) { eval; }
1153 lea 16*$SZ($inp),$inp
1177 $code.=<<___ if ($win64);
1178 movaps 16*$SZ+32(%rsp),%xmm6
1179 movaps 16*$SZ+48(%rsp),%xmm7
1180 movaps 16*$SZ+64(%rsp),%xmm8
1181 movaps 16*$SZ+80(%rsp),%xmm9
1183 $code.=<<___ if ($win64 && $SZ>4);
1184 movaps 16*$SZ+96(%rsp),%xmm10
1185 movaps 16*$SZ+112(%rsp),%xmm11
1197 .size ${func}_xop,.-${func}_xop
1200 ######################################################################
1201 # AVX+shrd code path
1203 local *ror = sub { &shrd($_[0],@_) };
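# "shrd" with both operands being the same register is an exact
# substitute for "ror", i.e. shrd \$n,%reg,%reg == ror \$n,%reg, and
# was observed to be faster on Sandy Bridge [see the (**) footnote in
# the performance table above], hence this local alias for the AVX path.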
1206 .type ${func}_avx,\@function,4
1216 mov %rsp,%r11 # copy %rsp
1217 shl \$4,%rdx # num*16
1218 sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
1219 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
1220 and \$-64,%rsp # align stack frame
1221 mov $ctx,$_ctx # save ctx, 1st arg
1222 mov $inp,$_inp # save inp, 2nd arg
1223 mov %rdx,$_end # save end pointer, "3rd" arg
1224 mov %r11,$_rsp # save copy of %rsp
1226 $code.=<<___ if ($win64);
1227 movaps %xmm6,16*$SZ+32(%rsp)
1228 movaps %xmm7,16*$SZ+48(%rsp)
1229 movaps %xmm8,16*$SZ+64(%rsp)
1230 movaps %xmm9,16*$SZ+80(%rsp)
1232 $code.=<<___ if ($win64 && $SZ>4);
1233 movaps %xmm10,16*$SZ+96(%rsp)
1234 movaps %xmm11,16*$SZ+112(%rsp)
1249 if ($SZ==4) { # SHA256
1250 my @X = map("%xmm$_",(0..3));
1251 my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
1254 vmovdqa $TABLE+`$SZ*$rounds`+16(%rip),$t4
1255 vmovdqa $TABLE+`$SZ*$rounds`+32(%rip),$t5
1259 vmovdqa $TABLE+`$SZ*$rounds`(%rip),$t3
1260 vmovdqu 0x00($inp),@X[0]
1261 vmovdqu 0x10($inp),@X[1]
1262 vmovdqu 0x20($inp),@X[2]
1263 vmovdqu 0x30($inp),@X[3]
1264 vpshufb $t3,@X[0],@X[0]
1265 lea $TABLE(%rip),$Tbl
1266 vpshufb $t3,@X[1],@X[1]
1267 vpshufb $t3,@X[2],@X[2]
1268 vpaddd 0x00($Tbl),@X[0],$t0
1269 vpshufb $t3,@X[3],@X[3]
1270 vpaddd 0x10($Tbl),@X[1],$t1
1271 vpaddd 0x20($Tbl),@X[2],$t2
1272 vpaddd 0x30($Tbl),@X[3],$t3
1273 vmovdqa $t0,0x00(%rsp)
1275 vmovdqa $t1,0x10(%rsp)
1277 vmovdqa $t2,0x20(%rsp)
1279 vmovdqa $t3,0x30(%rsp)
1287 sub Xupdate_256_AVX () {
1289 '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4]
1290 '&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12]
1291 '&vpsrld ($t2,$t0,$sigma0[0]);',
1292 '&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12]
1293 '&vpsrld ($t3,$t0,$sigma0[2])',
1294 '&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);',
1295 '&vpxor ($t0,$t3,$t2)',
1296 '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
1297 '&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);',
1298 '&vpxor ($t0,$t0,$t1)',
1299 '&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);',
1300 '&vpxor ($t0,$t0,$t2)',
1301 '&vpsrld ($t2,$t3,$sigma1[2]);',
1302 '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4])
1303 '&vpsrlq ($t3,$t3,$sigma1[0]);',
1304 '&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4])
1305 '&vpxor ($t2,$t2,$t3);',
1306 '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
1307 '&vpxor ($t2,$t2,$t3)',
1308 '&vpshufb ($t2,$t2,$t4)', # sigma1(X[14..15])
1309 '&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15])
1310 '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
1311 '&vpsrld ($t2,$t3,$sigma1[2])',
1312 '&vpsrlq ($t3,$t3,$sigma1[0])',
1313 '&vpxor ($t2,$t2,$t3);',
1314 '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
1315 '&vpxor ($t2,$t2,$t3)',
1316 '&vpshufb ($t2,$t2,$t5)',
1317 '&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17])
1321 sub AVX_256_00_47 () {
1325 my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
1327 foreach (Xupdate_256_AVX()) { # 29 instructions
1329 eval(shift(@insns));
1330 eval(shift(@insns));
1331 eval(shift(@insns));
1333 &vpaddd ($t2,@X[0],16*$j."($Tbl)");
1334 foreach (@insns) { eval; } # remaining instructions
1335 &vmovdqa (16*$j."(%rsp)",$t2);
1338 for ($i=0,$j=0; $j<4; $j++) {
1339 &AVX_256_00_47($j,\&body_00_15,@X);
1340 push(@X,shift(@X)); # rotate(@X)
1342 &cmpb ($SZ-1+16*$SZ."($Tbl)",0);
1343 &jne (".Lavx_00_47");
1345 for ($i=0; $i<16; ) {
1346 foreach(body_00_15()) { eval; }
1350 my @X = map("%xmm$_",(0..7));
1351 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
1357 vmovdqa $TABLE+`$SZ*$rounds`(%rip),$t3
1358 vmovdqu 0x00($inp),@X[0]
1359 lea $TABLE(%rip),$Tbl
1360 vmovdqu 0x10($inp),@X[1]
1361 vmovdqu 0x20($inp),@X[2]
1362 vpshufb $t3,@X[0],@X[0]
1363 vmovdqu 0x30($inp),@X[3]
1364 vpshufb $t3,@X[1],@X[1]
1365 vmovdqu 0x40($inp),@X[4]
1366 vpshufb $t3,@X[2],@X[2]
1367 vmovdqu 0x50($inp),@X[5]
1368 vpshufb $t3,@X[3],@X[3]
1369 vmovdqu 0x60($inp),@X[6]
1370 vpshufb $t3,@X[4],@X[4]
1371 vmovdqu 0x70($inp),@X[7]
1372 vpshufb $t3,@X[5],@X[5]
1373 vpaddq 0x00($Tbl),@X[0],$t0
1374 vpshufb $t3,@X[6],@X[6]
1375 vpaddq 0x10($Tbl),@X[1],$t1
1376 vpshufb $t3,@X[7],@X[7]
1377 vpaddq 0x20($Tbl),@X[2],$t2
1378 vpaddq 0x30($Tbl),@X[3],$t3
1379 vmovdqa $t0,0x00(%rsp)
1380 vpaddq 0x40($Tbl),@X[4],$t0
1381 vmovdqa $t1,0x10(%rsp)
1382 vpaddq 0x50($Tbl),@X[5],$t1
1383 vmovdqa $t2,0x20(%rsp)
1384 vpaddq 0x60($Tbl),@X[6],$t2
1385 vmovdqa $t3,0x30(%rsp)
1386 vpaddq 0x70($Tbl),@X[7],$t3
1387 vmovdqa $t0,0x40(%rsp)
1389 vmovdqa $t1,0x50(%rsp)
1391 vmovdqa $t2,0x60(%rsp)
1393 vmovdqa $t3,0x70(%rsp)
1401 sub Xupdate_512_AVX () {
1403 '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..2]
1404 '&vpalignr ($t3,@X[5],@X[4],$SZ)', # X[9..10]
1405 '&vpsrlq ($t2,$t0,$sigma0[0]);',
1406 '&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += X[9..10]
1407 '&vpsrlq ($t3,$t0,$sigma0[2])',
1408 '&vpsllq ($t1,$t0,8*$SZ-$sigma0[1]);',
1409 '&vpxor ($t0,$t3,$t2)',
1410 '&vpsrlq ($t2,$t2,$sigma0[1]-$sigma0[0]);',
1411 '&vpxor ($t0,$t0,$t1)',
1412 '&vpsllq ($t1,$t1,$sigma0[1]-$sigma0[0]);',
1413 '&vpxor ($t0,$t0,$t2)',
1414 '&vpsrlq ($t3,@X[7],$sigma1[2]);',
1415 '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..2])
1416 '&vpsllq ($t2,@X[7],8*$SZ-$sigma1[1])',
1417 '&vpaddq (@X[0],@X[0],$t0)', # X[0..1] += sigma0(X[1..2])
1418 '&vpsrlq ($t1,@X[7],$sigma1[0]);',
1419 '&vpxor ($t3,$t3,$t2)',
1420 '&vpsllq ($t2,$t2,$sigma1[1]-$sigma1[0]);',
1421 '&vpxor ($t3,$t3,$t1)',
1422 '&vpsrlq ($t1,$t1,$sigma1[1]-$sigma1[0]);',
1423 '&vpxor ($t3,$t3,$t2)',
1424 '&vpxor ($t3,$t3,$t1)', # sigma1(X[14..15])
1425 '&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
1429 sub AVX_512_00_47 () {
1433 my @insns = (&$body,&$body); # 52 instructions
1435 foreach (Xupdate_512_AVX()) { # 23 instructions
1437 eval(shift(@insns));
1438 eval(shift(@insns));
1440 &vpaddq ($t2,@X[0],16*$j."($Tbl)");
1441 foreach (@insns) { eval; } # remaining instructions
1442 &vmovdqa (16*$j."(%rsp)",$t2);
1445 for ($i=0,$j=0; $j<8; $j++) {
1446 &AVX_512_00_47($j,\&body_00_15,@X);
1447 push(@X,shift(@X)); # rotate(@X)
1449 &cmpb ($SZ-1+16*$SZ."($Tbl)",0);
1450 &jne (".Lavx_00_47");
1452 for ($i=0; $i<16; ) {
1453 foreach(body_00_15()) { eval; }
1461 lea 16*$SZ($inp),$inp
1485 $code.=<<___ if ($win64);
1486 movaps 16*$SZ+32(%rsp),%xmm6
1487 movaps 16*$SZ+48(%rsp),%xmm7
1488 movaps 16*$SZ+64(%rsp),%xmm8
1489 movaps 16*$SZ+80(%rsp),%xmm9
1491 $code.=<<___ if ($win64 && $SZ>4);
1492 movaps 16*$SZ+96(%rsp),%xmm10
1493 movaps 16*$SZ+112(%rsp),%xmm11
1505 .size ${func}_avx,.-${func}_avx
1509 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1510 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
1518 .extern __imp_RtlVirtualUnwind
1519 .type se_handler,\@abi-omnipotent
1533 mov 120($context),%rax # pull context->Rax
1534 mov 248($context),%rbx # pull context->Rip
1536 mov 8($disp),%rsi # disp->ImageBase
1537 mov 56($disp),%r11 # disp->HandlerData
1539 mov 0(%r11),%r10d # HandlerData[0]
1540 lea (%rsi,%r10),%r10 # prologue label
1541 cmp %r10,%rbx # context->Rip<prologue label
1544 mov 152($context),%rax # pull context->Rsp
1546 mov 4(%r11),%r10d # HandlerData[1]
1547 lea (%rsi,%r10),%r10 # epilogue label
1548 cmp %r10,%rbx # context->Rip>=epilogue label
1551 mov %rax,%rsi # put aside Rsp
1552 mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
1561 mov %rbx,144($context) # restore context->Rbx
1562 mov %rbp,160($context) # restore context->Rbp
1563 mov %r12,216($context) # restore context->R12
1564 mov %r13,224($context) # restore context->R13
1565 mov %r14,232($context) # restore context->R14
1566 mov %r15,240($context) # restore context->R15
1568 lea .Lepilogue(%rip),%r10
1570 jb .Lin_prologue # non-AVX code
1572 lea 16*$SZ+4*8(%rsi),%rsi # xmm save area
1573 lea 512($context),%rdi # &context.Xmm6
1574 mov \$`$SZ==4?8:12`,%ecx
1575 .long 0xa548f3fc # cld; rep movsq
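	# copy the on-stack xmm save area back into the CONTEXT record,
	# 8 qwords for xmm6-9 in the SHA256 case or 12 for xmm6-11 in
	# the SHA512 case, so the unwinder restores the non-volatile
	# xmm registers clobbered by the SIMD code paths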
1580 mov %rax,152($context) # restore context->Rsp
1581 mov %rsi,168($context) # restore context->Rsi
1582 mov %rdi,176($context) # restore context->Rdi
1584 mov 40($disp),%rdi # disp->ContextRecord
1585 mov $context,%rsi # context
1586 mov \$154,%ecx # sizeof(CONTEXT)/8, qword count for movsq
1587 .long 0xa548f3fc # cld; rep movsq
1590 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1591 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1592 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1593 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1594 mov 40(%rsi),%r10 # disp->ContextRecord
1595 lea 56(%rsi),%r11 # &disp->HandlerData
1596 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1597 mov %r10,32(%rsp) # arg5
1598 mov %r11,40(%rsp) # arg6
1599 mov %r12,48(%rsp) # arg7
1600 mov %rcx,56(%rsp) # arg8, (NULL)
1601 call *__imp_RtlVirtualUnwind(%rip)
1603 mov \$1,%eax # ExceptionContinueSearch
1615 .size se_handler,.-se_handler
1619 .rva .LSEH_begin_$func
1620 .rva .LSEH_end_$func
1621 .rva .LSEH_info_$func
1623 $code.=<<___ if ($SZ==4);
1624 .rva .LSEH_begin_${func}_ssse3
1625 .rva .LSEH_end_${func}_ssse3
1626 .rva .LSEH_info_${func}_ssse3
1628 $code.=<<___ if ($avx && $SZ==8);
1629 .rva .LSEH_begin_${func}_xop
1630 .rva .LSEH_end_${func}_xop
1631 .rva .LSEH_info_${func}_xop
1633 $code.=<<___ if ($avx);
1634 .rva .LSEH_begin_${func}_avx
1635 .rva .LSEH_end_${func}_avx
1636 .rva .LSEH_info_${func}_avx
1644 .rva .Lprologue,.Lepilogue # HandlerData[]
1646 $code.=<<___ if ($SZ==4);
1647 .LSEH_info_${func}_ssse3:
1650 .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
1652 $code.=<<___ if ($avx && $SZ==8);
1653 .LSEH_info_${func}_xop:
1656 .rva .Lprologue_xop,.Lepilogue_xop # HandlerData[]
1658 $code.=<<___ if ($avx);
1659 .LSEH_info_${func}_avx:
1662 .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
1666 $code =~ s/\`([^\`]*)\`/eval $1/gem;