3 ##############################################################################
5 # Copyright (c) 2012, Intel Corporation #
7 # All rights reserved. #
9 # Redistribution and use in source and binary forms, with or without #
10 # modification, are permitted provided that the following conditions are #
13 # * Redistributions of source code must retain the above copyright #
14 # notice, this list of conditions and the following disclaimer. #
16 # * Redistributions in binary form must reproduce the above copyright #
17 # notice, this list of conditions and the following disclaimer in the #
18 # documentation and/or other materials provided with the #
21 # * Neither the name of the Intel Corporation nor the names of its #
22 # contributors may be used to endorse or promote products derived from #
23 # this software without specific prior written permission. #
26 # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY #
27 # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
28 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
29 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
30 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
31 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
32 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
33 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
34 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
35 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
36 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
38 ##############################################################################
39 # Developers and authors: #
40 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
41 # (1) Intel Architecture Group, Microprocessor and Chipset Development, #
42 # Israel Development Center, Haifa, Israel #
43 # (2) University of Haifa #
44 ##############################################################################
46 # [1] S. Gueron, "Efficient Software Implementations of Modular #
47 # Exponentiation", http://eprint.iacr.org/2011/239 #
48 # [2] S. Gueron, V. Krasnov. "Speeding up Big-Numbers Squaring". #
49 # IEEE Proceedings of 9th International Conference on Information #
50 # Technology: New Generations (ITNG 2012), 821-823 (2012). #
51 # [3] S. Gueron, Efficient Software Implementations of Modular Exponentiation#
52 # Journal of Cryptographic Engineering 2:31-43 (2012). #
53 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
54 # resistant 512-bit and 1024-bit modular exponentiation for optimizing #
55 # RSA1024 and RSA2048 on x86_64 platforms", #
56 # http://rt.openssl.org/Ticket/Display.html?id=2582&user=guest&pass=guest#
57 ##############################################################################
59 # While original submission covers 512- and 1024-bit exponentiation,
60 # this module is limited to 512-bit version only (and as such
61 # accelerates RSA1024 sign). This is because improvement for longer
62 # keys is not high enough to justify the effort, highest measured
63 # was ~5% on Westmere. [This is relative to OpenSSL 1.0.2, upcoming
64 # for the moment of this writing!] Nor does this module implement
65 # "monolithic" complete exponentiation jumbo-subroutine, but adheres
66 # to more modular mixture of C and assembly. And it's optimized even
67 # for processors other than Intel Core family (see table below for
68 # improvement coefficients).
71 # RSA1024 sign/sec this/original |this/rsax(*) this/fips(*)
72 # ----------------+---------------------------
73 # Opteron +13% |+5% +20%
74 # Bulldozer -0% |-1% +10%
76 # Westmere +5% |+14% +17%
77 # Sandy Bridge +2% |+12% +29%
78 # Ivy Bridge +1% |+11% +35%
79 # Haswell(**) -0% |+12% +39%
81 # VIA Nano +70% |+9% +25%
83 # (*) rsax engine and fips numbers are presented for reference
85 # (**) MULX was attempted, but found to give only marginal improvement;
# If the first argument ("flavour") looks like a filename (contains a
# dot) it is actually the output path; shift it over and unset flavour.
89 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
# Select Win64 ABI when the flavour or output name indicates a Windows
# assembler dialect (nasm/masm/mingw64, or a .asm output file).
91 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
# Locate the x86_64 perlasm translator relative to this script's own
# directory: first alongside the script, then two levels up under
# perlasm/.  Die if neither candidate exists.
93 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
94 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
95 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
96 die "can't locate x86_64-xlate.pl";
# Pipe the generated code through the perlasm translator, which writes
# the final assembler to $output (or stdout).  Quote the interpreter
# path ($^X) in case it contains spaces, and fail loudly if the pipe
# cannot be opened -- the original silently ignored open() failure,
# which would later surface only as mysteriously empty output.
98 open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
# Probe the C compiler's bundled assembler; a GNU assembler 2.x or
# newer presumably enables the ADCX/ADOX/MULX ($addx) code paths --
# the body setting $addx is not visible here, TODO confirm.
101 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
102 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
# On Win64 with nasm, fall back to probing NASM's version directly;
# a 2.x NASM presumably enables the ADCX/ADOX/MULX ($addx) paths --
# body not visible here, TODO confirm.
106 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
107 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
# Last resort on Win64 with masm: probe ml64's reported version;
# presumably a sufficiently new ml64 enables the $addx paths as well --
# body not visible here, TODO confirm.
111 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
112 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
116 ($out, $inp, $mod) = ("%rdi", "%rsi", "%rbp"); # common internal API
118 my ($out,$inp,$mod,$n0,$times) = ("%rdi","%rsi","%rdx","%rcx","%r8d");
123 .extern OPENSSL_ia32cap_P
126 .type rsaz_512_sqr,\@function,5
128 rsaz_512_sqr: # 25-29% faster than rsaz_512_mul
138 movq $mod, %rbp # common argument
143 $code.=<<___ if ($addx);
145 andl OPENSSL_ia32cap_P+8(%rip),%r11d
146 cmpl \$0x80100,%r11d # check for MULX and ADO/CX
154 movl $times,128+8(%rsp)
198 addq %r8, %r8 #shlq \$1, %r8
200 adcq %r9, %r9 #shld \$1, %r8, %r9
261 lea (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
263 adcq %r11, %r11 #shld \$1, %r10, %r11
301 lea (%rbx,%r12,2), %r12 #shld \$1, %rbx, %r12
319 leaq (%r10,%r13,2), %r13 #shld \$1, %r12, %r13
349 leaq (%rcx,%r14,2), %r14 #shld \$1, %rcx, %r14
367 leaq (%r12,%r15,2),%r15 #shld \$1, %r14, %r15
392 leaq (%rbx,%r8,2), %r8 #shld \$1, %rbx, %r8
407 leaq (%r12,%r9,2), %r9 #shld \$1, %r8, %r9
431 leaq (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
439 leaq (%r15,%r11,2), %r11 #shld \$1, %r10, %r11
460 adcq %r12, %r12 #shld \$1, %rbx, %r12
461 adcq %r13, %r13 #shld \$1, %r12, %r13
462 adcq %r14, %r14 #shld \$1, %r13, %r14
492 call __rsaz_512_reduce
504 call __rsaz_512_subtract
508 movl 128+8(%rsp), $times
520 movl $times,128+8(%rsp)
521 movq $out, %xmm0 # off-load
522 movq %rbp, %xmm1 # off-load
526 mulx 16($inp), %rcx, %r10
527 xor %rbp, %rbp # cf=0, of=0
529 mulx 24($inp), %rax, %r11
532 mulx 32($inp), %rcx, %r12
535 mulx 40($inp), %rax, %r13
538 .byte 0xc4,0x62,0xf3,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($inp), %rcx, %r14
542 .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r15
544 adcx %rbp, %r15 # %rbp is 0
551 mulx %rdx, %rax, %rdx
560 mulx 16($inp), %rax, %rbx
564 .byte 0xc4,0x62,0xc3,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r8
568 mulx 32($inp), %rax, %rbx
572 mulx 40($inp), $out, %r8
576 .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rbx
580 .byte 0xc4,0x62,0xc3,0xf6,0x86,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r8
590 mulx %rdx, %rax, %rcx
597 .byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00 # mov %r10, 24(%rsp)
600 .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r9
604 mulx 32($inp), %rax, %rcx
608 mulx 40($inp), $out, %r9
612 .byte 0xc4,0xe2,0xfb,0xf6,0x8e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rcx
616 .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r9
626 mulx %rdx, %rax, %rdx
633 .byte 0x4c,0x89,0xa4,0x24,0x28,0x00,0x00,0x00 # mov %r12, 40(%rsp)
636 .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 32($inp), %rax, %rbx
640 mulx 40($inp), $out, %r10
644 mulx 48($inp), %rax, %rbx
648 mulx 56($inp), $out, %r10
659 mulx %rdx, %rax, %rdx
669 .byte 0xc4,0x62,0xc3,0xf6,0x9e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r11
673 mulx 48($inp), %rax, %rcx
677 mulx 56($inp), $out, %r11
687 mulx %rdx, %rax, %rdx
697 .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rbx
701 .byte 0xc4,0x62,0xc3,0xf6,0xa6,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r12
711 mulx %rdx, %rax, %rdx
721 .byte 0xc4,0x62,0xfb,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r13
731 mulx %rdx, %rax, %rdx
737 .byte 0x4c,0x89,0x9c,0x24,0x60,0x00,0x00,0x00 # mov %r11, 96(%rsp)
738 .byte 0x4c,0x89,0xa4,0x24,0x68,0x00,0x00,0x00 # mov %r12, 104(%rsp)
741 mulx %rdx, %rax, %rdx
753 movq 128(%rsp), %rdx # pull $n0
763 call __rsaz_512_reducex
775 call __rsaz_512_subtract
779 movl 128+8(%rsp), $times
790 leaq 128+24+48(%rsp), %rax
800 .size rsaz_512_sqr,.-rsaz_512_sqr
804 my ($out,$ap,$bp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx","%r8");
807 .type rsaz_512_mul,\@function,5
819 movq $out, %xmm0 # off-load arguments
823 $code.=<<___ if ($addx);
825 andl OPENSSL_ia32cap_P+8(%rip),%r11d
826 cmpl \$0x80100,%r11d # check for MULX and ADO/CX
830 movq ($bp), %rbx # pass b[0]
831 movq $bp, %rbp # pass argument
846 call __rsaz_512_reduce
848 $code.=<<___ if ($addx);
853 movq $bp, %rbp # pass argument
854 movq ($bp), %rdx # pass b[0]
860 movq 128(%rsp), %rdx # pull $n0
870 call __rsaz_512_reducex
884 call __rsaz_512_subtract
886 leaq 128+24+48(%rsp), %rax
896 .size rsaz_512_mul,.-rsaz_512_mul
900 my ($out,$ap,$bp,$mod,$n0,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
902 .globl rsaz_512_mul_gather4
903 .type rsaz_512_mul_gather4,\@function,6
905 rsaz_512_mul_gather4:
916 $code.=<<___ if ($addx);
918 andl OPENSSL_ia32cap_P+8(%rip),%r11d
919 cmpl \$0x80100,%r11d # check for MULX and ADO/CX
923 movl 64($bp,$pwr,4), %eax
924 movq $out, %xmm0 # off-load arguments
925 movl ($bp,$pwr,4), %ebx
933 leaq 128($bp,$pwr,4), %rbp
934 mulq %rbx # 0 iteration
1010 movd 64(%rbp), %xmm5
1061 leaq 128(%rbp), %rbp
1065 jnz .Loop_mul_gather
1088 call __rsaz_512_reduce
1090 $code.=<<___ if ($addx);
1091 jmp .Lmul_gather_tail
1095 mov 64($bp,$pwr,4), %eax
1096 movq $out, %xmm0 # off-load arguments
1097 lea 128($bp,$pwr,4), %rbp
1098 mov ($bp,$pwr,4), %edx
1104 mulx ($ap), %rbx, %r8 # 0 iteration
1106 xor %edi, %edi # cf=0, of=0
1108 mulx 8($ap), %rax, %r9
1111 mulx 16($ap), %rbx, %r10
1112 movd 64(%rbp), %xmm5
1115 mulx 24($ap), %rax, %r11
1119 mulx 32($ap), %rbx, %r12
1123 mulx 40($ap), %rax, %r13
1126 mulx 48($ap), %rbx, %r14
1130 mulx 56($ap), %rax, %r15
1135 adcx %rdi, %r15 # %rdi is 0
1138 jmp .Loop_mulx_gather
1142 mulx ($ap), %rax, %r8
1146 mulx 8($ap), %rax, %r9
1147 .byte 0x66,0x0f,0x6e,0xa5,0x00,0x00,0x00,0x00 # movd (%rbp), %xmm4
1151 mulx 16($ap), %rax, %r10
1152 movd 64(%rbp), %xmm5
1157 .byte 0xc4,0x62,0xfb,0xf6,0x9e,0x18,0x00,0x00,0x00 # mulx 24($ap), %rax, %r11
1163 mulx 32($ap), %rax, %r12
1167 mulx 40($ap), %rax, %r13
1171 .byte 0xc4,0x62,0xfb,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($ap), %rax, %r14
1175 mulx 56($ap), %rax, %r15
1177 mov %rbx, 64(%rsp,%rcx,8)
1181 adcx %rdi, %r15 # cf=0
1184 jnz .Loop_mulx_gather
1188 mov %r10, 64+16(%rsp)
1189 mov %r11, 64+24(%rsp)
1190 mov %r12, 64+32(%rsp)
1191 mov %r13, 64+40(%rsp)
1192 mov %r14, 64+48(%rsp)
1193 mov %r15, 64+56(%rsp)
1198 mov 128(%rsp), %rdx # pull $n0
1208 call __rsaz_512_reducex
1218 adcq 104(%rsp), %r13
1219 adcq 112(%rsp), %r14
1220 adcq 120(%rsp), %r15
1223 call __rsaz_512_subtract
1225 leaq 128+24+48(%rsp), %rax
1226 movq -48(%rax), %r15
1227 movq -40(%rax), %r14
1228 movq -32(%rax), %r13
1229 movq -24(%rax), %r12
1230 movq -16(%rax), %rbp
1233 .Lmul_gather4_epilogue:
1235 .size rsaz_512_mul_gather4,.-rsaz_512_mul_gather4
1239 my ($out,$ap,$mod,$n0,$tbl,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
1241 .globl rsaz_512_mul_scatter4
1242 .type rsaz_512_mul_scatter4,\@function,6
1244 rsaz_512_mul_scatter4:
1253 .Lmul_scatter4_body:
1254 leaq ($tbl,$pwr,4), $tbl
1255 movq $out, %xmm0 # off-load arguments
1262 $code.=<<___ if ($addx);
1263 movl \$0x80100,%r11d
1264 andl OPENSSL_ia32cap_P+8(%rip),%r11d
1265 cmpl \$0x80100,%r11d # check for MULX and ADO/CX
1269 movq ($out),%rbx # pass b[0]
1284 call __rsaz_512_reduce
1286 $code.=<<___ if ($addx);
1287 jmp .Lmul_scatter_tail
1291 movq ($out), %rdx # pass b[0]
1292 call __rsaz_512_mulx
1297 movq 128(%rsp), %rdx # pull $n0
1307 call __rsaz_512_reducex
1317 adcq 104(%rsp), %r13
1318 adcq 112(%rsp), %r14
1319 adcq 120(%rsp), %r15
1323 call __rsaz_512_subtract
1325 movl %r8d, 64*0($inp) # scatter
1327 movl %r9d, 64*2($inp)
1329 movl %r10d, 64*4($inp)
1331 movl %r11d, 64*6($inp)
1333 movl %r12d, 64*8($inp)
1335 movl %r13d, 64*10($inp)
1337 movl %r14d, 64*12($inp)
1339 movl %r15d, 64*14($inp)
1341 movl %r8d, 64*1($inp)
1342 movl %r9d, 64*3($inp)
1343 movl %r10d, 64*5($inp)
1344 movl %r11d, 64*7($inp)
1345 movl %r12d, 64*9($inp)
1346 movl %r13d, 64*11($inp)
1347 movl %r14d, 64*13($inp)
1348 movl %r15d, 64*15($inp)
1350 leaq 128+24+48(%rsp), %rax
1351 movq -48(%rax), %r15
1352 movq -40(%rax), %r14
1353 movq -32(%rax), %r13
1354 movq -24(%rax), %r12
1355 movq -16(%rax), %rbp
1358 .Lmul_scatter4_epilogue:
1360 .size rsaz_512_mul_scatter4,.-rsaz_512_mul_scatter4
1364 my ($out,$inp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx");
1366 .globl rsaz_512_mul_by_one
1367 .type rsaz_512_mul_by_one,\@function,4
1369 rsaz_512_mul_by_one:
1380 $code.=<<___ if ($addx);
1381 movl OPENSSL_ia32cap_P+8(%rip),%eax
1384 movq $mod, %rbp # reassign argument
1397 movdqa %xmm0, (%rsp)
1398 movdqa %xmm0, 16(%rsp)
1399 movdqa %xmm0, 32(%rsp)
1400 movdqa %xmm0, 48(%rsp)
1401 movdqa %xmm0, 64(%rsp)
1402 movdqa %xmm0, 80(%rsp)
1403 movdqa %xmm0, 96(%rsp)
1405 $code.=<<___ if ($addx);
1407 cmpl \$0x80100,%eax # check for MULX and ADO/CX
1411 call __rsaz_512_reduce
1413 $code.=<<___ if ($addx);
1417 movq 128(%rsp), %rdx # pull $n0
1418 call __rsaz_512_reducex
1431 leaq 128+24+48(%rsp), %rax
1432 movq -48(%rax), %r15
1433 movq -40(%rax), %r14
1434 movq -32(%rax), %r13
1435 movq -24(%rax), %r12
1436 movq -16(%rax), %rbp
1439 .Lmul_by_one_epilogue:
1441 .size rsaz_512_mul_by_one,.-rsaz_512_mul_by_one
1444 { # __rsaz_512_reduce
1446 # input: %r8-%r15, %rbp - mod, 128(%rsp) - n0
1448 # clobbers: everything except %rbp and %rdi
1450 .type __rsaz_512_reduce,\@abi-omnipotent
1454 imulq 128+8(%rsp), %rbx
1457 jmp .Lreduction_loop
1488 movq 128+8(%rsp), %rsi
1529 jne .Lreduction_loop
1532 .size __rsaz_512_reduce,.-__rsaz_512_reduce
1536 # __rsaz_512_reducex
1538 # input: %r8-%r15, %rbp - mod, 128(%rsp) - n0
1540 # clobbers: everything except %rbp and %rdi
1542 .type __rsaz_512_reducex,\@abi-omnipotent
1545 #movq 128+8(%rsp), %rdx # pull $n0
1547 xorq %rsi, %rsi # cf=0,of=0
1549 jmp .Lreduction_loopx
1554 mulx 0(%rbp), %rax, %r8
1558 mulx 8(%rbp), %rax, %r9
1562 mulx 16(%rbp), %rbx, %r10
1566 mulx 24(%rbp), %rbx, %r11
1570 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 32(%rbp), %rbx, %r12
1576 mulx 128+8(%rsp), %rbx, %rdx
1579 mulx 40(%rbp), %rax, %r13
1583 .byte 0xc4,0x62,0xfb,0xf6,0xb5,0x30,0x00,0x00,0x00 # mulx 48(%rbp), %rax, %r14
1587 mulx 56(%rbp), %rax, %r15
1590 adox %rsi, %r15 # %rsi is 0
1591 adcx %rsi, %r15 # cf=0
1594 jne .Lreduction_loopx
1597 .size __rsaz_512_reducex,.-__rsaz_512_reducex
1600 { # __rsaz_512_subtract
1601 # input: %r8-%r15, %rdi - $out, %rbp - $mod, %rcx - mask
1603 # clobbers: everything but %rdi, %rsi and %rbp
1605 .type __rsaz_512_subtract,\@abi-omnipotent
1607 __rsaz_512_subtract:
1661 .size __rsaz_512_subtract,.-__rsaz_512_subtract
1666 # input: %rsi - ap, %rbp - bp
1668 # clobbers: everything
1669 my ($ap,$bp) = ("%rsi","%rbp");
1671 .type __rsaz_512_mul,\@abi-omnipotent
1812 .size __rsaz_512_mul,.-__rsaz_512_mul
1818 # input: %rsi - ap, %rbp - bp
1820 # clobbers: everything
1821 my ($ap,$bp,$zero) = ("%rsi","%rbp","%rdi");
1823 .type __rsaz_512_mulx,\@abi-omnipotent
1826 mulx ($ap), %rbx, %r8 # initial %rdx preloaded by caller
1829 mulx 8($ap), %rax, %r9
1832 mulx 16($ap), %rbx, %r10
1835 mulx 24($ap), %rax, %r11
1838 mulx 32($ap), %rbx, %r12
1841 mulx 40($ap), %rax, %r13
1844 mulx 48($ap), %rbx, %r14
1847 mulx 56($ap), %rax, %r15
1853 xor $zero, $zero # cf=0,of=0
1859 mulx ($ap), %rax, %r8
1863 mulx 8($ap), %rax, %r9
1867 mulx 16($ap), %rax, %r10
1871 mulx 24($ap), %rax, %r11
1875 .byte 0x3e,0xc4,0x62,0xfb,0xf6,0xa6,0x20,0x00,0x00,0x00 # mulx 32($ap), %rax, %r12
1879 mulx 40($ap), %rax, %r13
1883 mulx 48($ap), %rax, %r14
1887 mulx 56($ap), %rax, %r15
1888 movq 64($bp,%rcx,8), %rdx
1889 movq %rbx, 8+64-8(%rsp,%rcx,8)
1892 adcx $zero, %r15 # cf=0
1898 mulx ($ap), %rax, %r8
1902 .byte 0xc4,0x62,0xfb,0xf6,0x8e,0x08,0x00,0x00,0x00 # mulx 8($ap), %rax, %r9
1906 .byte 0xc4,0x62,0xfb,0xf6,0x96,0x10,0x00,0x00,0x00 # mulx 16($ap), %rax, %r10
1910 mulx 24($ap), %rax, %r11
1914 mulx 32($ap), %rax, %r12
1918 mulx 40($ap), %rax, %r13
1922 .byte 0xc4,0x62,0xfb,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($ap), %rax, %r14
1926 .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($ap), %rax, %r15
1931 mov %rbx, 8+64-8(%rsp)
1933 mov %r9, 8+64+8(%rsp)
1934 mov %r10, 8+64+16(%rsp)
1935 mov %r11, 8+64+24(%rsp)
1936 mov %r12, 8+64+32(%rsp)
1937 mov %r13, 8+64+40(%rsp)
1938 mov %r14, 8+64+48(%rsp)
1939 mov %r15, 8+64+56(%rsp)
1942 .size __rsaz_512_mulx,.-__rsaz_512_mulx
1946 my ($out,$inp,$power)= $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1948 .globl rsaz_512_scatter4
1949 .type rsaz_512_scatter4,\@abi-omnipotent
1952 leaq ($out,$power,4), $out
1962 leaq 128($out), $out
1966 .size rsaz_512_scatter4,.-rsaz_512_scatter4
1968 .globl rsaz_512_gather4
1969 .type rsaz_512_gather4,\@abi-omnipotent
1972 leaq ($inp,$power,4), $inp
1979 leaq 128($inp), $inp
1987 .size rsaz_512_gather4,.-rsaz_512_gather4
1991 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1992 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
2000 .extern __imp_RtlVirtualUnwind
2001 .type se_handler,\@abi-omnipotent
2015 mov 120($context),%rax # pull context->Rax
2016 mov 248($context),%rbx # pull context->Rip
2018 mov 8($disp),%rsi # disp->ImageBase
2019 mov 56($disp),%r11 # disp->HandlerData
2021 mov 0(%r11),%r10d # HandlerData[0]
2022 lea (%rsi,%r10),%r10 # end of prologue label
2023 cmp %r10,%rbx # context->Rip<end of prologue label
2024 jb .Lcommon_seh_tail
2026 mov 152($context),%rax # pull context->Rsp
2028 mov 4(%r11),%r10d # HandlerData[1]
2029 lea (%rsi,%r10),%r10 # epilogue label
2030 cmp %r10,%rbx # context->Rip>=epilogue label
2031 jae .Lcommon_seh_tail
2033 lea 128+24+48(%rax),%rax
2041 mov %rbx,144($context) # restore context->Rbx
2042 mov %rbp,160($context) # restore context->Rbp
2043 mov %r12,216($context) # restore context->R12
2044 mov %r13,224($context) # restore context->R13
2045 mov %r14,232($context) # restore context->R14
2046 mov %r15,240($context) # restore context->R15
2051 mov %rax,152($context) # restore context->Rsp
2052 mov %rsi,168($context) # restore context->Rsi
2053 mov %rdi,176($context) # restore context->Rdi
2055 mov 40($disp),%rdi # disp->ContextRecord
2056 mov $context,%rsi # context
2057 mov \$154,%ecx # sizeof(CONTEXT)
2058 .long 0xa548f3fc # cld; rep movsq
2061 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
2062 mov 8(%rsi),%rdx # arg2, disp->ImageBase
2063 mov 0(%rsi),%r8 # arg3, disp->ControlPc
2064 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
2065 mov 40(%rsi),%r10 # disp->ContextRecord
2066 lea 56(%rsi),%r11 # &disp->HandlerData
2067 lea 24(%rsi),%r12 # &disp->EstablisherFrame
2068 mov %r10,32(%rsp) # arg5
2069 mov %r11,40(%rsp) # arg6
2070 mov %r12,48(%rsp) # arg7
2071 mov %rcx,56(%rsp) # arg8, (NULL)
2072 call *__imp_RtlVirtualUnwind(%rip)
2074 mov \$1,%eax # ExceptionContinueSearch
2086 .size sqr_handler,.-sqr_handler
2090 .rva .LSEH_begin_rsaz_512_sqr
2091 .rva .LSEH_end_rsaz_512_sqr
2092 .rva .LSEH_info_rsaz_512_sqr
2094 .rva .LSEH_begin_rsaz_512_mul
2095 .rva .LSEH_end_rsaz_512_mul
2096 .rva .LSEH_info_rsaz_512_mul
2098 .rva .LSEH_begin_rsaz_512_mul_gather4
2099 .rva .LSEH_end_rsaz_512_mul_gather4
2100 .rva .LSEH_info_rsaz_512_mul_gather4
2102 .rva .LSEH_begin_rsaz_512_mul_scatter4
2103 .rva .LSEH_end_rsaz_512_mul_scatter4
2104 .rva .LSEH_info_rsaz_512_mul_scatter4
2106 .rva .LSEH_begin_rsaz_512_mul_by_one
2107 .rva .LSEH_end_rsaz_512_mul_by_one
2108 .rva .LSEH_info_rsaz_512_mul_by_one
2112 .LSEH_info_rsaz_512_sqr:
2115 .rva .Lsqr_body,.Lsqr_epilogue # HandlerData[]
2116 .LSEH_info_rsaz_512_mul:
2119 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
2120 .LSEH_info_rsaz_512_mul_gather4:
2123 .rva .Lmul_gather4_body,.Lmul_gather4_epilogue # HandlerData[]
2124 .LSEH_info_rsaz_512_mul_scatter4:
2127 .rva .Lmul_scatter4_body,.Lmul_scatter4_epilogue # HandlerData[]
2128 .LSEH_info_rsaz_512_mul_by_one:
2131 .rva .Lmul_by_one_body,.Lmul_by_one_epilogue # HandlerData[]
# perlasm convention: evaluate every backtick-quoted Perl expression
# embedded in the generated text (/e) and splice its result into $code,
# across multiple lines (/g, /m), before the code is printed.
2135 $code =~ s/\`([^\`]*)\`/eval $1/gem;