1 #! /usr/bin/env perl
2 # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9
10 ##############################################################################
11 #                                                                            #
12 #  Copyright (c) 2012, Intel Corporation                                     #
13 #                                                                            #
14 #  All rights reserved.                                                      #
15 #                                                                            #
16 #  Redistribution and use in source and binary forms, with or without        #
17 #  modification, are permitted provided that the following conditions are    #
18 #  met:                                                                      #
19 #                                                                            #
20 #  *  Redistributions of source code must retain the above copyright         #
21 #     notice, this list of conditions and the following disclaimer.          #
22 #                                                                            #
23 #  *  Redistributions in binary form must reproduce the above copyright      #
24 #     notice, this list of conditions and the following disclaimer in the    #
25 #     documentation and/or other materials provided with the                 #
26 #     distribution.                                                          #
27 #                                                                            #
28 #  *  Neither the name of the Intel Corporation nor the names of its         #
29 #     contributors may be used to endorse or promote products derived from   #
30 #     this software without specific prior written permission.               #
31 #                                                                            #
32 #                                                                            #
33 #  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY          #
34 #  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         #
35 #  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        #
36 #  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            #
37 #  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     #
38 #  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       #
39 #  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        #
40 #  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    #
41 #  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      #
42 #  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        #
43 #  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              #
44 #                                                                            #
45 ##############################################################################
46 # Developers and authors:                                                    #
47 # Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
48 # (1) Intel Corporation, Israel Development Center, Haifa, Israel            #
49 # (2) University of Haifa, Israel                                            #
50 ##############################################################################
51 # Reference:                                                                 #
52 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular             #
53 #     Exponentiation,  Using Advanced Vector Instructions Architectures",    #
54 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,   #
56 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012              #
56 # [2] S. Gueron: "Efficient Software Implementations of Modular              #
57 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).  #
58 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE        #
59 #     Proceedings of 9th International Conference on Information Technology: #
60 #     New Generations (ITNG 2012), pp.821-823 (2012)                         #
61 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    #
62 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048      #
63 #     on AVX2 capable x86_64 platforms",                                     #
64 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
65 ##############################################################################
66 #
67 # +13% improvement over original submission by <appro@openssl.org>
68 #
69 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
70 # 2.3GHz Haswell        621             765/+23%        1113/+79%
71 # 2.3GHz Broadwell(**)  688             1200(***)/+74%  1120/+63%
72 #
73 # (*)   if system doesn't support AVX2, for reference purposes;
74 # (**)  scaled to 2.3GHz to simplify comparison;
75 # (***) scalar AD*X code is faster than AVX2 and is preferred code
76 #       path for Broadwell;
77
78 $flavour = shift;
79 $output  = shift;
80 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
81
82 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
83
84 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
85 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
86 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
87 die "can't locate x86_64-xlate.pl";
88
89 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
90                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
91         $avx = ($1>=2.19) + ($1>=2.22);
92         $addx = ($1>=2.23);
93 }
94
95 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
96             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
97         $avx = ($1>=2.09) + ($1>=2.10);
98         $addx = ($1>=2.10);
99 }
100
101 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
102             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
103         $avx = ($1>=10) + ($1>=11);
104         $addx = ($1>=11);
105 }
106
107 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
108         my $ver = $2 + $3/100.0;        # 3.1->3.01, 3.10->3.10
109         $avx = ($ver>=3.0) + ($ver>=3.01);
110         $addx = ($ver>=3.03);
111 }
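# $avx ends up 0/1/2 depending on whether the assembler probed above can
# handle no AVX, AVX, or AVX2 code; the AVX2 routines in this file are
# emitted only when $avx>1.  $addx records whether the assembler accepts
# the ADCX/ADOX mnemonics.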
112
113 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
114 *STDOUT = *OUT;
115
116 if ($avx>1) {{{
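# AMS_WW below stands for Almost Montgomery Squaring, word-by-word;
# AMM_WW further down is the corresponding Almost Montgomery
# Multiplication (see refs. [1]-[4] above).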
117 { # void AMS_WW(
118 my $rp="%rdi";  # BN_ULONG *rp,
119 my $ap="%rsi";  # const BN_ULONG *ap,
120 my $np="%rdx";  # const BN_ULONG *np,
121 my $n0="%ecx";  # const BN_ULONG n0,
122 my $rep="%r8d"; # int repeat);
123
124 # The registers that hold the accumulated redundant result.
125 # The AMM works on 1024-bit operands with a redundant digit size of 29 bits.
126 # Therefore: ceil(1024/29) = 36 digits, at 4 digits per ymm register => 36/4 = 9.
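# Each 64-bit ymm lane carries one 29-bit digit, so a product of two digits
# fits in 58 bits and dozens of such products can be accumulated per lane
# before a carry-propagation (normalization) pass is needed; that is what
# makes the representation "redundant".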
127 my $ACC0="%ymm0";
128 my $ACC1="%ymm1";
129 my $ACC2="%ymm2";
130 my $ACC3="%ymm3";
131 my $ACC4="%ymm4";
132 my $ACC5="%ymm5";
133 my $ACC6="%ymm6";
134 my $ACC7="%ymm7";
135 my $ACC8="%ymm8";
136 my $ACC9="%ymm9";
137 # Registers that hold the currently used broadcast words of bp
138 my $B1="%ymm10";
139 my $B2="%ymm11";
140 # Registers that hold the currently used broadcast words of Y
141 my $Y1="%ymm12";
142 my $Y2="%ymm13";
143 # Helper registers
144 my $TEMP1="%ymm14";
145 my $AND_MASK="%ymm15";
146 # ALU (general-purpose) registers that hold the first words of the ACC
147 my $r0="%r9";
148 my $r1="%r10";
149 my $r2="%r11";
150 my $r3="%r12";
151
152 my $i="%r14d";                  # loop counter
153 my $tmp = "%r15";
154
155 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
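# 32*18 bytes hold the double-length intermediate product (2048 bits need
# ceil(2048/29) = 71 digits, rounded up to 18 ymm vectors); the remaining
# 32*8 bytes hold the doubled input 2*A stored at $aap below.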
156
157 my $aap=$r0;
158 my $tp0="%rbx";
159 my $tp1=$r3;
160 my $tpa=$tmp;
161
162 $np="%r13";                     # reassigned argument
163
164 $code.=<<___;
165 .text
166
167 .globl  rsaz_1024_sqr_avx2
168 .type   rsaz_1024_sqr_avx2,\@function,5
169 .align  64
170 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
171 .cfi_startproc
172         lea     (%rsp), %rax
173 .cfi_def_cfa_register   %rax
174         push    %rbx
175 .cfi_push       %rbx
176         push    %rbp
177 .cfi_push       %rbp
178         push    %r12
179 .cfi_push       %r12
180         push    %r13
181 .cfi_push       %r13
182         push    %r14
183 .cfi_push       %r14
184         push    %r15
185 .cfi_push       %r15
186         vzeroupper
187 ___
188 $code.=<<___ if ($win64);
189         lea     -0xa8(%rsp),%rsp
190         vmovaps %xmm6,-0xd8(%rax)
191         vmovaps %xmm7,-0xc8(%rax)
192         vmovaps %xmm8,-0xb8(%rax)
193         vmovaps %xmm9,-0xa8(%rax)
194         vmovaps %xmm10,-0x98(%rax)
195         vmovaps %xmm11,-0x88(%rax)
196         vmovaps %xmm12,-0x78(%rax)
197         vmovaps %xmm13,-0x68(%rax)
198         vmovaps %xmm14,-0x58(%rax)
199         vmovaps %xmm15,-0x48(%rax)
200 .Lsqr_1024_body:
201 ___
202 $code.=<<___;
203         mov     %rax,%rbp
204 .cfi_def_cfa_register   %rbp
205         mov     %rdx, $np                       # reassigned argument
206         sub     \$$FrameSize, %rsp
207         mov     $np, $tmp
208         sub     \$-128, $rp                     # size optimization
209         sub     \$-128, $ap
210         sub     \$-128, $np
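        # ("sub \$-128" rather than "add \$128" because -128 fits in a signed
        # 8-bit immediate while +128 does not; biasing the pointers by 128
        # also lets the 32*n-128 displacements used below fit in one signed
        # byte, which is the "size optimization" referred to above)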
211
212         and     \$4095, $tmp                    # see if $np crosses page
213         add     \$32*10, $tmp
214         shr     \$12, $tmp
215         vpxor   $ACC9,$ACC9,$ACC9
216         jz      .Lsqr_1024_no_n_copy
217
218         # an unaligned 256-bit load that crosses a page boundary can
219         # cause a >2x performance degradation here, so if $np does
220         # cross a page boundary, copy it to the stack and make sure the
221         # stack frame doesn't...
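        # (the check above adds 32*10=320, the size of the ten vectors read
        # below, to $np's offset within its 4K page; "shr \$12" then leaves
        # a non-zero value when that region would extend to or beyond the
        # page boundary, in which case $np is copied to the stack)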
222         sub             \$32*10,%rsp
223         vmovdqu         32*0-128($np), $ACC0
224         and             \$-2048, %rsp
225         vmovdqu         32*1-128($np), $ACC1
226         vmovdqu         32*2-128($np), $ACC2
227         vmovdqu         32*3-128($np), $ACC3
228         vmovdqu         32*4-128($np), $ACC4
229         vmovdqu         32*5-128($np), $ACC5
230         vmovdqu         32*6-128($np), $ACC6
231         vmovdqu         32*7-128($np), $ACC7
232         vmovdqu         32*8-128($np), $ACC8
233         lea             $FrameSize+128(%rsp),$np
234         vmovdqu         $ACC0, 32*0-128($np)
235         vmovdqu         $ACC1, 32*1-128($np)
236         vmovdqu         $ACC2, 32*2-128($np)
237         vmovdqu         $ACC3, 32*3-128($np)
238         vmovdqu         $ACC4, 32*4-128($np)
239         vmovdqu         $ACC5, 32*5-128($np)
240         vmovdqu         $ACC6, 32*6-128($np)
241         vmovdqu         $ACC7, 32*7-128($np)
242         vmovdqu         $ACC8, 32*8-128($np)
243         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
244
245 .Lsqr_1024_no_n_copy:
246         and             \$-1024, %rsp
247
248         vmovdqu         32*1-128($ap), $ACC1
249         vmovdqu         32*2-128($ap), $ACC2
250         vmovdqu         32*3-128($ap), $ACC3
251         vmovdqu         32*4-128($ap), $ACC4
252         vmovdqu         32*5-128($ap), $ACC5
253         vmovdqu         32*6-128($ap), $ACC6
254         vmovdqu         32*7-128($ap), $ACC7
255         vmovdqu         32*8-128($ap), $ACC8
256
257         lea     192(%rsp), $tp0                 # 64+128=192
258         vpbroadcastq    .Land_mask(%rip), $AND_MASK
259         jmp     .LOOP_GRANDE_SQR_1024
260
261 .align  32
262 .LOOP_GRANDE_SQR_1024:
263         lea     32*18+128(%rsp), $aap           # size optimization
264         lea     448(%rsp), $tp1                 # 64+128+256=448
265
266         # the squaring is performed as described in Variant B of
267         # "Speeding up Big-Number Squaring", so start by calculating
268         # the A*2=A+A vector
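        # (doubling the operand up front means each off-diagonal product
        # a[i]*a[j] only has to be formed once, from a[i] and (2*a)[j],
        # instead of being computed twice and summed)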
269         vpaddq          $ACC1, $ACC1, $ACC1
270          vpbroadcastq   32*0-128($ap), $B1
271         vpaddq          $ACC2, $ACC2, $ACC2
272         vmovdqa         $ACC1, 32*0-128($aap)
273         vpaddq          $ACC3, $ACC3, $ACC3
274         vmovdqa         $ACC2, 32*1-128($aap)
275         vpaddq          $ACC4, $ACC4, $ACC4
276         vmovdqa         $ACC3, 32*2-128($aap)
277         vpaddq          $ACC5, $ACC5, $ACC5
278         vmovdqa         $ACC4, 32*3-128($aap)
279         vpaddq          $ACC6, $ACC6, $ACC6
280         vmovdqa         $ACC5, 32*4-128($aap)
281         vpaddq          $ACC7, $ACC7, $ACC7
282         vmovdqa         $ACC6, 32*5-128($aap)
283         vpaddq          $ACC8, $ACC8, $ACC8
284         vmovdqa         $ACC7, 32*6-128($aap)
285         vpxor           $ACC9, $ACC9, $ACC9
286         vmovdqa         $ACC8, 32*7-128($aap)
287
288         vpmuludq        32*0-128($ap), $B1, $ACC0
289          vpbroadcastq   32*1-128($ap), $B2
290          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
291         vpmuludq        $B1, $ACC1, $ACC1
292          vmovdqu        $ACC9, 32*10-448($tp1)
293         vpmuludq        $B1, $ACC2, $ACC2
294          vmovdqu        $ACC9, 32*11-448($tp1)
295         vpmuludq        $B1, $ACC3, $ACC3
296          vmovdqu        $ACC9, 32*12-448($tp1)
297         vpmuludq        $B1, $ACC4, $ACC4
298          vmovdqu        $ACC9, 32*13-448($tp1)
299         vpmuludq        $B1, $ACC5, $ACC5
300          vmovdqu        $ACC9, 32*14-448($tp1)
301         vpmuludq        $B1, $ACC6, $ACC6
302          vmovdqu        $ACC9, 32*15-448($tp1)
303         vpmuludq        $B1, $ACC7, $ACC7
304          vmovdqu        $ACC9, 32*16-448($tp1)
305         vpmuludq        $B1, $ACC8, $ACC8
306          vpbroadcastq   32*2-128($ap), $B1
307          vmovdqu        $ACC9, 32*17-448($tp1)
308
309         mov     $ap, $tpa
310         mov     \$4, $i
311         jmp     .Lsqr_entry_1024
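        # the first $B1 row of products was computed above without the
        # accumulate step (there is nothing to add to yet), so the
        # software-pipelined loop below is entered past its head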
312 ___
313 $TEMP0=$Y1;
314 $TEMP2=$Y2;
315 $code.=<<___;
316 .align  32
317 .LOOP_SQR_1024:
318          vpbroadcastq   32*1-128($tpa), $B2
319         vpmuludq        32*0-128($ap), $B1, $ACC0
320         vpaddq          32*0-192($tp0), $ACC0, $ACC0
321         vpmuludq        32*0-128($aap), $B1, $ACC1
322         vpaddq          32*1-192($tp0), $ACC1, $ACC1
323         vpmuludq        32*1-128($aap), $B1, $ACC2
324         vpaddq          32*2-192($tp0), $ACC2, $ACC2
325         vpmuludq        32*2-128($aap), $B1, $ACC3
326         vpaddq          32*3-192($tp0), $ACC3, $ACC3
327         vpmuludq        32*3-128($aap), $B1, $ACC4
328         vpaddq          32*4-192($tp0), $ACC4, $ACC4
329         vpmuludq        32*4-128($aap), $B1, $ACC5
330         vpaddq          32*5-192($tp0), $ACC5, $ACC5
331         vpmuludq        32*5-128($aap), $B1, $ACC6
332         vpaddq          32*6-192($tp0), $ACC6, $ACC6
333         vpmuludq        32*6-128($aap), $B1, $ACC7
334         vpaddq          32*7-192($tp0), $ACC7, $ACC7
335         vpmuludq        32*7-128($aap), $B1, $ACC8
336          vpbroadcastq   32*2-128($tpa), $B1
337         vpaddq          32*8-192($tp0), $ACC8, $ACC8
338 .Lsqr_entry_1024:
339         vmovdqu         $ACC0, 32*0-192($tp0)
340         vmovdqu         $ACC1, 32*1-192($tp0)
341
342         vpmuludq        32*1-128($ap), $B2, $TEMP0
343         vpaddq          $TEMP0, $ACC2, $ACC2
344         vpmuludq        32*1-128($aap), $B2, $TEMP1
345         vpaddq          $TEMP1, $ACC3, $ACC3
346         vpmuludq        32*2-128($aap), $B2, $TEMP2
347         vpaddq          $TEMP2, $ACC4, $ACC4
348         vpmuludq        32*3-128($aap), $B2, $TEMP0
349         vpaddq          $TEMP0, $ACC5, $ACC5
350         vpmuludq        32*4-128($aap), $B2, $TEMP1
351         vpaddq          $TEMP1, $ACC6, $ACC6
352         vpmuludq        32*5-128($aap), $B2, $TEMP2
353         vpaddq          $TEMP2, $ACC7, $ACC7
354         vpmuludq        32*6-128($aap), $B2, $TEMP0
355         vpaddq          $TEMP0, $ACC8, $ACC8
356         vpmuludq        32*7-128($aap), $B2, $ACC0
357          vpbroadcastq   32*3-128($tpa), $B2
358         vpaddq          32*9-192($tp0), $ACC0, $ACC0
359
360         vmovdqu         $ACC2, 32*2-192($tp0)
361         vmovdqu         $ACC3, 32*3-192($tp0)
362
363         vpmuludq        32*2-128($ap), $B1, $TEMP2
364         vpaddq          $TEMP2, $ACC4, $ACC4
365         vpmuludq        32*2-128($aap), $B1, $TEMP0
366         vpaddq          $TEMP0, $ACC5, $ACC5
367         vpmuludq        32*3-128($aap), $B1, $TEMP1
368         vpaddq          $TEMP1, $ACC6, $ACC6
369         vpmuludq        32*4-128($aap), $B1, $TEMP2
370         vpaddq          $TEMP2, $ACC7, $ACC7
371         vpmuludq        32*5-128($aap), $B1, $TEMP0
372         vpaddq          $TEMP0, $ACC8, $ACC8
373         vpmuludq        32*6-128($aap), $B1, $TEMP1
374         vpaddq          $TEMP1, $ACC0, $ACC0
375         vpmuludq        32*7-128($aap), $B1, $ACC1
376          vpbroadcastq   32*4-128($tpa), $B1
377         vpaddq          32*10-448($tp1), $ACC1, $ACC1
378
379         vmovdqu         $ACC4, 32*4-192($tp0)
380         vmovdqu         $ACC5, 32*5-192($tp0)
381
382         vpmuludq        32*3-128($ap), $B2, $TEMP0
383         vpaddq          $TEMP0, $ACC6, $ACC6
384         vpmuludq        32*3-128($aap), $B2, $TEMP1
385         vpaddq          $TEMP1, $ACC7, $ACC7
386         vpmuludq        32*4-128($aap), $B2, $TEMP2
387         vpaddq          $TEMP2, $ACC8, $ACC8
388         vpmuludq        32*5-128($aap), $B2, $TEMP0
389         vpaddq          $TEMP0, $ACC0, $ACC0
390         vpmuludq        32*6-128($aap), $B2, $TEMP1
391         vpaddq          $TEMP1, $ACC1, $ACC1
392         vpmuludq        32*7-128($aap), $B2, $ACC2
393          vpbroadcastq   32*5-128($tpa), $B2
394         vpaddq          32*11-448($tp1), $ACC2, $ACC2
395
396         vmovdqu         $ACC6, 32*6-192($tp0)
397         vmovdqu         $ACC7, 32*7-192($tp0)
398
399         vpmuludq        32*4-128($ap), $B1, $TEMP0
400         vpaddq          $TEMP0, $ACC8, $ACC8
401         vpmuludq        32*4-128($aap), $B1, $TEMP1
402         vpaddq          $TEMP1, $ACC0, $ACC0
403         vpmuludq        32*5-128($aap), $B1, $TEMP2
404         vpaddq          $TEMP2, $ACC1, $ACC1
405         vpmuludq        32*6-128($aap), $B1, $TEMP0
406         vpaddq          $TEMP0, $ACC2, $ACC2
407         vpmuludq        32*7-128($aap), $B1, $ACC3
408          vpbroadcastq   32*6-128($tpa), $B1
409         vpaddq          32*12-448($tp1), $ACC3, $ACC3
410
411         vmovdqu         $ACC8, 32*8-192($tp0)
412         vmovdqu         $ACC0, 32*9-192($tp0)
413         lea             8($tp0), $tp0
414
415         vpmuludq        32*5-128($ap), $B2, $TEMP2
416         vpaddq          $TEMP2, $ACC1, $ACC1
417         vpmuludq        32*5-128($aap), $B2, $TEMP0
418         vpaddq          $TEMP0, $ACC2, $ACC2
419         vpmuludq        32*6-128($aap), $B2, $TEMP1
420         vpaddq          $TEMP1, $ACC3, $ACC3
421         vpmuludq        32*7-128($aap), $B2, $ACC4
422          vpbroadcastq   32*7-128($tpa), $B2
423         vpaddq          32*13-448($tp1), $ACC4, $ACC4
424
425         vmovdqu         $ACC1, 32*10-448($tp1)
426         vmovdqu         $ACC2, 32*11-448($tp1)
427
428         vpmuludq        32*6-128($ap), $B1, $TEMP0
429         vpaddq          $TEMP0, $ACC3, $ACC3
430         vpmuludq        32*6-128($aap), $B1, $TEMP1
431          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
432         vpaddq          $TEMP1, $ACC4, $ACC4
433         vpmuludq        32*7-128($aap), $B1, $ACC5
434          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
435         vpaddq          32*14-448($tp1), $ACC5, $ACC5
436
437         vmovdqu         $ACC3, 32*12-448($tp1)
438         vmovdqu         $ACC4, 32*13-448($tp1)
439         lea             8($tpa), $tpa
440
441         vpmuludq        32*7-128($ap), $B2, $TEMP0
442         vpaddq          $TEMP0, $ACC5, $ACC5
443         vpmuludq        32*7-128($aap), $B2, $ACC6
444         vpaddq          32*15-448($tp1), $ACC6, $ACC6
445
446         vpmuludq        32*8-128($ap), $ACC0, $ACC7
447         vmovdqu         $ACC5, 32*14-448($tp1)
448         vpaddq          32*16-448($tp1), $ACC7, $ACC7
449         vmovdqu         $ACC6, 32*15-448($tp1)
450         vmovdqu         $ACC7, 32*16-448($tp1)
451         lea             8($tp1), $tp1
452
453         dec     $i
454         jnz     .LOOP_SQR_1024
455 ___
456 $ZERO = $ACC9;
457 $TEMP0 = $B1;
458 $TEMP2 = $B2;
459 $TEMP3 = $Y1;
460 $TEMP4 = $Y2;
461 $code.=<<___;
462         # we need to normalize (carry-propagate) digits 32-39 to avoid overflow
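        # (pattern used by all normalizations below: vpand keeps the low 29
        # bits of each digit, vpsrlq \$29 extracts the carries, vpermq \$0x93
        # rotates the carries one 64-bit lane up so each lines up with the
        # next digit, and vpblendd \$3 either clears the wrapped-around lane
        # or injects it into the low lane of the next register, after which
        # the carries are added back in)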
463         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
464         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
465         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
466         lea             192(%rsp), $tp0                 # 64+128=192
467
468         vpsrlq          \$29, $ACC8, $TEMP1
469         vpand           $AND_MASK, $ACC8, $ACC8
470         vpsrlq          \$29, $ACC1, $TEMP2
471         vpand           $AND_MASK, $ACC1, $ACC1
472
473         vpermq          \$0x93, $TEMP1, $TEMP1
474         vpxor           $ZERO, $ZERO, $ZERO
475         vpermq          \$0x93, $TEMP2, $TEMP2
476
477         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
478         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
479         vpaddq          $TEMP0, $ACC8, $ACC8
480         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
481         vpaddq          $TEMP1, $ACC1, $ACC1
482         vpaddq          $TEMP2, $ACC2, $ACC2
483         vmovdqu         $ACC1, 32*9-192($tp0)
484         vmovdqu         $ACC2, 32*10-192($tp0)
485
486         mov     (%rsp), %rax
487         mov     8(%rsp), $r1
488         mov     16(%rsp), $r2
489         mov     24(%rsp), $r3
490         vmovdqu 32*1(%rsp), $ACC1
491         vmovdqu 32*2-192($tp0), $ACC2
492         vmovdqu 32*3-192($tp0), $ACC3
493         vmovdqu 32*4-192($tp0), $ACC4
494         vmovdqu 32*5-192($tp0), $ACC5
495         vmovdqu 32*6-192($tp0), $ACC6
496         vmovdqu 32*7-192($tp0), $ACC7
497
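        # per-digit Montgomery reduction: %eax = (t[0]*n0) mod 2^29, where n0
        # is the usual Montgomery constant -n^(-1) mod 2^64 (only its low 29
        # bits matter here); adding %eax*n makes the least significant digit
        # of the running sum divisible by 2^29, so it can be dropped, and
        # repeating this digit by digit completes the reduction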
498         mov     %rax, $r0
499         imull   $n0, %eax
500         and     \$0x1fffffff, %eax
501         vmovd   %eax, $Y1
502
503         mov     %rax, %rdx
504         imulq   -128($np), %rax
505          vpbroadcastq   $Y1, $Y1
506         add     %rax, $r0
507         mov     %rdx, %rax
508         imulq   8-128($np), %rax
509         shr     \$29, $r0
510         add     %rax, $r1
511         mov     %rdx, %rax
512         imulq   16-128($np), %rax
513         add     $r0, $r1
514         add     %rax, $r2
515         imulq   24-128($np), %rdx
516         add     %rdx, $r3
517
518         mov     $r1, %rax
519         imull   $n0, %eax
520         and     \$0x1fffffff, %eax
521
522         mov \$9, $i
523         jmp .LOOP_REDUCE_1024
524
525 .align  32
526 .LOOP_REDUCE_1024:
527         vmovd   %eax, $Y2
528         vpbroadcastq    $Y2, $Y2
529
530         vpmuludq        32*1-128($np), $Y1, $TEMP0
531          mov    %rax, %rdx
532          imulq  -128($np), %rax
533         vpaddq          $TEMP0, $ACC1, $ACC1
534          add    %rax, $r1
535         vpmuludq        32*2-128($np), $Y1, $TEMP1
536          mov    %rdx, %rax
537          imulq  8-128($np), %rax
538         vpaddq          $TEMP1, $ACC2, $ACC2
539         vpmuludq        32*3-128($np), $Y1, $TEMP2
540          .byte  0x67
541          add    %rax, $r2
542          .byte  0x67
543          mov    %rdx, %rax
544          imulq  16-128($np), %rax
545          shr    \$29, $r1
546         vpaddq          $TEMP2, $ACC3, $ACC3
547         vpmuludq        32*4-128($np), $Y1, $TEMP0
548          add    %rax, $r3
549          add    $r1, $r2
550         vpaddq          $TEMP0, $ACC4, $ACC4
551         vpmuludq        32*5-128($np), $Y1, $TEMP1
552          mov    $r2, %rax
553          imull  $n0, %eax
554         vpaddq          $TEMP1, $ACC5, $ACC5
555         vpmuludq        32*6-128($np), $Y1, $TEMP2
556          and    \$0x1fffffff, %eax
557         vpaddq          $TEMP2, $ACC6, $ACC6
558         vpmuludq        32*7-128($np), $Y1, $TEMP0
559         vpaddq          $TEMP0, $ACC7, $ACC7
560         vpmuludq        32*8-128($np), $Y1, $TEMP1
561          vmovd  %eax, $Y1
562          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
563         vpaddq          $TEMP1, $ACC8, $ACC8
564          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
565          vpbroadcastq   $Y1, $Y1
566
567         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
568         vmovdqu         32*3-8-128($np), $TEMP1
569          mov    %rax, %rdx
570          imulq  -128($np), %rax
571         vpaddq          $TEMP2, $ACC1, $ACC1
572         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
573         vmovdqu         32*4-8-128($np), $TEMP2
574          add    %rax, $r2
575          mov    %rdx, %rax
576          imulq  8-128($np), %rax
577         vpaddq          $TEMP0, $ACC2, $ACC2
578          add    $r3, %rax
579          shr    \$29, $r2
580         vpmuludq        $Y2, $TEMP1, $TEMP1
581         vmovdqu         32*5-8-128($np), $TEMP0
582          add    $r2, %rax
583         vpaddq          $TEMP1, $ACC3, $ACC3
584         vpmuludq        $Y2, $TEMP2, $TEMP2
585         vmovdqu         32*6-8-128($np), $TEMP1
586          .byte  0x67
587          mov    %rax, $r3
588          imull  $n0, %eax
589         vpaddq          $TEMP2, $ACC4, $ACC4
590         vpmuludq        $Y2, $TEMP0, $TEMP0
591         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
592          and    \$0x1fffffff, %eax
593         vpaddq          $TEMP0, $ACC5, $ACC5
594         vpmuludq        $Y2, $TEMP1, $TEMP1
595         vmovdqu         32*8-8-128($np), $TEMP0
596         vpaddq          $TEMP1, $ACC6, $ACC6
597         vpmuludq        $Y2, $TEMP2, $TEMP2
598         vmovdqu         32*9-8-128($np), $ACC9
599          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
600          imulq  -128($np), %rax
601         vpaddq          $TEMP2, $ACC7, $ACC7
602         vpmuludq        $Y2, $TEMP0, $TEMP0
603          vmovdqu        32*1-16-128($np), $TEMP1
604          vpbroadcastq   $ACC0, $ACC0
605         vpaddq          $TEMP0, $ACC8, $ACC8
606         vpmuludq        $Y2, $ACC9, $ACC9
607          vmovdqu        32*2-16-128($np), $TEMP2
608          add    %rax, $r3
609
610 ___
611 ($ACC0,$Y2)=($Y2,$ACC0);
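# $ACC0 (%ymm0) was borrowed above to hold the next Y digit, so swap the
# Perl names: below, $Y2 refers to that register and $ACC0 to the one
# previously named $Y2 (they are swapped back after the loop).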
612 $code.=<<___;
613          vmovdqu        32*1-24-128($np), $ACC0
614         vpmuludq        $Y1, $TEMP1, $TEMP1
615         vmovdqu         32*3-16-128($np), $TEMP0
616         vpaddq          $TEMP1, $ACC1, $ACC1
617          vpmuludq       $Y2, $ACC0, $ACC0
618         vpmuludq        $Y1, $TEMP2, $TEMP2
619         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
620          vpaddq         $ACC1, $ACC0, $ACC0
621         vpaddq          $TEMP2, $ACC2, $ACC2
622         vpmuludq        $Y1, $TEMP0, $TEMP0
623         vmovdqu         32*5-16-128($np), $TEMP2
624          .byte  0x67
625          vmovq          $ACC0, %rax
626          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
627         vpaddq          $TEMP0, $ACC3, $ACC3
628         vpmuludq        $Y1, $TEMP1, $TEMP1
629         vmovdqu         32*6-16-128($np), $TEMP0
630         vpaddq          $TEMP1, $ACC4, $ACC4
631         vpmuludq        $Y1, $TEMP2, $TEMP2
632         vmovdqu         32*7-16-128($np), $TEMP1
633         vpaddq          $TEMP2, $ACC5, $ACC5
634         vpmuludq        $Y1, $TEMP0, $TEMP0
635         vmovdqu         32*8-16-128($np), $TEMP2
636         vpaddq          $TEMP0, $ACC6, $ACC6
637         vpmuludq        $Y1, $TEMP1, $TEMP1
638          shr    \$29, $r3
639         vmovdqu         32*9-16-128($np), $TEMP0
640          add    $r3, %rax
641         vpaddq          $TEMP1, $ACC7, $ACC7
642         vpmuludq        $Y1, $TEMP2, $TEMP2
643          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
644          mov    %rax, $r0
645          imull  $n0, %eax
646         vpaddq          $TEMP2, $ACC8, $ACC8
647         vpmuludq        $Y1, $TEMP0, $TEMP0
648          and    \$0x1fffffff, %eax
649          vmovd  %eax, $Y1
650          vmovdqu        32*3-24-128($np), $TEMP2
651         .byte   0x67
652         vpaddq          $TEMP0, $ACC9, $ACC9
653          vpbroadcastq   $Y1, $Y1
654
655         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
656         vmovdqu         32*4-24-128($np), $TEMP0
657          mov    %rax, %rdx
658          imulq  -128($np), %rax
659          mov    8(%rsp), $r1
660         vpaddq          $TEMP1, $ACC2, $ACC1
661         vpmuludq        $Y2, $TEMP2, $TEMP2
662         vmovdqu         32*5-24-128($np), $TEMP1
663          add    %rax, $r0
664          mov    %rdx, %rax
665          imulq  8-128($np), %rax
666          .byte  0x67
667          shr    \$29, $r0
668          mov    16(%rsp), $r2
669         vpaddq          $TEMP2, $ACC3, $ACC2
670         vpmuludq        $Y2, $TEMP0, $TEMP0
671         vmovdqu         32*6-24-128($np), $TEMP2
672          add    %rax, $r1
673          mov    %rdx, %rax
674          imulq  16-128($np), %rax
675         vpaddq          $TEMP0, $ACC4, $ACC3
676         vpmuludq        $Y2, $TEMP1, $TEMP1
677         vmovdqu         32*7-24-128($np), $TEMP0
678          imulq  24-128($np), %rdx               # future $r3
679          add    %rax, $r2
680          lea    ($r0,$r1), %rax
681         vpaddq          $TEMP1, $ACC5, $ACC4
682         vpmuludq        $Y2, $TEMP2, $TEMP2
683         vmovdqu         32*8-24-128($np), $TEMP1
684          mov    %rax, $r1
685          imull  $n0, %eax
686         vpmuludq        $Y2, $TEMP0, $TEMP0
687         vpaddq          $TEMP2, $ACC6, $ACC5
688         vmovdqu         32*9-24-128($np), $TEMP2
689          and    \$0x1fffffff, %eax
690         vpaddq          $TEMP0, $ACC7, $ACC6
691         vpmuludq        $Y2, $TEMP1, $TEMP1
692          add    24(%rsp), %rdx
693         vpaddq          $TEMP1, $ACC8, $ACC7
694         vpmuludq        $Y2, $TEMP2, $TEMP2
695         vpaddq          $TEMP2, $ACC9, $ACC8
696          vmovq  $r3, $ACC9
697          mov    %rdx, $r3
698
699         dec     $i
700         jnz     .LOOP_REDUCE_1024
701 ___
702 ($ACC0,$Y2)=($Y2,$ACC0);
703 $code.=<<___;
704         lea     448(%rsp), $tp1                 # size optimization
705         vpaddq  $ACC9, $Y2, $ACC0
706         vpxor   $ZERO, $ZERO, $ZERO
707
708         vpaddq          32*9-192($tp0), $ACC0, $ACC0
709         vpaddq          32*10-448($tp1), $ACC1, $ACC1
710         vpaddq          32*11-448($tp1), $ACC2, $ACC2
711         vpaddq          32*12-448($tp1), $ACC3, $ACC3
712         vpaddq          32*13-448($tp1), $ACC4, $ACC4
713         vpaddq          32*14-448($tp1), $ACC5, $ACC5
714         vpaddq          32*15-448($tp1), $ACC6, $ACC6
715         vpaddq          32*16-448($tp1), $ACC7, $ACC7
716         vpaddq          32*17-448($tp1), $ACC8, $ACC8
717
718         vpsrlq          \$29, $ACC0, $TEMP1
719         vpand           $AND_MASK, $ACC0, $ACC0
720         vpsrlq          \$29, $ACC1, $TEMP2
721         vpand           $AND_MASK, $ACC1, $ACC1
722         vpsrlq          \$29, $ACC2, $TEMP3
723         vpermq          \$0x93, $TEMP1, $TEMP1
724         vpand           $AND_MASK, $ACC2, $ACC2
725         vpsrlq          \$29, $ACC3, $TEMP4
726         vpermq          \$0x93, $TEMP2, $TEMP2
727         vpand           $AND_MASK, $ACC3, $ACC3
728         vpermq          \$0x93, $TEMP3, $TEMP3
729
730         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
731         vpermq          \$0x93, $TEMP4, $TEMP4
732         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
733         vpaddq          $TEMP0, $ACC0, $ACC0
734         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
735         vpaddq          $TEMP1, $ACC1, $ACC1
736         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
737         vpaddq          $TEMP2, $ACC2, $ACC2
738         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
739         vpaddq          $TEMP3, $ACC3, $ACC3
740         vpaddq          $TEMP4, $ACC4, $ACC4
741
742         vpsrlq          \$29, $ACC0, $TEMP1
743         vpand           $AND_MASK, $ACC0, $ACC0
744         vpsrlq          \$29, $ACC1, $TEMP2
745         vpand           $AND_MASK, $ACC1, $ACC1
746         vpsrlq          \$29, $ACC2, $TEMP3
747         vpermq          \$0x93, $TEMP1, $TEMP1
748         vpand           $AND_MASK, $ACC2, $ACC2
749         vpsrlq          \$29, $ACC3, $TEMP4
750         vpermq          \$0x93, $TEMP2, $TEMP2
751         vpand           $AND_MASK, $ACC3, $ACC3
752         vpermq          \$0x93, $TEMP3, $TEMP3
753
754         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
755         vpermq          \$0x93, $TEMP4, $TEMP4
756         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
757         vpaddq          $TEMP0, $ACC0, $ACC0
758         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
759         vpaddq          $TEMP1, $ACC1, $ACC1
760         vmovdqu         $ACC0, 32*0-128($rp)
761         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
762         vpaddq          $TEMP2, $ACC2, $ACC2
763         vmovdqu         $ACC1, 32*1-128($rp)
764         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
765         vpaddq          $TEMP3, $ACC3, $ACC3
766         vmovdqu         $ACC2, 32*2-128($rp)
767         vpaddq          $TEMP4, $ACC4, $ACC4
768         vmovdqu         $ACC3, 32*3-128($rp)
769 ___
770 $TEMP5=$ACC0;
771 $code.=<<___;
772         vpsrlq          \$29, $ACC4, $TEMP1
773         vpand           $AND_MASK, $ACC4, $ACC4
774         vpsrlq          \$29, $ACC5, $TEMP2
775         vpand           $AND_MASK, $ACC5, $ACC5
776         vpsrlq          \$29, $ACC6, $TEMP3
777         vpermq          \$0x93, $TEMP1, $TEMP1
778         vpand           $AND_MASK, $ACC6, $ACC6
779         vpsrlq          \$29, $ACC7, $TEMP4
780         vpermq          \$0x93, $TEMP2, $TEMP2
781         vpand           $AND_MASK, $ACC7, $ACC7
782         vpsrlq          \$29, $ACC8, $TEMP5
783         vpermq          \$0x93, $TEMP3, $TEMP3
784         vpand           $AND_MASK, $ACC8, $ACC8
785         vpermq          \$0x93, $TEMP4, $TEMP4
786
787         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
788         vpermq          \$0x93, $TEMP5, $TEMP5
789         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
790         vpaddq          $TEMP0, $ACC4, $ACC4
791         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
792         vpaddq          $TEMP1, $ACC5, $ACC5
793         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
794         vpaddq          $TEMP2, $ACC6, $ACC6
795         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
796         vpaddq          $TEMP3, $ACC7, $ACC7
797         vpaddq          $TEMP4, $ACC8, $ACC8
798
799         vpsrlq          \$29, $ACC4, $TEMP1
800         vpand           $AND_MASK, $ACC4, $ACC4
801         vpsrlq          \$29, $ACC5, $TEMP2
802         vpand           $AND_MASK, $ACC5, $ACC5
803         vpsrlq          \$29, $ACC6, $TEMP3
804         vpermq          \$0x93, $TEMP1, $TEMP1
805         vpand           $AND_MASK, $ACC6, $ACC6
806         vpsrlq          \$29, $ACC7, $TEMP4
807         vpermq          \$0x93, $TEMP2, $TEMP2
808         vpand           $AND_MASK, $ACC7, $ACC7
809         vpsrlq          \$29, $ACC8, $TEMP5
810         vpermq          \$0x93, $TEMP3, $TEMP3
811         vpand           $AND_MASK, $ACC8, $ACC8
812         vpermq          \$0x93, $TEMP4, $TEMP4
813
814         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
815         vpermq          \$0x93, $TEMP5, $TEMP5
816         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
817         vpaddq          $TEMP0, $ACC4, $ACC4
818         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
819         vpaddq          $TEMP1, $ACC5, $ACC5
820         vmovdqu         $ACC4, 32*4-128($rp)
821         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
822         vpaddq          $TEMP2, $ACC6, $ACC6
823         vmovdqu         $ACC5, 32*5-128($rp)
824         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
825         vpaddq          $TEMP3, $ACC7, $ACC7
826         vmovdqu         $ACC6, 32*6-128($rp)
827         vpaddq          $TEMP4, $ACC8, $ACC8
828         vmovdqu         $ACC7, 32*7-128($rp)
829         vmovdqu         $ACC8, 32*8-128($rp)
830
831         mov     $rp, $ap
832         dec     $rep
833         jne     .LOOP_GRANDE_SQR_1024
834
835         vzeroall
836         mov     %rbp, %rax
837 .cfi_def_cfa_register   %rax
838 ___
839 $code.=<<___ if ($win64);
840 .Lsqr_1024_in_tail:
841         movaps  -0xd8(%rax),%xmm6
842         movaps  -0xc8(%rax),%xmm7
843         movaps  -0xb8(%rax),%xmm8
844         movaps  -0xa8(%rax),%xmm9
845         movaps  -0x98(%rax),%xmm10
846         movaps  -0x88(%rax),%xmm11
847         movaps  -0x78(%rax),%xmm12
848         movaps  -0x68(%rax),%xmm13
849         movaps  -0x58(%rax),%xmm14
850         movaps  -0x48(%rax),%xmm15
851 ___
852 $code.=<<___;
853         mov     -48(%rax),%r15
854 .cfi_restore    %r15
855         mov     -40(%rax),%r14
856 .cfi_restore    %r14
857         mov     -32(%rax),%r13
858 .cfi_restore    %r13
859         mov     -24(%rax),%r12
860 .cfi_restore    %r12
861         mov     -16(%rax),%rbp
862 .cfi_restore    %rbp
863         mov     -8(%rax),%rbx
864 .cfi_restore    %rbx
865         lea     (%rax),%rsp             # restore %rsp
866 .cfi_def_cfa_register   %rsp
867 .Lsqr_1024_epilogue:
868         ret
869 .cfi_endproc
870 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
871 ___
872 }
873
874 { # void AMM_WW(
875 my $rp="%rdi";  # BN_ULONG *rp,
876 my $ap="%rsi";  # const BN_ULONG *ap,
877 my $bp="%rdx";  # const BN_ULONG *bp,
878 my $np="%rcx";  # const BN_ULONG *np,
879 my $n0="%r8d";  # unsigned int n0);
880
881 # The registers that hold the accumulated redundant result.
882 # The AMM works on 1024-bit operands with a redundant digit size of 29 bits.
883 # Therefore: ceil(1024/29) = 36 digits, at 4 digits per ymm register => 36/4 = 9.
884 my $ACC0="%ymm0";
885 my $ACC1="%ymm1";
886 my $ACC2="%ymm2";
887 my $ACC3="%ymm3";
888 my $ACC4="%ymm4";
889 my $ACC5="%ymm5";
890 my $ACC6="%ymm6";
891 my $ACC7="%ymm7";
892 my $ACC8="%ymm8";
893 my $ACC9="%ymm9";
894
895 # Registers that hold the currently used broadcast words of the multiplier
896 my $Bi="%ymm10";
897 my $Yi="%ymm11";
898
899 # Helper registers
900 my $TEMP0=$ACC0;
901 my $TEMP1="%ymm12";
902 my $TEMP2="%ymm13";
903 my $ZERO="%ymm14";
904 my $AND_MASK="%ymm15";
905
906 # ALU (general-purpose) registers that hold the first words of the ACC
907 my $r0="%r9";
908 my $r1="%r10";
909 my $r2="%r11";
910 my $r3="%r12";
911
912 my $i="%r14d";
913 my $tmp="%r15";
914
915 $bp="%r13";     # reassigned argument
916
917 $code.=<<___;
918 .globl  rsaz_1024_mul_avx2
919 .type   rsaz_1024_mul_avx2,\@function,5
920 .align  64
921 rsaz_1024_mul_avx2:
922 .cfi_startproc
923         lea     (%rsp), %rax
924 .cfi_def_cfa_register   %rax
925         push    %rbx
926 .cfi_push       %rbx
927         push    %rbp
928 .cfi_push       %rbp
929         push    %r12
930 .cfi_push       %r12
931         push    %r13
932 .cfi_push       %r13
933         push    %r14
934 .cfi_push       %r14
935         push    %r15
936 .cfi_push       %r15
937 ___
938 $code.=<<___ if ($win64);
939         vzeroupper
940         lea     -0xa8(%rsp),%rsp
941         vmovaps %xmm6,-0xd8(%rax)
942         vmovaps %xmm7,-0xc8(%rax)
943         vmovaps %xmm8,-0xb8(%rax)
944         vmovaps %xmm9,-0xa8(%rax)
945         vmovaps %xmm10,-0x98(%rax)
946         vmovaps %xmm11,-0x88(%rax)
947         vmovaps %xmm12,-0x78(%rax)
948         vmovaps %xmm13,-0x68(%rax)
949         vmovaps %xmm14,-0x58(%rax)
950         vmovaps %xmm15,-0x48(%rax)
951 .Lmul_1024_body:
952 ___
953 $code.=<<___;
954         mov     %rax,%rbp
955 .cfi_def_cfa_register   %rbp
956         vzeroall
957         mov     %rdx, $bp       # reassigned argument
958         sub     \$64,%rsp
959
960         # an unaligned 256-bit load that crosses a page boundary can
961         # cause severe performance degradation here, so if $ap does
962         # cross a page boundary, swap it with $bp [meaning that the
963         # caller is advised to lay $ap and $bp down next to each other,
964         # so that at most one of them can cross a page boundary].
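        # (the standalone 0x67 bytes below are address-size prefixes used
        # purely as instruction padding, presumably to tune decode/branch
        # alignment; they do not change what the instructions do)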
965         .byte   0x67,0x67
966         mov     $ap, $tmp
967         and     \$4095, $tmp
968         add     \$32*10, $tmp
969         shr     \$12, $tmp
970         mov     $ap, $tmp
971         cmovnz  $bp, $ap
972         cmovnz  $tmp, $bp
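        # (mov does not modify flags, so ZF from the "shr \$12" above still
        # drives both cmovnz: if the $ap region crosses a page, $ap and $bp
        # are swapped)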
973
974         mov     $np, $tmp
975         sub     \$-128,$ap      # size optimization
976         sub     \$-128,$np
977         sub     \$-128,$rp
978
979         and     \$4095, $tmp    # see if $np crosses page
980         add     \$32*10, $tmp
981         .byte   0x67,0x67
982         shr     \$12, $tmp
983         jz      .Lmul_1024_no_n_copy
984
985         # an unaligned 256-bit load that crosses a page boundary can
986         # cause severe performance degradation here, so if $np does
987         # cross a page boundary, copy it to the stack and make sure the
988         # stack frame doesn't...
989         sub             \$32*10,%rsp
990         vmovdqu         32*0-128($np), $ACC0
991         and             \$-512, %rsp
992         vmovdqu         32*1-128($np), $ACC1
993         vmovdqu         32*2-128($np), $ACC2
994         vmovdqu         32*3-128($np), $ACC3
995         vmovdqu         32*4-128($np), $ACC4
996         vmovdqu         32*5-128($np), $ACC5
997         vmovdqu         32*6-128($np), $ACC6
998         vmovdqu         32*7-128($np), $ACC7
999         vmovdqu         32*8-128($np), $ACC8
1000         lea             64+128(%rsp),$np
1001         vmovdqu         $ACC0, 32*0-128($np)
1002         vpxor           $ACC0, $ACC0, $ACC0
1003         vmovdqu         $ACC1, 32*1-128($np)
1004         vpxor           $ACC1, $ACC1, $ACC1
1005         vmovdqu         $ACC2, 32*2-128($np)
1006         vpxor           $ACC2, $ACC2, $ACC2
1007         vmovdqu         $ACC3, 32*3-128($np)
1008         vpxor           $ACC3, $ACC3, $ACC3
1009         vmovdqu         $ACC4, 32*4-128($np)
1010         vpxor           $ACC4, $ACC4, $ACC4
1011         vmovdqu         $ACC5, 32*5-128($np)
1012         vpxor           $ACC5, $ACC5, $ACC5
1013         vmovdqu         $ACC6, 32*6-128($np)
1014         vpxor           $ACC6, $ACC6, $ACC6
1015         vmovdqu         $ACC7, 32*7-128($np)
1016         vpxor           $ACC7, $ACC7, $ACC7
1017         vmovdqu         $ACC8, 32*8-128($np)
1018         vmovdqa         $ACC0, $ACC8
1019         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
1020 .Lmul_1024_no_n_copy:
1021         and     \$-64,%rsp
1022
1023         mov     ($bp), %rbx
1024         vpbroadcastq ($bp), $Bi
1025         vmovdqu $ACC0, (%rsp)                   # clear top of stack
1026         xor     $r0, $r0
1027         .byte   0x67
1028         xor     $r1, $r1
1029         xor     $r2, $r2
1030         xor     $r3, $r3
1031
1032         vmovdqu .Land_mask(%rip), $AND_MASK
1033         mov     \$9, $i
1034         vmovdqu $ACC9, 32*9-128($rp)            # $ACC9 is zero after vzeroall
1035         jmp     .Loop_mul_1024
1036
1037 .align  32
1038 .Loop_mul_1024:
1039          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
1040         mov     %rbx, %rax
1041         imulq   -128($ap), %rax
1042         add     $r0, %rax
1043         mov     %rbx, $r1
1044         imulq   8-128($ap), $r1
1045         add     8(%rsp), $r1
1046
1047         mov     %rax, $r0
1048         imull   $n0, %eax
1049         and     \$0x1fffffff, %eax
1050
1051          mov    %rbx, $r2
1052          imulq  16-128($ap), $r2
1053          add    16(%rsp), $r2
1054
1055          mov    %rbx, $r3
1056          imulq  24-128($ap), $r3
1057          add    24(%rsp), $r3
1058         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1059          vmovd          %eax, $Yi
1060         vpaddq          $TEMP0,$ACC1,$ACC1
1061         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1062          vpbroadcastq   $Yi, $Yi
1063         vpaddq          $TEMP1,$ACC2,$ACC2
1064         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1065          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1066         vpaddq          $TEMP2,$ACC3,$ACC3
1067         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1068         vpaddq          $TEMP0,$ACC4,$ACC4
1069         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1070         vpaddq          $TEMP1,$ACC5,$ACC5
1071         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1072         vpaddq          $TEMP2,$ACC6,$ACC6
1073         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1074          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1075         vpaddq          $TEMP0,$ACC7,$ACC7
1076         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1077          vpbroadcastq   8($bp), $Bi
1078         vpaddq          $TEMP1,$ACC8,$ACC8
1079
1080         mov     %rax,%rdx
1081         imulq   -128($np),%rax
1082         add     %rax,$r0
1083         mov     %rdx,%rax
1084         imulq   8-128($np),%rax
1085         add     %rax,$r1
1086         mov     %rdx,%rax
1087         imulq   16-128($np),%rax
1088         add     %rax,$r2
1089         shr     \$29, $r0
1090         imulq   24-128($np),%rdx
1091         add     %rdx,$r3
1092         add     $r0, $r1
1093
1094         vpmuludq        32*1-128($np),$Yi,$TEMP2
1095          vmovq          $Bi, %rbx
1096         vpaddq          $TEMP2,$ACC1,$ACC1
1097         vpmuludq        32*2-128($np),$Yi,$TEMP0
1098         vpaddq          $TEMP0,$ACC2,$ACC2
1099         vpmuludq        32*3-128($np),$Yi,$TEMP1
1100         vpaddq          $TEMP1,$ACC3,$ACC3
1101         vpmuludq        32*4-128($np),$Yi,$TEMP2
1102         vpaddq          $TEMP2,$ACC4,$ACC4
1103         vpmuludq        32*5-128($np),$Yi,$TEMP0
1104         vpaddq          $TEMP0,$ACC5,$ACC5
1105         vpmuludq        32*6-128($np),$Yi,$TEMP1
1106         vpaddq          $TEMP1,$ACC6,$ACC6
1107         vpmuludq        32*7-128($np),$Yi,$TEMP2
1108          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1109         vpaddq          $TEMP2,$ACC7,$ACC7
1110         vpmuludq        32*8-128($np),$Yi,$TEMP0
1111          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1112         vpaddq          $TEMP0,$ACC8,$ACC8
1113
1114         mov     %rbx, %rax
1115         imulq   -128($ap),%rax
1116         add     %rax,$r1
1117          vmovdqu        -8+32*1-128($ap),$TEMP1
1118         mov     %rbx, %rax
1119         imulq   8-128($ap),%rax
1120         add     %rax,$r2
1121          vmovdqu        -8+32*2-128($ap),$TEMP2
1122
1123         mov     $r1, %rax
1124         imull   $n0, %eax
1125         and     \$0x1fffffff, %eax
1126
1127          imulq  16-128($ap),%rbx
1128          add    %rbx,$r3
1129         vpmuludq        $Bi,$TEMP1,$TEMP1
1130          vmovd          %eax, $Yi
1131         vmovdqu         -8+32*3-128($ap),$TEMP0
1132         vpaddq          $TEMP1,$ACC1,$ACC1
1133         vpmuludq        $Bi,$TEMP2,$TEMP2
1134          vpbroadcastq   $Yi, $Yi
1135         vmovdqu         -8+32*4-128($ap),$TEMP1
1136         vpaddq          $TEMP2,$ACC2,$ACC2
1137         vpmuludq        $Bi,$TEMP0,$TEMP0
1138         vmovdqu         -8+32*5-128($ap),$TEMP2
1139         vpaddq          $TEMP0,$ACC3,$ACC3
1140         vpmuludq        $Bi,$TEMP1,$TEMP1
1141         vmovdqu         -8+32*6-128($ap),$TEMP0
1142         vpaddq          $TEMP1,$ACC4,$ACC4
1143         vpmuludq        $Bi,$TEMP2,$TEMP2
1144         vmovdqu         -8+32*7-128($ap),$TEMP1
1145         vpaddq          $TEMP2,$ACC5,$ACC5
1146         vpmuludq        $Bi,$TEMP0,$TEMP0
1147         vmovdqu         -8+32*8-128($ap),$TEMP2
1148         vpaddq          $TEMP0,$ACC6,$ACC6
1149         vpmuludq        $Bi,$TEMP1,$TEMP1
1150         vmovdqu         -8+32*9-128($ap),$ACC9
1151         vpaddq          $TEMP1,$ACC7,$ACC7
1152         vpmuludq        $Bi,$TEMP2,$TEMP2
1153         vpaddq          $TEMP2,$ACC8,$ACC8
1154         vpmuludq        $Bi,$ACC9,$ACC9
1155          vpbroadcastq   16($bp), $Bi
1156
1157         mov     %rax,%rdx
1158         imulq   -128($np),%rax
1159         add     %rax,$r1
1160          vmovdqu        -8+32*1-128($np),$TEMP0
1161         mov     %rdx,%rax
1162         imulq   8-128($np),%rax
1163         add     %rax,$r2
1164          vmovdqu        -8+32*2-128($np),$TEMP1
1165         shr     \$29, $r1
1166         imulq   16-128($np),%rdx
1167         add     %rdx,$r3
1168         add     $r1, $r2
1169
1170         vpmuludq        $Yi,$TEMP0,$TEMP0
1171          vmovq          $Bi, %rbx
1172         vmovdqu         -8+32*3-128($np),$TEMP2
1173         vpaddq          $TEMP0,$ACC1,$ACC1
1174         vpmuludq        $Yi,$TEMP1,$TEMP1
1175         vmovdqu         -8+32*4-128($np),$TEMP0
1176         vpaddq          $TEMP1,$ACC2,$ACC2
1177         vpmuludq        $Yi,$TEMP2,$TEMP2
1178         vmovdqu         -8+32*5-128($np),$TEMP1
1179         vpaddq          $TEMP2,$ACC3,$ACC3
1180         vpmuludq        $Yi,$TEMP0,$TEMP0
1181         vmovdqu         -8+32*6-128($np),$TEMP2
1182         vpaddq          $TEMP0,$ACC4,$ACC4
1183         vpmuludq        $Yi,$TEMP1,$TEMP1
1184         vmovdqu         -8+32*7-128($np),$TEMP0
1185         vpaddq          $TEMP1,$ACC5,$ACC5
1186         vpmuludq        $Yi,$TEMP2,$TEMP2
1187         vmovdqu         -8+32*8-128($np),$TEMP1
1188         vpaddq          $TEMP2,$ACC6,$ACC6
1189         vpmuludq        $Yi,$TEMP0,$TEMP0
1190         vmovdqu         -8+32*9-128($np),$TEMP2
1191         vpaddq          $TEMP0,$ACC7,$ACC7
1192         vpmuludq        $Yi,$TEMP1,$TEMP1
1193         vpaddq          $TEMP1,$ACC8,$ACC8
1194         vpmuludq        $Yi,$TEMP2,$TEMP2
1195         vpaddq          $TEMP2,$ACC9,$ACC9
1196
1197          vmovdqu        -16+32*1-128($ap),$TEMP0
1198         mov     %rbx,%rax
1199         imulq   -128($ap),%rax
1200         add     $r2,%rax
1201
1202          vmovdqu        -16+32*2-128($ap),$TEMP1
1203         mov     %rax,$r2
1204         imull   $n0, %eax
1205         and     \$0x1fffffff, %eax
1206
1207          imulq  8-128($ap),%rbx
1208          add    %rbx,$r3
1209         vpmuludq        $Bi,$TEMP0,$TEMP0
1210          vmovd          %eax, $Yi
1211         vmovdqu         -16+32*3-128($ap),$TEMP2
1212         vpaddq          $TEMP0,$ACC1,$ACC1
1213         vpmuludq        $Bi,$TEMP1,$TEMP1
1214          vpbroadcastq   $Yi, $Yi
1215         vmovdqu         -16+32*4-128($ap),$TEMP0
1216         vpaddq          $TEMP1,$ACC2,$ACC2
1217         vpmuludq        $Bi,$TEMP2,$TEMP2
1218         vmovdqu         -16+32*5-128($ap),$TEMP1
1219         vpaddq          $TEMP2,$ACC3,$ACC3
1220         vpmuludq        $Bi,$TEMP0,$TEMP0
1221         vmovdqu         -16+32*6-128($ap),$TEMP2
1222         vpaddq          $TEMP0,$ACC4,$ACC4
1223         vpmuludq        $Bi,$TEMP1,$TEMP1
1224         vmovdqu         -16+32*7-128($ap),$TEMP0
1225         vpaddq          $TEMP1,$ACC5,$ACC5
1226         vpmuludq        $Bi,$TEMP2,$TEMP2
1227         vmovdqu         -16+32*8-128($ap),$TEMP1
1228         vpaddq          $TEMP2,$ACC6,$ACC6
1229         vpmuludq        $Bi,$TEMP0,$TEMP0
1230         vmovdqu         -16+32*9-128($ap),$TEMP2
1231         vpaddq          $TEMP0,$ACC7,$ACC7
1232         vpmuludq        $Bi,$TEMP1,$TEMP1
1233         vpaddq          $TEMP1,$ACC8,$ACC8
1234         vpmuludq        $Bi,$TEMP2,$TEMP2
1235          vpbroadcastq   24($bp), $Bi
1236         vpaddq          $TEMP2,$ACC9,$ACC9
1237
1238          vmovdqu        -16+32*1-128($np),$TEMP0
1239         mov     %rax,%rdx
1240         imulq   -128($np),%rax
1241         add     %rax,$r2
1242          vmovdqu        -16+32*2-128($np),$TEMP1
1243         imulq   8-128($np),%rdx
1244         add     %rdx,$r3
1245         shr     \$29, $r2
1246
1247         vpmuludq        $Yi,$TEMP0,$TEMP0
1248          vmovq          $Bi, %rbx
1249         vmovdqu         -16+32*3-128($np),$TEMP2
1250         vpaddq          $TEMP0,$ACC1,$ACC1
1251         vpmuludq        $Yi,$TEMP1,$TEMP1
1252         vmovdqu         -16+32*4-128($np),$TEMP0
1253         vpaddq          $TEMP1,$ACC2,$ACC2
1254         vpmuludq        $Yi,$TEMP2,$TEMP2
1255         vmovdqu         -16+32*5-128($np),$TEMP1
1256         vpaddq          $TEMP2,$ACC3,$ACC3
1257         vpmuludq        $Yi,$TEMP0,$TEMP0
1258         vmovdqu         -16+32*6-128($np),$TEMP2
1259         vpaddq          $TEMP0,$ACC4,$ACC4
1260         vpmuludq        $Yi,$TEMP1,$TEMP1
1261         vmovdqu         -16+32*7-128($np),$TEMP0
1262         vpaddq          $TEMP1,$ACC5,$ACC5
1263         vpmuludq        $Yi,$TEMP2,$TEMP2
1264         vmovdqu         -16+32*8-128($np),$TEMP1
1265         vpaddq          $TEMP2,$ACC6,$ACC6
1266         vpmuludq        $Yi,$TEMP0,$TEMP0
1267         vmovdqu         -16+32*9-128($np),$TEMP2
1268         vpaddq          $TEMP0,$ACC7,$ACC7
1269         vpmuludq        $Yi,$TEMP1,$TEMP1
1270          vmovdqu        -24+32*1-128($ap),$TEMP0
1271         vpaddq          $TEMP1,$ACC8,$ACC8
1272         vpmuludq        $Yi,$TEMP2,$TEMP2
1273          vmovdqu        -24+32*2-128($ap),$TEMP1
1274         vpaddq          $TEMP2,$ACC9,$ACC9
1275
1276         add     $r2, $r3
1277         imulq   -128($ap),%rbx
1278         add     %rbx,$r3
1279
1280         mov     $r3, %rax
1281         imull   $n0, %eax
1282         and     \$0x1fffffff, %eax
1283
1284         vpmuludq        $Bi,$TEMP0,$TEMP0
1285          vmovd          %eax, $Yi
1286         vmovdqu         -24+32*3-128($ap),$TEMP2
1287         vpaddq          $TEMP0,$ACC1,$ACC1
1288         vpmuludq        $Bi,$TEMP1,$TEMP1
1289          vpbroadcastq   $Yi, $Yi
1290         vmovdqu         -24+32*4-128($ap),$TEMP0
1291         vpaddq          $TEMP1,$ACC2,$ACC2
1292         vpmuludq        $Bi,$TEMP2,$TEMP2
1293         vmovdqu         -24+32*5-128($ap),$TEMP1
1294         vpaddq          $TEMP2,$ACC3,$ACC3
1295         vpmuludq        $Bi,$TEMP0,$TEMP0
1296         vmovdqu         -24+32*6-128($ap),$TEMP2
1297         vpaddq          $TEMP0,$ACC4,$ACC4
1298         vpmuludq        $Bi,$TEMP1,$TEMP1
1299         vmovdqu         -24+32*7-128($ap),$TEMP0
1300         vpaddq          $TEMP1,$ACC5,$ACC5
1301         vpmuludq        $Bi,$TEMP2,$TEMP2
1302         vmovdqu         -24+32*8-128($ap),$TEMP1
1303         vpaddq          $TEMP2,$ACC6,$ACC6
1304         vpmuludq        $Bi,$TEMP0,$TEMP0
1305         vmovdqu         -24+32*9-128($ap),$TEMP2
1306         vpaddq          $TEMP0,$ACC7,$ACC7
1307         vpmuludq        $Bi,$TEMP1,$TEMP1
1308         vpaddq          $TEMP1,$ACC8,$ACC8
1309         vpmuludq        $Bi,$TEMP2,$TEMP2
1310          vpbroadcastq   32($bp), $Bi
1311         vpaddq          $TEMP2,$ACC9,$ACC9
1312          add            \$32, $bp                       # advance $bp by 4 words (32 bytes)
1313
1314         vmovdqu         -24+32*1-128($np),$TEMP0
1315         imulq   -128($np),%rax
1316         add     %rax,$r3
1317         shr     \$29, $r3
1318
1319         vmovdqu         -24+32*2-128($np),$TEMP1
1320         vpmuludq        $Yi,$TEMP0,$TEMP0
1321          vmovq          $Bi, %rbx
1322         vmovdqu         -24+32*3-128($np),$TEMP2
1323         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1324         vpmuludq        $Yi,$TEMP1,$TEMP1
1325          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1326         vpaddq          $TEMP1,$ACC2,$ACC1
1327         vmovdqu         -24+32*4-128($np),$TEMP0
1328         vpmuludq        $Yi,$TEMP2,$TEMP2
1329         vmovdqu         -24+32*5-128($np),$TEMP1
1330         vpaddq          $TEMP2,$ACC3,$ACC2
1331         vpmuludq        $Yi,$TEMP0,$TEMP0
1332         vmovdqu         -24+32*6-128($np),$TEMP2
1333         vpaddq          $TEMP0,$ACC4,$ACC3
1334         vpmuludq        $Yi,$TEMP1,$TEMP1
1335         vmovdqu         -24+32*7-128($np),$TEMP0
1336         vpaddq          $TEMP1,$ACC5,$ACC4
1337         vpmuludq        $Yi,$TEMP2,$TEMP2
1338         vmovdqu         -24+32*8-128($np),$TEMP1
1339         vpaddq          $TEMP2,$ACC6,$ACC5
1340         vpmuludq        $Yi,$TEMP0,$TEMP0
1341         vmovdqu         -24+32*9-128($np),$TEMP2
1342          mov    $r3, $r0
1343         vpaddq          $TEMP0,$ACC7,$ACC6
1344         vpmuludq        $Yi,$TEMP1,$TEMP1
1345          add    (%rsp), $r0
1346         vpaddq          $TEMP1,$ACC8,$ACC7
1347         vpmuludq        $Yi,$TEMP2,$TEMP2
1348          vmovq  $r3, $TEMP1
1349         vpaddq          $TEMP2,$ACC9,$ACC8
1350
1351         dec     $i
1352         jnz     .Loop_mul_1024
1353 ___
1354
1355 # (*)   The original implementation corrected ACC1-ACC3 for overflow
1356 #       after 7 loop runs, i.e. after 28 iterations, or 56 additions.
1357 #       But since we underutilize resources, it is possible to correct
1358 #       in every iteration at only marginal performance cost. And once
1359 #       the correction is performed in every iteration, fewer digits
1360 #       need correcting, which avoids the performance penalty entirely.
1361 #       Also note that only three digits out of four are corrected;
1362 #       this works because the most significant digit sees fewer additions.
1363
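# (Editorial note)  The vector code below performs this correction on four
# consecutive digits per ymm register: each digit keeps its low 29 bits
# (vpand) while the excess (vpsrlq by 29) is routed to the next digit via the
# vpermq/vpblendd lane rotation, including across register boundaries.  The
# scalar sketch below models one such pass; the helper is purely illustrative
# (its name is made up here and nothing in this generator calls it).  The
# register aliases right after it simply reuse ymm registers that are no
# longer live once the multiplication loop has finished.
sub __illustrate_carry_pass {			# reference model only, never called
	my @d = @_;				# digits, possibly wider than 29 bits
	for my $i (0 .. $#d-1) {
		$d[$i+1] += $d[$i] >> 29;	# pass the spill to the next digit
		$d[$i]   &= 0x1fffffff;		# keep the low 29 bits
	}
	return @d;
}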
1364 $TEMP0 = $ACC9;
1365 $TEMP3 = $Bi;
1366 $TEMP4 = $Yi;
1367 $code.=<<___;
1368         vpermq          \$0, $AND_MASK, $AND_MASK
1369         vpaddq          (%rsp), $TEMP1, $ACC0
1370
1371         vpsrlq          \$29, $ACC0, $TEMP1
1372         vpand           $AND_MASK, $ACC0, $ACC0
1373         vpsrlq          \$29, $ACC1, $TEMP2
1374         vpand           $AND_MASK, $ACC1, $ACC1
1375         vpsrlq          \$29, $ACC2, $TEMP3
1376         vpermq          \$0x93, $TEMP1, $TEMP1
1377         vpand           $AND_MASK, $ACC2, $ACC2
1378         vpsrlq          \$29, $ACC3, $TEMP4
1379         vpermq          \$0x93, $TEMP2, $TEMP2
1380         vpand           $AND_MASK, $ACC3, $ACC3
1381
1382         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1383         vpermq          \$0x93, $TEMP3, $TEMP3
1384         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1385         vpermq          \$0x93, $TEMP4, $TEMP4
1386         vpaddq          $TEMP0, $ACC0, $ACC0
1387         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1388         vpaddq          $TEMP1, $ACC1, $ACC1
1389         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1390         vpaddq          $TEMP2, $ACC2, $ACC2
1391         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1392         vpaddq          $TEMP3, $ACC3, $ACC3
1393         vpaddq          $TEMP4, $ACC4, $ACC4
1394
1395         vpsrlq          \$29, $ACC0, $TEMP1
1396         vpand           $AND_MASK, $ACC0, $ACC0
1397         vpsrlq          \$29, $ACC1, $TEMP2
1398         vpand           $AND_MASK, $ACC1, $ACC1
1399         vpsrlq          \$29, $ACC2, $TEMP3
1400         vpermq          \$0x93, $TEMP1, $TEMP1
1401         vpand           $AND_MASK, $ACC2, $ACC2
1402         vpsrlq          \$29, $ACC3, $TEMP4
1403         vpermq          \$0x93, $TEMP2, $TEMP2
1404         vpand           $AND_MASK, $ACC3, $ACC3
1405         vpermq          \$0x93, $TEMP3, $TEMP3
1406
1407         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1408         vpermq          \$0x93, $TEMP4, $TEMP4
1409         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1410         vpaddq          $TEMP0, $ACC0, $ACC0
1411         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1412         vpaddq          $TEMP1, $ACC1, $ACC1
1413         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1414         vpaddq          $TEMP2, $ACC2, $ACC2
1415         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1416         vpaddq          $TEMP3, $ACC3, $ACC3
1417         vpaddq          $TEMP4, $ACC4, $ACC4
1418
1419         vmovdqu         $ACC0, 0-128($rp)
1420         vmovdqu         $ACC1, 32-128($rp)
1421         vmovdqu         $ACC2, 64-128($rp)
1422         vmovdqu         $ACC3, 96-128($rp)
1423 ___
1424
1425 $TEMP5=$ACC0;
1426 $code.=<<___;
1427         vpsrlq          \$29, $ACC4, $TEMP1
1428         vpand           $AND_MASK, $ACC4, $ACC4
1429         vpsrlq          \$29, $ACC5, $TEMP2
1430         vpand           $AND_MASK, $ACC5, $ACC5
1431         vpsrlq          \$29, $ACC6, $TEMP3
1432         vpermq          \$0x93, $TEMP1, $TEMP1
1433         vpand           $AND_MASK, $ACC6, $ACC6
1434         vpsrlq          \$29, $ACC7, $TEMP4
1435         vpermq          \$0x93, $TEMP2, $TEMP2
1436         vpand           $AND_MASK, $ACC7, $ACC7
1437         vpsrlq          \$29, $ACC8, $TEMP5
1438         vpermq          \$0x93, $TEMP3, $TEMP3
1439         vpand           $AND_MASK, $ACC8, $ACC8
1440         vpermq          \$0x93, $TEMP4, $TEMP4
1441
1442         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1443         vpermq          \$0x93, $TEMP5, $TEMP5
1444         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1445         vpaddq          $TEMP0, $ACC4, $ACC4
1446         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1447         vpaddq          $TEMP1, $ACC5, $ACC5
1448         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1449         vpaddq          $TEMP2, $ACC6, $ACC6
1450         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1451         vpaddq          $TEMP3, $ACC7, $ACC7
1452         vpaddq          $TEMP4, $ACC8, $ACC8
1453
1454         vpsrlq          \$29, $ACC4, $TEMP1
1455         vpand           $AND_MASK, $ACC4, $ACC4
1456         vpsrlq          \$29, $ACC5, $TEMP2
1457         vpand           $AND_MASK, $ACC5, $ACC5
1458         vpsrlq          \$29, $ACC6, $TEMP3
1459         vpermq          \$0x93, $TEMP1, $TEMP1
1460         vpand           $AND_MASK, $ACC6, $ACC6
1461         vpsrlq          \$29, $ACC7, $TEMP4
1462         vpermq          \$0x93, $TEMP2, $TEMP2
1463         vpand           $AND_MASK, $ACC7, $ACC7
1464         vpsrlq          \$29, $ACC8, $TEMP5
1465         vpermq          \$0x93, $TEMP3, $TEMP3
1466         vpand           $AND_MASK, $ACC8, $ACC8
1467         vpermq          \$0x93, $TEMP4, $TEMP4
1468
1469         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1470         vpermq          \$0x93, $TEMP5, $TEMP5
1471         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1472         vpaddq          $TEMP0, $ACC4, $ACC4
1473         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1474         vpaddq          $TEMP1, $ACC5, $ACC5
1475         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1476         vpaddq          $TEMP2, $ACC6, $ACC6
1477         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1478         vpaddq          $TEMP3, $ACC7, $ACC7
1479         vpaddq          $TEMP4, $ACC8, $ACC8
1480
1481         vmovdqu         $ACC4, 128-128($rp)
1482         vmovdqu         $ACC5, 160-128($rp)
1483         vmovdqu         $ACC6, 192-128($rp)
1484         vmovdqu         $ACC7, 224-128($rp)
1485         vmovdqu         $ACC8, 256-128($rp)
1486         vzeroupper
1487
1488         mov     %rbp, %rax
1489 .cfi_def_cfa_register   %rax
1490 ___
1491 $code.=<<___ if ($win64);
1492 .Lmul_1024_in_tail:
1493         movaps  -0xd8(%rax),%xmm6
1494         movaps  -0xc8(%rax),%xmm7
1495         movaps  -0xb8(%rax),%xmm8
1496         movaps  -0xa8(%rax),%xmm9
1497         movaps  -0x98(%rax),%xmm10
1498         movaps  -0x88(%rax),%xmm11
1499         movaps  -0x78(%rax),%xmm12
1500         movaps  -0x68(%rax),%xmm13
1501         movaps  -0x58(%rax),%xmm14
1502         movaps  -0x48(%rax),%xmm15
1503 ___
1504 $code.=<<___;
1505         mov     -48(%rax),%r15
1506 .cfi_restore    %r15
1507         mov     -40(%rax),%r14
1508 .cfi_restore    %r14
1509         mov     -32(%rax),%r13
1510 .cfi_restore    %r13
1511         mov     -24(%rax),%r12
1512 .cfi_restore    %r12
1513         mov     -16(%rax),%rbp
1514 .cfi_restore    %rbp
1515         mov     -8(%rax),%rbx
1516 .cfi_restore    %rbx
1517         lea     (%rax),%rsp             # restore %rsp
1518 .cfi_def_cfa_register   %rsp
1519 .Lmul_1024_epilogue:
1520         ret
1521 .cfi_endproc
1522 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1523 ___
1524 }
1525 {
1526 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1527 my @T = map("%r$_",(8..11));
1528
1529 $code.=<<___;
1530 .globl  rsaz_1024_red2norm_avx2
1531 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1532 .align  32
1533 rsaz_1024_red2norm_avx2:
1534         sub     \$-128,$inp     # size optimization
1535         xor     %rax,%rax
1536 ___
1537
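# The unrolled code emitted below converts the redundant representation
# (thirty-six 29-bit digits, one digit per 64-bit word) back into sixteen
# normal 64-bit limbs: for each output limb it loads every digit overlapping
# that 64-bit window, shifts the digits into position, sums them together
# with the running carry in %rax, and keeps whatever spills past the window
# as the carry for the next limb.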
1538 for ($j=0,$i=0; $i<16; $i++) {
1539     my $k=0;
1540     while (29*$j<64*($i+1)) {   # load data till boundary
1541         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1542         $j++; $k++; push(@T,shift(@T));
1543     }
1544     $l=$k;
1545     while ($k>1) {              # shift loaded data but last value
1546         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1547         $k--;
1548     }
1549     $code.=<<___;               # shift last value
1550         mov     @T[-1], @T[0]
1551         shl     \$`29*($j-1)`, @T[-1]
1552         shr     \$`-29*($j-1)`, @T[0]
1553 ___
1554     while ($l) {                # accumulate all values
1555         $code.="        add     @T[-$l], %rax\n";
1556         $l--;
1557     }
1558         $code.=<<___;
1559         adc     \$0, @T[0]      # consume carry, if any
1560         mov     %rax, 8*$i($out)
1561         mov     @T[0], %rax
1562 ___
1563     push(@T,shift(@T));
1564 }
1565 $code.=<<___;
1566         ret
1567 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1568
1569 .globl  rsaz_1024_norm2red_avx2
1570 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1571 .align  32
1572 rsaz_1024_norm2red_avx2:
1573         sub     \$-128,$out     # size optimization
1574         mov     ($inp),@T[0]
1575         mov     \$0x1fffffff,%eax
1576 ___
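# The loop below emits the inverse conversion: each 64-bit input limb is cut
# into 29-bit digits (shift/and, with shrd for digits straddling a limb
# boundary), every digit is written to its own 64-bit word, and the tail is
# padded with four zero words so that the unaligned 256-bit loads elsewhere
# in this module, which read slightly past the last digit, pick up zeros.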
1577 for ($j=0,$i=0; $i<16; $i++) {
1578     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1579     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1580     my $k=1;
1581     while (29*($j+1)<64*($i+1)) {
1582         $code.=<<___;
1583         mov     @T[0],@T[-$k]
1584         shr     \$`29*$j`,@T[-$k]
1585         and     %rax,@T[-$k]                            # &0x1fffffff
1586         mov     @T[-$k],`8*$j-128`($out)
1587 ___
1588         $j++; $k++;
1589     }
1590     $code.=<<___;
1591         shrd    \$`29*$j`,@T[1],@T[0]
1592         and     %rax,@T[0]
1593         mov     @T[0],`8*$j-128`($out)
1594 ___
1595     $j++;
1596     push(@T,shift(@T));
1597 }
1598 $code.=<<___;
1599         mov     @T[0],`8*$j-128`($out)                  # zero
1600         mov     @T[0],`8*($j+1)-128`($out)
1601         mov     @T[0],`8*($j+2)-128`($out)
1602         mov     @T[0],`8*($j+3)-128`($out)
1603         ret
1604 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1605 ___
1606 }
1607 {
1608 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1609
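# rsaz_1024_scatter5_avx2 stores one 1024-bit value (in redundant form) into
# a table of 32 interleaved entries: vpermd packs four 29-bit digits into
# 16 bytes, and consecutive 16-byte pieces of the same entry sit 512 bytes
# (16*32) apart.  rsaz_1024_gather5_avx2 reads an entry back without an
# index-dependent memory access pattern, as a cache-timing countermeasure:
# it builds equality masks for the requested index, then touches every line
# of every table row, AND-ing each line with its mask and OR-ing the results.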
1610 $code.=<<___;
1611 .globl  rsaz_1024_scatter5_avx2
1612 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1613 .align  32
1614 rsaz_1024_scatter5_avx2:
1615         vzeroupper
1616         vmovdqu .Lscatter_permd(%rip),%ymm5
1617         shl     \$4,$power
1618         lea     ($out,$power),$out
1619         mov     \$9,%eax
1620         jmp     .Loop_scatter_1024
1621
1622 .align  32
1623 .Loop_scatter_1024:
1624         vmovdqu         ($inp),%ymm0
1625         lea             32($inp),$inp
1626         vpermd          %ymm0,%ymm5,%ymm0
1627         vmovdqu         %xmm0,($out)
1628         lea             16*32($out),$out
1629         dec     %eax
1630         jnz     .Loop_scatter_1024
1631
1632         vzeroupper
1633         ret
1634 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1635
1636 .globl  rsaz_1024_gather5_avx2
1637 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1638 .align  32
1639 rsaz_1024_gather5_avx2:
1640 .cfi_startproc
1641         vzeroupper
1642         mov     %rsp,%r11
1643 .cfi_def_cfa_register   %r11
1644 ___
1645 $code.=<<___ if ($win64);
1646         lea     -0x88(%rsp),%rax
1647 .LSEH_begin_rsaz_1024_gather5:
1648         # I can't trust the assembler to use a specific encoding :-(
1649         .byte   0x48,0x8d,0x60,0xe0             # lea   -0x20(%rax),%rsp
1650         .byte   0xc5,0xf8,0x29,0x70,0xe0        # vmovaps %xmm6,-0x20(%rax)
1651         .byte   0xc5,0xf8,0x29,0x78,0xf0        # vmovaps %xmm7,-0x10(%rax)
1652         .byte   0xc5,0x78,0x29,0x40,0x00        # vmovaps %xmm8,0(%rax)
1653         .byte   0xc5,0x78,0x29,0x48,0x10        # vmovaps %xmm9,0x10(%rax)
1654         .byte   0xc5,0x78,0x29,0x50,0x20        # vmovaps %xmm10,0x20(%rax)
1655         .byte   0xc5,0x78,0x29,0x58,0x30        # vmovaps %xmm11,0x30(%rax)
1656         .byte   0xc5,0x78,0x29,0x60,0x40        # vmovaps %xmm12,0x40(%rax)
1657         .byte   0xc5,0x78,0x29,0x68,0x50        # vmovaps %xmm13,0x50(%rax)
1658         .byte   0xc5,0x78,0x29,0x70,0x60        # vmovaps %xmm14,0x60(%rax)
1659         .byte   0xc5,0x78,0x29,0x78,0x70        # vmovaps %xmm15,0x70(%rax)
1660 ___
1661 $code.=<<___;
1662         lea     -0x100(%rsp),%rsp
1663         and     \$-32, %rsp
1664         lea     .Linc(%rip), %r10
1665         lea     -128(%rsp),%rax                 # control u-op density
1666
1667         vmovd           $power, %xmm4
1668         vmovdqa         (%r10),%ymm0
1669         vmovdqa         32(%r10),%ymm1
1670         vmovdqa         64(%r10),%ymm5
1671         vpbroadcastd    %xmm4,%ymm4
1672
1673         vpaddd          %ymm5, %ymm0, %ymm2
1674         vpcmpeqd        %ymm4, %ymm0, %ymm0
1675         vpaddd          %ymm5, %ymm1, %ymm3
1676         vpcmpeqd        %ymm4, %ymm1, %ymm1
1677         vmovdqa         %ymm0, 32*0+128(%rax)
1678         vpaddd          %ymm5, %ymm2, %ymm0
1679         vpcmpeqd        %ymm4, %ymm2, %ymm2
1680         vmovdqa         %ymm1, 32*1+128(%rax)
1681         vpaddd          %ymm5, %ymm3, %ymm1
1682         vpcmpeqd        %ymm4, %ymm3, %ymm3
1683         vmovdqa         %ymm2, 32*2+128(%rax)
1684         vpaddd          %ymm5, %ymm0, %ymm2
1685         vpcmpeqd        %ymm4, %ymm0, %ymm0
1686         vmovdqa         %ymm3, 32*3+128(%rax)
1687         vpaddd          %ymm5, %ymm1, %ymm3
1688         vpcmpeqd        %ymm4, %ymm1, %ymm1
1689         vmovdqa         %ymm0, 32*4+128(%rax)
1690         vpaddd          %ymm5, %ymm2, %ymm8
1691         vpcmpeqd        %ymm4, %ymm2, %ymm2
1692         vmovdqa         %ymm1, 32*5+128(%rax)
1693         vpaddd          %ymm5, %ymm3, %ymm9
1694         vpcmpeqd        %ymm4, %ymm3, %ymm3
1695         vmovdqa         %ymm2, 32*6+128(%rax)
1696         vpaddd          %ymm5, %ymm8, %ymm10
1697         vpcmpeqd        %ymm4, %ymm8, %ymm8
1698         vmovdqa         %ymm3, 32*7+128(%rax)
1699         vpaddd          %ymm5, %ymm9, %ymm11
1700         vpcmpeqd        %ymm4, %ymm9, %ymm9
1701         vpaddd          %ymm5, %ymm10, %ymm12
1702         vpcmpeqd        %ymm4, %ymm10, %ymm10
1703         vpaddd          %ymm5, %ymm11, %ymm13
1704         vpcmpeqd        %ymm4, %ymm11, %ymm11
1705         vpaddd          %ymm5, %ymm12, %ymm14
1706         vpcmpeqd        %ymm4, %ymm12, %ymm12
1707         vpaddd          %ymm5, %ymm13, %ymm15
1708         vpcmpeqd        %ymm4, %ymm13, %ymm13
1709         vpcmpeqd        %ymm4, %ymm14, %ymm14
1710         vpcmpeqd        %ymm4, %ymm15, %ymm15
1711
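#	Each of the 9 iterations below gathers one 32-byte chunk of the
#	selected entry: every 32-byte line of the current table row is AND-ed
#	with its precomputed selection mask and the results are OR-ed
#	together, so the sequence of memory accesses does not depend on the
#	requested index.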
1712         vmovdqa -32(%r10),%ymm7                 # .Lgather_permd
1713         lea     128($inp), $inp
1714         mov     \$9,$power
1715
1716 .Loop_gather_1024:
1717         vmovdqa         32*0-128($inp), %ymm0
1718         vmovdqa         32*1-128($inp), %ymm1
1719         vmovdqa         32*2-128($inp), %ymm2
1720         vmovdqa         32*3-128($inp), %ymm3
1721         vpand           32*0+128(%rax), %ymm0,  %ymm0
1722         vpand           32*1+128(%rax), %ymm1,  %ymm1
1723         vpand           32*2+128(%rax), %ymm2,  %ymm2
1724         vpor            %ymm0, %ymm1, %ymm4
1725         vpand           32*3+128(%rax), %ymm3,  %ymm3
1726         vmovdqa         32*4-128($inp), %ymm0
1727         vmovdqa         32*5-128($inp), %ymm1
1728         vpor            %ymm2, %ymm3, %ymm5
1729         vmovdqa         32*6-128($inp), %ymm2
1730         vmovdqa         32*7-128($inp), %ymm3
1731         vpand           32*4+128(%rax), %ymm0,  %ymm0
1732         vpand           32*5+128(%rax), %ymm1,  %ymm1
1733         vpand           32*6+128(%rax), %ymm2,  %ymm2
1734         vpor            %ymm0, %ymm4, %ymm4
1735         vpand           32*7+128(%rax), %ymm3,  %ymm3
1736         vpand           32*8-128($inp), %ymm8,  %ymm0
1737         vpor            %ymm1, %ymm5, %ymm5
1738         vpand           32*9-128($inp), %ymm9,  %ymm1
1739         vpor            %ymm2, %ymm4, %ymm4
1740         vpand           32*10-128($inp),%ymm10, %ymm2
1741         vpor            %ymm3, %ymm5, %ymm5
1742         vpand           32*11-128($inp),%ymm11, %ymm3
1743         vpor            %ymm0, %ymm4, %ymm4
1744         vpand           32*12-128($inp),%ymm12, %ymm0
1745         vpor            %ymm1, %ymm5, %ymm5
1746         vpand           32*13-128($inp),%ymm13, %ymm1
1747         vpor            %ymm2, %ymm4, %ymm4
1748         vpand           32*14-128($inp),%ymm14, %ymm2
1749         vpor            %ymm3, %ymm5, %ymm5
1750         vpand           32*15-128($inp),%ymm15, %ymm3
1751         lea             32*16($inp), $inp
1752         vpor            %ymm0, %ymm4, %ymm4
1753         vpor            %ymm1, %ymm5, %ymm5
1754         vpor            %ymm2, %ymm4, %ymm4
1755         vpor            %ymm3, %ymm5, %ymm5
1756
1757         vpor            %ymm5, %ymm4, %ymm4
1758         vextracti128    \$1, %ymm4, %xmm5       # upper half is cleared
1759         vpor            %xmm4, %xmm5, %xmm5
1760         vpermd          %ymm5,%ymm7,%ymm5
1761         vmovdqu         %ymm5,($out)
1762         lea             32($out),$out
1763         dec     $power
1764         jnz     .Loop_gather_1024
1765
1766         vpxor   %ymm0,%ymm0,%ymm0
1767         vmovdqu %ymm0,($out)
1768         vzeroupper
1769 ___
1770 $code.=<<___ if ($win64);
1771         movaps  -0xa8(%r11),%xmm6
1772         movaps  -0x98(%r11),%xmm7
1773         movaps  -0x88(%r11),%xmm8
1774         movaps  -0x78(%r11),%xmm9
1775         movaps  -0x68(%r11),%xmm10
1776         movaps  -0x58(%r11),%xmm11
1777         movaps  -0x48(%r11),%xmm12
1778         movaps  -0x38(%r11),%xmm13
1779         movaps  -0x28(%r11),%xmm14
1780         movaps  -0x18(%r11),%xmm15
1781 ___
1782 $code.=<<___;
1783         lea     (%r11),%rsp
1784 .cfi_def_cfa_register   %rsp
1785         ret
1786 .cfi_endproc
1787 .LSEH_end_rsaz_1024_gather5:
1788 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1789 ___
1790 }
1791
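# rsaz_avx2_eligible reports whether this module should be used: it returns
# the AVX2 capability bit from OPENSSL_ia32cap_P and, in builds where the
# assembler supports ADCX/ADOX ($addx), deliberately reports "not eligible"
# on processors that also have BMI2+ADX, presumably so that the MULX/ADX
# Montgomery code is preferred instead.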
1792 $code.=<<___;
1793 .extern OPENSSL_ia32cap_P
1794 .globl  rsaz_avx2_eligible
1795 .type   rsaz_avx2_eligible,\@abi-omnipotent
1796 .align  32
1797 rsaz_avx2_eligible:
1798         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1799 ___
1800 $code.=<<___    if ($addx);
1801         mov     \$`1<<8|1<<19`,%ecx
1802         mov     \$0,%edx
1803         and     %eax,%ecx
1804         cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
1805         cmove   %edx,%eax
1806 ___
1807 $code.=<<___;
1808         and     \$`1<<5`,%eax
1809         shr     \$5,%eax
1810         ret
1811 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1812
1813 .align  64
1814 .Land_mask:
1815         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1816 .Lscatter_permd:
1817         .long   0,2,4,6,7,7,7,7
1818 .Lgather_permd:
1819         .long   0,7,1,7,2,7,3,7
1820 .Linc:
1821         .long   0,0,0,0, 1,1,1,1
1822         .long   2,2,2,2, 3,3,3,3
1823         .long   4,4,4,4, 4,4,4,4
1824 .align  64
1825 ___
1826
1827 if ($win64) {
1828 $rec="%rcx";
1829 $frame="%rdx";
1830 $context="%r8";
1831 $disp="%r9";
1832
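# Boilerplate Win64 SEH handler: it checks whether the faulting RIP lies
# before the prologue or past the epilogue (nothing to restore in that case),
# otherwise recovers the frame pointer, copies the saved general-purpose
# registers and the %xmm6-%xmm15 save area back into the CONTEXT record, and
# finally defers to RtlVirtualUnwind.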
1833 $code.=<<___;
1834 .extern __imp_RtlVirtualUnwind
1835 .type   rsaz_se_handler,\@abi-omnipotent
1836 .align  16
1837 rsaz_se_handler:
1838         push    %rsi
1839         push    %rdi
1840         push    %rbx
1841         push    %rbp
1842         push    %r12
1843         push    %r13
1844         push    %r14
1845         push    %r15
1846         pushfq
1847         sub     \$64,%rsp
1848
1849         mov     120($context),%rax      # pull context->Rax
1850         mov     248($context),%rbx      # pull context->Rip
1851
1852         mov     8($disp),%rsi           # disp->ImageBase
1853         mov     56($disp),%r11          # disp->HandlerData
1854
1855         mov     0(%r11),%r10d           # HandlerData[0]
1856         lea     (%rsi,%r10),%r10        # prologue label
1857         cmp     %r10,%rbx               # context->Rip<prologue label
1858         jb      .Lcommon_seh_tail
1859
1860         mov     4(%r11),%r10d           # HandlerData[1]
1861         lea     (%rsi,%r10),%r10        # epilogue label
1862         cmp     %r10,%rbx               # context->Rip>=epilogue label
1863         jae     .Lcommon_seh_tail
1864
1865         mov     160($context),%rbp      # pull context->Rbp
1866
1867         mov     8(%r11),%r10d           # HandlerData[2]
1868         lea     (%rsi,%r10),%r10        # "in tail" label
1869         cmp     %r10,%rbx               # context->Rip>="in tail" label
1870         cmovc   %rbp,%rax
1871
1872         mov     -48(%rax),%r15
1873         mov     -40(%rax),%r14
1874         mov     -32(%rax),%r13
1875         mov     -24(%rax),%r12
1876         mov     -16(%rax),%rbp
1877         mov     -8(%rax),%rbx
1878         mov     %r15,240($context)
1879         mov     %r14,232($context)
1880         mov     %r13,224($context)
1881         mov     %r12,216($context)
1882         mov     %rbp,160($context)
1883         mov     %rbx,144($context)
1884
1885         lea     -0xd8(%rax),%rsi        # %xmm save area
1886         lea     512($context),%rdi      # & context.Xmm6
1887         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1888         .long   0xa548f3fc              # cld; rep movsq
1889
1890 .Lcommon_seh_tail:
1891         mov     8(%rax),%rdi
1892         mov     16(%rax),%rsi
1893         mov     %rax,152($context)      # restore context->Rsp
1894         mov     %rsi,168($context)      # restore context->Rsi
1895         mov     %rdi,176($context)      # restore context->Rdi
1896
1897         mov     40($disp),%rdi          # disp->ContextRecord
1898         mov     $context,%rsi           # context
1899         mov     \$154,%ecx              # sizeof(CONTEXT)
1900         .long   0xa548f3fc              # cld; rep movsq
1901
1902         mov     $disp,%rsi
1903         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1904         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1905         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1906         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1907         mov     40(%rsi),%r10           # disp->ContextRecord
1908         lea     56(%rsi),%r11           # &disp->HandlerData
1909         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1910         mov     %r10,32(%rsp)           # arg5
1911         mov     %r11,40(%rsp)           # arg6
1912         mov     %r12,48(%rsp)           # arg7
1913         mov     %rcx,56(%rsp)           # arg8, (NULL)
1914         call    *__imp_RtlVirtualUnwind(%rip)
1915
1916         mov     \$1,%eax                # ExceptionContinueSearch
1917         add     \$64,%rsp
1918         popfq
1919         pop     %r15
1920         pop     %r14
1921         pop     %r13
1922         pop     %r12
1923         pop     %rbp
1924         pop     %rbx
1925         pop     %rdi
1926         pop     %rsi
1927         ret
1928 .size   rsaz_se_handler,.-rsaz_se_handler
1929
1930 .section        .pdata
1931 .align  4
1932         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1933         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1934         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1935
1936         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1937         .rva    .LSEH_end_rsaz_1024_mul_avx2
1938         .rva    .LSEH_info_rsaz_1024_mul_avx2
1939
1940         .rva    .LSEH_begin_rsaz_1024_gather5
1941         .rva    .LSEH_end_rsaz_1024_gather5
1942         .rva    .LSEH_info_rsaz_1024_gather5
1943 .section        .xdata
1944 .align  8
1945 .LSEH_info_rsaz_1024_sqr_avx2:
1946         .byte   9,0,0,0
1947         .rva    rsaz_se_handler
1948         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
1949         .long   0
1950 .LSEH_info_rsaz_1024_mul_avx2:
1951         .byte   9,0,0,0
1952         .rva    rsaz_se_handler
1953         .rva    .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
1954         .long   0
1955 .LSEH_info_rsaz_1024_gather5:
1956         .byte   0x01,0x36,0x17,0x0b
1957         .byte   0x36,0xf8,0x09,0x00     # vmovaps 0x90(rsp),xmm15
1958         .byte   0x31,0xe8,0x08,0x00     # vmovaps 0x80(rsp),xmm14
1959         .byte   0x2c,0xd8,0x07,0x00     # vmovaps 0x70(rsp),xmm13
1960         .byte   0x27,0xc8,0x06,0x00     # vmovaps 0x60(rsp),xmm12
1961         .byte   0x22,0xb8,0x05,0x00     # vmovaps 0x50(rsp),xmm11
1962         .byte   0x1d,0xa8,0x04,0x00     # vmovaps 0x40(rsp),xmm10
1963         .byte   0x18,0x98,0x03,0x00     # vmovaps 0x30(rsp),xmm9
1964         .byte   0x13,0x88,0x02,0x00     # vmovaps 0x20(rsp),xmm8
1965         .byte   0x0e,0x78,0x01,0x00     # vmovaps 0x10(rsp),xmm7
1966         .byte   0x09,0x68,0x00,0x00     # vmovaps 0x00(rsp),xmm6
1967         .byte   0x04,0x01,0x15,0x00     # sub     rsp,0xa8
1968         .byte   0x00,0xb3,0x00,0x00     # set_frame r11
1969 ___
1970 }
1971
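# Post-process the generated code: evaluate the `...` expressions, reduce
# shift counts modulo 64, and rewrite %ymm operands to %xmm for instructions
# that actually take xmm operands (vmovd/vmovq, vpinsr*/vpextr*,
# vpbroadcast*, and the "%x%ymm" spelling of vmovdqu), so the rest of the
# file can refer to registers uniformly by their ymm names.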
1972 foreach (split("\n",$code)) {
1973         s/\`([^\`]*)\`/eval($1)/ge;
1974
1975         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1976
1977         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1978         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1979         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1980         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1981         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1982         print $_,"\n";
1983 }
1984
1985 }}} else {{{
1986 print <<___;    # assembler is too old
1987 .text
1988
1989 .globl  rsaz_avx2_eligible
1990 .type   rsaz_avx2_eligible,\@abi-omnipotent
1991 rsaz_avx2_eligible:
1992         xor     %eax,%eax
1993         ret
1994 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1995
1996 .globl  rsaz_1024_sqr_avx2
1997 .globl  rsaz_1024_mul_avx2
1998 .globl  rsaz_1024_norm2red_avx2
1999 .globl  rsaz_1024_red2norm_avx2
2000 .globl  rsaz_1024_scatter5_avx2
2001 .globl  rsaz_1024_gather5_avx2
2002 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
2003 rsaz_1024_sqr_avx2:
2004 rsaz_1024_mul_avx2:
2005 rsaz_1024_norm2red_avx2:
2006 rsaz_1024_red2norm_avx2:
2007 rsaz_1024_scatter5_avx2:
2008 rsaz_1024_gather5_avx2:
2009         .byte   0x0f,0x0b       # ud2
2010         ret
2011 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
2012 ___
2013 }}}
2014
2015 close STDOUT;