#! /usr/bin/env perl
# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


##############################################################################
#                                                                            #
#  Copyright (c) 2012, Intel Corporation                                     #
#                                                                            #
#  All rights reserved.                                                      #
#                                                                            #
#  Redistribution and use in source and binary forms, with or without        #
#  modification, are permitted provided that the following conditions are    #
#  met:                                                                      #
#                                                                            #
#  *  Redistributions of source code must retain the above copyright         #
#     notice, this list of conditions and the following disclaimer.          #
#                                                                            #
#  *  Redistributions in binary form must reproduce the above copyright      #
#     notice, this list of conditions and the following disclaimer in the    #
#     documentation and/or other materials provided with the                 #
#     distribution.                                                          #
#                                                                            #
#  *  Neither the name of the Intel Corporation nor the names of its         #
#     contributors may be used to endorse or promote products derived from   #
#     this software without specific prior written permission.               #
#                                                                            #
#                                                                            #
#  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY          #
#  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         #
#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        #
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            #
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     #
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       #
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        #
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    #
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      #
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        #
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              #
#                                                                            #
##############################################################################
# Developers and authors:                                                    #
# Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
# (1) Intel Corporation, Israel Development Center, Haifa, Israel            #
# (2) University of Haifa, Israel                                            #
##############################################################################
# Reference:                                                                 #
# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular             #
#     Exponentiation,  Using Advanced Vector Instructions Architectures",    #
#     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,   #
#     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012              #
# [2] S. Gueron: "Efficient Software Implementations of Modular              #
#     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).  #
# [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE        #
#     Proceedings of 9th International Conference on Information Technology: #
#     New Generations (ITNG 2012), pp.821-823 (2012)                         #
# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    #
#     resistant 1024-bit modular exponentiation, for optimizing RSA2048      #
#     on AVX2 capable x86_64 platforms",                                     #
#     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
##############################################################################
#
# +13% improvement over original submission by <appro@openssl.org>
#
# rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
# 2.3GHz Haswell        621             765/+23%        1113/+79%
# 2.3GHz Broadwell(**)  688             1200(***)/+74%  1120/+63%
#
# (*)   if system doesn't support AVX2, for reference purposes;
# (**)  scaled to 2.3GHz to simplify comparison;
# (***) scalar AD*X code is faster than AVX2 and is preferred code
#       path for Broadwell;
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
                =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
        $avx = ($1>=2.19) + ($1>=2.22);
        $addx = ($1>=2.23);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
        $avx = ($1>=2.09) + ($1>=2.10);
        $addx = ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
        $avx = ($1>=10) + ($1>=11);
        $addx = ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
        my $ver = $2 + $3/100.0;        # 3.1->3.01, 3.10->3.10
        $avx = ($ver>=3.0) + ($ver>=3.01);
        $addx = ($ver>=3.03);
}
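
# Note on the probes above: $avx ends up 0 (no AVX), 1 (AVX) or 2 (AVX2);
# the AVX2 code below is emitted only when $avx>1. $addx flags ADCX/ADOX
# support and is probed here the same way as in sibling perlasm modules.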

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT = *OUT;

if ($avx>1) {{{
{ # void AMS_WW(
my $rp="%rdi";  # BN_ULONG *rp,
my $ap="%rsi";  # const BN_ULONG *ap,
my $np="%rdx";  # const BN_ULONG *np,
my $n0="%ecx";  # const BN_ULONG n0,
my $rep="%r8d"; # int repeat);

# The registers that hold the accumulated redundant result.
# The AMM works on 1024-bit operands and the redundant word size is 29 bits,
# so ceil(1024/29) = 36 digits are carried, 4 digits per ymm register,
# hence 36/4 = 9 registers (see the reference sketch below).
my $ACC0="%ymm0";
my $ACC1="%ymm1";
my $ACC2="%ymm2";
my $ACC3="%ymm3";
my $ACC4="%ymm4";
my $ACC5="%ymm5";
my $ACC6="%ymm6";
my $ACC7="%ymm7";
my $ACC8="%ymm8";
my $ACC9="%ymm9";
# Registers that hold the broadcasted words of bp, currently used
my $B1="%ymm10";
my $B2="%ymm11";
# Registers that hold the broadcasted words of Y, currently used
my $Y1="%ymm12";
my $Y2="%ymm13";
# Helper registers
my $TEMP1="%ymm14";
my $AND_MASK="%ymm15";
# alu registers that hold the first words of the ACC
my $r0="%r9";
my $r1="%r10";
my $r2="%r11";
my $r3="%r12";

my $i="%r14d";                  # loop counter
my $tmp = "%r15";

my $FrameSize=32*18+32*8;       # place for A^2 and 2*A

my $aap=$r0;
my $tp0="%rbx";
my $tp1=$r3;
my $tpa=$tmp;

$np="%r13";                     # reassigned argument

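# A minimal reference sketch (not used by the generator) of the redundant
# representation above: a 1024-bit number becomes 36 digits of 29 bits,
# one digit per 64-bit lane, i.e. 4 digits per ymm register. The helper
# name is illustrative only.
sub _ref_to_digits29 {
        my $x = shift;                          # e.g. a Math::BigInt value
        my @d;
        for (1..36) {
                push @d, $x & 0x1fffffff;       # take low 29 bits
                $x >>= 29;
        }
        return @d;                              # least significant digit first
}
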
$code.=<<___;
.text

.globl  rsaz_1024_sqr_avx2
.type   rsaz_1024_sqr_avx2,\@function,5
.align  64
rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
        lea     (%rsp), %rax
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        vzeroupper
___
$code.=<<___ if ($win64);
        lea     -0xa8(%rsp),%rsp
        vmovaps %xmm6,-0xd8(%rax)
        vmovaps %xmm7,-0xc8(%rax)
        vmovaps %xmm8,-0xb8(%rax)
        vmovaps %xmm9,-0xa8(%rax)
        vmovaps %xmm10,-0x98(%rax)
        vmovaps %xmm11,-0x88(%rax)
        vmovaps %xmm12,-0x78(%rax)
        vmovaps %xmm13,-0x68(%rax)
        vmovaps %xmm14,-0x58(%rax)
        vmovaps %xmm15,-0x48(%rax)
.Lsqr_1024_body:
___
$code.=<<___;
        mov     %rax,%rbp
        mov     %rdx, $np                       # reassigned argument
        sub     \$$FrameSize, %rsp
        mov     $np, $tmp
        sub     \$-128, $rp                     # size optimization
        sub     \$-128, $ap
        sub     \$-128, $np

        and     \$4095, $tmp                    # see if $np crosses page
        add     \$32*10, $tmp
        shr     \$12, $tmp
        vpxor   $ACC9,$ACC9,$ACC9
        jz      .Lsqr_1024_no_n_copy

        # an unaligned 256-bit load that crosses a page boundary can
        # cause >2x performance degradation here, so if $np does
        # cross a page boundary, copy it to the stack and make sure the
        # stack frame doesn't cross one either (a scalar model of this
        # page-crossing test follows this code block)...
        sub             \$32*10,%rsp
        vmovdqu         32*0-128($np), $ACC0
        and             \$-2048, %rsp
        vmovdqu         32*1-128($np), $ACC1
        vmovdqu         32*2-128($np), $ACC2
        vmovdqu         32*3-128($np), $ACC3
        vmovdqu         32*4-128($np), $ACC4
        vmovdqu         32*5-128($np), $ACC5
        vmovdqu         32*6-128($np), $ACC6
        vmovdqu         32*7-128($np), $ACC7
        vmovdqu         32*8-128($np), $ACC8
        lea             $FrameSize+128(%rsp),$np
        vmovdqu         $ACC0, 32*0-128($np)
        vmovdqu         $ACC1, 32*1-128($np)
        vmovdqu         $ACC2, 32*2-128($np)
        vmovdqu         $ACC3, 32*3-128($np)
        vmovdqu         $ACC4, 32*4-128($np)
        vmovdqu         $ACC5, 32*5-128($np)
        vmovdqu         $ACC6, 32*6-128($np)
        vmovdqu         $ACC7, 32*7-128($np)
        vmovdqu         $ACC8, 32*8-128($np)
        vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero

.Lsqr_1024_no_n_copy:
        and             \$-1024, %rsp

        vmovdqu         32*1-128($ap), $ACC1
        vmovdqu         32*2-128($ap), $ACC2
        vmovdqu         32*3-128($ap), $ACC3
        vmovdqu         32*4-128($ap), $ACC4
        vmovdqu         32*5-128($ap), $ACC5
        vmovdqu         32*6-128($ap), $ACC6
        vmovdqu         32*7-128($ap), $ACC7
        vmovdqu         32*8-128($ap), $ACC8

        lea     192(%rsp), $tp0                 # 64+128=192
        vpbroadcastq    .Land_mask(%rip), $AND_MASK
        jmp     .LOOP_GRANDE_SQR_1024

.align  32
.LOOP_GRANDE_SQR_1024:
        lea     32*18+128(%rsp), $aap           # size optimization
        lea     448(%rsp), $tp1                 # 64+128+256=448

        # the squaring is performed as described in Variant B of
        # "Speeding up Big-Number Squaring", so start by calculating
        # the A*2=A+A vector (a reference model follows the squaring
        # loop below)
        vpaddq          $ACC1, $ACC1, $ACC1
         vpbroadcastq   32*0-128($ap), $B1
        vpaddq          $ACC2, $ACC2, $ACC2
        vmovdqa         $ACC1, 32*0-128($aap)
        vpaddq          $ACC3, $ACC3, $ACC3
        vmovdqa         $ACC2, 32*1-128($aap)
        vpaddq          $ACC4, $ACC4, $ACC4
        vmovdqa         $ACC3, 32*2-128($aap)
        vpaddq          $ACC5, $ACC5, $ACC5
        vmovdqa         $ACC4, 32*3-128($aap)
        vpaddq          $ACC6, $ACC6, $ACC6
        vmovdqa         $ACC5, 32*4-128($aap)
        vpaddq          $ACC7, $ACC7, $ACC7
        vmovdqa         $ACC6, 32*5-128($aap)
        vpaddq          $ACC8, $ACC8, $ACC8
        vmovdqa         $ACC7, 32*6-128($aap)
        vpxor           $ACC9, $ACC9, $ACC9
        vmovdqa         $ACC8, 32*7-128($aap)

        vpmuludq        32*0-128($ap), $B1, $ACC0
         vpbroadcastq   32*1-128($ap), $B2
         vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
        vpmuludq        $B1, $ACC1, $ACC1
         vmovdqu        $ACC9, 32*10-448($tp1)
        vpmuludq        $B1, $ACC2, $ACC2
         vmovdqu        $ACC9, 32*11-448($tp1)
        vpmuludq        $B1, $ACC3, $ACC3
         vmovdqu        $ACC9, 32*12-448($tp1)
        vpmuludq        $B1, $ACC4, $ACC4
         vmovdqu        $ACC9, 32*13-448($tp1)
        vpmuludq        $B1, $ACC5, $ACC5
         vmovdqu        $ACC9, 32*14-448($tp1)
        vpmuludq        $B1, $ACC6, $ACC6
         vmovdqu        $ACC9, 32*15-448($tp1)
        vpmuludq        $B1, $ACC7, $ACC7
         vmovdqu        $ACC9, 32*16-448($tp1)
        vpmuludq        $B1, $ACC8, $ACC8
         vpbroadcastq   32*2-128($ap), $B1
         vmovdqu        $ACC9, 32*17-448($tp1)

        mov     $ap, $tpa
        mov     \$4, $i
        jmp     .Lsqr_entry_1024
___
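
# Scalar model (not used by the generator) of the page-crossing test
# above: a 32*10-byte operand starting at some address crosses a 4KB
# page iff its low 12 address bits plus the operand size spill into
# bit 12, which is exactly what the and/add/shr sequence computes.
sub _ref_crosses_page {
        my $addr = shift;                       # illustrative only
        return ((($addr & 4095) + 32*10) >> 12) != 0;
}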
$TEMP0=$Y1;
$TEMP2=$Y2;
$code.=<<___;
.align  32
.LOOP_SQR_1024:
         vpbroadcastq   32*1-128($tpa), $B2
        vpmuludq        32*0-128($ap), $B1, $ACC0
        vpaddq          32*0-192($tp0), $ACC0, $ACC0
        vpmuludq        32*0-128($aap), $B1, $ACC1
        vpaddq          32*1-192($tp0), $ACC1, $ACC1
        vpmuludq        32*1-128($aap), $B1, $ACC2
        vpaddq          32*2-192($tp0), $ACC2, $ACC2
        vpmuludq        32*2-128($aap), $B1, $ACC3
        vpaddq          32*3-192($tp0), $ACC3, $ACC3
        vpmuludq        32*3-128($aap), $B1, $ACC4
        vpaddq          32*4-192($tp0), $ACC4, $ACC4
        vpmuludq        32*4-128($aap), $B1, $ACC5
        vpaddq          32*5-192($tp0), $ACC5, $ACC5
        vpmuludq        32*5-128($aap), $B1, $ACC6
        vpaddq          32*6-192($tp0), $ACC6, $ACC6
        vpmuludq        32*6-128($aap), $B1, $ACC7
        vpaddq          32*7-192($tp0), $ACC7, $ACC7
        vpmuludq        32*7-128($aap), $B1, $ACC8
         vpbroadcastq   32*2-128($tpa), $B1
        vpaddq          32*8-192($tp0), $ACC8, $ACC8
.Lsqr_entry_1024:
        vmovdqu         $ACC0, 32*0-192($tp0)
        vmovdqu         $ACC1, 32*1-192($tp0)

        vpmuludq        32*1-128($ap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC2, $ACC2
        vpmuludq        32*1-128($aap), $B2, $TEMP1
        vpaddq          $TEMP1, $ACC3, $ACC3
        vpmuludq        32*2-128($aap), $B2, $TEMP2
        vpaddq          $TEMP2, $ACC4, $ACC4
        vpmuludq        32*3-128($aap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC5, $ACC5
        vpmuludq        32*4-128($aap), $B2, $TEMP1
        vpaddq          $TEMP1, $ACC6, $ACC6
        vpmuludq        32*5-128($aap), $B2, $TEMP2
        vpaddq          $TEMP2, $ACC7, $ACC7
        vpmuludq        32*6-128($aap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC8, $ACC8
        vpmuludq        32*7-128($aap), $B2, $ACC0
         vpbroadcastq   32*3-128($tpa), $B2
        vpaddq          32*9-192($tp0), $ACC0, $ACC0

        vmovdqu         $ACC2, 32*2-192($tp0)
        vmovdqu         $ACC3, 32*3-192($tp0)

        vpmuludq        32*2-128($ap), $B1, $TEMP2
        vpaddq          $TEMP2, $ACC4, $ACC4
        vpmuludq        32*2-128($aap), $B1, $TEMP0
        vpaddq          $TEMP0, $ACC5, $ACC5
        vpmuludq        32*3-128($aap), $B1, $TEMP1
        vpaddq          $TEMP1, $ACC6, $ACC6
        vpmuludq        32*4-128($aap), $B1, $TEMP2
        vpaddq          $TEMP2, $ACC7, $ACC7
        vpmuludq        32*5-128($aap), $B1, $TEMP0
        vpaddq          $TEMP0, $ACC8, $ACC8
        vpmuludq        32*6-128($aap), $B1, $TEMP1
        vpaddq          $TEMP1, $ACC0, $ACC0
        vpmuludq        32*7-128($aap), $B1, $ACC1
         vpbroadcastq   32*4-128($tpa), $B1
        vpaddq          32*10-448($tp1), $ACC1, $ACC1

        vmovdqu         $ACC4, 32*4-192($tp0)
        vmovdqu         $ACC5, 32*5-192($tp0)

        vpmuludq        32*3-128($ap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC6, $ACC6
        vpmuludq        32*3-128($aap), $B2, $TEMP1
        vpaddq          $TEMP1, $ACC7, $ACC7
        vpmuludq        32*4-128($aap), $B2, $TEMP2
        vpaddq          $TEMP2, $ACC8, $ACC8
        vpmuludq        32*5-128($aap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC0, $ACC0
        vpmuludq        32*6-128($aap), $B2, $TEMP1
        vpaddq          $TEMP1, $ACC1, $ACC1
        vpmuludq        32*7-128($aap), $B2, $ACC2
         vpbroadcastq   32*5-128($tpa), $B2
        vpaddq          32*11-448($tp1), $ACC2, $ACC2

        vmovdqu         $ACC6, 32*6-192($tp0)
        vmovdqu         $ACC7, 32*7-192($tp0)

        vpmuludq        32*4-128($ap), $B1, $TEMP0
        vpaddq          $TEMP0, $ACC8, $ACC8
        vpmuludq        32*4-128($aap), $B1, $TEMP1
        vpaddq          $TEMP1, $ACC0, $ACC0
        vpmuludq        32*5-128($aap), $B1, $TEMP2
        vpaddq          $TEMP2, $ACC1, $ACC1
        vpmuludq        32*6-128($aap), $B1, $TEMP0
        vpaddq          $TEMP0, $ACC2, $ACC2
        vpmuludq        32*7-128($aap), $B1, $ACC3
         vpbroadcastq   32*6-128($tpa), $B1
        vpaddq          32*12-448($tp1), $ACC3, $ACC3

        vmovdqu         $ACC8, 32*8-192($tp0)
        vmovdqu         $ACC0, 32*9-192($tp0)
        lea             8($tp0), $tp0

        vpmuludq        32*5-128($ap), $B2, $TEMP2
        vpaddq          $TEMP2, $ACC1, $ACC1
        vpmuludq        32*5-128($aap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC2, $ACC2
        vpmuludq        32*6-128($aap), $B2, $TEMP1
        vpaddq          $TEMP1, $ACC3, $ACC3
        vpmuludq        32*7-128($aap), $B2, $ACC4
         vpbroadcastq   32*7-128($tpa), $B2
        vpaddq          32*13-448($tp1), $ACC4, $ACC4

        vmovdqu         $ACC1, 32*10-448($tp1)
        vmovdqu         $ACC2, 32*11-448($tp1)

        vpmuludq        32*6-128($ap), $B1, $TEMP0
        vpaddq          $TEMP0, $ACC3, $ACC3
        vpmuludq        32*6-128($aap), $B1, $TEMP1
         vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
        vpaddq          $TEMP1, $ACC4, $ACC4
        vpmuludq        32*7-128($aap), $B1, $ACC5
         vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
        vpaddq          32*14-448($tp1), $ACC5, $ACC5

        vmovdqu         $ACC3, 32*12-448($tp1)
        vmovdqu         $ACC4, 32*13-448($tp1)
        lea             8($tpa), $tpa

        vpmuludq        32*7-128($ap), $B2, $TEMP0
        vpaddq          $TEMP0, $ACC5, $ACC5
        vpmuludq        32*7-128($aap), $B2, $ACC6
        vpaddq          32*15-448($tp1), $ACC6, $ACC6

        vpmuludq        32*8-128($ap), $ACC0, $ACC7
        vmovdqu         $ACC5, 32*14-448($tp1)
        vpaddq          32*16-448($tp1), $ACC7, $ACC7
        vmovdqu         $ACC6, 32*15-448($tp1)
        vmovdqu         $ACC7, 32*16-448($tp1)
        lea             8($tp1), $tp1

        dec     $i
        jnz     .LOOP_SQR_1024
___
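
# Reference model (not used by the generator) of the Variant-B squaring
# above: with aa[] = 2*a[] precomputed, a^2 = sum_i a[i]^2*B^(2i) +
# sum_{i<j} a[i]*aa[j]*B^(i+j), which halves the number of distinct
# cross products. Digits are base B = 2^29; names are illustrative only.
sub _ref_square_variant_b {
        my @a  = @_;                            # input digits
        my @aa = map { 2*$_ } @a;               # the A*2=A+A vector
        my @t  = (0) x (2*@a);
        for my $i (0..$#a) {
                $t[2*$i] += $a[$i]*$a[$i];      # diagonal term
                $t[$i+$_] += $a[$i]*$aa[$_] for $i+1..$#a;      # doubled cross terms
        }
        return @t;                              # carries not yet propagated
}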
$ZERO = $ACC9;
$TEMP0 = $B1;
$TEMP2 = $B2;
$TEMP3 = $Y1;
$TEMP4 = $Y2;
$code.=<<___;
        # we need to fix indices 32-39 to avoid overflow
        vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
        vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
        vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
        lea             192(%rsp), $tp0                 # 64+128=192

        vpsrlq          \$29, $ACC8, $TEMP1
        vpand           $AND_MASK, $ACC8, $ACC8
        vpsrlq          \$29, $ACC1, $TEMP2
        vpand           $AND_MASK, $ACC1, $ACC1

        vpermq          \$0x93, $TEMP1, $TEMP1
        vpxor           $ZERO, $ZERO, $ZERO
        vpermq          \$0x93, $TEMP2, $TEMP2

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC8, $ACC8
        vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
        vpaddq          $TEMP1, $ACC1, $ACC1
        vpaddq          $TEMP2, $ACC2, $ACC2
        vmovdqu         $ACC1, 32*9-192($tp0)
        vmovdqu         $ACC2, 32*10-192($tp0)

        mov     (%rsp), %rax
        mov     8(%rsp), $r1
        mov     16(%rsp), $r2
        mov     24(%rsp), $r3
        vmovdqu 32*1(%rsp), $ACC1
        vmovdqu 32*2-192($tp0), $ACC2
        vmovdqu 32*3-192($tp0), $ACC3
        vmovdqu 32*4-192($tp0), $ACC4
        vmovdqu 32*5-192($tp0), $ACC5
        vmovdqu 32*6-192($tp0), $ACC6
        vmovdqu 32*7-192($tp0), $ACC7

        mov     %rax, $r0
        imull   $n0, %eax
        and     \$0x1fffffff, %eax
        vmovd   %eax, $Y1

        mov     %rax, %rdx
        imulq   -128($np), %rax
         vpbroadcastq   $Y1, $Y1
        add     %rax, $r0
        mov     %rdx, %rax
        imulq   8-128($np), %rax
        shr     \$29, $r0
        add     %rax, $r1
        mov     %rdx, %rax
        imulq   16-128($np), %rax
        add     $r0, $r1
        add     %rax, $r2
        imulq   24-128($np), %rdx
        add     %rdx, $r3

        mov     $r1, %rax
        imull   $n0, %eax
        and     \$0x1fffffff, %eax

        mov \$9, $i
        jmp .LOOP_REDUCE_1024

.align  32
.LOOP_REDUCE_1024:
        vmovd   %eax, $Y2
        vpbroadcastq    $Y2, $Y2

        vpmuludq        32*1-128($np), $Y1, $TEMP0
         mov    %rax, %rdx
         imulq  -128($np), %rax
        vpaddq          $TEMP0, $ACC1, $ACC1
         add    %rax, $r1
        vpmuludq        32*2-128($np), $Y1, $TEMP1
         mov    %rdx, %rax
         imulq  8-128($np), %rax
        vpaddq          $TEMP1, $ACC2, $ACC2
        vpmuludq        32*3-128($np), $Y1, $TEMP2
         .byte  0x67
         add    %rax, $r2
         .byte  0x67
         mov    %rdx, %rax
         imulq  16-128($np), %rax
         shr    \$29, $r1
        vpaddq          $TEMP2, $ACC3, $ACC3
        vpmuludq        32*4-128($np), $Y1, $TEMP0
         add    %rax, $r3
         add    $r1, $r2
        vpaddq          $TEMP0, $ACC4, $ACC4
        vpmuludq        32*5-128($np), $Y1, $TEMP1
         mov    $r2, %rax
         imull  $n0, %eax
        vpaddq          $TEMP1, $ACC5, $ACC5
        vpmuludq        32*6-128($np), $Y1, $TEMP2
         and    \$0x1fffffff, %eax
        vpaddq          $TEMP2, $ACC6, $ACC6
        vpmuludq        32*7-128($np), $Y1, $TEMP0
        vpaddq          $TEMP0, $ACC7, $ACC7
        vpmuludq        32*8-128($np), $Y1, $TEMP1
         vmovd  %eax, $Y1
         #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
        vpaddq          $TEMP1, $ACC8, $ACC8
         #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
         vpbroadcastq   $Y1, $Y1

        vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
        vmovdqu         32*3-8-128($np), $TEMP1
         mov    %rax, %rdx
         imulq  -128($np), %rax
        vpaddq          $TEMP2, $ACC1, $ACC1
        vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
        vmovdqu         32*4-8-128($np), $TEMP2
         add    %rax, $r2
         mov    %rdx, %rax
         imulq  8-128($np), %rax
        vpaddq          $TEMP0, $ACC2, $ACC2
         add    $r3, %rax
         shr    \$29, $r2
        vpmuludq        $Y2, $TEMP1, $TEMP1
        vmovdqu         32*5-8-128($np), $TEMP0
         add    $r2, %rax
        vpaddq          $TEMP1, $ACC3, $ACC3
        vpmuludq        $Y2, $TEMP2, $TEMP2
        vmovdqu         32*6-8-128($np), $TEMP1
         .byte  0x67
         mov    %rax, $r3
         imull  $n0, %eax
        vpaddq          $TEMP2, $ACC4, $ACC4
        vpmuludq        $Y2, $TEMP0, $TEMP0
        .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
         and    \$0x1fffffff, %eax
        vpaddq          $TEMP0, $ACC5, $ACC5
        vpmuludq        $Y2, $TEMP1, $TEMP1
        vmovdqu         32*8-8-128($np), $TEMP0
        vpaddq          $TEMP1, $ACC6, $ACC6
        vpmuludq        $Y2, $TEMP2, $TEMP2
        vmovdqu         32*9-8-128($np), $ACC9
         vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
         imulq  -128($np), %rax
        vpaddq          $TEMP2, $ACC7, $ACC7
        vpmuludq        $Y2, $TEMP0, $TEMP0
         vmovdqu        32*1-16-128($np), $TEMP1
         vpbroadcastq   $ACC0, $ACC0
        vpaddq          $TEMP0, $ACC8, $ACC8
        vpmuludq        $Y2, $ACC9, $ACC9
         vmovdqu        32*2-16-128($np), $TEMP2
         add    %rax, $r3

___
($ACC0,$Y2)=($Y2,$ACC0);
$code.=<<___;
         vmovdqu        32*1-24-128($np), $ACC0
        vpmuludq        $Y1, $TEMP1, $TEMP1
        vmovdqu         32*3-16-128($np), $TEMP0
        vpaddq          $TEMP1, $ACC1, $ACC1
         vpmuludq       $Y2, $ACC0, $ACC0
        vpmuludq        $Y1, $TEMP2, $TEMP2
        .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
         vpaddq         $ACC1, $ACC0, $ACC0
        vpaddq          $TEMP2, $ACC2, $ACC2
        vpmuludq        $Y1, $TEMP0, $TEMP0
        vmovdqu         32*5-16-128($np), $TEMP2
         .byte  0x67
         vmovq          $ACC0, %rax
         vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
        vpaddq          $TEMP0, $ACC3, $ACC3
        vpmuludq        $Y1, $TEMP1, $TEMP1
        vmovdqu         32*6-16-128($np), $TEMP0
        vpaddq          $TEMP1, $ACC4, $ACC4
        vpmuludq        $Y1, $TEMP2, $TEMP2
        vmovdqu         32*7-16-128($np), $TEMP1
        vpaddq          $TEMP2, $ACC5, $ACC5
        vpmuludq        $Y1, $TEMP0, $TEMP0
        vmovdqu         32*8-16-128($np), $TEMP2
        vpaddq          $TEMP0, $ACC6, $ACC6
        vpmuludq        $Y1, $TEMP1, $TEMP1
         shr    \$29, $r3
        vmovdqu         32*9-16-128($np), $TEMP0
         add    $r3, %rax
        vpaddq          $TEMP1, $ACC7, $ACC7
        vpmuludq        $Y1, $TEMP2, $TEMP2
         #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
         mov    %rax, $r0
         imull  $n0, %eax
        vpaddq          $TEMP2, $ACC8, $ACC8
        vpmuludq        $Y1, $TEMP0, $TEMP0
         and    \$0x1fffffff, %eax
         vmovd  %eax, $Y1
         vmovdqu        32*3-24-128($np), $TEMP2
        .byte   0x67
        vpaddq          $TEMP0, $ACC9, $ACC9
         vpbroadcastq   $Y1, $Y1

        vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
        vmovdqu         32*4-24-128($np), $TEMP0
         mov    %rax, %rdx
         imulq  -128($np), %rax
         mov    8(%rsp), $r1
        vpaddq          $TEMP1, $ACC2, $ACC1
        vpmuludq        $Y2, $TEMP2, $TEMP2
        vmovdqu         32*5-24-128($np), $TEMP1
         add    %rax, $r0
         mov    %rdx, %rax
         imulq  8-128($np), %rax
         .byte  0x67
         shr    \$29, $r0
         mov    16(%rsp), $r2
        vpaddq          $TEMP2, $ACC3, $ACC2
        vpmuludq        $Y2, $TEMP0, $TEMP0
        vmovdqu         32*6-24-128($np), $TEMP2
         add    %rax, $r1
         mov    %rdx, %rax
         imulq  16-128($np), %rax
        vpaddq          $TEMP0, $ACC4, $ACC3
        vpmuludq        $Y2, $TEMP1, $TEMP1
        vmovdqu         32*7-24-128($np), $TEMP0
         imulq  24-128($np), %rdx               # future $r3
         add    %rax, $r2
         lea    ($r0,$r1), %rax
        vpaddq          $TEMP1, $ACC5, $ACC4
        vpmuludq        $Y2, $TEMP2, $TEMP2
        vmovdqu         32*8-24-128($np), $TEMP1
         mov    %rax, $r1
         imull  $n0, %eax
        vpmuludq        $Y2, $TEMP0, $TEMP0
        vpaddq          $TEMP2, $ACC6, $ACC5
        vmovdqu         32*9-24-128($np), $TEMP2
         and    \$0x1fffffff, %eax
        vpaddq          $TEMP0, $ACC7, $ACC6
        vpmuludq        $Y2, $TEMP1, $TEMP1
         add    24(%rsp), %rdx
        vpaddq          $TEMP1, $ACC8, $ACC7
        vpmuludq        $Y2, $TEMP2, $TEMP2
        vpaddq          $TEMP2, $ACC9, $ACC8
         vmovq  $r3, $ACC9
         mov    %rdx, $r3

        dec     $i
        jnz     .LOOP_REDUCE_1024
___
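
# Reference model (not used by the generator) of one scalar step of the
# word-by-word Montgomery reduction driven above: with B = 2^29 and
# n0 = -n^-1 mod B, y = t[0]*n0 mod B makes t + y*n divisible by B, so
# the running value can be shifted down one digit. Illustrative only.
sub _ref_reduce_step {
        my ($t, $n, $n0) = @_;          # digit array refs and scalar n0
        my $y = ($t->[0] * $n0) & 0x1fffffff;
        $t->[$_] += $y * $n->[$_] for 0..$#$n;
        $t->[1] += $t->[0] >> 29;       # t[0] is now 0 mod 2^29
        shift @$t;                      # divide by B
        return $t;
}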
($ACC0,$Y2)=($Y2,$ACC0);
$code.=<<___;
        lea     448(%rsp), $tp1                 # size optimization
        vpaddq  $ACC9, $Y2, $ACC0
        vpxor   $ZERO, $ZERO, $ZERO

        vpaddq          32*9-192($tp0), $ACC0, $ACC0
        vpaddq          32*10-448($tp1), $ACC1, $ACC1
        vpaddq          32*11-448($tp1), $ACC2, $ACC2
        vpaddq          32*12-448($tp1), $ACC3, $ACC3
        vpaddq          32*13-448($tp1), $ACC4, $ACC4
        vpaddq          32*14-448($tp1), $ACC5, $ACC5
        vpaddq          32*15-448($tp1), $ACC6, $ACC6
        vpaddq          32*16-448($tp1), $ACC7, $ACC7
        vpaddq          32*17-448($tp1), $ACC8, $ACC8

        vpsrlq          \$29, $ACC0, $TEMP1
        vpand           $AND_MASK, $ACC0, $ACC0
        vpsrlq          \$29, $ACC1, $TEMP2
        vpand           $AND_MASK, $ACC1, $ACC1
        vpsrlq          \$29, $ACC2, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC2, $ACC2
        vpsrlq          \$29, $ACC3, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC3, $ACC3
        vpermq          \$0x93, $TEMP3, $TEMP3

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP4, $TEMP4
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC0, $ACC0
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC1, $ACC1
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC2, $ACC2
        vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
        vpaddq          $TEMP3, $ACC3, $ACC3
        vpaddq          $TEMP4, $ACC4, $ACC4

        vpsrlq          \$29, $ACC0, $TEMP1
        vpand           $AND_MASK, $ACC0, $ACC0
        vpsrlq          \$29, $ACC1, $TEMP2
        vpand           $AND_MASK, $ACC1, $ACC1
        vpsrlq          \$29, $ACC2, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC2, $ACC2
        vpsrlq          \$29, $ACC3, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC3, $ACC3
        vpermq          \$0x93, $TEMP3, $TEMP3

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP4, $TEMP4
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC0, $ACC0
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC1, $ACC1
        vmovdqu         $ACC0, 32*0-128($rp)
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC2, $ACC2
        vmovdqu         $ACC1, 32*1-128($rp)
        vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
        vpaddq          $TEMP3, $ACC3, $ACC3
        vmovdqu         $ACC2, 32*2-128($rp)
        vpaddq          $TEMP4, $ACC4, $ACC4
        vmovdqu         $ACC3, 32*3-128($rp)
___
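
# Scalar model (not used by the generator) of the branchless carry
# normalization above: every digit keeps its low 29 bits and passes the
# excess to the next digit; the vpermq $0x93/vpblendd pairs implement
# the "move carries one lane up" step across ymm registers.
sub _ref_normalize {
        my @d = @_;
        my $carry = 0;
        for my $i (0..$#d) {
                $d[$i] += $carry;
                $carry  = $d[$i] >> 29;
                $d[$i] &= 0x1fffffff;
        }
        return @d;      # a nonzero final $carry would start a new digit
}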
$TEMP5=$ACC0;
$code.=<<___;
        vpsrlq          \$29, $ACC4, $TEMP1
        vpand           $AND_MASK, $ACC4, $ACC4
        vpsrlq          \$29, $ACC5, $TEMP2
        vpand           $AND_MASK, $ACC5, $ACC5
        vpsrlq          \$29, $ACC6, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC6, $ACC6
        vpsrlq          \$29, $ACC7, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC7, $ACC7
        vpsrlq          \$29, $ACC8, $TEMP5
        vpermq          \$0x93, $TEMP3, $TEMP3
        vpand           $AND_MASK, $ACC8, $ACC8
        vpermq          \$0x93, $TEMP4, $TEMP4

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP5, $TEMP5
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC4, $ACC4
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC5, $ACC5
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC6, $ACC6
        vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
        vpaddq          $TEMP3, $ACC7, $ACC7
        vpaddq          $TEMP4, $ACC8, $ACC8

        vpsrlq          \$29, $ACC4, $TEMP1
        vpand           $AND_MASK, $ACC4, $ACC4
        vpsrlq          \$29, $ACC5, $TEMP2
        vpand           $AND_MASK, $ACC5, $ACC5
        vpsrlq          \$29, $ACC6, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC6, $ACC6
        vpsrlq          \$29, $ACC7, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC7, $ACC7
        vpsrlq          \$29, $ACC8, $TEMP5
        vpermq          \$0x93, $TEMP3, $TEMP3
        vpand           $AND_MASK, $ACC8, $ACC8
        vpermq          \$0x93, $TEMP4, $TEMP4

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP5, $TEMP5
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC4, $ACC4
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC5, $ACC5
        vmovdqu         $ACC4, 32*4-128($rp)
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC6, $ACC6
        vmovdqu         $ACC5, 32*5-128($rp)
        vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
        vpaddq          $TEMP3, $ACC7, $ACC7
        vmovdqu         $ACC6, 32*6-128($rp)
        vpaddq          $TEMP4, $ACC8, $ACC8
        vmovdqu         $ACC7, 32*7-128($rp)
        vmovdqu         $ACC8, 32*8-128($rp)

        mov     $rp, $ap
        dec     $rep
        jne     .LOOP_GRANDE_SQR_1024

        vzeroall
        mov     %rbp, %rax
___
$code.=<<___ if ($win64);
.Lsqr_1024_in_tail:
        movaps  -0xd8(%rax),%xmm6
        movaps  -0xc8(%rax),%xmm7
        movaps  -0xb8(%rax),%xmm8
        movaps  -0xa8(%rax),%xmm9
        movaps  -0x98(%rax),%xmm10
        movaps  -0x88(%rax),%xmm11
        movaps  -0x78(%rax),%xmm12
        movaps  -0x68(%rax),%xmm13
        movaps  -0x58(%rax),%xmm14
        movaps  -0x48(%rax),%xmm15
___
$code.=<<___;
        mov     -48(%rax),%r15
        mov     -40(%rax),%r14
        mov     -32(%rax),%r13
        mov     -24(%rax),%r12
        mov     -16(%rax),%rbp
        mov     -8(%rax),%rbx
        lea     (%rax),%rsp             # restore %rsp
.Lsqr_1024_epilogue:
        ret
.size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___
}

{ # void AMM_WW(
my $rp="%rdi";  # BN_ULONG *rp,
my $ap="%rsi";  # const BN_ULONG *ap,
my $bp="%rdx";  # const BN_ULONG *bp,
my $np="%rcx";  # const BN_ULONG *np,
my $n0="%r8d";  # unsigned int n0);

# The registers that hold the accumulated redundant result.
# The AMM works on 1024-bit operands and the redundant word size is 29 bits,
# so ceil(1024/29) = 36 digits, 4 per ymm register, hence 9 registers
# (as in the squaring routine above).
my $ACC0="%ymm0";
my $ACC1="%ymm1";
my $ACC2="%ymm2";
my $ACC3="%ymm3";
my $ACC4="%ymm4";
my $ACC5="%ymm5";
my $ACC6="%ymm6";
my $ACC7="%ymm7";
my $ACC8="%ymm8";
my $ACC9="%ymm9";

# Registers that hold the broadcasted words of multiplier, currently used
my $Bi="%ymm10";
my $Yi="%ymm11";

# Helper registers
my $TEMP0=$ACC0;
my $TEMP1="%ymm12";
my $TEMP2="%ymm13";
my $ZERO="%ymm14";
my $AND_MASK="%ymm15";

# alu registers that hold the first words of the ACC
my $r0="%r9";
my $r1="%r10";
my $r2="%r11";
my $r3="%r12";

my $i="%r14d";
my $tmp="%r15";

$bp="%r13";     # reassigned argument

$code.=<<___;
.globl  rsaz_1024_mul_avx2
.type   rsaz_1024_mul_avx2,\@function,5
.align  64
rsaz_1024_mul_avx2:
        lea     (%rsp), %rax
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
___
$code.=<<___ if ($win64);
        vzeroupper
        lea     -0xa8(%rsp),%rsp
        vmovaps %xmm6,-0xd8(%rax)
        vmovaps %xmm7,-0xc8(%rax)
        vmovaps %xmm8,-0xb8(%rax)
        vmovaps %xmm9,-0xa8(%rax)
        vmovaps %xmm10,-0x98(%rax)
        vmovaps %xmm11,-0x88(%rax)
        vmovaps %xmm12,-0x78(%rax)
        vmovaps %xmm13,-0x68(%rax)
        vmovaps %xmm14,-0x58(%rax)
        vmovaps %xmm15,-0x48(%rax)
.Lmul_1024_body:
___
$code.=<<___;
        mov     %rax,%rbp
        vzeroall
        mov     %rdx, $bp       # reassigned argument
        sub     \$64,%rsp

        # an unaligned 256-bit load that crosses a page boundary can
        # cause severe performance degradation here, so if $ap does
        # cross a page boundary, swap it with $bp [meaning that the caller
        # is advised to lay down $ap and $bp next to each other, so
        # that only one of them can cross a page boundary].
        .byte   0x67,0x67
        mov     $ap, $tmp
        and     \$4095, $tmp
        add     \$32*10, $tmp
        shr     \$12, $tmp
        mov     $ap, $tmp
        cmovnz  $bp, $ap
        cmovnz  $tmp, $bp

        mov     $np, $tmp
        sub     \$-128,$ap      # size optimization
        sub     \$-128,$np
        sub     \$-128,$rp

        and     \$4095, $tmp    # see if $np crosses page
        add     \$32*10, $tmp
        .byte   0x67,0x67
        shr     \$12, $tmp
        jz      .Lmul_1024_no_n_copy

        # an unaligned 256-bit load that crosses a page boundary can
        # cause severe performance degradation here, so if $np does
        # cross a page boundary, copy it to the stack and make sure the
        # stack frame doesn't cross one either...
        sub             \$32*10,%rsp
        vmovdqu         32*0-128($np), $ACC0
        and             \$-512, %rsp
        vmovdqu         32*1-128($np), $ACC1
        vmovdqu         32*2-128($np), $ACC2
        vmovdqu         32*3-128($np), $ACC3
        vmovdqu         32*4-128($np), $ACC4
        vmovdqu         32*5-128($np), $ACC5
        vmovdqu         32*6-128($np), $ACC6
        vmovdqu         32*7-128($np), $ACC7
        vmovdqu         32*8-128($np), $ACC8
        lea             64+128(%rsp),$np
        vmovdqu         $ACC0, 32*0-128($np)
        vpxor           $ACC0, $ACC0, $ACC0
        vmovdqu         $ACC1, 32*1-128($np)
        vpxor           $ACC1, $ACC1, $ACC1
        vmovdqu         $ACC2, 32*2-128($np)
        vpxor           $ACC2, $ACC2, $ACC2
        vmovdqu         $ACC3, 32*3-128($np)
        vpxor           $ACC3, $ACC3, $ACC3
        vmovdqu         $ACC4, 32*4-128($np)
        vpxor           $ACC4, $ACC4, $ACC4
        vmovdqu         $ACC5, 32*5-128($np)
        vpxor           $ACC5, $ACC5, $ACC5
        vmovdqu         $ACC6, 32*6-128($np)
        vpxor           $ACC6, $ACC6, $ACC6
        vmovdqu         $ACC7, 32*7-128($np)
        vpxor           $ACC7, $ACC7, $ACC7
        vmovdqu         $ACC8, 32*8-128($np)
        vmovdqa         $ACC0, $ACC8
        vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
.Lmul_1024_no_n_copy:
        and     \$-64,%rsp

        mov     ($bp), %rbx
        vpbroadcastq ($bp), $Bi
        vmovdqu $ACC0, (%rsp)                   # clear top of stack
        xor     $r0, $r0
        .byte   0x67
        xor     $r1, $r1
        xor     $r2, $r2
        xor     $r3, $r3

        vmovdqu .Land_mask(%rip), $AND_MASK
        mov     \$9, $i
        vmovdqu $ACC9, 32*9-128($rp)            # $ACC9 is zero after vzeroall
        jmp     .Loop_mul_1024

.align  32
.Loop_mul_1024:
         vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
        mov     %rbx, %rax
        imulq   -128($ap), %rax
        add     $r0, %rax
        mov     %rbx, $r1
        imulq   8-128($ap), $r1
        add     8(%rsp), $r1

        mov     %rax, $r0
        imull   $n0, %eax
        and     \$0x1fffffff, %eax

         mov    %rbx, $r2
         imulq  16-128($ap), $r2
         add    16(%rsp), $r2

         mov    %rbx, $r3
         imulq  24-128($ap), $r3
         add    24(%rsp), $r3
        vpmuludq        32*1-128($ap),$Bi,$TEMP0
         vmovd          %eax, $Yi
        vpaddq          $TEMP0,$ACC1,$ACC1
        vpmuludq        32*2-128($ap),$Bi,$TEMP1
         vpbroadcastq   $Yi, $Yi
        vpaddq          $TEMP1,$ACC2,$ACC2
        vpmuludq        32*3-128($ap),$Bi,$TEMP2
         vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
        vpaddq          $TEMP2,$ACC3,$ACC3
        vpmuludq        32*4-128($ap),$Bi,$TEMP0
        vpaddq          $TEMP0,$ACC4,$ACC4
        vpmuludq        32*5-128($ap),$Bi,$TEMP1
        vpaddq          $TEMP1,$ACC5,$ACC5
        vpmuludq        32*6-128($ap),$Bi,$TEMP2
        vpaddq          $TEMP2,$ACC6,$ACC6
        vpmuludq        32*7-128($ap),$Bi,$TEMP0
         vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
        vpaddq          $TEMP0,$ACC7,$ACC7
        vpmuludq        32*8-128($ap),$Bi,$TEMP1
         vpbroadcastq   8($bp), $Bi
        vpaddq          $TEMP1,$ACC8,$ACC8

        mov     %rax,%rdx
        imulq   -128($np),%rax
        add     %rax,$r0
        mov     %rdx,%rax
        imulq   8-128($np),%rax
        add     %rax,$r1
        mov     %rdx,%rax
        imulq   16-128($np),%rax
        add     %rax,$r2
        shr     \$29, $r0
        imulq   24-128($np),%rdx
        add     %rdx,$r3
        add     $r0, $r1

        vpmuludq        32*1-128($np),$Yi,$TEMP2
         vmovq          $Bi, %rbx
        vpaddq          $TEMP2,$ACC1,$ACC1
        vpmuludq        32*2-128($np),$Yi,$TEMP0
        vpaddq          $TEMP0,$ACC2,$ACC2
        vpmuludq        32*3-128($np),$Yi,$TEMP1
        vpaddq          $TEMP1,$ACC3,$ACC3
        vpmuludq        32*4-128($np),$Yi,$TEMP2
        vpaddq          $TEMP2,$ACC4,$ACC4
        vpmuludq        32*5-128($np),$Yi,$TEMP0
        vpaddq          $TEMP0,$ACC5,$ACC5
        vpmuludq        32*6-128($np),$Yi,$TEMP1
        vpaddq          $TEMP1,$ACC6,$ACC6
        vpmuludq        32*7-128($np),$Yi,$TEMP2
         vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
        vpaddq          $TEMP2,$ACC7,$ACC7
        vpmuludq        32*8-128($np),$Yi,$TEMP0
         vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
        vpaddq          $TEMP0,$ACC8,$ACC8

        mov     %rbx, %rax
        imulq   -128($ap),%rax
        add     %rax,$r1
         vmovdqu        -8+32*1-128($ap),$TEMP1
        mov     %rbx, %rax
        imulq   8-128($ap),%rax
        add     %rax,$r2
         vmovdqu        -8+32*2-128($ap),$TEMP2

        mov     $r1, %rax
        imull   $n0, %eax
        and     \$0x1fffffff, %eax

         imulq  16-128($ap),%rbx
         add    %rbx,$r3
        vpmuludq        $Bi,$TEMP1,$TEMP1
         vmovd          %eax, $Yi
        vmovdqu         -8+32*3-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC1,$ACC1
        vpmuludq        $Bi,$TEMP2,$TEMP2
         vpbroadcastq   $Yi, $Yi
        vmovdqu         -8+32*4-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC2,$ACC2
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -8+32*5-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC3,$ACC3
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vmovdqu         -8+32*6-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC4,$ACC4
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vmovdqu         -8+32*7-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC5,$ACC5
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -8+32*8-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC6,$ACC6
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vmovdqu         -8+32*9-128($ap),$ACC9
        vpaddq          $TEMP1,$ACC7,$ACC7
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vpaddq          $TEMP2,$ACC8,$ACC8
        vpmuludq        $Bi,$ACC9,$ACC9
         vpbroadcastq   16($bp), $Bi

        mov     %rax,%rdx
        imulq   -128($np),%rax
        add     %rax,$r1
         vmovdqu        -8+32*1-128($np),$TEMP0
        mov     %rdx,%rax
        imulq   8-128($np),%rax
        add     %rax,$r2
         vmovdqu        -8+32*2-128($np),$TEMP1
        shr     \$29, $r1
        imulq   16-128($np),%rdx
        add     %rdx,$r3
        add     $r1, $r2

        vpmuludq        $Yi,$TEMP0,$TEMP0
         vmovq          $Bi, %rbx
        vmovdqu         -8+32*3-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC1,$ACC1
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vmovdqu         -8+32*4-128($np),$TEMP0
        vpaddq          $TEMP1,$ACC2,$ACC2
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -8+32*5-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC3,$ACC3
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -8+32*6-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC4,$ACC4
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vmovdqu         -8+32*7-128($np),$TEMP0
        vpaddq          $TEMP1,$ACC5,$ACC5
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -8+32*8-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC6,$ACC6
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -8+32*9-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC7,$ACC7
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vpaddq          $TEMP1,$ACC8,$ACC8
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vpaddq          $TEMP2,$ACC9,$ACC9

         vmovdqu        -16+32*1-128($ap),$TEMP0
        mov     %rbx,%rax
        imulq   -128($ap),%rax
        add     $r2,%rax

         vmovdqu        -16+32*2-128($ap),$TEMP1
        mov     %rax,$r2
        imull   $n0, %eax
        and     \$0x1fffffff, %eax

         imulq  8-128($ap),%rbx
         add    %rbx,$r3
        vpmuludq        $Bi,$TEMP0,$TEMP0
         vmovd          %eax, $Yi
        vmovdqu         -16+32*3-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC1,$ACC1
        vpmuludq        $Bi,$TEMP1,$TEMP1
         vpbroadcastq   $Yi, $Yi
        vmovdqu         -16+32*4-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC2,$ACC2
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vmovdqu         -16+32*5-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC3,$ACC3
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -16+32*6-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC4,$ACC4
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vmovdqu         -16+32*7-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC5,$ACC5
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vmovdqu         -16+32*8-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC6,$ACC6
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -16+32*9-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC7,$ACC7
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vpaddq          $TEMP1,$ACC8,$ACC8
        vpmuludq        $Bi,$TEMP2,$TEMP2
         vpbroadcastq   24($bp), $Bi
        vpaddq          $TEMP2,$ACC9,$ACC9

         vmovdqu        -16+32*1-128($np),$TEMP0
        mov     %rax,%rdx
        imulq   -128($np),%rax
        add     %rax,$r2
         vmovdqu        -16+32*2-128($np),$TEMP1
        imulq   8-128($np),%rdx
        add     %rdx,$r3
        shr     \$29, $r2

        vpmuludq        $Yi,$TEMP0,$TEMP0
         vmovq          $Bi, %rbx
        vmovdqu         -16+32*3-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC1,$ACC1
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vmovdqu         -16+32*4-128($np),$TEMP0
        vpaddq          $TEMP1,$ACC2,$ACC2
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -16+32*5-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC3,$ACC3
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -16+32*6-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC4,$ACC4
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vmovdqu         -16+32*7-128($np),$TEMP0
        vpaddq          $TEMP1,$ACC5,$ACC5
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -16+32*8-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC6,$ACC6
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -16+32*9-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC7,$ACC7
        vpmuludq        $Yi,$TEMP1,$TEMP1
         vmovdqu        -24+32*1-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC8,$ACC8
        vpmuludq        $Yi,$TEMP2,$TEMP2
         vmovdqu        -24+32*2-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC9,$ACC9

        add     $r2, $r3
        imulq   -128($ap),%rbx
        add     %rbx,$r3

        mov     $r3, %rax
        imull   $n0, %eax
        and     \$0x1fffffff, %eax

        vpmuludq        $Bi,$TEMP0,$TEMP0
         vmovd          %eax, $Yi
        vmovdqu         -24+32*3-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC1,$ACC1
        vpmuludq        $Bi,$TEMP1,$TEMP1
         vpbroadcastq   $Yi, $Yi
        vmovdqu         -24+32*4-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC2,$ACC2
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vmovdqu         -24+32*5-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC3,$ACC3
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -24+32*6-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC4,$ACC4
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vmovdqu         -24+32*7-128($ap),$TEMP0
        vpaddq          $TEMP1,$ACC5,$ACC5
        vpmuludq        $Bi,$TEMP2,$TEMP2
        vmovdqu         -24+32*8-128($ap),$TEMP1
        vpaddq          $TEMP2,$ACC6,$ACC6
        vpmuludq        $Bi,$TEMP0,$TEMP0
        vmovdqu         -24+32*9-128($ap),$TEMP2
        vpaddq          $TEMP0,$ACC7,$ACC7
        vpmuludq        $Bi,$TEMP1,$TEMP1
        vpaddq          $TEMP1,$ACC8,$ACC8
        vpmuludq        $Bi,$TEMP2,$TEMP2
         vpbroadcastq   32($bp), $Bi
        vpaddq          $TEMP2,$ACC9,$ACC9
         add            \$32, $bp                       # $bp++

        vmovdqu         -24+32*1-128($np),$TEMP0
        imulq   -128($np),%rax
        add     %rax,$r3
        shr     \$29, $r3

        vmovdqu         -24+32*2-128($np),$TEMP1
        vpmuludq        $Yi,$TEMP0,$TEMP0
         vmovq          $Bi, %rbx
        vmovdqu         -24+32*3-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
        vpmuludq        $Yi,$TEMP1,$TEMP1
         vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
        vpaddq          $TEMP1,$ACC2,$ACC1
        vmovdqu         -24+32*4-128($np),$TEMP0
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -24+32*5-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC3,$ACC2
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -24+32*6-128($np),$TEMP2
        vpaddq          $TEMP0,$ACC4,$ACC3
        vpmuludq        $Yi,$TEMP1,$TEMP1
        vmovdqu         -24+32*7-128($np),$TEMP0
        vpaddq          $TEMP1,$ACC5,$ACC4
        vpmuludq        $Yi,$TEMP2,$TEMP2
        vmovdqu         -24+32*8-128($np),$TEMP1
        vpaddq          $TEMP2,$ACC6,$ACC5
        vpmuludq        $Yi,$TEMP0,$TEMP0
        vmovdqu         -24+32*9-128($np),$TEMP2
         mov    $r3, $r0
        vpaddq          $TEMP0,$ACC7,$ACC6
        vpmuludq        $Yi,$TEMP1,$TEMP1
         add    (%rsp), $r0
        vpaddq          $TEMP1,$ACC8,$ACC7
        vpmuludq        $Yi,$TEMP2,$TEMP2
         vmovq  $r3, $TEMP1
        vpaddq          $TEMP2,$ACC9,$ACC8

        dec     $i
        jnz     .Loop_mul_1024
___
1327
1328 # (*)   The original implementation was correcting ACC1-ACC3 for overflow
1329 #       after 7 loop runs, or after 28 iterations, or 56 additions.
1330 #       But as we underutilize resources, it's possible to correct in
1331 #       each iteration with marginal performance loss. And since we
1332 #       correct in each iteration, we can correct fewer digits and
1333 #       avoid the performance penalty completely. Also note that we
1334 #       correct only three digits out of four; this works because the
1335 #       most significant digit is subjected to fewer additions (sketch below).
1336
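# A scalar Perl sketch of that per-iteration correction (illustrative only,
# never called by this script; the vector code below corrects several limbs
# in parallel and deliberately leaves the top one wide). Sub name is made up.
sub normalize29 {
    my @limb = @_;                       # limbs possibly wider than 29 bits
    for my $i (0 .. $#limb-1) {
        $limb[$i+1] += $limb[$i] >> 29;  # spill high bits into the next limb
        $limb[$i]   &= 0x1fffffff;       # keep the low 29 bits
    }
    return @limb;
}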
1337 $TEMP0 = $ACC9;
1338 $TEMP3 = $Bi;
1339 $TEMP4 = $Yi;
1340 $code.=<<___;
1341         vpermq          \$0, $AND_MASK, $AND_MASK       # broadcast 0x1fffffff to all lanes
1342         vpaddq          (%rsp), $TEMP1, $ACC0
1343
1344         vpsrlq          \$29, $ACC0, $TEMP1
1345         vpand           $AND_MASK, $ACC0, $ACC0
1346         vpsrlq          \$29, $ACC1, $TEMP2
1347         vpand           $AND_MASK, $ACC1, $ACC1
1348         vpsrlq          \$29, $ACC2, $TEMP3
1349         vpermq          \$0x93, $TEMP1, $TEMP1          # rotate lanes up; lane 3 wraps to lane 0
1350         vpand           $AND_MASK, $ACC2, $ACC2
1351         vpsrlq          \$29, $ACC3, $TEMP4
1352         vpermq          \$0x93, $TEMP2, $TEMP2
1353         vpand           $AND_MASK, $ACC3, $ACC3
1354
1355         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1356         vpermq          \$0x93, $TEMP3, $TEMP3
1357         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1358         vpermq          \$0x93, $TEMP4, $TEMP4
1359         vpaddq          $TEMP0, $ACC0, $ACC0
1360         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1361         vpaddq          $TEMP1, $ACC1, $ACC1
1362         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1363         vpaddq          $TEMP2, $ACC2, $ACC2
1364         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1365         vpaddq          $TEMP3, $ACC3, $ACC3
1366         vpaddq          $TEMP4, $ACC4, $ACC4
1367
1368         vpsrlq          \$29, $ACC0, $TEMP1
1369         vpand           $AND_MASK, $ACC0, $ACC0
1370         vpsrlq          \$29, $ACC1, $TEMP2
1371         vpand           $AND_MASK, $ACC1, $ACC1
1372         vpsrlq          \$29, $ACC2, $TEMP3
1373         vpermq          \$0x93, $TEMP1, $TEMP1
1374         vpand           $AND_MASK, $ACC2, $ACC2
1375         vpsrlq          \$29, $ACC3, $TEMP4
1376         vpermq          \$0x93, $TEMP2, $TEMP2
1377         vpand           $AND_MASK, $ACC3, $ACC3
1378         vpermq          \$0x93, $TEMP3, $TEMP3
1379
1380         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1381         vpermq          \$0x93, $TEMP4, $TEMP4
1382         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1383         vpaddq          $TEMP0, $ACC0, $ACC0
1384         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1385         vpaddq          $TEMP1, $ACC1, $ACC1
1386         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1387         vpaddq          $TEMP2, $ACC2, $ACC2
1388         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1389         vpaddq          $TEMP3, $ACC3, $ACC3
1390         vpaddq          $TEMP4, $ACC4, $ACC4
1391
1392         vmovdqu         $ACC0, 0-128($rp)
1393         vmovdqu         $ACC1, 32-128($rp)
1394         vmovdqu         $ACC2, 64-128($rp)
1395         vmovdqu         $ACC3, 96-128($rp)
1396 ___
1397
1398 $TEMP5=$ACC0;
1399 $code.=<<___;
1400         vpsrlq          \$29, $ACC4, $TEMP1
1401         vpand           $AND_MASK, $ACC4, $ACC4
1402         vpsrlq          \$29, $ACC5, $TEMP2
1403         vpand           $AND_MASK, $ACC5, $ACC5
1404         vpsrlq          \$29, $ACC6, $TEMP3
1405         vpermq          \$0x93, $TEMP1, $TEMP1
1406         vpand           $AND_MASK, $ACC6, $ACC6
1407         vpsrlq          \$29, $ACC7, $TEMP4
1408         vpermq          \$0x93, $TEMP2, $TEMP2
1409         vpand           $AND_MASK, $ACC7, $ACC7
1410         vpsrlq          \$29, $ACC8, $TEMP5
1411         vpermq          \$0x93, $TEMP3, $TEMP3
1412         vpand           $AND_MASK, $ACC8, $ACC8
1413         vpermq          \$0x93, $TEMP4, $TEMP4
1414
1415         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1416         vpermq          \$0x93, $TEMP5, $TEMP5
1417         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1418         vpaddq          $TEMP0, $ACC4, $ACC4
1419         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1420         vpaddq          $TEMP1, $ACC5, $ACC5
1421         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1422         vpaddq          $TEMP2, $ACC6, $ACC6
1423         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1424         vpaddq          $TEMP3, $ACC7, $ACC7
1425         vpaddq          $TEMP4, $ACC8, $ACC8
1426
1427         vpsrlq          \$29, $ACC4, $TEMP1
1428         vpand           $AND_MASK, $ACC4, $ACC4
1429         vpsrlq          \$29, $ACC5, $TEMP2
1430         vpand           $AND_MASK, $ACC5, $ACC5
1431         vpsrlq          \$29, $ACC6, $TEMP3
1432         vpermq          \$0x93, $TEMP1, $TEMP1
1433         vpand           $AND_MASK, $ACC6, $ACC6
1434         vpsrlq          \$29, $ACC7, $TEMP4
1435         vpermq          \$0x93, $TEMP2, $TEMP2
1436         vpand           $AND_MASK, $ACC7, $ACC7
1437         vpsrlq          \$29, $ACC8, $TEMP5
1438         vpermq          \$0x93, $TEMP3, $TEMP3
1439         vpand           $AND_MASK, $ACC8, $ACC8
1440         vpermq          \$0x93, $TEMP4, $TEMP4
1441
1442         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1443         vpermq          \$0x93, $TEMP5, $TEMP5
1444         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1445         vpaddq          $TEMP0, $ACC4, $ACC4
1446         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1447         vpaddq          $TEMP1, $ACC5, $ACC5
1448         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1449         vpaddq          $TEMP2, $ACC6, $ACC6
1450         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1451         vpaddq          $TEMP3, $ACC7, $ACC7
1452         vpaddq          $TEMP4, $ACC8, $ACC8
1453
1454         vmovdqu         $ACC4, 128-128($rp)
1455         vmovdqu         $ACC5, 160-128($rp)
1456         vmovdqu         $ACC6, 192-128($rp)
1457         vmovdqu         $ACC7, 224-128($rp)
1458         vmovdqu         $ACC8, 256-128($rp)
1459         vzeroupper
1460
1461         mov     %rbp, %rax
1462 ___
1463 $code.=<<___ if ($win64);
1464 .Lmul_1024_in_tail:
1465         movaps  -0xd8(%rax),%xmm6
1466         movaps  -0xc8(%rax),%xmm7
1467         movaps  -0xb8(%rax),%xmm8
1468         movaps  -0xa8(%rax),%xmm9
1469         movaps  -0x98(%rax),%xmm10
1470         movaps  -0x88(%rax),%xmm11
1471         movaps  -0x78(%rax),%xmm12
1472         movaps  -0x68(%rax),%xmm13
1473         movaps  -0x58(%rax),%xmm14
1474         movaps  -0x48(%rax),%xmm15
1475 ___
1476 $code.=<<___;
1477         mov     -48(%rax),%r15
1478         mov     -40(%rax),%r14
1479         mov     -32(%rax),%r13
1480         mov     -24(%rax),%r12
1481         mov     -16(%rax),%rbp
1482         mov     -8(%rax),%rbx
1483         lea     (%rax),%rsp             # restore %rsp
1484 .Lmul_1024_epilogue:
1485         ret
1486 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1487 ___
1488 }
1489 {
1490 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1491 my @T = map("%r$_",(8..11));
1492
1493 $code.=<<___;
1494 .globl  rsaz_1024_red2norm_avx2
1495 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1496 .align  32
1497 rsaz_1024_red2norm_avx2:
1498         sub     \$-128,$inp     # size optimization
1499         xor     %rax,%rax
1500 ___
1501
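# Hypothetical Perl model (not used at runtime) of what the code generated
# below computes: treat the 29-bit digits positionally and re-slice the
# value into 16 little-endian 64-bit words.
sub red2norm_model {
    use Math::BigInt;
    my @d = @_;                          # 29-bit digits, least significant first
    my $n = Math::BigInt->bzero();
    $n->blsft(29)->badd($d[$_]) for reverse 0 .. $#d;
    my $mask = Math::BigInt->new(1)->blsft(64)->bsub(1);
    return map { $n->copy->brsft(64*$_)->band($mask) } 0 .. 15;
}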
1502 for ($j=0,$i=0; $i<16; $i++) {
1503     my $k=0;
1504     while (29*$j<64*($i+1)) {   # load data till boundary
1505         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1506         $j++; $k++; push(@T,shift(@T));
1507     }
1508     $l=$k;
1509     while ($k>1) {              # shift all loaded values but the last
1510         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1511         $k--;
1512     }
1513     $code.=<<___;               # shift last value
1514         mov     @T[-1], @T[0]
1515         shl     \$`29*($j-1)`, @T[-1]
1516         shr     \$`-29*($j-1)`, @T[0]
1517 ___
1518     while ($l) {                # accumulate all values
1519         $code.="        add     @T[-$l], %rax\n";
1520         $l--;
1521     }
1522         $code.=<<___;
1523         adc     \$0, @T[0]      # absorb carry, if any
1524         mov     %rax, 8*$i($out)
1525         mov     @T[0], %rax
1526 ___
1527     push(@T,shift(@T));
1528 }
1529 $code.=<<___;
1530         ret
1531 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1532
1533 .globl  rsaz_1024_norm2red_avx2
1534 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1535 .align  32
1536 rsaz_1024_norm2red_avx2:
1537         sub     \$-128,$out     # size optimization
1538         mov     ($inp),@T[0]
1539         mov     \$0x1fffffff,%eax
1540 ___
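# The inverse direction, again as a hypothetical model: slice a 1024-bit
# value into 36 digits of 29 bits (the generated code below does this with
# shrd over the 64-bit input words and pads the result with zero digits).
sub norm2red_model {
    use Math::BigInt;
    my $n = shift;                       # Math::BigInt, 0 <= $n < 2**1024
    my $mask = Math::BigInt->new(0x1fffffff);
    return map { $n->copy->brsft(29*$_)->band($mask) } 0 .. 35;
}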
1541 for ($j=0,$i=0; $i<16; $i++) {
1542     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1543     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1544     my $k=1;
1545     while (29*($j+1)<64*($i+1)) {
1546         $code.=<<___;
1547         mov     @T[0],@T[-$k]
1548         shr     \$`29*$j`,@T[-$k]
1549         and     %rax,@T[-$k]                            # &0x1fffffff
1550         mov     @T[-$k],`8*$j-128`($out)
1551 ___
1552         $j++; $k++;
1553     }
1554     $code.=<<___;
1555         shrd    \$`29*$j`,@T[1],@T[0]
1556         and     %rax,@T[0]
1557         mov     @T[0],`8*$j-128`($out)
1558 ___
1559     $j++;
1560     push(@T,shift(@T));
1561 }
1562 $code.=<<___;
1563         mov     @T[0],`8*$j-128`($out)                  # zero
1564         mov     @T[0],`8*($j+1)-128`($out)
1565         mov     @T[0],`8*($j+2)-128`($out)
1566         mov     @T[0],`8*($j+3)-128`($out)
1567         ret
1568 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1569 ___
1570 }
1571 {
1572 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1573
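# Table layout used by the scatter/gather pair below: power k owns a
# 16-byte column at offset 16*k and consecutive rows are 16*32 = 512 bytes
# apart. Since each 64-bit limb carries only 29 significant bits, vpermd
# with .Lscatter_permd packs four limbs' low dwords into a single xmm
# store. A hypothetical helper for the offset arithmetic:
sub scatter_offset {
    my ($row, $power) = @_;              # 0 <= $row < 9, 0 <= $power < 32
    return 512*$row + 16*$power;
}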
1574 $code.=<<___;
1575 .globl  rsaz_1024_scatter5_avx2
1576 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1577 .align  32
1578 rsaz_1024_scatter5_avx2:
1579         vzeroupper
1580         vmovdqu .Lscatter_permd(%rip),%ymm5
1581         shl     \$4,$power
1582         lea     ($out,$power),$out
1583         mov     \$9,%eax
1584         jmp     .Loop_scatter_1024
1585
1586 .align  32
1587 .Loop_scatter_1024:
1588         vmovdqu         ($inp),%ymm0
1589         lea             32($inp),$inp
1590         vpermd          %ymm0,%ymm5,%ymm0
1591         vmovdqu         %xmm0,($out)
1592         lea             16*32($out),$out
1593         dec     %eax
1594         jnz     .Loop_scatter_1024
1595
1596         vzeroupper
1597         ret
1598 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1599
1600 .globl  rsaz_1024_gather5_avx2
1601 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1602 .align  32
1603 rsaz_1024_gather5_avx2:
1604         vzeroupper
1605         mov     %rsp,%r11
1606 ___
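# rsaz_1024_gather5_avx2 is constant-time: instead of indexing the table
# with $power (which would leak the exponent window through the data
# cache), it compares every table index against $power to build all-ones /
# all-zeros masks, ANDs each entry with its mask and ORs the results
# together. A hypothetical scalar model of the selection:
sub gather_entry_model {
    my ($power, @entry) = @_;            # all 32 candidate entries
    my $r = 0;
    for my $i (0 .. $#entry) {
        my $mask = -($i == $power);      # -1 (all ones) iff $i == $power
        $r |= $entry[$i] & $mask;        # no secret-dependent branch or load
    }
    return $r;
}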
1607 $code.=<<___ if ($win64);
1608         lea     -0x88(%rsp),%rax
1609 .LSEH_begin_rsaz_1024_gather5:
1610         # I can't trust the assembler to use a specific encoding :-(
1611         .byte   0x48,0x8d,0x60,0xe0             # lea   -0x20(%rax),%rsp
1612         .byte   0xc5,0xf8,0x29,0x70,0xe0        # vmovaps %xmm6,-0x20(%rax)
1613         .byte   0xc5,0xf8,0x29,0x78,0xf0        # vmovaps %xmm7,-0x10(%rax)
1614         .byte   0xc5,0x78,0x29,0x40,0x00        # vmovaps %xmm8,0(%rax)
1615         .byte   0xc5,0x78,0x29,0x48,0x10        # vmovaps %xmm9,0x10(%rax)
1616         .byte   0xc5,0x78,0x29,0x50,0x20        # vmovaps %xmm10,0x20(%rax)
1617         .byte   0xc5,0x78,0x29,0x58,0x30        # vmovaps %xmm11,0x30(%rax)
1618         .byte   0xc5,0x78,0x29,0x60,0x40        # vmovaps %xmm12,0x40(%rax)
1619         .byte   0xc5,0x78,0x29,0x68,0x50        # vmovaps %xmm13,0x50(%rax)
1620         .byte   0xc5,0x78,0x29,0x70,0x60        # vmovaps %xmm14,0x60(%rax)
1621         .byte   0xc5,0x78,0x29,0x78,0x70        # vmovaps %xmm15,0x70(%rax)
1622 ___
1623 $code.=<<___;
1624         lea     -0x100(%rsp),%rsp
1625         and     \$-32, %rsp
1626         lea     .Linc(%rip), %r10
1627         lea     -128(%rsp),%rax                 # control u-op density
1628
1629         vmovd           $power, %xmm4
1630         vmovdqa         (%r10),%ymm0
1631         vmovdqa         32(%r10),%ymm1
1632         vmovdqa         64(%r10),%ymm5
1633         vpbroadcastd    %xmm4,%ymm4
1634
1635         vpaddd          %ymm5, %ymm0, %ymm2
1636         vpcmpeqd        %ymm4, %ymm0, %ymm0
1637         vpaddd          %ymm5, %ymm1, %ymm3
1638         vpcmpeqd        %ymm4, %ymm1, %ymm1
1639         vmovdqa         %ymm0, 32*0+128(%rax)
1640         vpaddd          %ymm5, %ymm2, %ymm0
1641         vpcmpeqd        %ymm4, %ymm2, %ymm2
1642         vmovdqa         %ymm1, 32*1+128(%rax)
1643         vpaddd          %ymm5, %ymm3, %ymm1
1644         vpcmpeqd        %ymm4, %ymm3, %ymm3
1645         vmovdqa         %ymm2, 32*2+128(%rax)
1646         vpaddd          %ymm5, %ymm0, %ymm2
1647         vpcmpeqd        %ymm4, %ymm0, %ymm0
1648         vmovdqa         %ymm3, 32*3+128(%rax)
1649         vpaddd          %ymm5, %ymm1, %ymm3
1650         vpcmpeqd        %ymm4, %ymm1, %ymm1
1651         vmovdqa         %ymm0, 32*4+128(%rax)
1652         vpaddd          %ymm5, %ymm2, %ymm8
1653         vpcmpeqd        %ymm4, %ymm2, %ymm2
1654         vmovdqa         %ymm1, 32*5+128(%rax)
1655         vpaddd          %ymm5, %ymm3, %ymm9
1656         vpcmpeqd        %ymm4, %ymm3, %ymm3
1657         vmovdqa         %ymm2, 32*6+128(%rax)
1658         vpaddd          %ymm5, %ymm8, %ymm10
1659         vpcmpeqd        %ymm4, %ymm8, %ymm8
1660         vmovdqa         %ymm3, 32*7+128(%rax)
1661         vpaddd          %ymm5, %ymm9, %ymm11
1662         vpcmpeqd        %ymm4, %ymm9, %ymm9
1663         vpaddd          %ymm5, %ymm10, %ymm12
1664         vpcmpeqd        %ymm4, %ymm10, %ymm10
1665         vpaddd          %ymm5, %ymm11, %ymm13
1666         vpcmpeqd        %ymm4, %ymm11, %ymm11
1667         vpaddd          %ymm5, %ymm12, %ymm14
1668         vpcmpeqd        %ymm4, %ymm12, %ymm12
1669         vpaddd          %ymm5, %ymm13, %ymm15
1670         vpcmpeqd        %ymm4, %ymm13, %ymm13
1671         vpcmpeqd        %ymm4, %ymm14, %ymm14
1672         vpcmpeqd        %ymm4, %ymm15, %ymm15
1673
1674         vmovdqa -32(%r10),%ymm7                 # .Lgather_permd
1675         lea     128($inp), $inp
1676         mov     \$9,$power
1677
1678 .Loop_gather_1024:
1679         vmovdqa         32*0-128($inp), %ymm0
1680         vmovdqa         32*1-128($inp), %ymm1
1681         vmovdqa         32*2-128($inp), %ymm2
1682         vmovdqa         32*3-128($inp), %ymm3
1683         vpand           32*0+128(%rax), %ymm0,  %ymm0
1684         vpand           32*1+128(%rax), %ymm1,  %ymm1
1685         vpand           32*2+128(%rax), %ymm2,  %ymm2
1686         vpor            %ymm0, %ymm1, %ymm4
1687         vpand           32*3+128(%rax), %ymm3,  %ymm3
1688         vmovdqa         32*4-128($inp), %ymm0
1689         vmovdqa         32*5-128($inp), %ymm1
1690         vpor            %ymm2, %ymm3, %ymm5
1691         vmovdqa         32*6-128($inp), %ymm2
1692         vmovdqa         32*7-128($inp), %ymm3
1693         vpand           32*4+128(%rax), %ymm0,  %ymm0
1694         vpand           32*5+128(%rax), %ymm1,  %ymm1
1695         vpand           32*6+128(%rax), %ymm2,  %ymm2
1696         vpor            %ymm0, %ymm4, %ymm4
1697         vpand           32*7+128(%rax), %ymm3,  %ymm3
1698         vpand           32*8-128($inp), %ymm8,  %ymm0
1699         vpor            %ymm1, %ymm5, %ymm5
1700         vpand           32*9-128($inp), %ymm9,  %ymm1
1701         vpor            %ymm2, %ymm4, %ymm4
1702         vpand           32*10-128($inp),%ymm10, %ymm2
1703         vpor            %ymm3, %ymm5, %ymm5
1704         vpand           32*11-128($inp),%ymm11, %ymm3
1705         vpor            %ymm0, %ymm4, %ymm4
1706         vpand           32*12-128($inp),%ymm12, %ymm0
1707         vpor            %ymm1, %ymm5, %ymm5
1708         vpand           32*13-128($inp),%ymm13, %ymm1
1709         vpor            %ymm2, %ymm4, %ymm4
1710         vpand           32*14-128($inp),%ymm14, %ymm2
1711         vpor            %ymm3, %ymm5, %ymm5
1712         vpand           32*15-128($inp),%ymm15, %ymm3
1713         lea             32*16($inp), $inp
1714         vpor            %ymm0, %ymm4, %ymm4
1715         vpor            %ymm1, %ymm5, %ymm5
1716         vpor            %ymm2, %ymm4, %ymm4
1717         vpor            %ymm3, %ymm5, %ymm5
1718
1719         vpor            %ymm5, %ymm4, %ymm4
1720         vextracti128    \$1, %ymm4, %xmm5       # upper half is cleared
1721         vpor            %xmm4, %xmm5, %xmm5
1722         vpermd          %ymm5,%ymm7,%ymm5
1723         vmovdqu         %ymm5,($out)
1724         lea             32($out),$out
1725         dec     $power
1726         jnz     .Loop_gather_1024
1727
1728         vpxor   %ymm0,%ymm0,%ymm0
1729         vmovdqu %ymm0,($out)
1730         vzeroupper
1731 ___
1732 $code.=<<___ if ($win64);
1733         movaps  -0xa8(%r11),%xmm6
1734         movaps  -0x98(%r11),%xmm7
1735         movaps  -0x88(%r11),%xmm8
1736         movaps  -0x78(%r11),%xmm9
1737         movaps  -0x68(%r11),%xmm10
1738         movaps  -0x58(%r11),%xmm11
1739         movaps  -0x48(%r11),%xmm12
1740         movaps  -0x38(%r11),%xmm13
1741         movaps  -0x28(%r11),%xmm14
1742         movaps  -0x18(%r11),%xmm15
1743 ___
1744 $code.=<<___;
1745         lea     (%r11),%rsp
1746         ret
1747 .LSEH_end_rsaz_1024_gather5:
1748 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1749 ___
1750 }
1751
1752 $code.=<<___;
1753 .extern OPENSSL_ia32cap_P
1754 .globl  rsaz_avx2_eligible
1755 .type   rsaz_avx2_eligible,\@abi-omnipotent
1756 .align  32
1757 rsaz_avx2_eligible:
1758         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1759 ___
1760 $code.=<<___    if ($addx);
1761         mov     \$`1<<8|1<<19`,%ecx
1762         mov     \$0,%edx
1763         and     %eax,%ecx
1764         cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
1765         cmove   %edx,%eax
1766 ___
1767 $code.=<<___;
1768         and     \$`1<<5`,%eax
1769         shr     \$5,%eax
1770         ret
1771 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1772
1773 .align  64
1774 .Land_mask:
1775         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1776 .Lscatter_permd:
1777         .long   0,2,4,6,7,7,7,7
1778 .Lgather_permd:
1779         .long   0,7,1,7,2,7,3,7
1780 .Linc:
1781         .long   0,0,0,0, 1,1,1,1
1782         .long   2,2,2,2, 3,3,3,3
1783         .long   4,4,4,4, 4,4,4,4
1784 .align  64
1785 ___
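# Hypothetical Perl rendering of rsaz_avx2_eligible above. Dword 2 of
# OPENSSL_ia32cap_P holds CPUID(7).EBX: bit 5 is AVX2, bit 8 is BMI2 and
# bit 19 is ADX. When BMI2 and ADX are both available (and $addx is set),
# the MULX/AD*X code path elsewhere is preferred, so this routine reports
# itself ineligible.
sub avx2_eligible_model {
    my $cap = shift;                     # OPENSSL_ia32cap_P[2]
    $cap = 0 if ($cap & (1<<8|1<<19)) == (1<<8|1<<19);
    return ($cap >> 5) & 1;
}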
1786
1787 if ($win64) {
1788 $rec="%rcx";
1789 $frame="%rdx";
1790 $context="%r8";
1791 $disp="%r9";
1792
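# The handler below gets three HandlerData labels (see the .xdata entries
# near the end of the file): end of prologue, start of epilogue, and start
# of the register-restoring tail. A hypothetical model of how it picks the
# frame base:
sub seh_frame_base {
    my ($rip, $labels, $ctx) = @_;
    return undef if $rip <  $labels->[0]       # still in the prologue, or
                 || $rip >= $labels->[1];      # already past the epilogue
    return $rip <  $labels->[2] ? $ctx->{Rbp}  # frame pointer still live
                                : $ctx->{Rax}; # in tail: base already in Rax
}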
1793 $code.=<<___;
1794 .extern __imp_RtlVirtualUnwind
1795 .type   rsaz_se_handler,\@abi-omnipotent
1796 .align  16
1797 rsaz_se_handler:
1798         push    %rsi
1799         push    %rdi
1800         push    %rbx
1801         push    %rbp
1802         push    %r12
1803         push    %r13
1804         push    %r14
1805         push    %r15
1806         pushfq
1807         sub     \$64,%rsp
1808
1809         mov     120($context),%rax      # pull context->Rax
1810         mov     248($context),%rbx      # pull context->Rip
1811
1812         mov     8($disp),%rsi           # disp->ImageBase
1813         mov     56($disp),%r11          # disp->HandlerData
1814
1815         mov     0(%r11),%r10d           # HandlerData[0]
1816         lea     (%rsi,%r10),%r10        # prologue label
1817         cmp     %r10,%rbx               # context->Rip<prologue label
1818         jb      .Lcommon_seh_tail
1819
1820         mov     4(%r11),%r10d           # HandlerData[1]
1821         lea     (%rsi,%r10),%r10        # epilogue label
1822         cmp     %r10,%rbx               # context->Rip>=epilogue label
1823         jae     .Lcommon_seh_tail
1824
1825         mov     160($context),%rbp      # pull context->Rbp
1826
1827         mov     8(%r11),%r10d           # HandlerData[2]
1828         lea     (%rsi,%r10),%r10        # "in tail" label
1829         cmp     %r10,%rbx               # context->Rip>="in tail" label
1830         cmovc   %rbp,%rax
1831
1832         mov     -48(%rax),%r15
1833         mov     -40(%rax),%r14
1834         mov     -32(%rax),%r13
1835         mov     -24(%rax),%r12
1836         mov     -16(%rax),%rbp
1837         mov     -8(%rax),%rbx
1838         mov     %r15,240($context)
1839         mov     %r14,232($context)
1840         mov     %r13,224($context)
1841         mov     %r12,216($context)
1842         mov     %rbp,160($context)
1843         mov     %rbx,144($context)
1844
1845         lea     -0xd8(%rax),%rsi        # %xmm save area
1846         lea     512($context),%rdi      # & context.Xmm6
1847         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1848         .long   0xa548f3fc              # cld; rep movsq
1849
1850 .Lcommon_seh_tail:
1851         mov     8(%rax),%rdi
1852         mov     16(%rax),%rsi
1853         mov     %rax,152($context)      # restore context->Rsp
1854         mov     %rsi,168($context)      # restore context->Rsi
1855         mov     %rdi,176($context)      # restore context->Rdi
1856
1857         mov     40($disp),%rdi          # disp->ContextRecord
1858         mov     $context,%rsi           # context
1859         mov     \$154,%ecx              # sizeof(CONTEXT)/sizeof(%rax)
1860         .long   0xa548f3fc              # cld; rep movsq
1861
1862         mov     $disp,%rsi
1863         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1864         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1865         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1866         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1867         mov     40(%rsi),%r10           # disp->ContextRecord
1868         lea     56(%rsi),%r11           # &disp->HandlerData
1869         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1870         mov     %r10,32(%rsp)           # arg5
1871         mov     %r11,40(%rsp)           # arg6
1872         mov     %r12,48(%rsp)           # arg7
1873         mov     %rcx,56(%rsp)           # arg8, (NULL)
1874         call    *__imp_RtlVirtualUnwind(%rip)
1875
1876         mov     \$1,%eax                # ExceptionContinueSearch
1877         add     \$64,%rsp
1878         popfq
1879         pop     %r15
1880         pop     %r14
1881         pop     %r13
1882         pop     %r12
1883         pop     %rbp
1884         pop     %rbx
1885         pop     %rdi
1886         pop     %rsi
1887         ret
1888 .size   rsaz_se_handler,.-rsaz_se_handler
1889
1890 .section        .pdata
1891 .align  4
1892         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1893         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1894         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1895
1896         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1897         .rva    .LSEH_end_rsaz_1024_mul_avx2
1898         .rva    .LSEH_info_rsaz_1024_mul_avx2
1899
1900         .rva    .LSEH_begin_rsaz_1024_gather5
1901         .rva    .LSEH_end_rsaz_1024_gather5
1902         .rva    .LSEH_info_rsaz_1024_gather5
1903 .section        .xdata
1904 .align  8
1905 .LSEH_info_rsaz_1024_sqr_avx2:
1906         .byte   9,0,0,0
1907         .rva    rsaz_se_handler
1908         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
1909         .long   0
1910 .LSEH_info_rsaz_1024_mul_avx2:
1911         .byte   9,0,0,0
1912         .rva    rsaz_se_handler
1913         .rva    .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
1914         .long   0
1915 .LSEH_info_rsaz_1024_gather5:
1916         .byte   0x01,0x36,0x17,0x0b
1917         .byte   0x36,0xf8,0x09,0x00     # vmovaps 0x90(rsp),xmm15
1918         .byte   0x31,0xe8,0x08,0x00     # vmovaps 0x80(rsp),xmm14
1919         .byte   0x2c,0xd8,0x07,0x00     # vmovaps 0x70(rsp),xmm13
1920         .byte   0x27,0xc8,0x06,0x00     # vmovaps 0x60(rsp),xmm12
1921         .byte   0x22,0xb8,0x05,0x00     # vmovaps 0x50(rsp),xmm11
1922         .byte   0x1d,0xa8,0x04,0x00     # vmovaps 0x40(rsp),xmm10
1923         .byte   0x18,0x98,0x03,0x00     # vmovaps 0x30(rsp),xmm9
1924         .byte   0x13,0x88,0x02,0x00     # vmovaps 0x20(rsp),xmm8
1925         .byte   0x0e,0x78,0x01,0x00     # vmovaps 0x10(rsp),xmm7
1926         .byte   0x09,0x68,0x00,0x00     # vmovaps 0x00(rsp),xmm6
1927         .byte   0x04,0x01,0x15,0x00     # sub     rsp,0xa8
1928         .byte   0x00,0xb3,0x00,0x00     # set_frame r11
1929 ___
1930 }
1931
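# Post-process the generated code: evaluate the `...` expressions, reduce
# shift counts modulo 64, and rewrite %ymm operands down to %xmm for the
# instruction forms that only accept xmm registers.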
1932 foreach (split("\n",$code)) {
1933         s/\`([^\`]*)\`/eval($1)/ge;
1934
1935         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1936
1937         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1938         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1939         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1940         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1941         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1942         print $_,"\n";
1943 }
1944
1945 }}} else {{{
1946 print <<___;    # assembler is too old
1947 .text
1948
1949 .globl  rsaz_avx2_eligible
1950 .type   rsaz_avx2_eligible,\@abi-omnipotent
1951 rsaz_avx2_eligible:
1952         xor     %eax,%eax
1953         ret
1954 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1955
1956 .globl  rsaz_1024_sqr_avx2
1957 .globl  rsaz_1024_mul_avx2
1958 .globl  rsaz_1024_norm2red_avx2
1959 .globl  rsaz_1024_red2norm_avx2
1960 .globl  rsaz_1024_scatter5_avx2
1961 .globl  rsaz_1024_gather5_avx2
1962 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1963 rsaz_1024_sqr_avx2:
1964 rsaz_1024_mul_avx2:
1965 rsaz_1024_norm2red_avx2:
1966 rsaz_1024_red2norm_avx2:
1967 rsaz_1024_scatter5_avx2:
1968 rsaz_1024_gather5_avx2:
1969         .byte   0x0f,0x0b       # ud2
1970         ret
1971 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1972 ___
1973 }}}
1974
1975 close STDOUT;