1 #! /usr/bin/env perl
2 # Copyright 2013-2018 The OpenSSL Project Authors. All Rights Reserved.
3 # Copyright (c) 2012, Intel Corporation. All Rights Reserved.
4 #
5 # Licensed under the Apache License 2.0 (the "License").  You may not use
6 # this file except in compliance with the License.  You can obtain a copy
7 # in the file LICENSE in the source distribution or at
8 # https://www.openssl.org/source/license.html
9 #
10 # Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
11 # (1) Intel Corporation, Israel Development Center, Haifa, Israel
12 # (2) University of Haifa, Israel
13 #
14 # References:
15 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular
16 #     Exponentiation,  Using Advanced Vector Instructions Architectures",
17 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
18 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
19 # [2] S. Gueron: "Efficient Software Implementations of Modular
20 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
21 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
22 #     Proceedings of 9th International Conference on Information Technology:
23 #     New Generations (ITNG 2012), pp.821-823 (2012)
24 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
25 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048
26 #     on AVX2 capable x86_64 platforms",
27 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
28 #
29 # +13% improvement over original submission by <appro@openssl.org>
30 #
31 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
32 # 2.3GHz Haswell        621             765/+23%        1113/+79%
33 # 2.3GHz Broadwell(**)  688             1200(***)/+74%  1120/+63%
34 #
35 # (*)   if system doesn't support AVX2, for reference purposes;
36 # (**)  scaled to 2.3GHz to simplify comparison;
37 # (***) scalar AD*X code is faster than AVX2 and is preferred code
38 #       path for Broadwell;
39
40 # $output is the last argument if it looks like a file (it has an extension)
41 # $flavour is the first argument if it doesn't look like a file
42 $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
43 $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
44
45 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
46
47 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
48 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
49 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
50 die "can't locate x86_64-xlate.pl";
51
52 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
53                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
54         $avx = ($1>=2.19) + ($1>=2.22);
55         $addx = ($1>=2.23);
56 }
57
58 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
59             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
60         $avx = ($1>=2.09) + ($1>=2.10);
61         $addx = ($1>=2.10);
62 }
63
64 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
65             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
66         $avx = ($1>=10) + ($1>=11);
67         $addx = ($1>=11);
68 }
69
70 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9])\.([0-9]+)/) {
71         my $ver = $2 + $3/100.0;        # 3.1->3.01, 3.10->3.10
72         $avx = ($ver>=3.0) + ($ver>=3.01);
73         $addx = ($ver>=3.03);
74 }
75
76 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
77     or die "can't call $xlate: $!";
78 *STDOUT = *OUT;
79
80 if ($avx>1) {{{
81 { # void AMS_WW(
82 my $rp="%rdi";  # BN_ULONG *rp,
83 my $ap="%rsi";  # const BN_ULONG *ap,
84 my $np="%rdx";  # const BN_ULONG *np,
85 my $n0="%ecx";  # const BN_ULONG n0,
86 my $rep="%r8d"; # int repeat);
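# (Added note, not from the original authors: "AMS_WW"/"AMM_WW" presumably
# stand for the word-by-word "Almost Montgomery" squaring/multiplication
# routines described in the papers cited above; the symbol exported by this
# block is rsaz_1024_sqr_avx2.)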
87
88 # The registers that hold the accumulated redundant result
89 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
90 # Therefore: ceil(1024/29)/4 = 9
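# (Added note: a minimal sketch of the arithmetic behind that count. Each
# 64-bit lane holds one 29-bit limb, so a 1024-bit operand needs
# ceil(1024/29) = 36 limbs; packing 4 limbs per 256-bit ymm register gives
# 36/4 = 9 registers, and the unused 64-29 = 35 bits per lane leave headroom
# for accumulating carries between normalization steps.
# In Perl terms: use POSIX 'ceil'; ceil(1024/29)/4 == 9.)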
91 my $ACC0="%ymm0";
92 my $ACC1="%ymm1";
93 my $ACC2="%ymm2";
94 my $ACC3="%ymm3";
95 my $ACC4="%ymm4";
96 my $ACC5="%ymm5";
97 my $ACC6="%ymm6";
98 my $ACC7="%ymm7";
99 my $ACC8="%ymm8";
100 my $ACC9="%ymm9";
101 # Registers that hold the broadcasted words of bp, currently used
102 my $B1="%ymm10";
103 my $B2="%ymm11";
104 # Registers that hold the broadcasted words of Y, currently used
105 my $Y1="%ymm12";
106 my $Y2="%ymm13";
107 # Helper registers
108 my $TEMP1="%ymm14";
109 my $AND_MASK="%ymm15";
110 # alu registers that hold the first words of the ACC
111 my $r0="%r9";
112 my $r1="%r10";
113 my $r2="%r11";
114 my $r3="%r12";
115
116 my $i="%r14d";                  # loop counter
117 my $tmp = "%r15";
118
119 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
120
121 my $aap=$r0;
122 my $tp0="%rbx";
123 my $tp1=$r3;
124 my $tpa=$tmp;
125
126 $np="%r13";                     # reassigned argument
127
128 $code.=<<___;
129 .text
130
131 .globl  rsaz_1024_sqr_avx2
132 .type   rsaz_1024_sqr_avx2,\@function,5
133 .align  64
134 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
135 .cfi_startproc
136         lea     (%rsp), %rax
137 .cfi_def_cfa_register   %rax
138         push    %rbx
139 .cfi_push       %rbx
140         push    %rbp
141 .cfi_push       %rbp
142         push    %r12
143 .cfi_push       %r12
144         push    %r13
145 .cfi_push       %r13
146         push    %r14
147 .cfi_push       %r14
148         push    %r15
149 .cfi_push       %r15
150         vzeroupper
151 ___
152 $code.=<<___ if ($win64);
153         lea     -0xa8(%rsp),%rsp
154         vmovaps %xmm6,-0xd8(%rax)
155         vmovaps %xmm7,-0xc8(%rax)
156         vmovaps %xmm8,-0xb8(%rax)
157         vmovaps %xmm9,-0xa8(%rax)
158         vmovaps %xmm10,-0x98(%rax)
159         vmovaps %xmm11,-0x88(%rax)
160         vmovaps %xmm12,-0x78(%rax)
161         vmovaps %xmm13,-0x68(%rax)
162         vmovaps %xmm14,-0x58(%rax)
163         vmovaps %xmm15,-0x48(%rax)
164 .Lsqr_1024_body:
165 ___
166 $code.=<<___;
167         mov     %rax,%rbp
168 .cfi_def_cfa_register   %rbp
169         mov     %rdx, $np                       # reassigned argument
170         sub     \$$FrameSize, %rsp
171         mov     $np, $tmp
172         sub     \$-128, $rp                     # size optimization
173         sub     \$-128, $ap
174         sub     \$-128, $np
175
176         and     \$4095, $tmp                    # see if $np crosses page
177         add     \$32*10, $tmp
178         shr     \$12, $tmp
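        # (Added note: the and/add/shr above evaluate ((np & 4095) + 32*10) >> 12
        # on a saved copy of the modulus pointer; the result is non-zero exactly
        # when the 10 vectors (320 bytes) of the modulus straddle a 4KB page, and
        # the jz below branches past the copy when they do not.)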
179         vpxor   $ACC9,$ACC9,$ACC9
180         jz      .Lsqr_1024_no_n_copy
181
182         # an unaligned 256-bit load that crosses a page boundary can
183         # cause >2x performance degradation here, so if $np does
184         # cross a page boundary, copy it to the stack and make sure the
185         # stack frame doesn't...
186         sub             \$32*10,%rsp
187         vmovdqu         32*0-128($np), $ACC0
188         and             \$-2048, %rsp
189         vmovdqu         32*1-128($np), $ACC1
190         vmovdqu         32*2-128($np), $ACC2
191         vmovdqu         32*3-128($np), $ACC3
192         vmovdqu         32*4-128($np), $ACC4
193         vmovdqu         32*5-128($np), $ACC5
194         vmovdqu         32*6-128($np), $ACC6
195         vmovdqu         32*7-128($np), $ACC7
196         vmovdqu         32*8-128($np), $ACC8
197         lea             $FrameSize+128(%rsp),$np
198         vmovdqu         $ACC0, 32*0-128($np)
199         vmovdqu         $ACC1, 32*1-128($np)
200         vmovdqu         $ACC2, 32*2-128($np)
201         vmovdqu         $ACC3, 32*3-128($np)
202         vmovdqu         $ACC4, 32*4-128($np)
203         vmovdqu         $ACC5, 32*5-128($np)
204         vmovdqu         $ACC6, 32*6-128($np)
205         vmovdqu         $ACC7, 32*7-128($np)
206         vmovdqu         $ACC8, 32*8-128($np)
207         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
208
209 .Lsqr_1024_no_n_copy:
210         and             \$-1024, %rsp
211
212         vmovdqu         32*1-128($ap), $ACC1
213         vmovdqu         32*2-128($ap), $ACC2
214         vmovdqu         32*3-128($ap), $ACC3
215         vmovdqu         32*4-128($ap), $ACC4
216         vmovdqu         32*5-128($ap), $ACC5
217         vmovdqu         32*6-128($ap), $ACC6
218         vmovdqu         32*7-128($ap), $ACC7
219         vmovdqu         32*8-128($ap), $ACC8
220
221         lea     192(%rsp), $tp0                 # 64+128=192
222         vmovdqu .Land_mask(%rip), $AND_MASK
223         jmp     .LOOP_GRANDE_SQR_1024
224
225 .align  32
226 .LOOP_GRANDE_SQR_1024:
227         lea     32*18+128(%rsp), $aap           # size optimization
228         lea     448(%rsp), $tp1                 # 64+128+256=448
229
230         # the squaring is performed as described in Variant B of
231         # "Speeding up Big-Number Squaring", so start by calculating
232         # the A*2=A+A vector
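        # (Added note: roughly, every cross product a[i]*a[j] with i != j occurs
        # twice in A^2, so those terms are computed once against the doubled
        # vector stored at $aap, while the remaining products use the original
        # operand at $ap; this is the saving Variant B is built on.)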
233         vpaddq          $ACC1, $ACC1, $ACC1
234          vpbroadcastq   32*0-128($ap), $B1
235         vpaddq          $ACC2, $ACC2, $ACC2
236         vmovdqa         $ACC1, 32*0-128($aap)
237         vpaddq          $ACC3, $ACC3, $ACC3
238         vmovdqa         $ACC2, 32*1-128($aap)
239         vpaddq          $ACC4, $ACC4, $ACC4
240         vmovdqa         $ACC3, 32*2-128($aap)
241         vpaddq          $ACC5, $ACC5, $ACC5
242         vmovdqa         $ACC4, 32*3-128($aap)
243         vpaddq          $ACC6, $ACC6, $ACC6
244         vmovdqa         $ACC5, 32*4-128($aap)
245         vpaddq          $ACC7, $ACC7, $ACC7
246         vmovdqa         $ACC6, 32*5-128($aap)
247         vpaddq          $ACC8, $ACC8, $ACC8
248         vmovdqa         $ACC7, 32*6-128($aap)
249         vpxor           $ACC9, $ACC9, $ACC9
250         vmovdqa         $ACC8, 32*7-128($aap)
251
252         vpmuludq        32*0-128($ap), $B1, $ACC0
253          vpbroadcastq   32*1-128($ap), $B2
254          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
255         vpmuludq        $B1, $ACC1, $ACC1
256          vmovdqu        $ACC9, 32*10-448($tp1)
257         vpmuludq        $B1, $ACC2, $ACC2
258          vmovdqu        $ACC9, 32*11-448($tp1)
259         vpmuludq        $B1, $ACC3, $ACC3
260          vmovdqu        $ACC9, 32*12-448($tp1)
261         vpmuludq        $B1, $ACC4, $ACC4
262          vmovdqu        $ACC9, 32*13-448($tp1)
263         vpmuludq        $B1, $ACC5, $ACC5
264          vmovdqu        $ACC9, 32*14-448($tp1)
265         vpmuludq        $B1, $ACC6, $ACC6
266          vmovdqu        $ACC9, 32*15-448($tp1)
267         vpmuludq        $B1, $ACC7, $ACC7
268          vmovdqu        $ACC9, 32*16-448($tp1)
269         vpmuludq        $B1, $ACC8, $ACC8
270          vpbroadcastq   32*2-128($ap), $B1
271          vmovdqu        $ACC9, 32*17-448($tp1)
272
273         mov     $ap, $tpa
274         mov     \$4, $i
275         jmp     .Lsqr_entry_1024
276 ___
277 $TEMP0=$Y1;
278 $TEMP2=$Y2;
279 $code.=<<___;
280 .align  32
281 .LOOP_SQR_1024:
282          vpbroadcastq   32*1-128($tpa), $B2
283         vpmuludq        32*0-128($ap), $B1, $ACC0
284         vpaddq          32*0-192($tp0), $ACC0, $ACC0
285         vpmuludq        32*0-128($aap), $B1, $ACC1
286         vpaddq          32*1-192($tp0), $ACC1, $ACC1
287         vpmuludq        32*1-128($aap), $B1, $ACC2
288         vpaddq          32*2-192($tp0), $ACC2, $ACC2
289         vpmuludq        32*2-128($aap), $B1, $ACC3
290         vpaddq          32*3-192($tp0), $ACC3, $ACC3
291         vpmuludq        32*3-128($aap), $B1, $ACC4
292         vpaddq          32*4-192($tp0), $ACC4, $ACC4
293         vpmuludq        32*4-128($aap), $B1, $ACC5
294         vpaddq          32*5-192($tp0), $ACC5, $ACC5
295         vpmuludq        32*5-128($aap), $B1, $ACC6
296         vpaddq          32*6-192($tp0), $ACC6, $ACC6
297         vpmuludq        32*6-128($aap), $B1, $ACC7
298         vpaddq          32*7-192($tp0), $ACC7, $ACC7
299         vpmuludq        32*7-128($aap), $B1, $ACC8
300          vpbroadcastq   32*2-128($tpa), $B1
301         vpaddq          32*8-192($tp0), $ACC8, $ACC8
302 .Lsqr_entry_1024:
303         vmovdqu         $ACC0, 32*0-192($tp0)
304         vmovdqu         $ACC1, 32*1-192($tp0)
305
306         vpmuludq        32*1-128($ap), $B2, $TEMP0
307         vpaddq          $TEMP0, $ACC2, $ACC2
308         vpmuludq        32*1-128($aap), $B2, $TEMP1
309         vpaddq          $TEMP1, $ACC3, $ACC3
310         vpmuludq        32*2-128($aap), $B2, $TEMP2
311         vpaddq          $TEMP2, $ACC4, $ACC4
312         vpmuludq        32*3-128($aap), $B2, $TEMP0
313         vpaddq          $TEMP0, $ACC5, $ACC5
314         vpmuludq        32*4-128($aap), $B2, $TEMP1
315         vpaddq          $TEMP1, $ACC6, $ACC6
316         vpmuludq        32*5-128($aap), $B2, $TEMP2
317         vpaddq          $TEMP2, $ACC7, $ACC7
318         vpmuludq        32*6-128($aap), $B2, $TEMP0
319         vpaddq          $TEMP0, $ACC8, $ACC8
320         vpmuludq        32*7-128($aap), $B2, $ACC0
321          vpbroadcastq   32*3-128($tpa), $B2
322         vpaddq          32*9-192($tp0), $ACC0, $ACC0
323
324         vmovdqu         $ACC2, 32*2-192($tp0)
325         vmovdqu         $ACC3, 32*3-192($tp0)
326
327         vpmuludq        32*2-128($ap), $B1, $TEMP2
328         vpaddq          $TEMP2, $ACC4, $ACC4
329         vpmuludq        32*2-128($aap), $B1, $TEMP0
330         vpaddq          $TEMP0, $ACC5, $ACC5
331         vpmuludq        32*3-128($aap), $B1, $TEMP1
332         vpaddq          $TEMP1, $ACC6, $ACC6
333         vpmuludq        32*4-128($aap), $B1, $TEMP2
334         vpaddq          $TEMP2, $ACC7, $ACC7
335         vpmuludq        32*5-128($aap), $B1, $TEMP0
336         vpaddq          $TEMP0, $ACC8, $ACC8
337         vpmuludq        32*6-128($aap), $B1, $TEMP1
338         vpaddq          $TEMP1, $ACC0, $ACC0
339         vpmuludq        32*7-128($aap), $B1, $ACC1
340          vpbroadcastq   32*4-128($tpa), $B1
341         vpaddq          32*10-448($tp1), $ACC1, $ACC1
342
343         vmovdqu         $ACC4, 32*4-192($tp0)
344         vmovdqu         $ACC5, 32*5-192($tp0)
345
346         vpmuludq        32*3-128($ap), $B2, $TEMP0
347         vpaddq          $TEMP0, $ACC6, $ACC6
348         vpmuludq        32*3-128($aap), $B2, $TEMP1
349         vpaddq          $TEMP1, $ACC7, $ACC7
350         vpmuludq        32*4-128($aap), $B2, $TEMP2
351         vpaddq          $TEMP2, $ACC8, $ACC8
352         vpmuludq        32*5-128($aap), $B2, $TEMP0
353         vpaddq          $TEMP0, $ACC0, $ACC0
354         vpmuludq        32*6-128($aap), $B2, $TEMP1
355         vpaddq          $TEMP1, $ACC1, $ACC1
356         vpmuludq        32*7-128($aap), $B2, $ACC2
357          vpbroadcastq   32*5-128($tpa), $B2
358         vpaddq          32*11-448($tp1), $ACC2, $ACC2
359
360         vmovdqu         $ACC6, 32*6-192($tp0)
361         vmovdqu         $ACC7, 32*7-192($tp0)
362
363         vpmuludq        32*4-128($ap), $B1, $TEMP0
364         vpaddq          $TEMP0, $ACC8, $ACC8
365         vpmuludq        32*4-128($aap), $B1, $TEMP1
366         vpaddq          $TEMP1, $ACC0, $ACC0
367         vpmuludq        32*5-128($aap), $B1, $TEMP2
368         vpaddq          $TEMP2, $ACC1, $ACC1
369         vpmuludq        32*6-128($aap), $B1, $TEMP0
370         vpaddq          $TEMP0, $ACC2, $ACC2
371         vpmuludq        32*7-128($aap), $B1, $ACC3
372          vpbroadcastq   32*6-128($tpa), $B1
373         vpaddq          32*12-448($tp1), $ACC3, $ACC3
374
375         vmovdqu         $ACC8, 32*8-192($tp0)
376         vmovdqu         $ACC0, 32*9-192($tp0)
377         lea             8($tp0), $tp0
378
379         vpmuludq        32*5-128($ap), $B2, $TEMP2
380         vpaddq          $TEMP2, $ACC1, $ACC1
381         vpmuludq        32*5-128($aap), $B2, $TEMP0
382         vpaddq          $TEMP0, $ACC2, $ACC2
383         vpmuludq        32*6-128($aap), $B2, $TEMP1
384         vpaddq          $TEMP1, $ACC3, $ACC3
385         vpmuludq        32*7-128($aap), $B2, $ACC4
386          vpbroadcastq   32*7-128($tpa), $B2
387         vpaddq          32*13-448($tp1), $ACC4, $ACC4
388
389         vmovdqu         $ACC1, 32*10-448($tp1)
390         vmovdqu         $ACC2, 32*11-448($tp1)
391
392         vpmuludq        32*6-128($ap), $B1, $TEMP0
393         vpaddq          $TEMP0, $ACC3, $ACC3
394         vpmuludq        32*6-128($aap), $B1, $TEMP1
395          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
396         vpaddq          $TEMP1, $ACC4, $ACC4
397         vpmuludq        32*7-128($aap), $B1, $ACC5
398          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
399         vpaddq          32*14-448($tp1), $ACC5, $ACC5
400
401         vmovdqu         $ACC3, 32*12-448($tp1)
402         vmovdqu         $ACC4, 32*13-448($tp1)
403         lea             8($tpa), $tpa
404
405         vpmuludq        32*7-128($ap), $B2, $TEMP0
406         vpaddq          $TEMP0, $ACC5, $ACC5
407         vpmuludq        32*7-128($aap), $B2, $ACC6
408         vpaddq          32*15-448($tp1), $ACC6, $ACC6
409
410         vpmuludq        32*8-128($ap), $ACC0, $ACC7
411         vmovdqu         $ACC5, 32*14-448($tp1)
412         vpaddq          32*16-448($tp1), $ACC7, $ACC7
413         vmovdqu         $ACC6, 32*15-448($tp1)
414         vmovdqu         $ACC7, 32*16-448($tp1)
415         lea             8($tp1), $tp1
416
417         dec     $i
418         jnz     .LOOP_SQR_1024
419 ___
420 $ZERO = $ACC9;
421 $TEMP0 = $B1;
422 $TEMP2 = $B2;
423 $TEMP3 = $Y1;
424 $TEMP4 = $Y2;
425 $code.=<<___;
426         # we need to fix indices 32-39 to avoid overflow
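        # (Added note on the normalization pattern used here and below: vpsrlq
        # by 29 extracts the carry out of each lane, vpand trims the lane back
        # to 29 bits, vpermq with 0x93 rotates the carries up by one 64-bit
        # lane (the top one wrapping around), and the vpblendd pairs feed the
        # wrapped carry into the bottom lane of the next register, so carries
        # ripple across the whole redundant vector.)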
427         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
428         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
429         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
430         lea             192(%rsp), $tp0                 # 64+128=192
431
432         vpsrlq          \$29, $ACC8, $TEMP1
433         vpand           $AND_MASK, $ACC8, $ACC8
434         vpsrlq          \$29, $ACC1, $TEMP2
435         vpand           $AND_MASK, $ACC1, $ACC1
436
437         vpermq          \$0x93, $TEMP1, $TEMP1
438         vpxor           $ZERO, $ZERO, $ZERO
439         vpermq          \$0x93, $TEMP2, $TEMP2
440
441         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
442         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
443         vpaddq          $TEMP0, $ACC8, $ACC8
444         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
445         vpaddq          $TEMP1, $ACC1, $ACC1
446         vpaddq          $TEMP2, $ACC2, $ACC2
447         vmovdqu         $ACC1, 32*9-192($tp0)
448         vmovdqu         $ACC2, 32*10-192($tp0)
449
450         mov     (%rsp), %rax
451         mov     8(%rsp), $r1
452         mov     16(%rsp), $r2
453         mov     24(%rsp), $r3
454         vmovdqu 32*1(%rsp), $ACC1
455         vmovdqu 32*2-192($tp0), $ACC2
456         vmovdqu 32*3-192($tp0), $ACC3
457         vmovdqu 32*4-192($tp0), $ACC4
458         vmovdqu 32*5-192($tp0), $ACC5
459         vmovdqu 32*6-192($tp0), $ACC6
460         vmovdqu 32*7-192($tp0), $ACC7
461
462         mov     %rax, $r0
463         imull   $n0, %eax
464         and     \$0x1fffffff, %eax
465         vmovd   %eax, $Y1
466
467         mov     %rax, %rdx
468         imulq   -128($np), %rax
469          vpbroadcastq   $Y1, $Y1
470         add     %rax, $r0
471         mov     %rdx, %rax
472         imulq   8-128($np), %rax
473         shr     \$29, $r0
474         add     %rax, $r1
475         mov     %rdx, %rax
476         imulq   16-128($np), %rax
477         add     $r0, $r1
478         add     %rax, $r2
479         imulq   24-128($np), %rdx
480         add     %rdx, $r3
481
482         mov     $r1, %rax
483         imull   $n0, %eax
484         and     \$0x1fffffff, %eax
485
486         mov \$9, $i
487         jmp .LOOP_REDUCE_1024
488
489 .align  32
490 .LOOP_REDUCE_1024:
491         vmovd   %eax, $Y2
492         vpbroadcastq    $Y2, $Y2
493
494         vpmuludq        32*1-128($np), $Y1, $TEMP0
495          mov    %rax, %rdx
496          imulq  -128($np), %rax
497         vpaddq          $TEMP0, $ACC1, $ACC1
498          add    %rax, $r1
499         vpmuludq        32*2-128($np), $Y1, $TEMP1
500          mov    %rdx, %rax
501          imulq  8-128($np), %rax
502         vpaddq          $TEMP1, $ACC2, $ACC2
503         vpmuludq        32*3-128($np), $Y1, $TEMP2
504          .byte  0x67
505          add    %rax, $r2
506          .byte  0x67
507          mov    %rdx, %rax
508          imulq  16-128($np), %rax
509          shr    \$29, $r1
510         vpaddq          $TEMP2, $ACC3, $ACC3
511         vpmuludq        32*4-128($np), $Y1, $TEMP0
512          add    %rax, $r3
513          add    $r1, $r2
514         vpaddq          $TEMP0, $ACC4, $ACC4
515         vpmuludq        32*5-128($np), $Y1, $TEMP1
516          mov    $r2, %rax
517          imull  $n0, %eax
518         vpaddq          $TEMP1, $ACC5, $ACC5
519         vpmuludq        32*6-128($np), $Y1, $TEMP2
520          and    \$0x1fffffff, %eax
521         vpaddq          $TEMP2, $ACC6, $ACC6
522         vpmuludq        32*7-128($np), $Y1, $TEMP0
523         vpaddq          $TEMP0, $ACC7, $ACC7
524         vpmuludq        32*8-128($np), $Y1, $TEMP1
525          vmovd  %eax, $Y1
526          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
527         vpaddq          $TEMP1, $ACC8, $ACC8
528          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
529          vpbroadcastq   $Y1, $Y1
530
531         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
532         vmovdqu         32*3-8-128($np), $TEMP1
533          mov    %rax, %rdx
534          imulq  -128($np), %rax
535         vpaddq          $TEMP2, $ACC1, $ACC1
536         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
537         vmovdqu         32*4-8-128($np), $TEMP2
538          add    %rax, $r2
539          mov    %rdx, %rax
540          imulq  8-128($np), %rax
541         vpaddq          $TEMP0, $ACC2, $ACC2
542          add    $r3, %rax
543          shr    \$29, $r2
544         vpmuludq        $Y2, $TEMP1, $TEMP1
545         vmovdqu         32*5-8-128($np), $TEMP0
546          add    $r2, %rax
547         vpaddq          $TEMP1, $ACC3, $ACC3
548         vpmuludq        $Y2, $TEMP2, $TEMP2
549         vmovdqu         32*6-8-128($np), $TEMP1
550          .byte  0x67
551          mov    %rax, $r3
552          imull  $n0, %eax
553         vpaddq          $TEMP2, $ACC4, $ACC4
554         vpmuludq        $Y2, $TEMP0, $TEMP0
555         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
556          and    \$0x1fffffff, %eax
557         vpaddq          $TEMP0, $ACC5, $ACC5
558         vpmuludq        $Y2, $TEMP1, $TEMP1
559         vmovdqu         32*8-8-128($np), $TEMP0
560         vpaddq          $TEMP1, $ACC6, $ACC6
561         vpmuludq        $Y2, $TEMP2, $TEMP2
562         vmovdqu         32*9-8-128($np), $ACC9
563          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
564          imulq  -128($np), %rax
565         vpaddq          $TEMP2, $ACC7, $ACC7
566         vpmuludq        $Y2, $TEMP0, $TEMP0
567          vmovdqu        32*1-16-128($np), $TEMP1
568          vpbroadcastq   $ACC0, $ACC0
569         vpaddq          $TEMP0, $ACC8, $ACC8
570         vpmuludq        $Y2, $ACC9, $ACC9
571          vmovdqu        32*2-16-128($np), $TEMP2
572          add    %rax, $r3
573
574 ___
575 ($ACC0,$Y2)=($Y2,$ACC0);
576 $code.=<<___;
577          vmovdqu        32*1-24-128($np), $ACC0
578         vpmuludq        $Y1, $TEMP1, $TEMP1
579         vmovdqu         32*3-16-128($np), $TEMP0
580         vpaddq          $TEMP1, $ACC1, $ACC1
581          vpmuludq       $Y2, $ACC0, $ACC0
582         vpmuludq        $Y1, $TEMP2, $TEMP2
583         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
584          vpaddq         $ACC1, $ACC0, $ACC0
585         vpaddq          $TEMP2, $ACC2, $ACC2
586         vpmuludq        $Y1, $TEMP0, $TEMP0
587         vmovdqu         32*5-16-128($np), $TEMP2
588          .byte  0x67
589          vmovq          $ACC0, %rax
590          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
591         vpaddq          $TEMP0, $ACC3, $ACC3
592         vpmuludq        $Y1, $TEMP1, $TEMP1
593         vmovdqu         32*6-16-128($np), $TEMP0
594         vpaddq          $TEMP1, $ACC4, $ACC4
595         vpmuludq        $Y1, $TEMP2, $TEMP2
596         vmovdqu         32*7-16-128($np), $TEMP1
597         vpaddq          $TEMP2, $ACC5, $ACC5
598         vpmuludq        $Y1, $TEMP0, $TEMP0
599         vmovdqu         32*8-16-128($np), $TEMP2
600         vpaddq          $TEMP0, $ACC6, $ACC6
601         vpmuludq        $Y1, $TEMP1, $TEMP1
602          shr    \$29, $r3
603         vmovdqu         32*9-16-128($np), $TEMP0
604          add    $r3, %rax
605         vpaddq          $TEMP1, $ACC7, $ACC7
606         vpmuludq        $Y1, $TEMP2, $TEMP2
607          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
608          mov    %rax, $r0
609          imull  $n0, %eax
610         vpaddq          $TEMP2, $ACC8, $ACC8
611         vpmuludq        $Y1, $TEMP0, $TEMP0
612          and    \$0x1fffffff, %eax
613          vmovd  %eax, $Y1
614          vmovdqu        32*3-24-128($np), $TEMP2
615         .byte   0x67
616         vpaddq          $TEMP0, $ACC9, $ACC9
617          vpbroadcastq   $Y1, $Y1
618
619         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
620         vmovdqu         32*4-24-128($np), $TEMP0
621          mov    %rax, %rdx
622          imulq  -128($np), %rax
623          mov    8(%rsp), $r1
624         vpaddq          $TEMP1, $ACC2, $ACC1
625         vpmuludq        $Y2, $TEMP2, $TEMP2
626         vmovdqu         32*5-24-128($np), $TEMP1
627          add    %rax, $r0
628          mov    %rdx, %rax
629          imulq  8-128($np), %rax
630          .byte  0x67
631          shr    \$29, $r0
632          mov    16(%rsp), $r2
633         vpaddq          $TEMP2, $ACC3, $ACC2
634         vpmuludq        $Y2, $TEMP0, $TEMP0
635         vmovdqu         32*6-24-128($np), $TEMP2
636          add    %rax, $r1
637          mov    %rdx, %rax
638          imulq  16-128($np), %rax
639         vpaddq          $TEMP0, $ACC4, $ACC3
640         vpmuludq        $Y2, $TEMP1, $TEMP1
641         vmovdqu         32*7-24-128($np), $TEMP0
642          imulq  24-128($np), %rdx               # future $r3
643          add    %rax, $r2
644          lea    ($r0,$r1), %rax
645         vpaddq          $TEMP1, $ACC5, $ACC4
646         vpmuludq        $Y2, $TEMP2, $TEMP2
647         vmovdqu         32*8-24-128($np), $TEMP1
648          mov    %rax, $r1
649          imull  $n0, %eax
650         vpmuludq        $Y2, $TEMP0, $TEMP0
651         vpaddq          $TEMP2, $ACC6, $ACC5
652         vmovdqu         32*9-24-128($np), $TEMP2
653          and    \$0x1fffffff, %eax
654         vpaddq          $TEMP0, $ACC7, $ACC6
655         vpmuludq        $Y2, $TEMP1, $TEMP1
656          add    24(%rsp), %rdx
657         vpaddq          $TEMP1, $ACC8, $ACC7
658         vpmuludq        $Y2, $TEMP2, $TEMP2
659         vpaddq          $TEMP2, $ACC9, $ACC8
660          vmovq  $r3, $ACC9
661          mov    %rdx, $r3
662
663         dec     $i
664         jnz     .LOOP_REDUCE_1024
665 ___
666 ($ACC0,$Y2)=($Y2,$ACC0);
667 $code.=<<___;
668         lea     448(%rsp), $tp1                 # size optimization
669         vpaddq  $ACC9, $Y2, $ACC0
670         vpxor   $ZERO, $ZERO, $ZERO
671
672         vpaddq          32*9-192($tp0), $ACC0, $ACC0
673         vpaddq          32*10-448($tp1), $ACC1, $ACC1
674         vpaddq          32*11-448($tp1), $ACC2, $ACC2
675         vpaddq          32*12-448($tp1), $ACC3, $ACC3
676         vpaddq          32*13-448($tp1), $ACC4, $ACC4
677         vpaddq          32*14-448($tp1), $ACC5, $ACC5
678         vpaddq          32*15-448($tp1), $ACC6, $ACC6
679         vpaddq          32*16-448($tp1), $ACC7, $ACC7
680         vpaddq          32*17-448($tp1), $ACC8, $ACC8
681
682         vpsrlq          \$29, $ACC0, $TEMP1
683         vpand           $AND_MASK, $ACC0, $ACC0
684         vpsrlq          \$29, $ACC1, $TEMP2
685         vpand           $AND_MASK, $ACC1, $ACC1
686         vpsrlq          \$29, $ACC2, $TEMP3
687         vpermq          \$0x93, $TEMP1, $TEMP1
688         vpand           $AND_MASK, $ACC2, $ACC2
689         vpsrlq          \$29, $ACC3, $TEMP4
690         vpermq          \$0x93, $TEMP2, $TEMP2
691         vpand           $AND_MASK, $ACC3, $ACC3
692         vpermq          \$0x93, $TEMP3, $TEMP3
693
694         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
695         vpermq          \$0x93, $TEMP4, $TEMP4
696         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
697         vpaddq          $TEMP0, $ACC0, $ACC0
698         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
699         vpaddq          $TEMP1, $ACC1, $ACC1
700         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
701         vpaddq          $TEMP2, $ACC2, $ACC2
702         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
703         vpaddq          $TEMP3, $ACC3, $ACC3
704         vpaddq          $TEMP4, $ACC4, $ACC4
705
706         vpsrlq          \$29, $ACC0, $TEMP1
707         vpand           $AND_MASK, $ACC0, $ACC0
708         vpsrlq          \$29, $ACC1, $TEMP2
709         vpand           $AND_MASK, $ACC1, $ACC1
710         vpsrlq          \$29, $ACC2, $TEMP3
711         vpermq          \$0x93, $TEMP1, $TEMP1
712         vpand           $AND_MASK, $ACC2, $ACC2
713         vpsrlq          \$29, $ACC3, $TEMP4
714         vpermq          \$0x93, $TEMP2, $TEMP2
715         vpand           $AND_MASK, $ACC3, $ACC3
716         vpermq          \$0x93, $TEMP3, $TEMP3
717
718         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
719         vpermq          \$0x93, $TEMP4, $TEMP4
720         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
721         vpaddq          $TEMP0, $ACC0, $ACC0
722         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
723         vpaddq          $TEMP1, $ACC1, $ACC1
724         vmovdqu         $ACC0, 32*0-128($rp)
725         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
726         vpaddq          $TEMP2, $ACC2, $ACC2
727         vmovdqu         $ACC1, 32*1-128($rp)
728         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
729         vpaddq          $TEMP3, $ACC3, $ACC3
730         vmovdqu         $ACC2, 32*2-128($rp)
731         vpaddq          $TEMP4, $ACC4, $ACC4
732         vmovdqu         $ACC3, 32*3-128($rp)
733 ___
734 $TEMP5=$ACC0;
735 $code.=<<___;
736         vpsrlq          \$29, $ACC4, $TEMP1
737         vpand           $AND_MASK, $ACC4, $ACC4
738         vpsrlq          \$29, $ACC5, $TEMP2
739         vpand           $AND_MASK, $ACC5, $ACC5
740         vpsrlq          \$29, $ACC6, $TEMP3
741         vpermq          \$0x93, $TEMP1, $TEMP1
742         vpand           $AND_MASK, $ACC6, $ACC6
743         vpsrlq          \$29, $ACC7, $TEMP4
744         vpermq          \$0x93, $TEMP2, $TEMP2
745         vpand           $AND_MASK, $ACC7, $ACC7
746         vpsrlq          \$29, $ACC8, $TEMP5
747         vpermq          \$0x93, $TEMP3, $TEMP3
748         vpand           $AND_MASK, $ACC8, $ACC8
749         vpermq          \$0x93, $TEMP4, $TEMP4
750
751         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
752         vpermq          \$0x93, $TEMP5, $TEMP5
753         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
754         vpaddq          $TEMP0, $ACC4, $ACC4
755         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
756         vpaddq          $TEMP1, $ACC5, $ACC5
757         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
758         vpaddq          $TEMP2, $ACC6, $ACC6
759         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
760         vpaddq          $TEMP3, $ACC7, $ACC7
761         vpaddq          $TEMP4, $ACC8, $ACC8
762
763         vpsrlq          \$29, $ACC4, $TEMP1
764         vpand           $AND_MASK, $ACC4, $ACC4
765         vpsrlq          \$29, $ACC5, $TEMP2
766         vpand           $AND_MASK, $ACC5, $ACC5
767         vpsrlq          \$29, $ACC6, $TEMP3
768         vpermq          \$0x93, $TEMP1, $TEMP1
769         vpand           $AND_MASK, $ACC6, $ACC6
770         vpsrlq          \$29, $ACC7, $TEMP4
771         vpermq          \$0x93, $TEMP2, $TEMP2
772         vpand           $AND_MASK, $ACC7, $ACC7
773         vpsrlq          \$29, $ACC8, $TEMP5
774         vpermq          \$0x93, $TEMP3, $TEMP3
775         vpand           $AND_MASK, $ACC8, $ACC8
776         vpermq          \$0x93, $TEMP4, $TEMP4
777
778         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
779         vpermq          \$0x93, $TEMP5, $TEMP5
780         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
781         vpaddq          $TEMP0, $ACC4, $ACC4
782         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
783         vpaddq          $TEMP1, $ACC5, $ACC5
784         vmovdqu         $ACC4, 32*4-128($rp)
785         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
786         vpaddq          $TEMP2, $ACC6, $ACC6
787         vmovdqu         $ACC5, 32*5-128($rp)
788         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
789         vpaddq          $TEMP3, $ACC7, $ACC7
790         vmovdqu         $ACC6, 32*6-128($rp)
791         vpaddq          $TEMP4, $ACC8, $ACC8
792         vmovdqu         $ACC7, 32*7-128($rp)
793         vmovdqu         $ACC8, 32*8-128($rp)
794
795         mov     $rp, $ap
796         dec     $rep
797         jne     .LOOP_GRANDE_SQR_1024
798
799         vzeroall
800         mov     %rbp, %rax
801 .cfi_def_cfa_register   %rax
802 ___
803 $code.=<<___ if ($win64);
804 .Lsqr_1024_in_tail:
805         movaps  -0xd8(%rax),%xmm6
806         movaps  -0xc8(%rax),%xmm7
807         movaps  -0xb8(%rax),%xmm8
808         movaps  -0xa8(%rax),%xmm9
809         movaps  -0x98(%rax),%xmm10
810         movaps  -0x88(%rax),%xmm11
811         movaps  -0x78(%rax),%xmm12
812         movaps  -0x68(%rax),%xmm13
813         movaps  -0x58(%rax),%xmm14
814         movaps  -0x48(%rax),%xmm15
815 ___
816 $code.=<<___;
817         mov     -48(%rax),%r15
818 .cfi_restore    %r15
819         mov     -40(%rax),%r14
820 .cfi_restore    %r14
821         mov     -32(%rax),%r13
822 .cfi_restore    %r13
823         mov     -24(%rax),%r12
824 .cfi_restore    %r12
825         mov     -16(%rax),%rbp
826 .cfi_restore    %rbp
827         mov     -8(%rax),%rbx
828 .cfi_restore    %rbx
829         lea     (%rax),%rsp             # restore %rsp
830 .cfi_def_cfa_register   %rsp
831 .Lsqr_1024_epilogue:
832         ret
833 .cfi_endproc
834 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
835 ___
836 }
837
838 { # void AMM_WW(
839 my $rp="%rdi";  # BN_ULONG *rp,
840 my $ap="%rsi";  # const BN_ULONG *ap,
841 my $bp="%rdx";  # const BN_ULONG *bp,
842 my $np="%rcx";  # const BN_ULONG *np,
843 my $n0="%r8d";  # unsigned int n0);
844
845 # The registers that hold the accumulated redundant result
846 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
847 # Therefore: ceil(1024/29)/4 = 9
848 my $ACC0="%ymm0";
849 my $ACC1="%ymm1";
850 my $ACC2="%ymm2";
851 my $ACC3="%ymm3";
852 my $ACC4="%ymm4";
853 my $ACC5="%ymm5";
854 my $ACC6="%ymm6";
855 my $ACC7="%ymm7";
856 my $ACC8="%ymm8";
857 my $ACC9="%ymm9";
858
859 # Registers that hold the broadcasted words of multiplier, currently used
860 my $Bi="%ymm10";
861 my $Yi="%ymm11";
862
863 # Helper registers
864 my $TEMP0=$ACC0;
865 my $TEMP1="%ymm12";
866 my $TEMP2="%ymm13";
867 my $ZERO="%ymm14";
868 my $AND_MASK="%ymm15";
869
870 # alu registers that hold the first words of the ACC
871 my $r0="%r9";
872 my $r1="%r10";
873 my $r2="%r11";
874 my $r3="%r12";
875
876 my $i="%r14d";
877 my $tmp="%r15";
878
879 $bp="%r13";     # reassigned argument
880
881 $code.=<<___;
882 .globl  rsaz_1024_mul_avx2
883 .type   rsaz_1024_mul_avx2,\@function,5
884 .align  64
885 rsaz_1024_mul_avx2:
886 .cfi_startproc
887         lea     (%rsp), %rax
888 .cfi_def_cfa_register   %rax
889         push    %rbx
890 .cfi_push       %rbx
891         push    %rbp
892 .cfi_push       %rbp
893         push    %r12
894 .cfi_push       %r12
895         push    %r13
896 .cfi_push       %r13
897         push    %r14
898 .cfi_push       %r14
899         push    %r15
900 .cfi_push       %r15
901 ___
902 $code.=<<___ if ($win64);
903         vzeroupper
904         lea     -0xa8(%rsp),%rsp
905         vmovaps %xmm6,-0xd8(%rax)
906         vmovaps %xmm7,-0xc8(%rax)
907         vmovaps %xmm8,-0xb8(%rax)
908         vmovaps %xmm9,-0xa8(%rax)
909         vmovaps %xmm10,-0x98(%rax)
910         vmovaps %xmm11,-0x88(%rax)
911         vmovaps %xmm12,-0x78(%rax)
912         vmovaps %xmm13,-0x68(%rax)
913         vmovaps %xmm14,-0x58(%rax)
914         vmovaps %xmm15,-0x48(%rax)
915 .Lmul_1024_body:
916 ___
917 $code.=<<___;
918         mov     %rax,%rbp
919 .cfi_def_cfa_register   %rbp
920         vzeroall
921         mov     %rdx, $bp       # reassigned argument
922         sub     \$64,%rsp
923
924         # an unaligned 256-bit load that crosses a page boundary can
925         # cause severe performance degradation here, so if $ap does
926         # cross a page boundary, swap it with $bp [meaning that the
927         # caller is advised to lay $ap and $bp down next to each other,
928         # so that only one of them can cross a page boundary].
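        # (Added note: the and/add/shr below evaluate ((ap & 4095) + 32*10) >> 12;
        # a non-zero result means the 320 bytes at ap straddle a 4KB page, and the
        # two cmovnz instructions then exchange $ap and $bp so that the wide
        # vector loads come from the operand that stays within one page.)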
929         .byte   0x67,0x67
930         mov     $ap, $tmp
931         and     \$4095, $tmp
932         add     \$32*10, $tmp
933         shr     \$12, $tmp
934         mov     $ap, $tmp
935         cmovnz  $bp, $ap
936         cmovnz  $tmp, $bp
937
938         mov     $np, $tmp
939         sub     \$-128,$ap      # size optimization
940         sub     \$-128,$np
941         sub     \$-128,$rp
942
943         and     \$4095, $tmp    # see if $np crosses page
944         add     \$32*10, $tmp
945         .byte   0x67,0x67
946         shr     \$12, $tmp
947         jz      .Lmul_1024_no_n_copy
948
949         # an unaligned 256-bit load that crosses a page boundary can
950         # cause severe performance degradation here, so if $np does
951         # cross a page boundary, copy it to the stack and make sure the
952         # stack frame doesn't...
953         sub             \$32*10,%rsp
954         vmovdqu         32*0-128($np), $ACC0
955         and             \$-512, %rsp
956         vmovdqu         32*1-128($np), $ACC1
957         vmovdqu         32*2-128($np), $ACC2
958         vmovdqu         32*3-128($np), $ACC3
959         vmovdqu         32*4-128($np), $ACC4
960         vmovdqu         32*5-128($np), $ACC5
961         vmovdqu         32*6-128($np), $ACC6
962         vmovdqu         32*7-128($np), $ACC7
963         vmovdqu         32*8-128($np), $ACC8
964         lea             64+128(%rsp),$np
965         vmovdqu         $ACC0, 32*0-128($np)
966         vpxor           $ACC0, $ACC0, $ACC0
967         vmovdqu         $ACC1, 32*1-128($np)
968         vpxor           $ACC1, $ACC1, $ACC1
969         vmovdqu         $ACC2, 32*2-128($np)
970         vpxor           $ACC2, $ACC2, $ACC2
971         vmovdqu         $ACC3, 32*3-128($np)
972         vpxor           $ACC3, $ACC3, $ACC3
973         vmovdqu         $ACC4, 32*4-128($np)
974         vpxor           $ACC4, $ACC4, $ACC4
975         vmovdqu         $ACC5, 32*5-128($np)
976         vpxor           $ACC5, $ACC5, $ACC5
977         vmovdqu         $ACC6, 32*6-128($np)
978         vpxor           $ACC6, $ACC6, $ACC6
979         vmovdqu         $ACC7, 32*7-128($np)
980         vpxor           $ACC7, $ACC7, $ACC7
981         vmovdqu         $ACC8, 32*8-128($np)
982         vmovdqa         $ACC0, $ACC8
983         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
984 .Lmul_1024_no_n_copy:
985         and     \$-64,%rsp
986
987         mov     ($bp), %rbx
988         vpbroadcastq ($bp), $Bi
989         vmovdqu $ACC0, (%rsp)                   # clear top of stack
990         xor     $r0, $r0
991         .byte   0x67
992         xor     $r1, $r1
993         xor     $r2, $r2
994         xor     $r3, $r3
995
996         vmovdqu .Land_mask(%rip), $AND_MASK
997         mov     \$9, $i
998         vmovdqu $ACC9, 32*9-128($rp)            # $ACC9 is zero after vzeroall
999         jmp     .Loop_mul_1024
1000
1001 .align  32
1002 .Loop_mul_1024:
1003          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
1004         mov     %rbx, %rax
1005         imulq   -128($ap), %rax
1006         add     $r0, %rax
1007         mov     %rbx, $r1
1008         imulq   8-128($ap), $r1
1009         add     8(%rsp), $r1
1010
1011         mov     %rax, $r0
1012         imull   $n0, %eax
1013         and     \$0x1fffffff, %eax
1014
1015          mov    %rbx, $r2
1016          imulq  16-128($ap), $r2
1017          add    16(%rsp), $r2
1018
1019          mov    %rbx, $r3
1020          imulq  24-128($ap), $r3
1021          add    24(%rsp), $r3
1022         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1023          vmovd          %eax, $Yi
1024         vpaddq          $TEMP0,$ACC1,$ACC1
1025         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1026          vpbroadcastq   $Yi, $Yi
1027         vpaddq          $TEMP1,$ACC2,$ACC2
1028         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1029          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1030         vpaddq          $TEMP2,$ACC3,$ACC3
1031         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1032         vpaddq          $TEMP0,$ACC4,$ACC4
1033         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1034         vpaddq          $TEMP1,$ACC5,$ACC5
1035         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1036         vpaddq          $TEMP2,$ACC6,$ACC6
1037         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1038          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1039         vpaddq          $TEMP0,$ACC7,$ACC7
1040         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1041          vpbroadcastq   8($bp), $Bi
1042         vpaddq          $TEMP1,$ACC8,$ACC8
1043
1044         mov     %rax,%rdx
1045         imulq   -128($np),%rax
1046         add     %rax,$r0
1047         mov     %rdx,%rax
1048         imulq   8-128($np),%rax
1049         add     %rax,$r1
1050         mov     %rdx,%rax
1051         imulq   16-128($np),%rax
1052         add     %rax,$r2
1053         shr     \$29, $r0
1054         imulq   24-128($np),%rdx
1055         add     %rdx,$r3
1056         add     $r0, $r1
1057
1058         vpmuludq        32*1-128($np),$Yi,$TEMP2
1059          vmovq          $Bi, %rbx
1060         vpaddq          $TEMP2,$ACC1,$ACC1
1061         vpmuludq        32*2-128($np),$Yi,$TEMP0
1062         vpaddq          $TEMP0,$ACC2,$ACC2
1063         vpmuludq        32*3-128($np),$Yi,$TEMP1
1064         vpaddq          $TEMP1,$ACC3,$ACC3
1065         vpmuludq        32*4-128($np),$Yi,$TEMP2
1066         vpaddq          $TEMP2,$ACC4,$ACC4
1067         vpmuludq        32*5-128($np),$Yi,$TEMP0
1068         vpaddq          $TEMP0,$ACC5,$ACC5
1069         vpmuludq        32*6-128($np),$Yi,$TEMP1
1070         vpaddq          $TEMP1,$ACC6,$ACC6
1071         vpmuludq        32*7-128($np),$Yi,$TEMP2
1072          vpblendd       \$3, $ZERO, $ACC9, $TEMP1       # correct $ACC3
1073         vpaddq          $TEMP2,$ACC7,$ACC7
1074         vpmuludq        32*8-128($np),$Yi,$TEMP0
1075          vpaddq         $TEMP1, $ACC3, $ACC3            # correct $ACC3
1076         vpaddq          $TEMP0,$ACC8,$ACC8
1077
1078         mov     %rbx, %rax
1079         imulq   -128($ap),%rax
1080         add     %rax,$r1
1081          vmovdqu        -8+32*1-128($ap),$TEMP1
1082         mov     %rbx, %rax
1083         imulq   8-128($ap),%rax
1084         add     %rax,$r2
1085          vmovdqu        -8+32*2-128($ap),$TEMP2
1086
1087         mov     $r1, %rax
1088          vpblendd       \$0xfc, $ZERO, $ACC9, $ACC9     # correct $ACC3
1089         imull   $n0, %eax
1090          vpaddq         $ACC9,$ACC4,$ACC4               # correct $ACC3
1091         and     \$0x1fffffff, %eax
1092
1093          imulq  16-128($ap),%rbx
1094          add    %rbx,$r3
1095         vpmuludq        $Bi,$TEMP1,$TEMP1
1096          vmovd          %eax, $Yi
1097         vmovdqu         -8+32*3-128($ap),$TEMP0
1098         vpaddq          $TEMP1,$ACC1,$ACC1
1099         vpmuludq        $Bi,$TEMP2,$TEMP2
1100          vpbroadcastq   $Yi, $Yi
1101         vmovdqu         -8+32*4-128($ap),$TEMP1
1102         vpaddq          $TEMP2,$ACC2,$ACC2
1103         vpmuludq        $Bi,$TEMP0,$TEMP0
1104         vmovdqu         -8+32*5-128($ap),$TEMP2
1105         vpaddq          $TEMP0,$ACC3,$ACC3
1106         vpmuludq        $Bi,$TEMP1,$TEMP1
1107         vmovdqu         -8+32*6-128($ap),$TEMP0
1108         vpaddq          $TEMP1,$ACC4,$ACC4
1109         vpmuludq        $Bi,$TEMP2,$TEMP2
1110         vmovdqu         -8+32*7-128($ap),$TEMP1
1111         vpaddq          $TEMP2,$ACC5,$ACC5
1112         vpmuludq        $Bi,$TEMP0,$TEMP0
1113         vmovdqu         -8+32*8-128($ap),$TEMP2
1114         vpaddq          $TEMP0,$ACC6,$ACC6
1115         vpmuludq        $Bi,$TEMP1,$TEMP1
1116         vmovdqu         -8+32*9-128($ap),$ACC9
1117         vpaddq          $TEMP1,$ACC7,$ACC7
1118         vpmuludq        $Bi,$TEMP2,$TEMP2
1119         vpaddq          $TEMP2,$ACC8,$ACC8
1120         vpmuludq        $Bi,$ACC9,$ACC9
1121          vpbroadcastq   16($bp), $Bi
1122
1123         mov     %rax,%rdx
1124         imulq   -128($np),%rax
1125         add     %rax,$r1
1126          vmovdqu        -8+32*1-128($np),$TEMP0
1127         mov     %rdx,%rax
1128         imulq   8-128($np),%rax
1129         add     %rax,$r2
1130          vmovdqu        -8+32*2-128($np),$TEMP1
1131         shr     \$29, $r1
1132         imulq   16-128($np),%rdx
1133         add     %rdx,$r3
1134         add     $r1, $r2
1135
1136         vpmuludq        $Yi,$TEMP0,$TEMP0
1137          vmovq          $Bi, %rbx
1138         vmovdqu         -8+32*3-128($np),$TEMP2
1139         vpaddq          $TEMP0,$ACC1,$ACC1
1140         vpmuludq        $Yi,$TEMP1,$TEMP1
1141         vmovdqu         -8+32*4-128($np),$TEMP0
1142         vpaddq          $TEMP1,$ACC2,$ACC2
1143         vpmuludq        $Yi,$TEMP2,$TEMP2
1144         vmovdqu         -8+32*5-128($np),$TEMP1
1145         vpaddq          $TEMP2,$ACC3,$ACC3
1146         vpmuludq        $Yi,$TEMP0,$TEMP0
1147         vmovdqu         -8+32*6-128($np),$TEMP2
1148         vpaddq          $TEMP0,$ACC4,$ACC4
1149         vpmuludq        $Yi,$TEMP1,$TEMP1
1150         vmovdqu         -8+32*7-128($np),$TEMP0
1151         vpaddq          $TEMP1,$ACC5,$ACC5
1152         vpmuludq        $Yi,$TEMP2,$TEMP2
1153         vmovdqu         -8+32*8-128($np),$TEMP1
1154         vpaddq          $TEMP2,$ACC6,$ACC6
1155         vpmuludq        $Yi,$TEMP0,$TEMP0
1156         vmovdqu         -8+32*9-128($np),$TEMP2
1157         vpaddq          $TEMP0,$ACC7,$ACC7
1158         vpmuludq        $Yi,$TEMP1,$TEMP1
1159         vpaddq          $TEMP1,$ACC8,$ACC8
1160         vpmuludq        $Yi,$TEMP2,$TEMP2
1161         vpaddq          $TEMP2,$ACC9,$ACC9
1162
1163          vmovdqu        -16+32*1-128($ap),$TEMP0
1164         mov     %rbx,%rax
1165         imulq   -128($ap),%rax
1166         add     $r2,%rax
1167
1168          vmovdqu        -16+32*2-128($ap),$TEMP1
1169         mov     %rax,$r2
1170         imull   $n0, %eax
1171         and     \$0x1fffffff, %eax
1172
1173          imulq  8-128($ap),%rbx
1174          add    %rbx,$r3
1175         vpmuludq        $Bi,$TEMP0,$TEMP0
1176          vmovd          %eax, $Yi
1177         vmovdqu         -16+32*3-128($ap),$TEMP2
1178         vpaddq          $TEMP0,$ACC1,$ACC1
1179         vpmuludq        $Bi,$TEMP1,$TEMP1
1180          vpbroadcastq   $Yi, $Yi
1181         vmovdqu         -16+32*4-128($ap),$TEMP0
1182         vpaddq          $TEMP1,$ACC2,$ACC2
1183         vpmuludq        $Bi,$TEMP2,$TEMP2
1184         vmovdqu         -16+32*5-128($ap),$TEMP1
1185         vpaddq          $TEMP2,$ACC3,$ACC3
1186         vpmuludq        $Bi,$TEMP0,$TEMP0
1187         vmovdqu         -16+32*6-128($ap),$TEMP2
1188         vpaddq          $TEMP0,$ACC4,$ACC4
1189         vpmuludq        $Bi,$TEMP1,$TEMP1
1190         vmovdqu         -16+32*7-128($ap),$TEMP0
1191         vpaddq          $TEMP1,$ACC5,$ACC5
1192         vpmuludq        $Bi,$TEMP2,$TEMP2
1193         vmovdqu         -16+32*8-128($ap),$TEMP1
1194         vpaddq          $TEMP2,$ACC6,$ACC6
1195         vpmuludq        $Bi,$TEMP0,$TEMP0
1196         vmovdqu         -16+32*9-128($ap),$TEMP2
1197         vpaddq          $TEMP0,$ACC7,$ACC7
1198         vpmuludq        $Bi,$TEMP1,$TEMP1
1199         vpaddq          $TEMP1,$ACC8,$ACC8
1200         vpmuludq        $Bi,$TEMP2,$TEMP2
1201          vpbroadcastq   24($bp), $Bi
1202         vpaddq          $TEMP2,$ACC9,$ACC9
1203
1204          vmovdqu        -16+32*1-128($np),$TEMP0
1205         mov     %rax,%rdx
1206         imulq   -128($np),%rax
1207         add     %rax,$r2
1208          vmovdqu        -16+32*2-128($np),$TEMP1
1209         imulq   8-128($np),%rdx
1210         add     %rdx,$r3
1211         shr     \$29, $r2
1212
1213         vpmuludq        $Yi,$TEMP0,$TEMP0
1214          vmovq          $Bi, %rbx
1215         vmovdqu         -16+32*3-128($np),$TEMP2
1216         vpaddq          $TEMP0,$ACC1,$ACC1
1217         vpmuludq        $Yi,$TEMP1,$TEMP1
1218         vmovdqu         -16+32*4-128($np),$TEMP0
1219         vpaddq          $TEMP1,$ACC2,$ACC2
1220         vpmuludq        $Yi,$TEMP2,$TEMP2
1221         vmovdqu         -16+32*5-128($np),$TEMP1
1222         vpaddq          $TEMP2,$ACC3,$ACC3
1223         vpmuludq        $Yi,$TEMP0,$TEMP0
1224         vmovdqu         -16+32*6-128($np),$TEMP2
1225         vpaddq          $TEMP0,$ACC4,$ACC4
1226         vpmuludq        $Yi,$TEMP1,$TEMP1
1227         vmovdqu         -16+32*7-128($np),$TEMP0
1228         vpaddq          $TEMP1,$ACC5,$ACC5
1229         vpmuludq        $Yi,$TEMP2,$TEMP2
1230         vmovdqu         -16+32*8-128($np),$TEMP1
1231         vpaddq          $TEMP2,$ACC6,$ACC6
1232         vpmuludq        $Yi,$TEMP0,$TEMP0
1233         vmovdqu         -16+32*9-128($np),$TEMP2
1234         vpaddq          $TEMP0,$ACC7,$ACC7
1235         vpmuludq        $Yi,$TEMP1,$TEMP1
1236          vmovdqu        -24+32*1-128($ap),$TEMP0
1237         vpaddq          $TEMP1,$ACC8,$ACC8
1238         vpmuludq        $Yi,$TEMP2,$TEMP2
1239          vmovdqu        -24+32*2-128($ap),$TEMP1
1240         vpaddq          $TEMP2,$ACC9,$ACC9
1241
1242         add     $r2, $r3
1243         imulq   -128($ap),%rbx
1244         add     %rbx,$r3
1245
1246         mov     $r3, %rax
1247         imull   $n0, %eax
1248         and     \$0x1fffffff, %eax
1249
1250         vpmuludq        $Bi,$TEMP0,$TEMP0
1251          vmovd          %eax, $Yi
1252         vmovdqu         -24+32*3-128($ap),$TEMP2
1253         vpaddq          $TEMP0,$ACC1,$ACC1
1254         vpmuludq        $Bi,$TEMP1,$TEMP1
1255          vpbroadcastq   $Yi, $Yi
1256         vmovdqu         -24+32*4-128($ap),$TEMP0
1257         vpaddq          $TEMP1,$ACC2,$ACC2
1258         vpmuludq        $Bi,$TEMP2,$TEMP2
1259         vmovdqu         -24+32*5-128($ap),$TEMP1
1260         vpaddq          $TEMP2,$ACC3,$ACC3
1261         vpmuludq        $Bi,$TEMP0,$TEMP0
1262         vmovdqu         -24+32*6-128($ap),$TEMP2
1263         vpaddq          $TEMP0,$ACC4,$ACC4
1264         vpmuludq        $Bi,$TEMP1,$TEMP1
1265         vmovdqu         -24+32*7-128($ap),$TEMP0
1266         vpaddq          $TEMP1,$ACC5,$ACC5
1267         vpmuludq        $Bi,$TEMP2,$TEMP2
1268         vmovdqu         -24+32*8-128($ap),$TEMP1
1269         vpaddq          $TEMP2,$ACC6,$ACC6
1270         vpmuludq        $Bi,$TEMP0,$TEMP0
1271         vmovdqu         -24+32*9-128($ap),$TEMP2
1272         vpaddq          $TEMP0,$ACC7,$ACC7
1273         vpmuludq        $Bi,$TEMP1,$TEMP1
1274         vpaddq          $TEMP1,$ACC8,$ACC8
1275         vpmuludq        $Bi,$TEMP2,$TEMP2
1276          vpbroadcastq   32($bp), $Bi
1277         vpaddq          $TEMP2,$ACC9,$ACC9
1278          add            \$32, $bp                       # $bp++
1279
1280         vmovdqu         -24+32*1-128($np),$TEMP0
1281         imulq   -128($np),%rax
1282         add     %rax,$r3
1283         shr     \$29, $r3
1284
1285         vmovdqu         -24+32*2-128($np),$TEMP1
1286         vpmuludq        $Yi,$TEMP0,$TEMP0
1287          vmovq          $Bi, %rbx
1288         vmovdqu         -24+32*3-128($np),$TEMP2
1289         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1290         vpmuludq        $Yi,$TEMP1,$TEMP1
1291          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1292         vpaddq          $TEMP1,$ACC2,$ACC1
1293         vmovdqu         -24+32*4-128($np),$TEMP0
1294         vpmuludq        $Yi,$TEMP2,$TEMP2
1295         vmovdqu         -24+32*5-128($np),$TEMP1
1296         vpaddq          $TEMP2,$ACC3,$ACC2
1297         vpmuludq        $Yi,$TEMP0,$TEMP0
1298         vmovdqu         -24+32*6-128($np),$TEMP2
1299         vpaddq          $TEMP0,$ACC4,$ACC3
1300         vpmuludq        $Yi,$TEMP1,$TEMP1
1301         vmovdqu         -24+32*7-128($np),$TEMP0
1302         vpaddq          $TEMP1,$ACC5,$ACC4
1303         vpmuludq        $Yi,$TEMP2,$TEMP2
1304         vmovdqu         -24+32*8-128($np),$TEMP1
1305         vpaddq          $TEMP2,$ACC6,$ACC5
1306         vpmuludq        $Yi,$TEMP0,$TEMP0
1307         vmovdqu         -24+32*9-128($np),$TEMP2
1308          mov    $r3, $r0
1309         vpaddq          $TEMP0,$ACC7,$ACC6
1310         vpmuludq        $Yi,$TEMP1,$TEMP1
1311          add    (%rsp), $r0
1312         vpaddq          $TEMP1,$ACC8,$ACC7
1313         vpmuludq        $Yi,$TEMP2,$TEMP2
1314          vmovq  $r3, $TEMP1
1315         vpaddq          $TEMP2,$ACC9,$ACC8
1316
1317         dec     $i
1318         jnz     .Loop_mul_1024
1319 ___
1320
1321 # (*)   The original implementation corrected ACC1-ACC3 for overflow
1322 #       after 7 loop runs, i.e. after 28 iterations or 56 additions.
1323 #       But since execution resources are underutilized, it's possible
1324 #       to correct in each iteration at marginal performance loss; and
1325 #       because we then correct on every iteration, fewer digits need
1326 #       correcting, which avoids the performance penalty completely.
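# (Added note on the rough bound behind the statement above: each partial
# product of two 29-bit digits is below 2^58, so a 64-bit lane overflows only
# after more than 2^(64-58) = 64 such additions have accumulated; that is why
# the original 56 additions were still safe, and why correcting a little on
# every iteration removes the risk at no cost.
# In Perl terms: 2**58 * 64 == 2**64, and 56 < 64.)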
1327
$TEMP0 = $ACC9;
$TEMP3 = $Bi;
$TEMP4 = $Yi;
$code.=<<___;
        vpaddq          (%rsp), $TEMP1, $ACC0

        vpsrlq          \$29, $ACC0, $TEMP1
        vpand           $AND_MASK, $ACC0, $ACC0
        vpsrlq          \$29, $ACC1, $TEMP2
        vpand           $AND_MASK, $ACC1, $ACC1
        vpsrlq          \$29, $ACC2, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC2, $ACC2
        vpsrlq          \$29, $ACC3, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC3, $ACC3

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP3, $TEMP3
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpermq          \$0x93, $TEMP4, $TEMP4
        vpaddq          $TEMP0, $ACC0, $ACC0
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC1, $ACC1
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC2, $ACC2
        vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
        vpaddq          $TEMP3, $ACC3, $ACC3
        vpaddq          $TEMP4, $ACC4, $ACC4

        vpsrlq          \$29, $ACC0, $TEMP1
        vpand           $AND_MASK, $ACC0, $ACC0
        vpsrlq          \$29, $ACC1, $TEMP2
        vpand           $AND_MASK, $ACC1, $ACC1
        vpsrlq          \$29, $ACC2, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC2, $ACC2
        vpsrlq          \$29, $ACC3, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC3, $ACC3
        vpermq          \$0x93, $TEMP3, $TEMP3

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP4, $TEMP4
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC0, $ACC0
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC1, $ACC1
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC2, $ACC2
        vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
        vpaddq          $TEMP3, $ACC3, $ACC3
        vpaddq          $TEMP4, $ACC4, $ACC4

        vmovdqu         $ACC0, 0-128($rp)
        vmovdqu         $ACC1, 32-128($rp)
        vmovdqu         $ACC2, 64-128($rp)
        vmovdqu         $ACC3, 96-128($rp)
___

$TEMP5=$ACC0;
$code.=<<___;
        vpsrlq          \$29, $ACC4, $TEMP1
        vpand           $AND_MASK, $ACC4, $ACC4
        vpsrlq          \$29, $ACC5, $TEMP2
        vpand           $AND_MASK, $ACC5, $ACC5
        vpsrlq          \$29, $ACC6, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC6, $ACC6
        vpsrlq          \$29, $ACC7, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC7, $ACC7
        vpsrlq          \$29, $ACC8, $TEMP5
        vpermq          \$0x93, $TEMP3, $TEMP3
        vpand           $AND_MASK, $ACC8, $ACC8
        vpermq          \$0x93, $TEMP4, $TEMP4

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP5, $TEMP5
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC4, $ACC4
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC5, $ACC5
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC6, $ACC6
        vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
        vpaddq          $TEMP3, $ACC7, $ACC7
        vpaddq          $TEMP4, $ACC8, $ACC8

        vpsrlq          \$29, $ACC4, $TEMP1
        vpand           $AND_MASK, $ACC4, $ACC4
        vpsrlq          \$29, $ACC5, $TEMP2
        vpand           $AND_MASK, $ACC5, $ACC5
        vpsrlq          \$29, $ACC6, $TEMP3
        vpermq          \$0x93, $TEMP1, $TEMP1
        vpand           $AND_MASK, $ACC6, $ACC6
        vpsrlq          \$29, $ACC7, $TEMP4
        vpermq          \$0x93, $TEMP2, $TEMP2
        vpand           $AND_MASK, $ACC7, $ACC7
        vpsrlq          \$29, $ACC8, $TEMP5
        vpermq          \$0x93, $TEMP3, $TEMP3
        vpand           $AND_MASK, $ACC8, $ACC8
        vpermq          \$0x93, $TEMP4, $TEMP4

        vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
        vpermq          \$0x93, $TEMP5, $TEMP5
        vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
        vpaddq          $TEMP0, $ACC4, $ACC4
        vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
        vpaddq          $TEMP1, $ACC5, $ACC5
        vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
        vpaddq          $TEMP2, $ACC6, $ACC6
        vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
        vpaddq          $TEMP3, $ACC7, $ACC7
        vpaddq          $TEMP4, $ACC8, $ACC8

        vmovdqu         $ACC4, 128-128($rp)
        vmovdqu         $ACC5, 160-128($rp)
        vmovdqu         $ACC6, 192-128($rp)
        vmovdqu         $ACC7, 224-128($rp)
        vmovdqu         $ACC8, 256-128($rp)
        vzeroupper

        mov     %rbp, %rax
.cfi_def_cfa_register   %rax
___
$code.=<<___ if ($win64);
.Lmul_1024_in_tail:
        movaps  -0xd8(%rax),%xmm6
        movaps  -0xc8(%rax),%xmm7
        movaps  -0xb8(%rax),%xmm8
        movaps  -0xa8(%rax),%xmm9
        movaps  -0x98(%rax),%xmm10
        movaps  -0x88(%rax),%xmm11
        movaps  -0x78(%rax),%xmm12
        movaps  -0x68(%rax),%xmm13
        movaps  -0x58(%rax),%xmm14
        movaps  -0x48(%rax),%xmm15
___
$code.=<<___;
        mov     -48(%rax),%r15
.cfi_restore    %r15
        mov     -40(%rax),%r14
.cfi_restore    %r14
        mov     -32(%rax),%r13
.cfi_restore    %r13
        mov     -24(%rax),%r12
.cfi_restore    %r12
        mov     -16(%rax),%rbp
.cfi_restore    %rbp
        mov     -8(%rax),%rbx
.cfi_restore    %rbx
        lea     (%rax),%rsp             # restore %rsp
.cfi_def_cfa_register   %rsp
.Lmul_1024_epilogue:
        ret
.cfi_endproc
.size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
___
}
{
my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
my @T = map("%r$_",(8..11));

$code.=<<___;
.globl  rsaz_1024_red2norm_avx2
.type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
.align  32
rsaz_1024_red2norm_avx2:
.cfi_startproc
        sub     \$-128,$inp     # size optimization
        xor     %rax,%rax
___

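# The straight-line code emitted by the loop below converts the redundant
# form back to sixteen 64-bit words. Conceptually (an illustrative sketch
# only, nothing here is executed by the generator), with the 36 29-bit
# digits stored least significant first:
#
#       # value  = sum_{j} digit[j] * 2^(29*j)
#       # out[i] = (value >> (64*i)) & (2^64 - 1),      i = 0..15
#
# Each output word therefore collects the few digits that overlap its 64-bit
# window, shifts them into place and adds them up, carrying into the next
# word.
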
for ($j=0,$i=0; $i<16; $i++) {
    my $k=0;
    while (29*$j<64*($i+1)) {   # load data till boundary
        $code.="        mov     `8*$j-128`($inp), @T[0]\n";
        $j++; $k++; push(@T,shift(@T));
    }
    $l=$k;
    while ($k>1) {              # shift all loaded values but the last
        $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
        $k--;
    }
    $code.=<<___;               # shift last value
        mov     @T[-1], @T[0]
        shl     \$`29*($j-1)`, @T[-1]
        shr     \$`-29*($j-1)`, @T[0]
___
    while ($l) {                # accumulate all values
        $code.="        add     @T[-$l], %rax\n";
        $l--;
    }
        $code.=<<___;
        adc     \$0, @T[0]      # consume any pending carry
        mov     %rax, 8*$i($out)
        mov     @T[0], %rax
___
    push(@T,shift(@T));
}
$code.=<<___;
        ret
.cfi_endproc
.size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2

.globl  rsaz_1024_norm2red_avx2
.type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
.align  32
rsaz_1024_norm2red_avx2:
.cfi_startproc
        sub     \$-128,$out     # size optimization
        mov     ($inp),@T[0]
        mov     \$0x1fffffff,%eax
___
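
# The inverse conversion below splits each 64-bit input word into 29-bit
# digits; in rough terms (sketch only):
#
#       # digit[j] = (value >> (29*j)) & 0x1fffffff
#
# A digit that lies entirely inside the current word is produced with a
# plain shr+and, while a digit that straddles a 64-bit boundary is pulled
# together from two adjacent words with shrd before masking.
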
for ($j=0,$i=0; $i<16; $i++) {
    $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
    $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
    my $k=1;
    while (29*($j+1)<64*($i+1)) {
        $code.=<<___;
        mov     @T[0],@T[-$k]
        shr     \$`29*$j`,@T[-$k]
        and     %rax,@T[-$k]                            # &0x1fffffff
        mov     @T[-$k],`8*$j-128`($out)
___
        $j++; $k++;
    }
    $code.=<<___;
        shrd    \$`29*$j`,@T[1],@T[0]
        and     %rax,@T[0]
        mov     @T[0],`8*$j-128`($out)
___
    $j++;
    push(@T,shift(@T));
}
$code.=<<___;
        mov     @T[0],`8*$j-128`($out)                  # zero
        mov     @T[0],`8*($j+1)-128`($out)
        mov     @T[0],`8*($j+2)-128`($out)
        mov     @T[0],`8*($j+3)-128`($out)
        ret
.cfi_endproc
.size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
___
}
{
my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");

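# Table layout and constant-time access: rsaz_1024_scatter5_avx2 compresses
# each 1024-bit entry into 9 16-byte chunks (the low 32 bits of four digit
# slots per chunk, selected by .Lscatter_permd) and stores chunk c of entry p
# at byte offset c*512 + p*16, so the 32 entries of every chunk sit next to
# each other. rsaz_1024_gather5_avx2 then reads all 32 entries of every chunk
# and selects the wanted one with precomputed vpcmpeqd masks, roughly like
# this scalar sketch (illustrative only, not part of the generated code):
#
#       for my $chunk (0 .. 8) {
#               my $acc = 0;
#               for my $idx (0 .. 31) {                 # touch every entry
#                       my $mask = ($idx == $power) ? ~0 : 0;
#                       $acc |= $table[$chunk][$idx] & $mask;
#               }
#               # $acc is the selected 16-byte chunk, re-expanded to four
#               # 64-bit digit slots via .Lgather_permd before being stored
#       }
#
# so the sequence of memory accesses is independent of $power.
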
$code.=<<___;
.globl  rsaz_1024_scatter5_avx2
.type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
.align  32
rsaz_1024_scatter5_avx2:
.cfi_startproc
        vzeroupper
        vmovdqu .Lscatter_permd(%rip),%ymm5
        shl     \$4,$power
        lea     ($out,$power),$out
        mov     \$9,%eax
        jmp     .Loop_scatter_1024

.align  32
.Loop_scatter_1024:
        vmovdqu         ($inp),%ymm0
        lea             32($inp),$inp
        vpermd          %ymm0,%ymm5,%ymm0
        vmovdqu         %xmm0,($out)
        lea             16*32($out),$out
        dec     %eax
        jnz     .Loop_scatter_1024

        vzeroupper
        ret
.cfi_endproc
.size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2

.globl  rsaz_1024_gather5_avx2
.type   rsaz_1024_gather5_avx2,\@abi-omnipotent
.align  32
rsaz_1024_gather5_avx2:
.cfi_startproc
        vzeroupper
        mov     %rsp,%r11
.cfi_def_cfa_register   %r11
___
$code.=<<___ if ($win64);
        lea     -0x88(%rsp),%rax
.LSEH_begin_rsaz_1024_gather5:
        # I can't trust assembler to use specific encoding:-(
        .byte   0x48,0x8d,0x60,0xe0             # lea   -0x20(%rax),%rsp
        .byte   0xc5,0xf8,0x29,0x70,0xe0        # vmovaps %xmm6,-0x20(%rax)
        .byte   0xc5,0xf8,0x29,0x78,0xf0        # vmovaps %xmm7,-0x10(%rax)
        .byte   0xc5,0x78,0x29,0x40,0x00        # vmovaps %xmm8,0(%rax)
        .byte   0xc5,0x78,0x29,0x48,0x10        # vmovaps %xmm9,0x10(%rax)
        .byte   0xc5,0x78,0x29,0x50,0x20        # vmovaps %xmm10,0x20(%rax)
        .byte   0xc5,0x78,0x29,0x58,0x30        # vmovaps %xmm11,0x30(%rax)
        .byte   0xc5,0x78,0x29,0x60,0x40        # vmovaps %xmm12,0x40(%rax)
        .byte   0xc5,0x78,0x29,0x68,0x50        # vmovaps %xmm13,0x50(%rax)
        .byte   0xc5,0x78,0x29,0x70,0x60        # vmovaps %xmm14,0x60(%rax)
        .byte   0xc5,0x78,0x29,0x78,0x70        # vmovaps %xmm15,0x70(%rax)
___
$code.=<<___;
        lea     -0x100(%rsp),%rsp
        and     \$-32, %rsp
        lea     .Linc(%rip), %r10
        lea     -128(%rsp),%rax                 # control u-op density

        vmovd           $power, %xmm4
        vmovdqa         (%r10),%ymm0
        vmovdqa         32(%r10),%ymm1
        vmovdqa         64(%r10),%ymm5
        vpbroadcastd    %xmm4,%ymm4

        vpaddd          %ymm5, %ymm0, %ymm2
        vpcmpeqd        %ymm4, %ymm0, %ymm0
        vpaddd          %ymm5, %ymm1, %ymm3
        vpcmpeqd        %ymm4, %ymm1, %ymm1
        vmovdqa         %ymm0, 32*0+128(%rax)
        vpaddd          %ymm5, %ymm2, %ymm0
        vpcmpeqd        %ymm4, %ymm2, %ymm2
        vmovdqa         %ymm1, 32*1+128(%rax)
        vpaddd          %ymm5, %ymm3, %ymm1
        vpcmpeqd        %ymm4, %ymm3, %ymm3
        vmovdqa         %ymm2, 32*2+128(%rax)
        vpaddd          %ymm5, %ymm0, %ymm2
        vpcmpeqd        %ymm4, %ymm0, %ymm0
        vmovdqa         %ymm3, 32*3+128(%rax)
        vpaddd          %ymm5, %ymm1, %ymm3
        vpcmpeqd        %ymm4, %ymm1, %ymm1
        vmovdqa         %ymm0, 32*4+128(%rax)
        vpaddd          %ymm5, %ymm2, %ymm8
        vpcmpeqd        %ymm4, %ymm2, %ymm2
        vmovdqa         %ymm1, 32*5+128(%rax)
        vpaddd          %ymm5, %ymm3, %ymm9
        vpcmpeqd        %ymm4, %ymm3, %ymm3
        vmovdqa         %ymm2, 32*6+128(%rax)
        vpaddd          %ymm5, %ymm8, %ymm10
        vpcmpeqd        %ymm4, %ymm8, %ymm8
        vmovdqa         %ymm3, 32*7+128(%rax)
        vpaddd          %ymm5, %ymm9, %ymm11
        vpcmpeqd        %ymm4, %ymm9, %ymm9
        vpaddd          %ymm5, %ymm10, %ymm12
        vpcmpeqd        %ymm4, %ymm10, %ymm10
        vpaddd          %ymm5, %ymm11, %ymm13
        vpcmpeqd        %ymm4, %ymm11, %ymm11
        vpaddd          %ymm5, %ymm12, %ymm14
        vpcmpeqd        %ymm4, %ymm12, %ymm12
        vpaddd          %ymm5, %ymm13, %ymm15
        vpcmpeqd        %ymm4, %ymm13, %ymm13
        vpcmpeqd        %ymm4, %ymm14, %ymm14
        vpcmpeqd        %ymm4, %ymm15, %ymm15

        vmovdqa -32(%r10),%ymm7                 # .Lgather_permd
        lea     128($inp), $inp
        mov     \$9,$power

.Loop_gather_1024:
        vmovdqa         32*0-128($inp), %ymm0
        vmovdqa         32*1-128($inp), %ymm1
        vmovdqa         32*2-128($inp), %ymm2
        vmovdqa         32*3-128($inp), %ymm3
        vpand           32*0+128(%rax), %ymm0,  %ymm0
        vpand           32*1+128(%rax), %ymm1,  %ymm1
        vpand           32*2+128(%rax), %ymm2,  %ymm2
        vpor            %ymm0, %ymm1, %ymm4
        vpand           32*3+128(%rax), %ymm3,  %ymm3
        vmovdqa         32*4-128($inp), %ymm0
        vmovdqa         32*5-128($inp), %ymm1
        vpor            %ymm2, %ymm3, %ymm5
        vmovdqa         32*6-128($inp), %ymm2
        vmovdqa         32*7-128($inp), %ymm3
        vpand           32*4+128(%rax), %ymm0,  %ymm0
        vpand           32*5+128(%rax), %ymm1,  %ymm1
        vpand           32*6+128(%rax), %ymm2,  %ymm2
        vpor            %ymm0, %ymm4, %ymm4
        vpand           32*7+128(%rax), %ymm3,  %ymm3
        vpand           32*8-128($inp), %ymm8,  %ymm0
        vpor            %ymm1, %ymm5, %ymm5
        vpand           32*9-128($inp), %ymm9,  %ymm1
        vpor            %ymm2, %ymm4, %ymm4
        vpand           32*10-128($inp),%ymm10, %ymm2
        vpor            %ymm3, %ymm5, %ymm5
        vpand           32*11-128($inp),%ymm11, %ymm3
        vpor            %ymm0, %ymm4, %ymm4
        vpand           32*12-128($inp),%ymm12, %ymm0
        vpor            %ymm1, %ymm5, %ymm5
        vpand           32*13-128($inp),%ymm13, %ymm1
        vpor            %ymm2, %ymm4, %ymm4
        vpand           32*14-128($inp),%ymm14, %ymm2
        vpor            %ymm3, %ymm5, %ymm5
        vpand           32*15-128($inp),%ymm15, %ymm3
        lea             32*16($inp), $inp
        vpor            %ymm0, %ymm4, %ymm4
        vpor            %ymm1, %ymm5, %ymm5
        vpor            %ymm2, %ymm4, %ymm4
        vpor            %ymm3, %ymm5, %ymm5

        vpor            %ymm5, %ymm4, %ymm4
        vextracti128    \$1, %ymm4, %xmm5       # upper half is cleared
        vpor            %xmm4, %xmm5, %xmm5
        vpermd          %ymm5,%ymm7,%ymm5
        vmovdqu         %ymm5,($out)
        lea             32($out),$out
        dec     $power
        jnz     .Loop_gather_1024

        vpxor   %ymm0,%ymm0,%ymm0
        vmovdqu %ymm0,($out)
        vzeroupper
___
$code.=<<___ if ($win64);
        movaps  -0xa8(%r11),%xmm6
        movaps  -0x98(%r11),%xmm7
        movaps  -0x88(%r11),%xmm8
        movaps  -0x78(%r11),%xmm9
        movaps  -0x68(%r11),%xmm10
        movaps  -0x58(%r11),%xmm11
        movaps  -0x48(%r11),%xmm12
        movaps  -0x38(%r11),%xmm13
        movaps  -0x28(%r11),%xmm14
        movaps  -0x18(%r11),%xmm15
___
$code.=<<___;
        lea     (%r11),%rsp
.cfi_def_cfa_register   %rsp
        ret
.cfi_endproc
.LSEH_end_rsaz_1024_gather5:
.size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
___
}

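# rsaz_avx2_eligible reports whether this module should be used at all. It
# inspects the third 32-bit word of OPENSSL_ia32cap_P (CPUID.7.EBX), roughly
# as in this illustrative sketch (not executed here):
#
#       $cap = OPENSSL_ia32cap_P[2];
#       return 0 if (($cap & (1<<8 | 1<<19)) == (1<<8 | 1<<19));
#                               # BMI2+ADX present: report "not eligible",
#                               # presumably so the AD*X code path is taken
#       return ($cap >> 5) & 1; # bit 5: AVX2
#
# The BMI2/ADX short-circuit is only emitted when the assembler supports
# those instructions ($addx).
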
$code.=<<___;
.extern OPENSSL_ia32cap_P
.globl  rsaz_avx2_eligible
.type   rsaz_avx2_eligible,\@abi-omnipotent
.align  32
rsaz_avx2_eligible:
        mov     OPENSSL_ia32cap_P+8(%rip),%eax
___
$code.=<<___    if ($addx);
        mov     \$`1<<8|1<<19`,%ecx
        mov     \$0,%edx
        and     %eax,%ecx
        cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
        cmove   %edx,%eax
___
$code.=<<___;
        and     \$`1<<5`,%eax
        shr     \$5,%eax
        ret
.size   rsaz_avx2_eligible,.-rsaz_avx2_eligible

.align  64
.Land_mask:
        .quad   0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
.Lscatter_permd:
        .long   0,2,4,6,7,7,7,7
.Lgather_permd:
        .long   0,7,1,7,2,7,3,7
.Linc:
        .long   0,0,0,0, 1,1,1,1
        .long   2,2,2,2, 3,3,3,3
        .long   4,4,4,4, 4,4,4,4
.align  64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

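# rsaz_se_handler is the usual perlasm SEH helper: HandlerData carries three
# labels (body, epilogue, "in tail"), and the handler uses them to decide how
# much state must be put back. In rough pseudocode (sketch only; the CONTEXT
# and DISPATCHER_CONTEXT offsets are the ones used in the code below):
#
#       if (Rip < body || Rip >= epilogue)
#               frame = context->Rax;           # nothing of ours to undo
#       else {
#               frame = (Rip < in_tail) ? context->Rbp : context->Rax;
#               reload rbx, rbp, r12-r15 from -8(frame) .. -48(frame);
#               copy xmm6-xmm15 back from the save area at -0xd8(frame);
#       }
#       context->Rsp = frame; then defer to RtlVirtualUnwind.
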
$code.=<<___
.extern __imp_RtlVirtualUnwind
.type   rsaz_se_handler,\@abi-omnipotent
.align  16
rsaz_se_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # prologue label
        cmp     %r10,%rbx               # context->Rip<prologue label
        jb      .Lcommon_seh_tail

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lcommon_seh_tail

        mov     160($context),%rbp      # pull context->Rbp

        mov     8(%r11),%r10d           # HandlerData[2]
        lea     (%rsi,%r10),%r10        # "in tail" label
        cmp     %r10,%rbx               # context->Rip>="in tail" label
        cmovc   %rbp,%rax

        mov     -48(%rax),%r15
        mov     -40(%rax),%r14
        mov     -32(%rax),%r13
        mov     -24(%rax),%r12
        mov     -16(%rax),%rbp
        mov     -8(%rax),%rbx
        mov     %r15,240($context)
        mov     %r14,232($context)
        mov     %r13,224($context)
        mov     %r12,216($context)
        mov     %rbp,160($context)
        mov     %rbx,144($context)

        lea     -0xd8(%rax),%rsi        # %xmm save area
        lea     512($context),%rdi      # & context.Xmm6
        mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
        .long   0xa548f3fc              # cld; rep movsq

.Lcommon_seh_tail:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$154,%ecx              # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   rsaz_se_handler,.-rsaz_se_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_rsaz_1024_sqr_avx2
        .rva    .LSEH_end_rsaz_1024_sqr_avx2
        .rva    .LSEH_info_rsaz_1024_sqr_avx2

        .rva    .LSEH_begin_rsaz_1024_mul_avx2
        .rva    .LSEH_end_rsaz_1024_mul_avx2
        .rva    .LSEH_info_rsaz_1024_mul_avx2

        .rva    .LSEH_begin_rsaz_1024_gather5
        .rva    .LSEH_end_rsaz_1024_gather5
        .rva    .LSEH_info_rsaz_1024_gather5
.section        .xdata
.align  8
.LSEH_info_rsaz_1024_sqr_avx2:
        .byte   9,0,0,0
        .rva    rsaz_se_handler
        .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
        .long   0
.LSEH_info_rsaz_1024_mul_avx2:
        .byte   9,0,0,0
        .rva    rsaz_se_handler
        .rva    .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
        .long   0
.LSEH_info_rsaz_1024_gather5:
        .byte   0x01,0x36,0x17,0x0b
        .byte   0x36,0xf8,0x09,0x00     # vmovaps 0x90(rsp),xmm15
        .byte   0x31,0xe8,0x08,0x00     # vmovaps 0x80(rsp),xmm14
        .byte   0x2c,0xd8,0x07,0x00     # vmovaps 0x70(rsp),xmm13
        .byte   0x27,0xc8,0x06,0x00     # vmovaps 0x60(rsp),xmm12
        .byte   0x22,0xb8,0x05,0x00     # vmovaps 0x50(rsp),xmm11
        .byte   0x1d,0xa8,0x04,0x00     # vmovaps 0x40(rsp),xmm10
        .byte   0x18,0x98,0x03,0x00     # vmovaps 0x30(rsp),xmm9
        .byte   0x13,0x88,0x02,0x00     # vmovaps 0x20(rsp),xmm8
        .byte   0x0e,0x78,0x01,0x00     # vmovaps 0x10(rsp),xmm7
        .byte   0x09,0x68,0x00,0x00     # vmovaps 0x00(rsp),xmm6
        .byte   0x04,0x01,0x15,0x00     # sub     rsp,0xa8
        .byte   0x00,0xb3,0x00,0x00     # set_frame r11
___
}

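# The pass below post-processes $code before printing: backtick expressions
# are evaluated, shift counts are reduced modulo 64 (so the negative counts
# generated above assemble correctly), and instructions that only accept
# %xmm operands have their register names narrowed. For example
# (illustrative):
#
#       shr     $-29, %r8       becomes         shr     $35, %r8
#       vmovq   %rax, %ymm0     becomes         vmovq   %rax, %xmm0
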
foreach (split("\n",$code)) {
        s/\`([^\`]*)\`/eval($1)/ge;

        s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or

        s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
        s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
        s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
        s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
        s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
        print $_,"\n";
}

}}} else {{{
print <<___;    # assembler is too old
.text

.globl  rsaz_avx2_eligible
.type   rsaz_avx2_eligible,\@abi-omnipotent
rsaz_avx2_eligible:
        xor     %eax,%eax
        ret
.size   rsaz_avx2_eligible,.-rsaz_avx2_eligible

.globl  rsaz_1024_sqr_avx2
.globl  rsaz_1024_mul_avx2
.globl  rsaz_1024_norm2red_avx2
.globl  rsaz_1024_red2norm_avx2
.globl  rsaz_1024_scatter5_avx2
.globl  rsaz_1024_gather5_avx2
.type   rsaz_1024_sqr_avx2,\@abi-omnipotent
rsaz_1024_sqr_avx2:
rsaz_1024_mul_avx2:
rsaz_1024_norm2red_avx2:
rsaz_1024_red2norm_avx2:
rsaz_1024_scatter5_avx2:
rsaz_1024_gather5_avx2:
        .byte   0x0f,0x0b       # ud2
        ret
.size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___
}}}

close STDOUT or die "error closing STDOUT: $!";