1 #! /usr/bin/env perl
2 # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
3 # Copyright (c) 2012, Intel Corporation. All Rights Reserved.
4 #
5 # Licensed under the OpenSSL license (the "License").  You may not use
6 # this file except in compliance with the License.  You can obtain a copy
7 # in the file LICENSE in the source distribution or at
8 # https://www.openssl.org/source/license.html
9 #
10 # Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
11 # (1) Intel Corporation, Israel Development Center, Haifa, Israel
12 # (2) University of Haifa, Israel
13 #
14 # References:
15 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular
16 #     Exponentiation,  Using Advanced Vector Instructions Architectures",
17 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
18 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
19 # [2] S. Gueron: "Efficient Software Implementations of Modular
20 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
21 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
22 #     Proceedings of 9th International Conference on Information Technology:
23 #     New Generations (ITNG 2012), pp.821-823 (2012)
24 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
25 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048
26 #     on AVX2 capable x86_64 platforms",
27 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
28 #
29 # +13% improvement over original submission by <appro@openssl.org>
30 #
31 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
32 # 2.3GHz Haswell        621             765/+23%        1113/+79%
33 # 2.3GHz Broadwell(**)  688             1200(***)/+74%  1120/+63%
34 #
35 # (*)   if system doesn't support AVX2, for reference purposes;
36 # (**)  scaled to 2.3GHz to simplify comparison;
37 # (***) scalar AD*X code is faster than AVX2 and is preferred code
38 #       path for Broadwell;
39
40 $flavour = shift;
41 $output  = shift;
42 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
43
44 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
45
46 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
47 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
48 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
49 die "can't locate x86_64-xlate.pl";
50
51 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
52                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
53         $avx = ($1>=2.19) + ($1>=2.22);
54         $addx = ($1>=2.23);
55 }
56
57 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
58             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
59         $avx = ($1>=2.09) + ($1>=2.10);
60         $addx = ($1>=2.10);
61 }
62
63 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
64             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
65         $avx = ($1>=10) + ($1>=11);
66         $addx = ($1>=11);
67 }
68
69 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
70         my $ver = $2 + $3/100.0;        # 3.1->3.01, 3.10->3.10
71         $avx = ($ver>=3.0) + ($ver>=3.01);
72         $addx = ($ver>=3.03);
73 }
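# $avx ends up 0 if the assembler can't handle AVX at all, 1 for AVX-only,
# and 2 if AVX2 mnemonics are supported; $addx records AD*X (ADCX/ADOX)
# capability. The AVX2 code below is emitted only when $avx>1.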
74
75 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
76 *STDOUT = *OUT;
77
78 if ($avx>1) {{{
79 { # void AMS_WW(
80 my $rp="%rdi";  # BN_ULONG *rp,
81 my $ap="%rsi";  # const BN_ULONG *ap,
82 my $np="%rdx";  # const BN_ULONG *np,
83 my $n0="%ecx";  # const BN_ULONG n0,
84 my $rep="%r8d"; # int repeat);
85
86 # The registers that hold the accumulated redundant result.
87 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
88 # Therefore: ceil(1024/29)/4 = 9 ymm registers.
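# In other words, a 1024-bit operand is kept as ceil(1024/29) = 36 digits
# of 29 bits each, one digit per 64-bit lane, i.e. four digits per ymm
# register, hence 36/4 = 9 accumulators ($ACC9 is an extra scratch/zero
# register).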
89 my $ACC0="%ymm0";
90 my $ACC1="%ymm1";
91 my $ACC2="%ymm2";
92 my $ACC3="%ymm3";
93 my $ACC4="%ymm4";
94 my $ACC5="%ymm5";
95 my $ACC6="%ymm6";
96 my $ACC7="%ymm7";
97 my $ACC8="%ymm8";
98 my $ACC9="%ymm9";
99 # Registers that hold the currently used broadcast words of bp
100 my $B1="%ymm10";
101 my $B2="%ymm11";
102 # Registers that hold the currently used broadcast words of Y
103 my $Y1="%ymm12";
104 my $Y2="%ymm13";
105 # Helper registers
106 my $TEMP1="%ymm14";
107 my $AND_MASK="%ymm15";
108 # alu registers that hold the first words of the ACC
109 my $r0="%r9";
110 my $r1="%r10";
111 my $r2="%r11";
112 my $r3="%r12";
113
114 my $i="%r14d";                  # loop counter
115 my $tmp = "%r15";
116
117 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
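# ($FrameSize breaks down as 32*18 bytes for the up-to-72 redundant digits
#  of A^2 plus 32*8 bytes for the eight pre-doubled vectors of A kept at
#  $aap below)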
118
119 my $aap=$r0;
120 my $tp0="%rbx";
121 my $tp1=$r3;
122 my $tpa=$tmp;
123
124 $np="%r13";                     # reassigned argument
125
126 $code.=<<___;
127 .text
128
129 .globl  rsaz_1024_sqr_avx2
130 .type   rsaz_1024_sqr_avx2,\@function,5
131 .align  64
132 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
133 .cfi_startproc
134         lea     (%rsp), %rax
135 .cfi_def_cfa_register   %rax
136         push    %rbx
137 .cfi_push       %rbx
138         push    %rbp
139 .cfi_push       %rbp
140         push    %r12
141 .cfi_push       %r12
142         push    %r13
143 .cfi_push       %r13
144         push    %r14
145 .cfi_push       %r14
146         push    %r15
147 .cfi_push       %r15
148         vzeroupper
149 ___
150 $code.=<<___ if ($win64);
151         lea     -0xa8(%rsp),%rsp
152         vmovaps %xmm6,-0xd8(%rax)
153         vmovaps %xmm7,-0xc8(%rax)
154         vmovaps %xmm8,-0xb8(%rax)
155         vmovaps %xmm9,-0xa8(%rax)
156         vmovaps %xmm10,-0x98(%rax)
157         vmovaps %xmm11,-0x88(%rax)
158         vmovaps %xmm12,-0x78(%rax)
159         vmovaps %xmm13,-0x68(%rax)
160         vmovaps %xmm14,-0x58(%rax)
161         vmovaps %xmm15,-0x48(%rax)
162 .Lsqr_1024_body:
163 ___
164 $code.=<<___;
165         mov     %rax,%rbp
166 .cfi_def_cfa_register   %rbp
167         mov     %rdx, $np                       # reassigned argument
168         sub     \$$FrameSize, %rsp
169         mov     $np, $tmp
170         sub     \$-128, $rp                     # size optimization
171         sub     \$-128, $ap
172         sub     \$-128, $np
173
174         and     \$4095, $tmp                    # see if $np crosses page
175         add     \$32*10, $tmp
176         shr     \$12, $tmp
177         vpxor   $ACC9,$ACC9,$ACC9
178         jz      .Lsqr_1024_no_n_copy
179
180         # unaligned 256-bit load that crosses page boundary can
181         # cause >2x performance degradation here, so if $np does
182         # cross page boundary, copy it to stack and make sure stack
183         # frame doesn't...
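        # (the 2KB alignment applied below keeps the 320-byte copy of the
        #  modulus within a single 2KB-aligned block, and therefore within
        #  a single page)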
184         sub             \$32*10,%rsp
185         vmovdqu         32*0-128($np), $ACC0
186         and             \$-2048, %rsp
187         vmovdqu         32*1-128($np), $ACC1
188         vmovdqu         32*2-128($np), $ACC2
189         vmovdqu         32*3-128($np), $ACC3
190         vmovdqu         32*4-128($np), $ACC4
191         vmovdqu         32*5-128($np), $ACC5
192         vmovdqu         32*6-128($np), $ACC6
193         vmovdqu         32*7-128($np), $ACC7
194         vmovdqu         32*8-128($np), $ACC8
195         lea             $FrameSize+128(%rsp),$np
196         vmovdqu         $ACC0, 32*0-128($np)
197         vmovdqu         $ACC1, 32*1-128($np)
198         vmovdqu         $ACC2, 32*2-128($np)
199         vmovdqu         $ACC3, 32*3-128($np)
200         vmovdqu         $ACC4, 32*4-128($np)
201         vmovdqu         $ACC5, 32*5-128($np)
202         vmovdqu         $ACC6, 32*6-128($np)
203         vmovdqu         $ACC7, 32*7-128($np)
204         vmovdqu         $ACC8, 32*8-128($np)
205         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
206
207 .Lsqr_1024_no_n_copy:
208         and             \$-1024, %rsp
209
210         vmovdqu         32*1-128($ap), $ACC1
211         vmovdqu         32*2-128($ap), $ACC2
212         vmovdqu         32*3-128($ap), $ACC3
213         vmovdqu         32*4-128($ap), $ACC4
214         vmovdqu         32*5-128($ap), $ACC5
215         vmovdqu         32*6-128($ap), $ACC6
216         vmovdqu         32*7-128($ap), $ACC7
217         vmovdqu         32*8-128($ap), $ACC8
218
219         lea     192(%rsp), $tp0                 # 64+128=192
220         vpbroadcastq    .Land_mask(%rip), $AND_MASK
221         jmp     .LOOP_GRANDE_SQR_1024
222
223 .align  32
224 .LOOP_GRANDE_SQR_1024:
225         lea     32*18+128(%rsp), $aap           # size optimization
226         lea     448(%rsp), $tp1                 # 64+128+256=448
227
228         # the squaring is performed as described in Variant B of
229         # "Speeding up Big-Number Squaring", so start by calculating
230         # the A*2=A+A vector
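        # (i.e. with A = sum_i a[i]*2^(29*i), Variant B computes
        #  A^2 = sum_i a[i]^2*2^(58*i) + sum_{i<j} a[i]*(2*a[j])*2^(29*(i+j)),
        #  so the cross products get their factor of two from the pre-doubled
        #  vector and no extra doubling of the result is needed)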
231         vpaddq          $ACC1, $ACC1, $ACC1
232          vpbroadcastq   32*0-128($ap), $B1
233         vpaddq          $ACC2, $ACC2, $ACC2
234         vmovdqa         $ACC1, 32*0-128($aap)
235         vpaddq          $ACC3, $ACC3, $ACC3
236         vmovdqa         $ACC2, 32*1-128($aap)
237         vpaddq          $ACC4, $ACC4, $ACC4
238         vmovdqa         $ACC3, 32*2-128($aap)
239         vpaddq          $ACC5, $ACC5, $ACC5
240         vmovdqa         $ACC4, 32*3-128($aap)
241         vpaddq          $ACC6, $ACC6, $ACC6
242         vmovdqa         $ACC5, 32*4-128($aap)
243         vpaddq          $ACC7, $ACC7, $ACC7
244         vmovdqa         $ACC6, 32*5-128($aap)
245         vpaddq          $ACC8, $ACC8, $ACC8
246         vmovdqa         $ACC7, 32*6-128($aap)
247         vpxor           $ACC9, $ACC9, $ACC9
248         vmovdqa         $ACC8, 32*7-128($aap)
249
250         vpmuludq        32*0-128($ap), $B1, $ACC0
251          vpbroadcastq   32*1-128($ap), $B2
252          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
253         vpmuludq        $B1, $ACC1, $ACC1
254          vmovdqu        $ACC9, 32*10-448($tp1)
255         vpmuludq        $B1, $ACC2, $ACC2
256          vmovdqu        $ACC9, 32*11-448($tp1)
257         vpmuludq        $B1, $ACC3, $ACC3
258          vmovdqu        $ACC9, 32*12-448($tp1)
259         vpmuludq        $B1, $ACC4, $ACC4
260          vmovdqu        $ACC9, 32*13-448($tp1)
261         vpmuludq        $B1, $ACC5, $ACC5
262          vmovdqu        $ACC9, 32*14-448($tp1)
263         vpmuludq        $B1, $ACC6, $ACC6
264          vmovdqu        $ACC9, 32*15-448($tp1)
265         vpmuludq        $B1, $ACC7, $ACC7
266          vmovdqu        $ACC9, 32*16-448($tp1)
267         vpmuludq        $B1, $ACC8, $ACC8
268          vpbroadcastq   32*2-128($ap), $B1
269          vmovdqu        $ACC9, 32*17-448($tp1)
270
271         mov     $ap, $tpa
272         mov     \$4, $i
273         jmp     .Lsqr_entry_1024
274 ___
275 $TEMP0=$Y1;
276 $TEMP2=$Y2;
277 $code.=<<___;
278 .align  32
279 .LOOP_SQR_1024:
280          vpbroadcastq   32*1-128($tpa), $B2
281         vpmuludq        32*0-128($ap), $B1, $ACC0
282         vpaddq          32*0-192($tp0), $ACC0, $ACC0
283         vpmuludq        32*0-128($aap), $B1, $ACC1
284         vpaddq          32*1-192($tp0), $ACC1, $ACC1
285         vpmuludq        32*1-128($aap), $B1, $ACC2
286         vpaddq          32*2-192($tp0), $ACC2, $ACC2
287         vpmuludq        32*2-128($aap), $B1, $ACC3
288         vpaddq          32*3-192($tp0), $ACC3, $ACC3
289         vpmuludq        32*3-128($aap), $B1, $ACC4
290         vpaddq          32*4-192($tp0), $ACC4, $ACC4
291         vpmuludq        32*4-128($aap), $B1, $ACC5
292         vpaddq          32*5-192($tp0), $ACC5, $ACC5
293         vpmuludq        32*5-128($aap), $B1, $ACC6
294         vpaddq          32*6-192($tp0), $ACC6, $ACC6
295         vpmuludq        32*6-128($aap), $B1, $ACC7
296         vpaddq          32*7-192($tp0), $ACC7, $ACC7
297         vpmuludq        32*7-128($aap), $B1, $ACC8
298          vpbroadcastq   32*2-128($tpa), $B1
299         vpaddq          32*8-192($tp0), $ACC8, $ACC8
300 .Lsqr_entry_1024:
301         vmovdqu         $ACC0, 32*0-192($tp0)
302         vmovdqu         $ACC1, 32*1-192($tp0)
303
304         vpmuludq        32*1-128($ap), $B2, $TEMP0
305         vpaddq          $TEMP0, $ACC2, $ACC2
306         vpmuludq        32*1-128($aap), $B2, $TEMP1
307         vpaddq          $TEMP1, $ACC3, $ACC3
308         vpmuludq        32*2-128($aap), $B2, $TEMP2
309         vpaddq          $TEMP2, $ACC4, $ACC4
310         vpmuludq        32*3-128($aap), $B2, $TEMP0
311         vpaddq          $TEMP0, $ACC5, $ACC5
312         vpmuludq        32*4-128($aap), $B2, $TEMP1
313         vpaddq          $TEMP1, $ACC6, $ACC6
314         vpmuludq        32*5-128($aap), $B2, $TEMP2
315         vpaddq          $TEMP2, $ACC7, $ACC7
316         vpmuludq        32*6-128($aap), $B2, $TEMP0
317         vpaddq          $TEMP0, $ACC8, $ACC8
318         vpmuludq        32*7-128($aap), $B2, $ACC0
319          vpbroadcastq   32*3-128($tpa), $B2
320         vpaddq          32*9-192($tp0), $ACC0, $ACC0
321
322         vmovdqu         $ACC2, 32*2-192($tp0)
323         vmovdqu         $ACC3, 32*3-192($tp0)
324
325         vpmuludq        32*2-128($ap), $B1, $TEMP2
326         vpaddq          $TEMP2, $ACC4, $ACC4
327         vpmuludq        32*2-128($aap), $B1, $TEMP0
328         vpaddq          $TEMP0, $ACC5, $ACC5
329         vpmuludq        32*3-128($aap), $B1, $TEMP1
330         vpaddq          $TEMP1, $ACC6, $ACC6
331         vpmuludq        32*4-128($aap), $B1, $TEMP2
332         vpaddq          $TEMP2, $ACC7, $ACC7
333         vpmuludq        32*5-128($aap), $B1, $TEMP0
334         vpaddq          $TEMP0, $ACC8, $ACC8
335         vpmuludq        32*6-128($aap), $B1, $TEMP1
336         vpaddq          $TEMP1, $ACC0, $ACC0
337         vpmuludq        32*7-128($aap), $B1, $ACC1
338          vpbroadcastq   32*4-128($tpa), $B1
339         vpaddq          32*10-448($tp1), $ACC1, $ACC1
340
341         vmovdqu         $ACC4, 32*4-192($tp0)
342         vmovdqu         $ACC5, 32*5-192($tp0)
343
344         vpmuludq        32*3-128($ap), $B2, $TEMP0
345         vpaddq          $TEMP0, $ACC6, $ACC6
346         vpmuludq        32*3-128($aap), $B2, $TEMP1
347         vpaddq          $TEMP1, $ACC7, $ACC7
348         vpmuludq        32*4-128($aap), $B2, $TEMP2
349         vpaddq          $TEMP2, $ACC8, $ACC8
350         vpmuludq        32*5-128($aap), $B2, $TEMP0
351         vpaddq          $TEMP0, $ACC0, $ACC0
352         vpmuludq        32*6-128($aap), $B2, $TEMP1
353         vpaddq          $TEMP1, $ACC1, $ACC1
354         vpmuludq        32*7-128($aap), $B2, $ACC2
355          vpbroadcastq   32*5-128($tpa), $B2
356         vpaddq          32*11-448($tp1), $ACC2, $ACC2
357
358         vmovdqu         $ACC6, 32*6-192($tp0)
359         vmovdqu         $ACC7, 32*7-192($tp0)
360
361         vpmuludq        32*4-128($ap), $B1, $TEMP0
362         vpaddq          $TEMP0, $ACC8, $ACC8
363         vpmuludq        32*4-128($aap), $B1, $TEMP1
364         vpaddq          $TEMP1, $ACC0, $ACC0
365         vpmuludq        32*5-128($aap), $B1, $TEMP2
366         vpaddq          $TEMP2, $ACC1, $ACC1
367         vpmuludq        32*6-128($aap), $B1, $TEMP0
368         vpaddq          $TEMP0, $ACC2, $ACC2
369         vpmuludq        32*7-128($aap), $B1, $ACC3
370          vpbroadcastq   32*6-128($tpa), $B1
371         vpaddq          32*12-448($tp1), $ACC3, $ACC3
372
373         vmovdqu         $ACC8, 32*8-192($tp0)
374         vmovdqu         $ACC0, 32*9-192($tp0)
375         lea             8($tp0), $tp0
376
377         vpmuludq        32*5-128($ap), $B2, $TEMP2
378         vpaddq          $TEMP2, $ACC1, $ACC1
379         vpmuludq        32*5-128($aap), $B2, $TEMP0
380         vpaddq          $TEMP0, $ACC2, $ACC2
381         vpmuludq        32*6-128($aap), $B2, $TEMP1
382         vpaddq          $TEMP1, $ACC3, $ACC3
383         vpmuludq        32*7-128($aap), $B2, $ACC4
384          vpbroadcastq   32*7-128($tpa), $B2
385         vpaddq          32*13-448($tp1), $ACC4, $ACC4
386
387         vmovdqu         $ACC1, 32*10-448($tp1)
388         vmovdqu         $ACC2, 32*11-448($tp1)
389
390         vpmuludq        32*6-128($ap), $B1, $TEMP0
391         vpaddq          $TEMP0, $ACC3, $ACC3
392         vpmuludq        32*6-128($aap), $B1, $TEMP1
393          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
394         vpaddq          $TEMP1, $ACC4, $ACC4
395         vpmuludq        32*7-128($aap), $B1, $ACC5
396          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
397         vpaddq          32*14-448($tp1), $ACC5, $ACC5
398
399         vmovdqu         $ACC3, 32*12-448($tp1)
400         vmovdqu         $ACC4, 32*13-448($tp1)
401         lea             8($tpa), $tpa
402
403         vpmuludq        32*7-128($ap), $B2, $TEMP0
404         vpaddq          $TEMP0, $ACC5, $ACC5
405         vpmuludq        32*7-128($aap), $B2, $ACC6
406         vpaddq          32*15-448($tp1), $ACC6, $ACC6
407
408         vpmuludq        32*8-128($ap), $ACC0, $ACC7
409         vmovdqu         $ACC5, 32*14-448($tp1)
410         vpaddq          32*16-448($tp1), $ACC7, $ACC7
411         vmovdqu         $ACC6, 32*15-448($tp1)
412         vmovdqu         $ACC7, 32*16-448($tp1)
413         lea             8($tp1), $tp1
414
415         dec     $i
416         jnz     .LOOP_SQR_1024
417 ___
418 $ZERO = $ACC9;
419 $TEMP0 = $B1;
420 $TEMP2 = $B2;
421 $TEMP3 = $Y1;
422 $TEMP4 = $Y2;
423 $code.=<<___;
424         # we need to fix indices 32-39 to avoid overflow
425         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
426         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
427         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
428         lea             192(%rsp), $tp0                 # 64+128=192
429
430         vpsrlq          \$29, $ACC8, $TEMP1
431         vpand           $AND_MASK, $ACC8, $ACC8
432         vpsrlq          \$29, $ACC1, $TEMP2
433         vpand           $AND_MASK, $ACC1, $ACC1
434
435         vpermq          \$0x93, $TEMP1, $TEMP1
436         vpxor           $ZERO, $ZERO, $ZERO
437         vpermq          \$0x93, $TEMP2, $TEMP2
438
439         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
440         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
441         vpaddq          $TEMP0, $ACC8, $ACC8
442         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
443         vpaddq          $TEMP1, $ACC1, $ACC1
444         vpaddq          $TEMP2, $ACC2, $ACC2
445         vmovdqu         $ACC1, 32*9-192($tp0)
446         vmovdqu         $ACC2, 32*10-192($tp0)
447
448         mov     (%rsp), %rax
449         mov     8(%rsp), $r1
450         mov     16(%rsp), $r2
451         mov     24(%rsp), $r3
452         vmovdqu 32*1(%rsp), $ACC1
453         vmovdqu 32*2-192($tp0), $ACC2
454         vmovdqu 32*3-192($tp0), $ACC3
455         vmovdqu 32*4-192($tp0), $ACC4
456         vmovdqu 32*5-192($tp0), $ACC5
457         vmovdqu 32*6-192($tp0), $ACC6
458         vmovdqu 32*7-192($tp0), $ACC7
459
460         mov     %rax, $r0
461         imull   $n0, %eax
462         and     \$0x1fffffff, %eax
463         vmovd   %eax, $Y1
464
465         mov     %rax, %rdx
466         imulq   -128($np), %rax
467          vpbroadcastq   $Y1, $Y1
468         add     %rax, $r0
469         mov     %rdx, %rax
470         imulq   8-128($np), %rax
471         shr     \$29, $r0
472         add     %rax, $r1
473         mov     %rdx, %rax
474         imulq   16-128($np), %rax
475         add     $r0, $r1
476         add     %rax, $r2
477         imulq   24-128($np), %rdx
478         add     %rdx, $r3
479
480         mov     $r1, %rax
481         imull   $n0, %eax
482         and     \$0x1fffffff, %eax
483
484         mov \$9, $i
485         jmp .LOOP_REDUCE_1024
486
487 .align  32
488 .LOOP_REDUCE_1024:
489         vmovd   %eax, $Y2
490         vpbroadcastq    $Y2, $Y2
491
492         vpmuludq        32*1-128($np), $Y1, $TEMP0
493          mov    %rax, %rdx
494          imulq  -128($np), %rax
495         vpaddq          $TEMP0, $ACC1, $ACC1
496          add    %rax, $r1
497         vpmuludq        32*2-128($np), $Y1, $TEMP1
498          mov    %rdx, %rax
499          imulq  8-128($np), %rax
500         vpaddq          $TEMP1, $ACC2, $ACC2
501         vpmuludq        32*3-128($np), $Y1, $TEMP2
502          .byte  0x67
503          add    %rax, $r2
504          .byte  0x67
505          mov    %rdx, %rax
506          imulq  16-128($np), %rax
507          shr    \$29, $r1
508         vpaddq          $TEMP2, $ACC3, $ACC3
509         vpmuludq        32*4-128($np), $Y1, $TEMP0
510          add    %rax, $r3
511          add    $r1, $r2
512         vpaddq          $TEMP0, $ACC4, $ACC4
513         vpmuludq        32*5-128($np), $Y1, $TEMP1
514          mov    $r2, %rax
515          imull  $n0, %eax
516         vpaddq          $TEMP1, $ACC5, $ACC5
517         vpmuludq        32*6-128($np), $Y1, $TEMP2
518          and    \$0x1fffffff, %eax
519         vpaddq          $TEMP2, $ACC6, $ACC6
520         vpmuludq        32*7-128($np), $Y1, $TEMP0
521         vpaddq          $TEMP0, $ACC7, $ACC7
522         vpmuludq        32*8-128($np), $Y1, $TEMP1
523          vmovd  %eax, $Y1
524          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
525         vpaddq          $TEMP1, $ACC8, $ACC8
526          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
527          vpbroadcastq   $Y1, $Y1
528
529         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
530         vmovdqu         32*3-8-128($np), $TEMP1
531          mov    %rax, %rdx
532          imulq  -128($np), %rax
533         vpaddq          $TEMP2, $ACC1, $ACC1
534         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
535         vmovdqu         32*4-8-128($np), $TEMP2
536          add    %rax, $r2
537          mov    %rdx, %rax
538          imulq  8-128($np), %rax
539         vpaddq          $TEMP0, $ACC2, $ACC2
540          add    $r3, %rax
541          shr    \$29, $r2
542         vpmuludq        $Y2, $TEMP1, $TEMP1
543         vmovdqu         32*5-8-128($np), $TEMP0
544          add    $r2, %rax
545         vpaddq          $TEMP1, $ACC3, $ACC3
546         vpmuludq        $Y2, $TEMP2, $TEMP2
547         vmovdqu         32*6-8-128($np), $TEMP1
548          .byte  0x67
549          mov    %rax, $r3
550          imull  $n0, %eax
551         vpaddq          $TEMP2, $ACC4, $ACC4
552         vpmuludq        $Y2, $TEMP0, $TEMP0
553         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
554          and    \$0x1fffffff, %eax
555         vpaddq          $TEMP0, $ACC5, $ACC5
556         vpmuludq        $Y2, $TEMP1, $TEMP1
557         vmovdqu         32*8-8-128($np), $TEMP0
558         vpaddq          $TEMP1, $ACC6, $ACC6
559         vpmuludq        $Y2, $TEMP2, $TEMP2
560         vmovdqu         32*9-8-128($np), $ACC9
561          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
562          imulq  -128($np), %rax
563         vpaddq          $TEMP2, $ACC7, $ACC7
564         vpmuludq        $Y2, $TEMP0, $TEMP0
565          vmovdqu        32*1-16-128($np), $TEMP1
566          vpbroadcastq   $ACC0, $ACC0
567         vpaddq          $TEMP0, $ACC8, $ACC8
568         vpmuludq        $Y2, $ACC9, $ACC9
569          vmovdqu        32*2-16-128($np), $TEMP2
570          add    %rax, $r3
571
572 ___
573 ($ACC0,$Y2)=($Y2,$ACC0);
574 $code.=<<___;
575          vmovdqu        32*1-24-128($np), $ACC0
576         vpmuludq        $Y1, $TEMP1, $TEMP1
577         vmovdqu         32*3-16-128($np), $TEMP0
578         vpaddq          $TEMP1, $ACC1, $ACC1
579          vpmuludq       $Y2, $ACC0, $ACC0
580         vpmuludq        $Y1, $TEMP2, $TEMP2
581         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
582          vpaddq         $ACC1, $ACC0, $ACC0
583         vpaddq          $TEMP2, $ACC2, $ACC2
584         vpmuludq        $Y1, $TEMP0, $TEMP0
585         vmovdqu         32*5-16-128($np), $TEMP2
586          .byte  0x67
587          vmovq          $ACC0, %rax
588          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
589         vpaddq          $TEMP0, $ACC3, $ACC3
590         vpmuludq        $Y1, $TEMP1, $TEMP1
591         vmovdqu         32*6-16-128($np), $TEMP0
592         vpaddq          $TEMP1, $ACC4, $ACC4
593         vpmuludq        $Y1, $TEMP2, $TEMP2
594         vmovdqu         32*7-16-128($np), $TEMP1
595         vpaddq          $TEMP2, $ACC5, $ACC5
596         vpmuludq        $Y1, $TEMP0, $TEMP0
597         vmovdqu         32*8-16-128($np), $TEMP2
598         vpaddq          $TEMP0, $ACC6, $ACC6
599         vpmuludq        $Y1, $TEMP1, $TEMP1
600          shr    \$29, $r3
601         vmovdqu         32*9-16-128($np), $TEMP0
602          add    $r3, %rax
603         vpaddq          $TEMP1, $ACC7, $ACC7
604         vpmuludq        $Y1, $TEMP2, $TEMP2
605          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
606          mov    %rax, $r0
607          imull  $n0, %eax
608         vpaddq          $TEMP2, $ACC8, $ACC8
609         vpmuludq        $Y1, $TEMP0, $TEMP0
610          and    \$0x1fffffff, %eax
611          vmovd  %eax, $Y1
612          vmovdqu        32*3-24-128($np), $TEMP2
613         .byte   0x67
614         vpaddq          $TEMP0, $ACC9, $ACC9
615          vpbroadcastq   $Y1, $Y1
616
617         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
618         vmovdqu         32*4-24-128($np), $TEMP0
619          mov    %rax, %rdx
620          imulq  -128($np), %rax
621          mov    8(%rsp), $r1
622         vpaddq          $TEMP1, $ACC2, $ACC1
623         vpmuludq        $Y2, $TEMP2, $TEMP2
624         vmovdqu         32*5-24-128($np), $TEMP1
625          add    %rax, $r0
626          mov    %rdx, %rax
627          imulq  8-128($np), %rax
628          .byte  0x67
629          shr    \$29, $r0
630          mov    16(%rsp), $r2
631         vpaddq          $TEMP2, $ACC3, $ACC2
632         vpmuludq        $Y2, $TEMP0, $TEMP0
633         vmovdqu         32*6-24-128($np), $TEMP2
634          add    %rax, $r1
635          mov    %rdx, %rax
636          imulq  16-128($np), %rax
637         vpaddq          $TEMP0, $ACC4, $ACC3
638         vpmuludq        $Y2, $TEMP1, $TEMP1
639         vmovdqu         32*7-24-128($np), $TEMP0
640          imulq  24-128($np), %rdx               # future $r3
641          add    %rax, $r2
642          lea    ($r0,$r1), %rax
643         vpaddq          $TEMP1, $ACC5, $ACC4
644         vpmuludq        $Y2, $TEMP2, $TEMP2
645         vmovdqu         32*8-24-128($np), $TEMP1
646          mov    %rax, $r1
647          imull  $n0, %eax
648         vpmuludq        $Y2, $TEMP0, $TEMP0
649         vpaddq          $TEMP2, $ACC6, $ACC5
650         vmovdqu         32*9-24-128($np), $TEMP2
651          and    \$0x1fffffff, %eax
652         vpaddq          $TEMP0, $ACC7, $ACC6
653         vpmuludq        $Y2, $TEMP1, $TEMP1
654          add    24(%rsp), %rdx
655         vpaddq          $TEMP1, $ACC8, $ACC7
656         vpmuludq        $Y2, $TEMP2, $TEMP2
657         vpaddq          $TEMP2, $ACC9, $ACC8
658          vmovq  $r3, $ACC9
659          mov    %rdx, $r3
660
661         dec     $i
662         jnz     .LOOP_REDUCE_1024
663 ___
664 ($ACC0,$Y2)=($Y2,$ACC0);
665 $code.=<<___;
666         lea     448(%rsp), $tp1                 # size optimization
667         vpaddq  $ACC9, $Y2, $ACC0
668         vpxor   $ZERO, $ZERO, $ZERO
669
670         vpaddq          32*9-192($tp0), $ACC0, $ACC0
671         vpaddq          32*10-448($tp1), $ACC1, $ACC1
672         vpaddq          32*11-448($tp1), $ACC2, $ACC2
673         vpaddq          32*12-448($tp1), $ACC3, $ACC3
674         vpaddq          32*13-448($tp1), $ACC4, $ACC4
675         vpaddq          32*14-448($tp1), $ACC5, $ACC5
676         vpaddq          32*15-448($tp1), $ACC6, $ACC6
677         vpaddq          32*16-448($tp1), $ACC7, $ACC7
678         vpaddq          32*17-448($tp1), $ACC8, $ACC8
679
680         vpsrlq          \$29, $ACC0, $TEMP1
681         vpand           $AND_MASK, $ACC0, $ACC0
682         vpsrlq          \$29, $ACC1, $TEMP2
683         vpand           $AND_MASK, $ACC1, $ACC1
684         vpsrlq          \$29, $ACC2, $TEMP3
685         vpermq          \$0x93, $TEMP1, $TEMP1
686         vpand           $AND_MASK, $ACC2, $ACC2
687         vpsrlq          \$29, $ACC3, $TEMP4
688         vpermq          \$0x93, $TEMP2, $TEMP2
689         vpand           $AND_MASK, $ACC3, $ACC3
690         vpermq          \$0x93, $TEMP3, $TEMP3
691
692         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
693         vpermq          \$0x93, $TEMP4, $TEMP4
694         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
695         vpaddq          $TEMP0, $ACC0, $ACC0
696         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
697         vpaddq          $TEMP1, $ACC1, $ACC1
698         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
699         vpaddq          $TEMP2, $ACC2, $ACC2
700         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
701         vpaddq          $TEMP3, $ACC3, $ACC3
702         vpaddq          $TEMP4, $ACC4, $ACC4
703
704         vpsrlq          \$29, $ACC0, $TEMP1
705         vpand           $AND_MASK, $ACC0, $ACC0
706         vpsrlq          \$29, $ACC1, $TEMP2
707         vpand           $AND_MASK, $ACC1, $ACC1
708         vpsrlq          \$29, $ACC2, $TEMP3
709         vpermq          \$0x93, $TEMP1, $TEMP1
710         vpand           $AND_MASK, $ACC2, $ACC2
711         vpsrlq          \$29, $ACC3, $TEMP4
712         vpermq          \$0x93, $TEMP2, $TEMP2
713         vpand           $AND_MASK, $ACC3, $ACC3
714         vpermq          \$0x93, $TEMP3, $TEMP3
715
716         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
717         vpermq          \$0x93, $TEMP4, $TEMP4
718         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
719         vpaddq          $TEMP0, $ACC0, $ACC0
720         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
721         vpaddq          $TEMP1, $ACC1, $ACC1
722         vmovdqu         $ACC0, 32*0-128($rp)
723         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
724         vpaddq          $TEMP2, $ACC2, $ACC2
725         vmovdqu         $ACC1, 32*1-128($rp)
726         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
727         vpaddq          $TEMP3, $ACC3, $ACC3
728         vmovdqu         $ACC2, 32*2-128($rp)
729         vpaddq          $TEMP4, $ACC4, $ACC4
730         vmovdqu         $ACC3, 32*3-128($rp)
731 ___
732 $TEMP5=$ACC0;
733 $code.=<<___;
734         vpsrlq          \$29, $ACC4, $TEMP1
735         vpand           $AND_MASK, $ACC4, $ACC4
736         vpsrlq          \$29, $ACC5, $TEMP2
737         vpand           $AND_MASK, $ACC5, $ACC5
738         vpsrlq          \$29, $ACC6, $TEMP3
739         vpermq          \$0x93, $TEMP1, $TEMP1
740         vpand           $AND_MASK, $ACC6, $ACC6
741         vpsrlq          \$29, $ACC7, $TEMP4
742         vpermq          \$0x93, $TEMP2, $TEMP2
743         vpand           $AND_MASK, $ACC7, $ACC7
744         vpsrlq          \$29, $ACC8, $TEMP5
745         vpermq          \$0x93, $TEMP3, $TEMP3
746         vpand           $AND_MASK, $ACC8, $ACC8
747         vpermq          \$0x93, $TEMP4, $TEMP4
748
749         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
750         vpermq          \$0x93, $TEMP5, $TEMP5
751         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
752         vpaddq          $TEMP0, $ACC4, $ACC4
753         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
754         vpaddq          $TEMP1, $ACC5, $ACC5
755         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
756         vpaddq          $TEMP2, $ACC6, $ACC6
757         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
758         vpaddq          $TEMP3, $ACC7, $ACC7
759         vpaddq          $TEMP4, $ACC8, $ACC8
760
761         vpsrlq          \$29, $ACC4, $TEMP1
762         vpand           $AND_MASK, $ACC4, $ACC4
763         vpsrlq          \$29, $ACC5, $TEMP2
764         vpand           $AND_MASK, $ACC5, $ACC5
765         vpsrlq          \$29, $ACC6, $TEMP3
766         vpermq          \$0x93, $TEMP1, $TEMP1
767         vpand           $AND_MASK, $ACC6, $ACC6
768         vpsrlq          \$29, $ACC7, $TEMP4
769         vpermq          \$0x93, $TEMP2, $TEMP2
770         vpand           $AND_MASK, $ACC7, $ACC7
771         vpsrlq          \$29, $ACC8, $TEMP5
772         vpermq          \$0x93, $TEMP3, $TEMP3
773         vpand           $AND_MASK, $ACC8, $ACC8
774         vpermq          \$0x93, $TEMP4, $TEMP4
775
776         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
777         vpermq          \$0x93, $TEMP5, $TEMP5
778         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
779         vpaddq          $TEMP0, $ACC4, $ACC4
780         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
781         vpaddq          $TEMP1, $ACC5, $ACC5
782         vmovdqu         $ACC4, 32*4-128($rp)
783         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
784         vpaddq          $TEMP2, $ACC6, $ACC6
785         vmovdqu         $ACC5, 32*5-128($rp)
786         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
787         vpaddq          $TEMP3, $ACC7, $ACC7
788         vmovdqu         $ACC6, 32*6-128($rp)
789         vpaddq          $TEMP4, $ACC8, $ACC8
790         vmovdqu         $ACC7, 32*7-128($rp)
791         vmovdqu         $ACC8, 32*8-128($rp)
792
793         mov     $rp, $ap
794         dec     $rep
795         jne     .LOOP_GRANDE_SQR_1024
796
797         vzeroall
798         mov     %rbp, %rax
799 .cfi_def_cfa_register   %rax
800 ___
801 $code.=<<___ if ($win64);
802 .Lsqr_1024_in_tail:
803         movaps  -0xd8(%rax),%xmm6
804         movaps  -0xc8(%rax),%xmm7
805         movaps  -0xb8(%rax),%xmm8
806         movaps  -0xa8(%rax),%xmm9
807         movaps  -0x98(%rax),%xmm10
808         movaps  -0x88(%rax),%xmm11
809         movaps  -0x78(%rax),%xmm12
810         movaps  -0x68(%rax),%xmm13
811         movaps  -0x58(%rax),%xmm14
812         movaps  -0x48(%rax),%xmm15
813 ___
814 $code.=<<___;
815         mov     -48(%rax),%r15
816 .cfi_restore    %r15
817         mov     -40(%rax),%r14
818 .cfi_restore    %r14
819         mov     -32(%rax),%r13
820 .cfi_restore    %r13
821         mov     -24(%rax),%r12
822 .cfi_restore    %r12
823         mov     -16(%rax),%rbp
824 .cfi_restore    %rbp
825         mov     -8(%rax),%rbx
826 .cfi_restore    %rbx
827         lea     (%rax),%rsp             # restore %rsp
828 .cfi_def_cfa_register   %rsp
829 .Lsqr_1024_epilogue:
830         ret
831 .cfi_endproc
832 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
833 ___
834 }
835
836 { # void AMM_WW(
837 my $rp="%rdi";  # BN_ULONG *rp,
838 my $ap="%rsi";  # const BN_ULONG *ap,
839 my $bp="%rdx";  # const BN_ULONG *bp,
840 my $np="%rcx";  # const BN_ULONG *np,
841 my $n0="%r8d";  # unsigned int n0);
842
843 # The registers that hold the accumulated redundant result.
844 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
845 # Therefore: ceil(1024/29)/4 = 9 ymm registers.
846 my $ACC0="%ymm0";
847 my $ACC1="%ymm1";
848 my $ACC2="%ymm2";
849 my $ACC3="%ymm3";
850 my $ACC4="%ymm4";
851 my $ACC5="%ymm5";
852 my $ACC6="%ymm6";
853 my $ACC7="%ymm7";
854 my $ACC8="%ymm8";
855 my $ACC9="%ymm9";
856
857 # Registers that hold the currently used broadcast words of the multiplier
858 my $Bi="%ymm10";
859 my $Yi="%ymm11";
860
861 # Helper registers
862 my $TEMP0=$ACC0;
863 my $TEMP1="%ymm12";
864 my $TEMP2="%ymm13";
865 my $ZERO="%ymm14";
866 my $AND_MASK="%ymm15";
867
868 # alu registers that hold the first words of the ACC
869 my $r0="%r9";
870 my $r1="%r10";
871 my $r2="%r11";
872 my $r3="%r12";
873
874 my $i="%r14d";
875 my $tmp="%r15";
876
877 $bp="%r13";     # reassigned argument
878
879 $code.=<<___;
880 .globl  rsaz_1024_mul_avx2
881 .type   rsaz_1024_mul_avx2,\@function,5
882 .align  64
883 rsaz_1024_mul_avx2:
884 .cfi_startproc
885         lea     (%rsp), %rax
886 .cfi_def_cfa_register   %rax
887         push    %rbx
888 .cfi_push       %rbx
889         push    %rbp
890 .cfi_push       %rbp
891         push    %r12
892 .cfi_push       %r12
893         push    %r13
894 .cfi_push       %r13
895         push    %r14
896 .cfi_push       %r14
897         push    %r15
898 .cfi_push       %r15
899 ___
900 $code.=<<___ if ($win64);
901         vzeroupper
902         lea     -0xa8(%rsp),%rsp
903         vmovaps %xmm6,-0xd8(%rax)
904         vmovaps %xmm7,-0xc8(%rax)
905         vmovaps %xmm8,-0xb8(%rax)
906         vmovaps %xmm9,-0xa8(%rax)
907         vmovaps %xmm10,-0x98(%rax)
908         vmovaps %xmm11,-0x88(%rax)
909         vmovaps %xmm12,-0x78(%rax)
910         vmovaps %xmm13,-0x68(%rax)
911         vmovaps %xmm14,-0x58(%rax)
912         vmovaps %xmm15,-0x48(%rax)
913 .Lmul_1024_body:
914 ___
915 $code.=<<___;
916         mov     %rax,%rbp
917 .cfi_def_cfa_register   %rbp
918         vzeroall
919         mov     %rdx, $bp       # reassigned argument
920         sub     \$64,%rsp
921
922         # unaligned 256-bit load that crosses page boundary can
923         # cause severe performance degradation here, so if $ap does
924         # cross page boundary, swap it with $bp [meaning that caller
925         # is advised to lay down $ap and $bp next to each other, so
926         # that only one can cross page boundary].
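        # [the swap is harmless because the Montgomery product is the same
        #  regardless of which operand plays which role]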
927         .byte   0x67,0x67
928         mov     $ap, $tmp
929         and     \$4095, $tmp
930         add     \$32*10, $tmp
931         shr     \$12, $tmp
932         mov     $ap, $tmp
933         cmovnz  $bp, $ap
934         cmovnz  $tmp, $bp
935
936         mov     $np, $tmp
937         sub     \$-128,$ap      # size optimization
938         sub     \$-128,$np
939         sub     \$-128,$rp
940
941         and     \$4095, $tmp    # see if $np crosses page
942         add     \$32*10, $tmp
943         .byte   0x67,0x67
944         shr     \$12, $tmp
945         jz      .Lmul_1024_no_n_copy
946
947         # unaligned 256-bit load that crosses page boundary can
948         # cause severe performance degradation here, so if $np does
949         # cross page boundary, copy it to stack and make sure stack
950         # frame doesn't...
951         sub             \$32*10,%rsp
952         vmovdqu         32*0-128($np), $ACC0
953         and             \$-512, %rsp
954         vmovdqu         32*1-128($np), $ACC1
955         vmovdqu         32*2-128($np), $ACC2
956         vmovdqu         32*3-128($np), $ACC3
957         vmovdqu         32*4-128($np), $ACC4
958         vmovdqu         32*5-128($np), $ACC5
959         vmovdqu         32*6-128($np), $ACC6
960         vmovdqu         32*7-128($np), $ACC7
961         vmovdqu         32*8-128($np), $ACC8
962         lea             64+128(%rsp),$np
963         vmovdqu         $ACC0, 32*0-128($np)
964         vpxor           $ACC0, $ACC0, $ACC0
965         vmovdqu         $ACC1, 32*1-128($np)
966         vpxor           $ACC1, $ACC1, $ACC1
967         vmovdqu         $ACC2, 32*2-128($np)
968         vpxor           $ACC2, $ACC2, $ACC2
969         vmovdqu         $ACC3, 32*3-128($np)
970         vpxor           $ACC3, $ACC3, $ACC3
971         vmovdqu         $ACC4, 32*4-128($np)
972         vpxor           $ACC4, $ACC4, $ACC4
973         vmovdqu         $ACC5, 32*5-128($np)
974         vpxor           $ACC5, $ACC5, $ACC5
975         vmovdqu         $ACC6, 32*6-128($np)
976         vpxor           $ACC6, $ACC6, $ACC6
977         vmovdqu         $ACC7, 32*7-128($np)
978         vpxor           $ACC7, $ACC7, $ACC7
979         vmovdqu         $ACC8, 32*8-128($np)
980         vmovdqa         $ACC0, $ACC8
981         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
982 .Lmul_1024_no_n_copy:
983         and     \$-64,%rsp
984
985         mov     ($bp), %rbx
986         vpbroadcastq ($bp), $Bi
987         vmovdqu $ACC0, (%rsp)                   # clear top of stack
988         xor     $r0, $r0
989         .byte   0x67
990         xor     $r1, $r1
991         xor     $r2, $r2
992         xor     $r3, $r3
993
994         vmovdqu .Land_mask(%rip), $AND_MASK
995         mov     \$9, $i
996         vmovdqu $ACC9, 32*9-128($rp)            # $ACC9 is zero after vzeroall
997         jmp     .Loop_mul_1024
998
999 .align  32
1000 .Loop_mul_1024:
1001          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
1002         mov     %rbx, %rax
1003         imulq   -128($ap), %rax
1004         add     $r0, %rax
1005         mov     %rbx, $r1
1006         imulq   8-128($ap), $r1
1007         add     8(%rsp), $r1
1008
1009         mov     %rax, $r0
1010         imull   $n0, %eax
1011         and     \$0x1fffffff, %eax
1012
1013          mov    %rbx, $r2
1014          imulq  16-128($ap), $r2
1015          add    16(%rsp), $r2
1016
1017          mov    %rbx, $r3
1018          imulq  24-128($ap), $r3
1019          add    24(%rsp), $r3
1020         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1021          vmovd          %eax, $Yi
1022         vpaddq          $TEMP0,$ACC1,$ACC1
1023         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1024          vpbroadcastq   $Yi, $Yi
1025         vpaddq          $TEMP1,$ACC2,$ACC2
1026         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1027          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1028         vpaddq          $TEMP2,$ACC3,$ACC3
1029         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1030         vpaddq          $TEMP0,$ACC4,$ACC4
1031         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1032         vpaddq          $TEMP1,$ACC5,$ACC5
1033         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1034         vpaddq          $TEMP2,$ACC6,$ACC6
1035         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1036          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1037         vpaddq          $TEMP0,$ACC7,$ACC7
1038         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1039          vpbroadcastq   8($bp), $Bi
1040         vpaddq          $TEMP1,$ACC8,$ACC8
1041
1042         mov     %rax,%rdx
1043         imulq   -128($np),%rax
1044         add     %rax,$r0
1045         mov     %rdx,%rax
1046         imulq   8-128($np),%rax
1047         add     %rax,$r1
1048         mov     %rdx,%rax
1049         imulq   16-128($np),%rax
1050         add     %rax,$r2
1051         shr     \$29, $r0
1052         imulq   24-128($np),%rdx
1053         add     %rdx,$r3
1054         add     $r0, $r1
1055
1056         vpmuludq        32*1-128($np),$Yi,$TEMP2
1057          vmovq          $Bi, %rbx
1058         vpaddq          $TEMP2,$ACC1,$ACC1
1059         vpmuludq        32*2-128($np),$Yi,$TEMP0
1060         vpaddq          $TEMP0,$ACC2,$ACC2
1061         vpmuludq        32*3-128($np),$Yi,$TEMP1
1062         vpaddq          $TEMP1,$ACC3,$ACC3
1063         vpmuludq        32*4-128($np),$Yi,$TEMP2
1064         vpaddq          $TEMP2,$ACC4,$ACC4
1065         vpmuludq        32*5-128($np),$Yi,$TEMP0
1066         vpaddq          $TEMP0,$ACC5,$ACC5
1067         vpmuludq        32*6-128($np),$Yi,$TEMP1
1068         vpaddq          $TEMP1,$ACC6,$ACC6
1069         vpmuludq        32*7-128($np),$Yi,$TEMP2
1070          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1071         vpaddq          $TEMP2,$ACC7,$ACC7
1072         vpmuludq        32*8-128($np),$Yi,$TEMP0
1073          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1074         vpaddq          $TEMP0,$ACC8,$ACC8
1075
1076         mov     %rbx, %rax
1077         imulq   -128($ap),%rax
1078         add     %rax,$r1
1079          vmovdqu        -8+32*1-128($ap),$TEMP1
1080         mov     %rbx, %rax
1081         imulq   8-128($ap),%rax
1082         add     %rax,$r2
1083          vmovdqu        -8+32*2-128($ap),$TEMP2
1084
1085         mov     $r1, %rax
1086         imull   $n0, %eax
1087         and     \$0x1fffffff, %eax
1088
1089          imulq  16-128($ap),%rbx
1090          add    %rbx,$r3
1091         vpmuludq        $Bi,$TEMP1,$TEMP1
1092          vmovd          %eax, $Yi
1093         vmovdqu         -8+32*3-128($ap),$TEMP0
1094         vpaddq          $TEMP1,$ACC1,$ACC1
1095         vpmuludq        $Bi,$TEMP2,$TEMP2
1096          vpbroadcastq   $Yi, $Yi
1097         vmovdqu         -8+32*4-128($ap),$TEMP1
1098         vpaddq          $TEMP2,$ACC2,$ACC2
1099         vpmuludq        $Bi,$TEMP0,$TEMP0
1100         vmovdqu         -8+32*5-128($ap),$TEMP2
1101         vpaddq          $TEMP0,$ACC3,$ACC3
1102         vpmuludq        $Bi,$TEMP1,$TEMP1
1103         vmovdqu         -8+32*6-128($ap),$TEMP0
1104         vpaddq          $TEMP1,$ACC4,$ACC4
1105         vpmuludq        $Bi,$TEMP2,$TEMP2
1106         vmovdqu         -8+32*7-128($ap),$TEMP1
1107         vpaddq          $TEMP2,$ACC5,$ACC5
1108         vpmuludq        $Bi,$TEMP0,$TEMP0
1109         vmovdqu         -8+32*8-128($ap),$TEMP2
1110         vpaddq          $TEMP0,$ACC6,$ACC6
1111         vpmuludq        $Bi,$TEMP1,$TEMP1
1112         vmovdqu         -8+32*9-128($ap),$ACC9
1113         vpaddq          $TEMP1,$ACC7,$ACC7
1114         vpmuludq        $Bi,$TEMP2,$TEMP2
1115         vpaddq          $TEMP2,$ACC8,$ACC8
1116         vpmuludq        $Bi,$ACC9,$ACC9
1117          vpbroadcastq   16($bp), $Bi
1118
1119         mov     %rax,%rdx
1120         imulq   -128($np),%rax
1121         add     %rax,$r1
1122          vmovdqu        -8+32*1-128($np),$TEMP0
1123         mov     %rdx,%rax
1124         imulq   8-128($np),%rax
1125         add     %rax,$r2
1126          vmovdqu        -8+32*2-128($np),$TEMP1
1127         shr     \$29, $r1
1128         imulq   16-128($np),%rdx
1129         add     %rdx,$r3
1130         add     $r1, $r2
1131
1132         vpmuludq        $Yi,$TEMP0,$TEMP0
1133          vmovq          $Bi, %rbx
1134         vmovdqu         -8+32*3-128($np),$TEMP2
1135         vpaddq          $TEMP0,$ACC1,$ACC1
1136         vpmuludq        $Yi,$TEMP1,$TEMP1
1137         vmovdqu         -8+32*4-128($np),$TEMP0
1138         vpaddq          $TEMP1,$ACC2,$ACC2
1139         vpmuludq        $Yi,$TEMP2,$TEMP2
1140         vmovdqu         -8+32*5-128($np),$TEMP1
1141         vpaddq          $TEMP2,$ACC3,$ACC3
1142         vpmuludq        $Yi,$TEMP0,$TEMP0
1143         vmovdqu         -8+32*6-128($np),$TEMP2
1144         vpaddq          $TEMP0,$ACC4,$ACC4
1145         vpmuludq        $Yi,$TEMP1,$TEMP1
1146         vmovdqu         -8+32*7-128($np),$TEMP0
1147         vpaddq          $TEMP1,$ACC5,$ACC5
1148         vpmuludq        $Yi,$TEMP2,$TEMP2
1149         vmovdqu         -8+32*8-128($np),$TEMP1
1150         vpaddq          $TEMP2,$ACC6,$ACC6
1151         vpmuludq        $Yi,$TEMP0,$TEMP0
1152         vmovdqu         -8+32*9-128($np),$TEMP2
1153         vpaddq          $TEMP0,$ACC7,$ACC7
1154         vpmuludq        $Yi,$TEMP1,$TEMP1
1155         vpaddq          $TEMP1,$ACC8,$ACC8
1156         vpmuludq        $Yi,$TEMP2,$TEMP2
1157         vpaddq          $TEMP2,$ACC9,$ACC9
1158
1159          vmovdqu        -16+32*1-128($ap),$TEMP0
1160         mov     %rbx,%rax
1161         imulq   -128($ap),%rax
1162         add     $r2,%rax
1163
1164          vmovdqu        -16+32*2-128($ap),$TEMP1
1165         mov     %rax,$r2
1166         imull   $n0, %eax
1167         and     \$0x1fffffff, %eax
1168
1169          imulq  8-128($ap),%rbx
1170          add    %rbx,$r3
1171         vpmuludq        $Bi,$TEMP0,$TEMP0
1172          vmovd          %eax, $Yi
1173         vmovdqu         -16+32*3-128($ap),$TEMP2
1174         vpaddq          $TEMP0,$ACC1,$ACC1
1175         vpmuludq        $Bi,$TEMP1,$TEMP1
1176          vpbroadcastq   $Yi, $Yi
1177         vmovdqu         -16+32*4-128($ap),$TEMP0
1178         vpaddq          $TEMP1,$ACC2,$ACC2
1179         vpmuludq        $Bi,$TEMP2,$TEMP2
1180         vmovdqu         -16+32*5-128($ap),$TEMP1
1181         vpaddq          $TEMP2,$ACC3,$ACC3
1182         vpmuludq        $Bi,$TEMP0,$TEMP0
1183         vmovdqu         -16+32*6-128($ap),$TEMP2
1184         vpaddq          $TEMP0,$ACC4,$ACC4
1185         vpmuludq        $Bi,$TEMP1,$TEMP1
1186         vmovdqu         -16+32*7-128($ap),$TEMP0
1187         vpaddq          $TEMP1,$ACC5,$ACC5
1188         vpmuludq        $Bi,$TEMP2,$TEMP2
1189         vmovdqu         -16+32*8-128($ap),$TEMP1
1190         vpaddq          $TEMP2,$ACC6,$ACC6
1191         vpmuludq        $Bi,$TEMP0,$TEMP0
1192         vmovdqu         -16+32*9-128($ap),$TEMP2
1193         vpaddq          $TEMP0,$ACC7,$ACC7
1194         vpmuludq        $Bi,$TEMP1,$TEMP1
1195         vpaddq          $TEMP1,$ACC8,$ACC8
1196         vpmuludq        $Bi,$TEMP2,$TEMP2
1197          vpbroadcastq   24($bp), $Bi
1198         vpaddq          $TEMP2,$ACC9,$ACC9
1199
1200          vmovdqu        -16+32*1-128($np),$TEMP0
1201         mov     %rax,%rdx
1202         imulq   -128($np),%rax
1203         add     %rax,$r2
1204          vmovdqu        -16+32*2-128($np),$TEMP1
1205         imulq   8-128($np),%rdx
1206         add     %rdx,$r3
1207         shr     \$29, $r2
1208
1209         vpmuludq        $Yi,$TEMP0,$TEMP0
1210          vmovq          $Bi, %rbx
1211         vmovdqu         -16+32*3-128($np),$TEMP2
1212         vpaddq          $TEMP0,$ACC1,$ACC1
1213         vpmuludq        $Yi,$TEMP1,$TEMP1
1214         vmovdqu         -16+32*4-128($np),$TEMP0
1215         vpaddq          $TEMP1,$ACC2,$ACC2
1216         vpmuludq        $Yi,$TEMP2,$TEMP2
1217         vmovdqu         -16+32*5-128($np),$TEMP1
1218         vpaddq          $TEMP2,$ACC3,$ACC3
1219         vpmuludq        $Yi,$TEMP0,$TEMP0
1220         vmovdqu         -16+32*6-128($np),$TEMP2
1221         vpaddq          $TEMP0,$ACC4,$ACC4
1222         vpmuludq        $Yi,$TEMP1,$TEMP1
1223         vmovdqu         -16+32*7-128($np),$TEMP0
1224         vpaddq          $TEMP1,$ACC5,$ACC5
1225         vpmuludq        $Yi,$TEMP2,$TEMP2
1226         vmovdqu         -16+32*8-128($np),$TEMP1
1227         vpaddq          $TEMP2,$ACC6,$ACC6
1228         vpmuludq        $Yi,$TEMP0,$TEMP0
1229         vmovdqu         -16+32*9-128($np),$TEMP2
1230         vpaddq          $TEMP0,$ACC7,$ACC7
1231         vpmuludq        $Yi,$TEMP1,$TEMP1
1232          vmovdqu        -24+32*1-128($ap),$TEMP0
1233         vpaddq          $TEMP1,$ACC8,$ACC8
1234         vpmuludq        $Yi,$TEMP2,$TEMP2
1235          vmovdqu        -24+32*2-128($ap),$TEMP1
1236         vpaddq          $TEMP2,$ACC9,$ACC9
1237
1238         add     $r2, $r3
1239         imulq   -128($ap),%rbx
1240         add     %rbx,$r3
1241
1242         mov     $r3, %rax
1243         imull   $n0, %eax
1244         and     \$0x1fffffff, %eax
1245
1246         vpmuludq        $Bi,$TEMP0,$TEMP0
1247          vmovd          %eax, $Yi
1248         vmovdqu         -24+32*3-128($ap),$TEMP2
1249         vpaddq          $TEMP0,$ACC1,$ACC1
1250         vpmuludq        $Bi,$TEMP1,$TEMP1
1251          vpbroadcastq   $Yi, $Yi
1252         vmovdqu         -24+32*4-128($ap),$TEMP0
1253         vpaddq          $TEMP1,$ACC2,$ACC2
1254         vpmuludq        $Bi,$TEMP2,$TEMP2
1255         vmovdqu         -24+32*5-128($ap),$TEMP1
1256         vpaddq          $TEMP2,$ACC3,$ACC3
1257         vpmuludq        $Bi,$TEMP0,$TEMP0
1258         vmovdqu         -24+32*6-128($ap),$TEMP2
1259         vpaddq          $TEMP0,$ACC4,$ACC4
1260         vpmuludq        $Bi,$TEMP1,$TEMP1
1261         vmovdqu         -24+32*7-128($ap),$TEMP0
1262         vpaddq          $TEMP1,$ACC5,$ACC5
1263         vpmuludq        $Bi,$TEMP2,$TEMP2
1264         vmovdqu         -24+32*8-128($ap),$TEMP1
1265         vpaddq          $TEMP2,$ACC6,$ACC6
1266         vpmuludq        $Bi,$TEMP0,$TEMP0
1267         vmovdqu         -24+32*9-128($ap),$TEMP2
1268         vpaddq          $TEMP0,$ACC7,$ACC7
1269         vpmuludq        $Bi,$TEMP1,$TEMP1
1270         vpaddq          $TEMP1,$ACC8,$ACC8
1271         vpmuludq        $Bi,$TEMP2,$TEMP2
1272          vpbroadcastq   32($bp), $Bi
1273         vpaddq          $TEMP2,$ACC9,$ACC9
1274          add            \$32, $bp                       # $bp++
1275
1276         vmovdqu         -24+32*1-128($np),$TEMP0
1277         imulq   -128($np),%rax
1278         add     %rax,$r3
1279         shr     \$29, $r3
1280
1281         vmovdqu         -24+32*2-128($np),$TEMP1
1282         vpmuludq        $Yi,$TEMP0,$TEMP0
1283          vmovq          $Bi, %rbx
1284         vmovdqu         -24+32*3-128($np),$TEMP2
1285         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1286         vpmuludq        $Yi,$TEMP1,$TEMP1
1287          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1288         vpaddq          $TEMP1,$ACC2,$ACC1
1289         vmovdqu         -24+32*4-128($np),$TEMP0
1290         vpmuludq        $Yi,$TEMP2,$TEMP2
1291         vmovdqu         -24+32*5-128($np),$TEMP1
1292         vpaddq          $TEMP2,$ACC3,$ACC2
1293         vpmuludq        $Yi,$TEMP0,$TEMP0
1294         vmovdqu         -24+32*6-128($np),$TEMP2
1295         vpaddq          $TEMP0,$ACC4,$ACC3
1296         vpmuludq        $Yi,$TEMP1,$TEMP1
1297         vmovdqu         -24+32*7-128($np),$TEMP0
1298         vpaddq          $TEMP1,$ACC5,$ACC4
1299         vpmuludq        $Yi,$TEMP2,$TEMP2
1300         vmovdqu         -24+32*8-128($np),$TEMP1
1301         vpaddq          $TEMP2,$ACC6,$ACC5
1302         vpmuludq        $Yi,$TEMP0,$TEMP0
1303         vmovdqu         -24+32*9-128($np),$TEMP2
1304          mov    $r3, $r0
1305         vpaddq          $TEMP0,$ACC7,$ACC6
1306         vpmuludq        $Yi,$TEMP1,$TEMP1
1307          add    (%rsp), $r0
1308         vpaddq          $TEMP1,$ACC8,$ACC7
1309         vpmuludq        $Yi,$TEMP2,$TEMP2
1310          vmovq  $r3, $TEMP1
1311         vpaddq          $TEMP2,$ACC9,$ACC8
1312
1313         dec     $i
1314         jnz     .Loop_mul_1024
1315 ___
1316
1317 # (*)   The original implementation corrected ACC1-ACC3 for overflow
1318 #       after 7 loop runs, or after 28 iterations, or 56 additions.
1319 #       But as we underutilize resources, it's possible to correct in
1320 #       each iteration with only marginal performance loss. And since
1321 #       the correction is done in each iteration, we can correct fewer
1322 #       digits and avoid the performance penalty completely. Also note
1323 #       that we correct only three digits out of four; this works because
1324 #       the most significant digit is subjected to fewer additions.
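#       As a rough bound: a product of two 29-bit digits is below 2^58, so
#       a 64-bit lane has headroom for about 2^6 = 64 such additions; the
#       original once-per-7-runs schedule (56 additions) stayed under that
#       limit, while the per-iteration correction keeps the digits well
#       clear of it.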
1325
1326 $TEMP0 = $ACC9;
1327 $TEMP3 = $Bi;
1328 $TEMP4 = $Yi;
1329 $code.=<<___;
1330         vpermq          \$0, $AND_MASK, $AND_MASK
1331         vpaddq          (%rsp), $TEMP1, $ACC0
1332
1333         vpsrlq          \$29, $ACC0, $TEMP1
1334         vpand           $AND_MASK, $ACC0, $ACC0
1335         vpsrlq          \$29, $ACC1, $TEMP2
1336         vpand           $AND_MASK, $ACC1, $ACC1
1337         vpsrlq          \$29, $ACC2, $TEMP3
1338         vpermq          \$0x93, $TEMP1, $TEMP1
1339         vpand           $AND_MASK, $ACC2, $ACC2
1340         vpsrlq          \$29, $ACC3, $TEMP4
1341         vpermq          \$0x93, $TEMP2, $TEMP2
1342         vpand           $AND_MASK, $ACC3, $ACC3
1343
1344         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1345         vpermq          \$0x93, $TEMP3, $TEMP3
1346         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1347         vpermq          \$0x93, $TEMP4, $TEMP4
1348         vpaddq          $TEMP0, $ACC0, $ACC0
1349         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1350         vpaddq          $TEMP1, $ACC1, $ACC1
1351         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1352         vpaddq          $TEMP2, $ACC2, $ACC2
1353         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1354         vpaddq          $TEMP3, $ACC3, $ACC3
1355         vpaddq          $TEMP4, $ACC4, $ACC4
1356
1357         vpsrlq          \$29, $ACC0, $TEMP1
1358         vpand           $AND_MASK, $ACC0, $ACC0
1359         vpsrlq          \$29, $ACC1, $TEMP2
1360         vpand           $AND_MASK, $ACC1, $ACC1
1361         vpsrlq          \$29, $ACC2, $TEMP3
1362         vpermq          \$0x93, $TEMP1, $TEMP1
1363         vpand           $AND_MASK, $ACC2, $ACC2
1364         vpsrlq          \$29, $ACC3, $TEMP4
1365         vpermq          \$0x93, $TEMP2, $TEMP2
1366         vpand           $AND_MASK, $ACC3, $ACC3
1367         vpermq          \$0x93, $TEMP3, $TEMP3
1368
1369         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1370         vpermq          \$0x93, $TEMP4, $TEMP4
1371         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1372         vpaddq          $TEMP0, $ACC0, $ACC0
1373         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1374         vpaddq          $TEMP1, $ACC1, $ACC1
1375         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1376         vpaddq          $TEMP2, $ACC2, $ACC2
1377         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1378         vpaddq          $TEMP3, $ACC3, $ACC3
1379         vpaddq          $TEMP4, $ACC4, $ACC4
1380
1381         vmovdqu         $ACC0, 0-128($rp)
1382         vmovdqu         $ACC1, 32-128($rp)
1383         vmovdqu         $ACC2, 64-128($rp)
1384         vmovdqu         $ACC3, 96-128($rp)
1385 ___
1386
1387 $TEMP5=$ACC0;
1388 $code.=<<___;
1389         vpsrlq          \$29, $ACC4, $TEMP1
1390         vpand           $AND_MASK, $ACC4, $ACC4
1391         vpsrlq          \$29, $ACC5, $TEMP2
1392         vpand           $AND_MASK, $ACC5, $ACC5
1393         vpsrlq          \$29, $ACC6, $TEMP3
1394         vpermq          \$0x93, $TEMP1, $TEMP1
1395         vpand           $AND_MASK, $ACC6, $ACC6
1396         vpsrlq          \$29, $ACC7, $TEMP4
1397         vpermq          \$0x93, $TEMP2, $TEMP2
1398         vpand           $AND_MASK, $ACC7, $ACC7
1399         vpsrlq          \$29, $ACC8, $TEMP5
1400         vpermq          \$0x93, $TEMP3, $TEMP3
1401         vpand           $AND_MASK, $ACC8, $ACC8
1402         vpermq          \$0x93, $TEMP4, $TEMP4
1403
1404         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1405         vpermq          \$0x93, $TEMP5, $TEMP5
1406         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1407         vpaddq          $TEMP0, $ACC4, $ACC4
1408         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1409         vpaddq          $TEMP1, $ACC5, $ACC5
1410         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1411         vpaddq          $TEMP2, $ACC6, $ACC6
1412         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1413         vpaddq          $TEMP3, $ACC7, $ACC7
1414         vpaddq          $TEMP4, $ACC8, $ACC8
1415
1416         vpsrlq          \$29, $ACC4, $TEMP1
1417         vpand           $AND_MASK, $ACC4, $ACC4
1418         vpsrlq          \$29, $ACC5, $TEMP2
1419         vpand           $AND_MASK, $ACC5, $ACC5
1420         vpsrlq          \$29, $ACC6, $TEMP3
1421         vpermq          \$0x93, $TEMP1, $TEMP1
1422         vpand           $AND_MASK, $ACC6, $ACC6
1423         vpsrlq          \$29, $ACC7, $TEMP4
1424         vpermq          \$0x93, $TEMP2, $TEMP2
1425         vpand           $AND_MASK, $ACC7, $ACC7
1426         vpsrlq          \$29, $ACC8, $TEMP5
1427         vpermq          \$0x93, $TEMP3, $TEMP3
1428         vpand           $AND_MASK, $ACC8, $ACC8
1429         vpermq          \$0x93, $TEMP4, $TEMP4
1430
1431         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1432         vpermq          \$0x93, $TEMP5, $TEMP5
1433         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1434         vpaddq          $TEMP0, $ACC4, $ACC4
1435         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1436         vpaddq          $TEMP1, $ACC5, $ACC5
1437         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1438         vpaddq          $TEMP2, $ACC6, $ACC6
1439         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1440         vpaddq          $TEMP3, $ACC7, $ACC7
1441         vpaddq          $TEMP4, $ACC8, $ACC8
1442
1443         vmovdqu         $ACC4, 128-128($rp)
1444         vmovdqu         $ACC5, 160-128($rp)
1445         vmovdqu         $ACC6, 192-128($rp)
1446         vmovdqu         $ACC7, 224-128($rp)
1447         vmovdqu         $ACC8, 256-128($rp)
1448         vzeroupper
1449
1450         mov     %rbp, %rax
1451 .cfi_def_cfa_register   %rax
1452 ___
1453 $code.=<<___ if ($win64);
1454 .Lmul_1024_in_tail:
1455         movaps  -0xd8(%rax),%xmm6
1456         movaps  -0xc8(%rax),%xmm7
1457         movaps  -0xb8(%rax),%xmm8
1458         movaps  -0xa8(%rax),%xmm9
1459         movaps  -0x98(%rax),%xmm10
1460         movaps  -0x88(%rax),%xmm11
1461         movaps  -0x78(%rax),%xmm12
1462         movaps  -0x68(%rax),%xmm13
1463         movaps  -0x58(%rax),%xmm14
1464         movaps  -0x48(%rax),%xmm15
1465 ___
1466 $code.=<<___;
1467         mov     -48(%rax),%r15
1468 .cfi_restore    %r15
1469         mov     -40(%rax),%r14
1470 .cfi_restore    %r14
1471         mov     -32(%rax),%r13
1472 .cfi_restore    %r13
1473         mov     -24(%rax),%r12
1474 .cfi_restore    %r12
1475         mov     -16(%rax),%rbp
1476 .cfi_restore    %rbp
1477         mov     -8(%rax),%rbx
1478 .cfi_restore    %rbx
1479         lea     (%rax),%rsp             # restore %rsp
1480 .cfi_def_cfa_register   %rsp
1481 .Lmul_1024_epilogue:
1482         ret
1483 .cfi_endproc
1484 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1485 ___
1486 }
1487 {
1488 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1489 my @T = map("%r$_",(8..11));
1490
1491 $code.=<<___;
1492 .globl  rsaz_1024_red2norm_avx2
1493 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1494 .align  32
1495 rsaz_1024_red2norm_avx2:
1496         sub     \$-128,$inp     # size optimization: -128 fits in imm8, +128 would not
1497         xor     %rax,%rax
1498 ___
1499
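# What the generated code computes, as a rough C reference (a sketch for
# illustration only; the helper name and the __int128 shortcut are not
# part of the real code).  The redundant form keeps one 29-bit digit per
# 64-bit word, and 36 such digits cover 1024 bits:
#
#       static void red2norm_ref(uint64_t out[16], const uint64_t red[36])
#       {
#               unsigned __int128 acc = 0;
#               int i, j;
#               for (i = 0, j = 0; i < 16; i++) {
#                       while (29*j < 64*(i+1))   /* digits overlapping word i */
#                               acc += (unsigned __int128)red[j] << (29*j - 64*i), j++;
#                       out[i] = (uint64_t)acc;   /* low 64 bits */
#                       acc >>= 64;               /* spill + carry into word i+1 */
#               }
#       }
#
# The loop below does the same with 64-bit shifts and an explicit adc.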
1500 for ($j=0,$i=0; $i<16; $i++) {
1501     my $k=0;
1502     while (29*$j<64*($i+1)) {   # load data till boundary
1503         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1504         $j++; $k++; push(@T,shift(@T));
1505     }
1506     $l=$k;
1507     while ($k>1) {              # shift all loaded values except the last
1508         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1509         $k--;
1510     }
1511     $code.=<<___;               # shift last value
1512         mov     @T[-1], @T[0]
1513         shl     \$`29*($j-1)`, @T[-1]
1514         shr     \$`-29*($j-1)`, @T[0]
1515 ___
1516     while ($l) {                # accumulate all values
1517         $code.="        add     @T[-$l], %rax\n";
1518         $l--;
1519     }
1520         $code.=<<___;
1521         adc     \$0, @T[0]      # consume carry, if any
1522         mov     %rax, 8*$i($out)
1523         mov     @T[0], %rax
1524 ___
1525     push(@T,shift(@T));
1526 }
1527 $code.=<<___;
1528         ret
1529 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1530
1531 .globl  rsaz_1024_norm2red_avx2
1532 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1533 .align  32
1534 rsaz_1024_norm2red_avx2:
1535         sub     \$-128,$out     # size optimization: -128 fits in imm8, +128 would not
1536         mov     ($inp),@T[0]
1537         mov     \$0x1fffffff,%eax
1538 ___
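# Rough C reference for the conversion in the other direction (a sketch
# for illustration only; the helper name is hypothetical).  Each 29-bit
# digit goes into its own 64-bit word, followed by four zero words of
# padding:
#
#       static void norm2red_ref(uint64_t red[40], const uint64_t in[16])
#       {
#               int j;
#               for (j = 0; j < 36; j++) {
#                       int w = (29*j) / 64, b = (29*j) % 64;
#                       uint64_t lo = in[w] >> b;
#                       uint64_t hi = (b > 35 && w < 15) ? in[w+1] << (64-b) : 0;
#                       red[j] = (lo | hi) & 0x1fffffff;
#               }
#               red[36] = red[37] = red[38] = red[39] = 0;
#       }
#
# The generated code below uses shrd to pick up the digits that straddle
# a 64-bit boundary.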
1539 for ($j=0,$i=0; $i<16; $i++) {
1540     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1541     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1542     my $k=1;
1543     while (29*($j+1)<64*($i+1)) {
1544         $code.=<<___;
1545         mov     @T[0],@T[-$k]
1546         shr     \$`29*$j`,@T[-$k]
1547         and     %rax,@T[-$k]                            # &0x1fffffff
1548         mov     @T[-$k],`8*$j-128`($out)
1549 ___
1550         $j++; $k++;
1551     }
1552     $code.=<<___;
1553         shrd    \$`29*$j`,@T[1],@T[0]
1554         and     %rax,@T[0]
1555         mov     @T[0],`8*$j-128`($out)
1556 ___
1557     $j++;
1558     push(@T,shift(@T));
1559 }
1560 $code.=<<___;
1561         mov     @T[0],`8*$j-128`($out)                  # zero
1562         mov     @T[0],`8*($j+1)-128`($out)
1563         mov     @T[0],`8*($j+2)-128`($out)
1564         mov     @T[0],`8*($j+3)-128`($out)
1565         ret
1566 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1567 ___
1568 }
1569 {
1570 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1571
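# rsaz_1024_scatter5_avx2 writes one pre-computed power into a table of
# 32 entries laid out with a 16-byte stride, so that the i-th 16-byte
# chunk of every entry sits in the same 512-byte "row" (each group of
# four 29-bit digits is compressed to 128 bits on the way in and
# re-expanded on the way out).  rsaz_1024_gather5_avx2 reads an entry
# back in constant time: every row is loaded in full and the wanted
# entry is selected with vpcmpeqd-generated masks, so the memory access
# pattern does not depend on $power.  A rough C sketch of the selection
# idea (illustration only; names and the exact layout are simplified):
#
#       /* N = size of one entry in 32-bit words, table[32][N] */
#       for (i = 0; i < N; i++) {
#               uint32_t v = 0;
#               for (k = 0; k < 32; k++)
#                       v |= table[k][i] & (0 - (uint32_t)(k == power));
#               out[i] = v;
#       }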
1572 $code.=<<___;
1573 .globl  rsaz_1024_scatter5_avx2
1574 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1575 .align  32
1576 rsaz_1024_scatter5_avx2:
1577         vzeroupper
1578         vmovdqu .Lscatter_permd(%rip),%ymm5
1579         shl     \$4,$power
1580         lea     ($out,$power),$out
1581         mov     \$9,%eax
1582         jmp     .Loop_scatter_1024
1583
1584 .align  32
1585 .Loop_scatter_1024:
1586         vmovdqu         ($inp),%ymm0
1587         lea             32($inp),$inp
1588         vpermd          %ymm0,%ymm5,%ymm0
1589         vmovdqu         %xmm0,($out)
1590         lea             16*32($out),$out
1591         dec     %eax
1592         jnz     .Loop_scatter_1024
1593
1594         vzeroupper
1595         ret
1596 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1597
1598 .globl  rsaz_1024_gather5_avx2
1599 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1600 .align  32
1601 rsaz_1024_gather5_avx2:
1602 .cfi_startproc
1603         vzeroupper
1604         mov     %rsp,%r11
1605 .cfi_def_cfa_register   %r11
1606 ___
1607 $code.=<<___ if ($win64);
1608         lea     -0x88(%rsp),%rax
1609 .LSEH_begin_rsaz_1024_gather5:
1610         # I can't trust assembler to use specific encoding:-(
1611         .byte   0x48,0x8d,0x60,0xe0             # lea   -0x20(%rax),%rsp
1612         .byte   0xc5,0xf8,0x29,0x70,0xe0        # vmovaps %xmm6,-0x20(%rax)
1613         .byte   0xc5,0xf8,0x29,0x78,0xf0        # vmovaps %xmm7,-0x10(%rax)
1614         .byte   0xc5,0x78,0x29,0x40,0x00        # vmovaps %xmm8,0(%rax)
1615         .byte   0xc5,0x78,0x29,0x48,0x10        # vmovaps %xmm9,0x10(%rax)
1616         .byte   0xc5,0x78,0x29,0x50,0x20        # vmovaps %xmm10,0x20(%rax)
1617         .byte   0xc5,0x78,0x29,0x58,0x30        # vmovaps %xmm11,0x30(%rax)
1618         .byte   0xc5,0x78,0x29,0x60,0x40        # vmovaps %xmm12,0x40(%rax)
1619         .byte   0xc5,0x78,0x29,0x68,0x50        # vmovaps %xmm13,0x50(%rax)
1620         .byte   0xc5,0x78,0x29,0x70,0x60        # vmovaps %xmm14,0x60(%rax)
1621         .byte   0xc5,0x78,0x29,0x78,0x70        # vmovaps %xmm15,0x70(%rax)
1622 ___
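# The unwind codes in .LSEH_info_rsaz_1024_gather5 below describe this
# prologue by fixed instruction offsets, which is presumably why the
# instructions are emitted as literal bytes rather than left to the
# assembler's choice of encoding.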
1623 $code.=<<___;
1624         lea     -0x100(%rsp),%rsp
1625         and     \$-32, %rsp
1626         lea     .Linc(%rip), %r10
1627         lea     -128(%rsp),%rax                 # control u-op density
1628
1629         vmovd           $power, %xmm4
1630         vmovdqa         (%r10),%ymm0
1631         vmovdqa         32(%r10),%ymm1
1632         vmovdqa         64(%r10),%ymm5
1633         vpbroadcastd    %xmm4,%ymm4
1634
1635         vpaddd          %ymm5, %ymm0, %ymm2
1636         vpcmpeqd        %ymm4, %ymm0, %ymm0
1637         vpaddd          %ymm5, %ymm1, %ymm3
1638         vpcmpeqd        %ymm4, %ymm1, %ymm1
1639         vmovdqa         %ymm0, 32*0+128(%rax)
1640         vpaddd          %ymm5, %ymm2, %ymm0
1641         vpcmpeqd        %ymm4, %ymm2, %ymm2
1642         vmovdqa         %ymm1, 32*1+128(%rax)
1643         vpaddd          %ymm5, %ymm3, %ymm1
1644         vpcmpeqd        %ymm4, %ymm3, %ymm3
1645         vmovdqa         %ymm2, 32*2+128(%rax)
1646         vpaddd          %ymm5, %ymm0, %ymm2
1647         vpcmpeqd        %ymm4, %ymm0, %ymm0
1648         vmovdqa         %ymm3, 32*3+128(%rax)
1649         vpaddd          %ymm5, %ymm1, %ymm3
1650         vpcmpeqd        %ymm4, %ymm1, %ymm1
1651         vmovdqa         %ymm0, 32*4+128(%rax)
1652         vpaddd          %ymm5, %ymm2, %ymm8
1653         vpcmpeqd        %ymm4, %ymm2, %ymm2
1654         vmovdqa         %ymm1, 32*5+128(%rax)
1655         vpaddd          %ymm5, %ymm3, %ymm9
1656         vpcmpeqd        %ymm4, %ymm3, %ymm3
1657         vmovdqa         %ymm2, 32*6+128(%rax)
1658         vpaddd          %ymm5, %ymm8, %ymm10
1659         vpcmpeqd        %ymm4, %ymm8, %ymm8
1660         vmovdqa         %ymm3, 32*7+128(%rax)
1661         vpaddd          %ymm5, %ymm9, %ymm11
1662         vpcmpeqd        %ymm4, %ymm9, %ymm9
1663         vpaddd          %ymm5, %ymm10, %ymm12
1664         vpcmpeqd        %ymm4, %ymm10, %ymm10
1665         vpaddd          %ymm5, %ymm11, %ymm13
1666         vpcmpeqd        %ymm4, %ymm11, %ymm11
1667         vpaddd          %ymm5, %ymm12, %ymm14
1668         vpcmpeqd        %ymm4, %ymm12, %ymm12
1669         vpaddd          %ymm5, %ymm13, %ymm15
1670         vpcmpeqd        %ymm4, %ymm13, %ymm13
1671         vpcmpeqd        %ymm4, %ymm14, %ymm14
1672         vpcmpeqd        %ymm4, %ymm15, %ymm15
1673
1674         vmovdqa -32(%r10),%ymm7                 # .Lgather_permd
1675         lea     128($inp), $inp
1676         mov     \$9,$power
1677
1678 .Loop_gather_1024:
1679         vmovdqa         32*0-128($inp), %ymm0
1680         vmovdqa         32*1-128($inp), %ymm1
1681         vmovdqa         32*2-128($inp), %ymm2
1682         vmovdqa         32*3-128($inp), %ymm3
1683         vpand           32*0+128(%rax), %ymm0,  %ymm0
1684         vpand           32*1+128(%rax), %ymm1,  %ymm1
1685         vpand           32*2+128(%rax), %ymm2,  %ymm2
1686         vpor            %ymm0, %ymm1, %ymm4
1687         vpand           32*3+128(%rax), %ymm3,  %ymm3
1688         vmovdqa         32*4-128($inp), %ymm0
1689         vmovdqa         32*5-128($inp), %ymm1
1690         vpor            %ymm2, %ymm3, %ymm5
1691         vmovdqa         32*6-128($inp), %ymm2
1692         vmovdqa         32*7-128($inp), %ymm3
1693         vpand           32*4+128(%rax), %ymm0,  %ymm0
1694         vpand           32*5+128(%rax), %ymm1,  %ymm1
1695         vpand           32*6+128(%rax), %ymm2,  %ymm2
1696         vpor            %ymm0, %ymm4, %ymm4
1697         vpand           32*7+128(%rax), %ymm3,  %ymm3
1698         vpand           32*8-128($inp), %ymm8,  %ymm0
1699         vpor            %ymm1, %ymm5, %ymm5
1700         vpand           32*9-128($inp), %ymm9,  %ymm1
1701         vpor            %ymm2, %ymm4, %ymm4
1702         vpand           32*10-128($inp),%ymm10, %ymm2
1703         vpor            %ymm3, %ymm5, %ymm5
1704         vpand           32*11-128($inp),%ymm11, %ymm3
1705         vpor            %ymm0, %ymm4, %ymm4
1706         vpand           32*12-128($inp),%ymm12, %ymm0
1707         vpor            %ymm1, %ymm5, %ymm5
1708         vpand           32*13-128($inp),%ymm13, %ymm1
1709         vpor            %ymm2, %ymm4, %ymm4
1710         vpand           32*14-128($inp),%ymm14, %ymm2
1711         vpor            %ymm3, %ymm5, %ymm5
1712         vpand           32*15-128($inp),%ymm15, %ymm3
1713         lea             32*16($inp), $inp
1714         vpor            %ymm0, %ymm4, %ymm4
1715         vpor            %ymm1, %ymm5, %ymm5
1716         vpor            %ymm2, %ymm4, %ymm4
1717         vpor            %ymm3, %ymm5, %ymm5
1718
1719         vpor            %ymm5, %ymm4, %ymm4
1720         vextracti128    \$1, %ymm4, %xmm5       # upper half is cleared
1721         vpor            %xmm4, %xmm5, %xmm5
1722         vpermd          %ymm5,%ymm7,%ymm5
1723         vmovdqu         %ymm5,($out)
1724         lea             32($out),$out
1725         dec     $power
1726         jnz     .Loop_gather_1024
1727
1728         vpxor   %ymm0,%ymm0,%ymm0
1729         vmovdqu %ymm0,($out)
1730         vzeroupper
1731 ___
1732 $code.=<<___ if ($win64);
1733         movaps  -0xa8(%r11),%xmm6
1734         movaps  -0x98(%r11),%xmm7
1735         movaps  -0x88(%r11),%xmm8
1736         movaps  -0x78(%r11),%xmm9
1737         movaps  -0x68(%r11),%xmm10
1738         movaps  -0x58(%r11),%xmm11
1739         movaps  -0x48(%r11),%xmm12
1740         movaps  -0x38(%r11),%xmm13
1741         movaps  -0x28(%r11),%xmm14
1742         movaps  -0x18(%r11),%xmm15
1743 ___
1744 $code.=<<___;
1745         lea     (%r11),%rsp
1746 .cfi_def_cfa_register   %rsp
1747         ret
1748 .cfi_endproc
1749 .LSEH_end_rsaz_1024_gather5:
1750 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1751 ___
1752 }
1753
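# rsaz_avx2_eligible reports whether this AVX2 path should be used.
# Roughly (a sketch only; bit positions refer to OPENSSL_ia32cap_P[2],
# i.e. CPUID.7.EBX, and the BMI2+ADX check is emitted only when the
# assembler is new enough, see $addx):
#
#       cap = OPENSSL_ia32cap_P[2];
#       if ((cap & (1<<8 | 1<<19)) == (1<<8 | 1<<19))   /* BMI2 + ADX */
#               return 0;       /* scalar AD*X code path is preferred */
#       return (cap >> 5) & 1;  /* AVX2 */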
1754 $code.=<<___;
1755 .extern OPENSSL_ia32cap_P
1756 .globl  rsaz_avx2_eligible
1757 .type   rsaz_avx2_eligible,\@abi-omnipotent
1758 .align  32
1759 rsaz_avx2_eligible:
1760         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1761 ___
1762 $code.=<<___    if ($addx);
1763         mov     \$`1<<8|1<<19`,%ecx
1764         mov     \$0,%edx
1765         and     %eax,%ecx
1766         cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
1767         cmove   %edx,%eax
1768 ___
1769 $code.=<<___;
1770         and     \$`1<<5`,%eax
1771         shr     \$5,%eax
1772         ret
1773 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1774
1775 .align  64
1776 .Land_mask:
1777         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1778 .Lscatter_permd:
1779         .long   0,2,4,6,7,7,7,7
1780 .Lgather_permd:
1781         .long   0,7,1,7,2,7,3,7
1782 .Linc:
1783         .long   0,0,0,0, 1,1,1,1
1784         .long   2,2,2,2, 3,3,3,3
1785         .long   4,4,4,4, 4,4,4,4
1786 .align  64
1787 ___
1788
1789 if ($win64) {
1790 $rec="%rcx";
1791 $frame="%rdx";
1792 $context="%r8";
1793 $disp="%r9";
1794
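# rsaz_se_handler is the Win64 unwind handler shared by
# rsaz_1024_sqr_avx2 and rsaz_1024_mul_avx2.  HandlerData carries three
# RVAs: start of the body, the epilogue, and the "in tail" label.  If
# context->Rip falls inside the body, the handler locates the register
# save area (via %rbp before the tail, via %rax once the tail has been
# reached), copies the saved %rbx/%rbp/%r12-%r15 and %xmm6-%xmm15 back
# into the CONTEXT, and then defers to RtlVirtualUnwind.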
1795 $code.=<<___
1796 .extern __imp_RtlVirtualUnwind
1797 .type   rsaz_se_handler,\@abi-omnipotent
1798 .align  16
1799 rsaz_se_handler:
1800         push    %rsi
1801         push    %rdi
1802         push    %rbx
1803         push    %rbp
1804         push    %r12
1805         push    %r13
1806         push    %r14
1807         push    %r15
1808         pushfq
1809         sub     \$64,%rsp
1810
1811         mov     120($context),%rax      # pull context->Rax
1812         mov     248($context),%rbx      # pull context->Rip
1813
1814         mov     8($disp),%rsi           # disp->ImageBase
1815         mov     56($disp),%r11          # disp->HandlerData
1816
1817         mov     0(%r11),%r10d           # HandlerData[0]
1818         lea     (%rsi,%r10),%r10        # prologue label
1819         cmp     %r10,%rbx               # context->Rip<prologue label
1820         jb      .Lcommon_seh_tail
1821
1822         mov     4(%r11),%r10d           # HandlerData[1]
1823         lea     (%rsi,%r10),%r10        # epilogue label
1824         cmp     %r10,%rbx               # context->Rip>=epilogue label
1825         jae     .Lcommon_seh_tail
1826
1827         mov     160($context),%rbp      # pull context->Rbp
1828
1829         mov     8(%r11),%r10d           # HandlerData[2]
1830         lea     (%rsi,%r10),%r10        # "in tail" label
1831         cmp     %r10,%rbx               # context->Rip>="in tail" label
1832         cmovc   %rbp,%rax
1833
1834         mov     -48(%rax),%r15
1835         mov     -40(%rax),%r14
1836         mov     -32(%rax),%r13
1837         mov     -24(%rax),%r12
1838         mov     -16(%rax),%rbp
1839         mov     -8(%rax),%rbx
1840         mov     %r15,240($context)
1841         mov     %r14,232($context)
1842         mov     %r13,224($context)
1843         mov     %r12,216($context)
1844         mov     %rbp,160($context)
1845         mov     %rbx,144($context)
1846
1847         lea     -0xd8(%rax),%rsi        # %xmm save area
1848         lea     512($context),%rdi      # & context.Xmm6
1849         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1850         .long   0xa548f3fc              # cld; rep movsq
1851
1852 .Lcommon_seh_tail:
1853         mov     8(%rax),%rdi
1854         mov     16(%rax),%rsi
1855         mov     %rax,152($context)      # restore context->Rsp
1856         mov     %rsi,168($context)      # restore context->Rsi
1857         mov     %rdi,176($context)      # restore context->Rdi
1858
1859         mov     40($disp),%rdi          # disp->ContextRecord
1860         mov     $context,%rsi           # context
1861         mov     \$154,%ecx              # sizeof(CONTEXT)/8, in quadwords
1862         .long   0xa548f3fc              # cld; rep movsq
1863
1864         mov     $disp,%rsi
1865         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1866         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1867         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1868         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1869         mov     40(%rsi),%r10           # disp->ContextRecord
1870         lea     56(%rsi),%r11           # &disp->HandlerData
1871         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1872         mov     %r10,32(%rsp)           # arg5
1873         mov     %r11,40(%rsp)           # arg6
1874         mov     %r12,48(%rsp)           # arg7
1875         mov     %rcx,56(%rsp)           # arg8, (NULL)
1876         call    *__imp_RtlVirtualUnwind(%rip)
1877
1878         mov     \$1,%eax                # ExceptionContinueSearch
1879         add     \$64,%rsp
1880         popfq
1881         pop     %r15
1882         pop     %r14
1883         pop     %r13
1884         pop     %r12
1885         pop     %rbp
1886         pop     %rbx
1887         pop     %rdi
1888         pop     %rsi
1889         ret
1890 .size   rsaz_se_handler,.-rsaz_se_handler
1891
1892 .section        .pdata
1893 .align  4
1894         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1895         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1896         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1897
1898         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1899         .rva    .LSEH_end_rsaz_1024_mul_avx2
1900         .rva    .LSEH_info_rsaz_1024_mul_avx2
1901
1902         .rva    .LSEH_begin_rsaz_1024_gather5
1903         .rva    .LSEH_end_rsaz_1024_gather5
1904         .rva    .LSEH_info_rsaz_1024_gather5
1905 .section        .xdata
1906 .align  8
1907 .LSEH_info_rsaz_1024_sqr_avx2:
1908         .byte   9,0,0,0
1909         .rva    rsaz_se_handler
1910         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
1911         .long   0
1912 .LSEH_info_rsaz_1024_mul_avx2:
1913         .byte   9,0,0,0
1914         .rva    rsaz_se_handler
1915         .rva    .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
1916         .long   0
1917 .LSEH_info_rsaz_1024_gather5:
1918         .byte   0x01,0x36,0x17,0x0b
1919         .byte   0x36,0xf8,0x09,0x00     # vmovaps 0x90(rsp),xmm15
1920         .byte   0x31,0xe8,0x08,0x00     # vmovaps 0x80(rsp),xmm14
1921         .byte   0x2c,0xd8,0x07,0x00     # vmovaps 0x70(rsp),xmm13
1922         .byte   0x27,0xc8,0x06,0x00     # vmovaps 0x60(rsp),xmm12
1923         .byte   0x22,0xb8,0x05,0x00     # vmovaps 0x50(rsp),xmm11
1924         .byte   0x1d,0xa8,0x04,0x00     # vmovaps 0x40(rsp),xmm10
1925         .byte   0x18,0x98,0x03,0x00     # vmovaps 0x30(rsp),xmm9
1926         .byte   0x13,0x88,0x02,0x00     # vmovaps 0x20(rsp),xmm8
1927         .byte   0x0e,0x78,0x01,0x00     # vmovaps 0x10(rsp),xmm7
1928         .byte   0x09,0x68,0x00,0x00     # vmovaps 0x00(rsp),xmm6
1929         .byte   0x04,0x01,0x15,0x00     # sub     rsp,0xa8
1930         .byte   0x00,0xb3,0x00,0x00     # set_frame r11
1931 ___
1932 }
1933
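# Post-process the generated code: evaluate the backticked constant
# arithmetic, reduce shift counts modulo 64, and rewrite instructions
# such as vmovd/vmovq, vpinsr/vpextr and vpbroadcast to take %xmm
# rather than %ymm operands, as the ISA requires.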
1934 foreach (split("\n",$code)) {
1935         s/\`([^\`]*)\`/eval($1)/ge;
1936
1937         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1938
1939         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1940         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1941         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1942         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1943         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1944         print $_,"\n";
1945 }
1946
1947 }}} else {{{
1948 print <<___;    # assembler is too old
1949 .text
1950
1951 .globl  rsaz_avx2_eligible
1952 .type   rsaz_avx2_eligible,\@abi-omnipotent
1953 rsaz_avx2_eligible:
1954         xor     %eax,%eax
1955         ret
1956 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1957
1958 .globl  rsaz_1024_sqr_avx2
1959 .globl  rsaz_1024_mul_avx2
1960 .globl  rsaz_1024_norm2red_avx2
1961 .globl  rsaz_1024_red2norm_avx2
1962 .globl  rsaz_1024_scatter5_avx2
1963 .globl  rsaz_1024_gather5_avx2
1964 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1965 rsaz_1024_sqr_avx2:
1966 rsaz_1024_mul_avx2:
1967 rsaz_1024_norm2red_avx2:
1968 rsaz_1024_red2norm_avx2:
1969 rsaz_1024_scatter5_avx2:
1970 rsaz_1024_gather5_avx2:
1971         .byte   0x0f,0x0b       # ud2
1972         ret
1973 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1974 ___
1975 }}}
1976
1977 close STDOUT;