[openssl.git] / crypto / bn / asm / rsaz-avx2.pl
1 #!/usr/bin/env perl
2
3 ##############################################################################
4 #                                                                            #
5 #  Copyright (c) 2012, Intel Corporation                                     #
6 #                                                                            #
7 #  All rights reserved.                                                      #
8 #                                                                            #
9 #  Redistribution and use in source and binary forms, with or without        #
10 #  modification, are permitted provided that the following conditions are    #
11 #  met:                                                                      #
12 #                                                                            #
13 #  *  Redistributions of source code must retain the above copyright         #
14 #     notice, this list of conditions and the following disclaimer.          #
15 #                                                                            #
16 #  *  Redistributions in binary form must reproduce the above copyright      #
17 #     notice, this list of conditions and the following disclaimer in the    #
18 #     documentation and/or other materials provided with the                 #
19 #     distribution.                                                          #
20 #                                                                            #
21 #  *  Neither the name of the Intel Corporation nor the names of its         #
22 #     contributors may be used to endorse or promote products derived from   #
23 #     this software without specific prior written permission.               #
24 #                                                                            #
25 #                                                                            #
26 #  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY            #
27 #  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         #
28 #  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        #
29 #  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            #
30 #  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     #
31 #  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       #
32 #  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        #
33 #  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    #
34 #  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      #
35 #  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        #
36 #  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              #
37 #                                                                            #
38 ##############################################################################
39 # Developers and authors:                                                    #
40 # Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
41 # (1) Intel Corporation, Israel Development Center, Haifa, Israel            #
42 # (2) University of Haifa, Israel                                            #
43 ##############################################################################
44 # Reference:                                                                 #
45 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular             #
46 #     Exponentiation, Using Advanced Vector Instructions Architectures",     #
47 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,   #
48 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012              #
49 # [2] S. Gueron: "Efficient Software Implementations of Modular              #
50 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).  #
51 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE        #
52 #     Proceedings of 9th International Conference on Information Technology: #
53 #     New Generations (ITNG 2012), pp.821-823 (2012)                         #
54 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    #
55 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048      #
56 #     on AVX2 capable x86_64 platforms",                                     #
57 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
58 ##############################################################################
59 #
60 # +13% improvement over original submission by <appro@openssl.org>
61 #
62 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
63 # 2.3GHz Haswell        621             765/+23%        1113/+79%
64 #
65 # (*)   scalar code path, used if the system doesn't support AVX2; shown for reference.
66
67 $flavour = shift;
68 $output  = shift;
69 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
70
71 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
72
73 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
74 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
75 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
76 die "can't locate x86_64-xlate.pl";
77
78 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
79                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
80         $avx = ($1>=2.19) + ($1>=2.22);
81 }
82
83 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
84             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
85         $avx = ($1>=2.09) + ($1>=2.10);
86 }
87
88 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
89             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
90         $avx = ($1>=10) + ($1>=11);
91 }
92
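# $avx ends up 0, 1 or 2: 0 - assembler can't handle AVX, 1 - AVX but not
# AVX2, 2 - AVX2-capable; the AVX2 code below is generated only when $avx>1.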
93 open OUT,"| $^X $xlate $flavour $output";
94 *STDOUT = *OUT;
95
96 if ($avx>1) {{{
97 { # void AMS_WW(
98 my $rp="%rdi";  # BN_ULONG *rp,
99 my $ap="%rsi";  # const BN_ULONG *ap,
100 my $np="%rdx";  # const BN_ULONG *np,
101 my $n0="%ecx";  # const BN_ULONG n0,
102 my $rep="%r8d"; # int repeat);
103
104 # The registers that hold the accumulated redundant result
105 # The AMM works on 1024 bit operands, and redundant word size is 29
106 # Therefore: ceil(1024/29)/4 = 9
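# A 1024-bit operand is thus split into ceil(1024/29) = 36 limbs of 29 bits,
# and packing 4 limbs per 256-bit register gives the 9 accumulators below.
# A minimal scalar sketch of that decomposition (illustration only, not used
# by this module; assumes Math::BigInt is available):
#
#   use Math::BigInt;
#   sub to_limbs {
#       my $x = Math::BigInt->new(shift);       # 1024-bit operand
#       my @limbs;
#       for (1..36) {                           # 36 limbs of 29 bits each
#           push @limbs, $x->copy->band(0x1fffffff)->numify();
#           $x->brsft(29);
#       }
#       return @limbs;                          # least significant limb first
#   }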
107 my $ACC0="%ymm0";
108 my $ACC1="%ymm1";
109 my $ACC2="%ymm2";
110 my $ACC3="%ymm3";
111 my $ACC4="%ymm4";
112 my $ACC5="%ymm5";
113 my $ACC6="%ymm6";
114 my $ACC7="%ymm7";
115 my $ACC8="%ymm8";
116 my $ACC9="%ymm9";
117 # Registers that hold the broadcasted words of bp, currently used
118 my $B1="%ymm10";
119 my $B2="%ymm11";
120 # Registers that hold the broadcasted words of Y, currently used
121 my $Y1="%ymm12";
122 my $Y2="%ymm13";
123 # Helper registers
124 my $TEMP1="%ymm14";
125 my $AND_MASK="%ymm15";
126 # alu registers that hold the first words of the ACC
127 my $r0="%r9";
128 my $r1="%r10";
129 my $r2="%r11";
130 my $r3="%r12";
131
132 my $i="%r14d";                  # loop counter
133 my $tmp = "%r15";
134
135 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
136
137 my $aap=$r0;
138 my $tp0="%rbx";
139 my $tp1=$r3;
140 my $tpa=$tmp;
141
142 $np="%r13";                     # reassigned argument
143
144 $code.=<<___;
145 .text
146
147 .globl  rsaz_1024_sqr_avx2
148 .type   rsaz_1024_sqr_avx2,\@function,5
149 .align  64
150 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
151         lea     (%rsp), %rax
152         push    %rbx
153         push    %rbp
154         push    %r12
155         push    %r13
156         push    %r14
157         push    %r15
158         vzeroupper
159 ___
160 $code.=<<___ if ($win64);
161         lea     -0xa8(%rsp),%rsp
162         vmovaps %xmm6,-0xd8(%rax)
163         vmovaps %xmm7,-0xc8(%rax)
164         vmovaps %xmm8,-0xb8(%rax)
165         vmovaps %xmm9,-0xa8(%rax)
166         vmovaps %xmm10,-0x98(%rax)
167         vmovaps %xmm11,-0x88(%rax)
168         vmovaps %xmm12,-0x78(%rax)
169         vmovaps %xmm13,-0x68(%rax)
170         vmovaps %xmm14,-0x58(%rax)
171         vmovaps %xmm15,-0x48(%rax)
172 .Lsqr_1024_body:
173 ___
174 $code.=<<___;
175         mov     %rax,%rbp
176         mov     %rdx, $np                       # reassigned argument
177         sub     \$$FrameSize, %rsp
178         mov     $np, $tmp
179         sub     \$-128, $rp                     # size optimization
180         sub     \$-128, $ap
181         sub     \$-128, $np
182
183         and     \$4095, $tmp                    # see if $np crosses page
184         add     \$32*10, $tmp
185         shr     \$12, $tmp
186         vpxor   $ACC9,$ACC9,$ACC9
187         jz      .Lsqr_1024_no_n_copy
188
189         # an unaligned 256-bit load that crosses a page boundary can
190         # cause >2x performance degradation here, so if $np does
191         # cross a page boundary, copy it to the stack and make sure
192         # the stack frame doesn't cross one either
193         sub             \$32*10,%rsp
194         vmovdqu         32*0-128($np), $ACC0
195         and             \$-2048, %rsp
196         vmovdqu         32*1-128($np), $ACC1
197         vmovdqu         32*2-128($np), $ACC2
198         vmovdqu         32*3-128($np), $ACC3
199         vmovdqu         32*4-128($np), $ACC4
200         vmovdqu         32*5-128($np), $ACC5
201         vmovdqu         32*6-128($np), $ACC6
202         vmovdqu         32*7-128($np), $ACC7
203         vmovdqu         32*8-128($np), $ACC8
204         lea             $FrameSize+128(%rsp),$np
205         vmovdqu         $ACC0, 32*0-128($np)
206         vmovdqu         $ACC1, 32*1-128($np)
207         vmovdqu         $ACC2, 32*2-128($np)
208         vmovdqu         $ACC3, 32*3-128($np)
209         vmovdqu         $ACC4, 32*4-128($np)
210         vmovdqu         $ACC5, 32*5-128($np)
211         vmovdqu         $ACC6, 32*6-128($np)
212         vmovdqu         $ACC7, 32*7-128($np)
213         vmovdqu         $ACC8, 32*8-128($np)
214         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
215
216 .Lsqr_1024_no_n_copy:
217         and             \$-1024, %rsp
218
219         vmovdqu         32*1-128($ap), $ACC1
220         vmovdqu         32*2-128($ap), $ACC2
221         vmovdqu         32*3-128($ap), $ACC3
222         vmovdqu         32*4-128($ap), $ACC4
223         vmovdqu         32*5-128($ap), $ACC5
224         vmovdqu         32*6-128($ap), $ACC6
225         vmovdqu         32*7-128($ap), $ACC7
226         vmovdqu         32*8-128($ap), $ACC8
227
228         lea     192(%rsp), $tp0                 # 64+128=192
229         vpbroadcastq    .Land_mask(%rip), $AND_MASK
230         jmp     .LOOP_GRANDE_SQR_1024
231
232 .align  32
233 .LOOP_GRANDE_SQR_1024:
234         lea     32*18+128(%rsp), $aap           # size optimization
235         lea     448(%rsp), $tp1                 # 64+128+256=448
236
237         # the squaring is performed as described in Variant B of
238         # "Speeding up Big-Number Squaring", so start by calculating
239         # the A*2=A+A vector
240         vpaddq          $ACC1, $ACC1, $ACC1
241          vpbroadcastq   32*0-128($ap), $B1
242         vpaddq          $ACC2, $ACC2, $ACC2
243         vmovdqa         $ACC1, 32*0-128($aap)
244         vpaddq          $ACC3, $ACC3, $ACC3
245         vmovdqa         $ACC2, 32*1-128($aap)
246         vpaddq          $ACC4, $ACC4, $ACC4
247         vmovdqa         $ACC3, 32*2-128($aap)
248         vpaddq          $ACC5, $ACC5, $ACC5
249         vmovdqa         $ACC4, 32*3-128($aap)
250         vpaddq          $ACC6, $ACC6, $ACC6
251         vmovdqa         $ACC5, 32*4-128($aap)
252         vpaddq          $ACC7, $ACC7, $ACC7
253         vmovdqa         $ACC6, 32*5-128($aap)
254         vpaddq          $ACC8, $ACC8, $ACC8
255         vmovdqa         $ACC7, 32*6-128($aap)
256         vpxor           $ACC9, $ACC9, $ACC9
257         vmovdqa         $ACC8, 32*7-128($aap)
258
259         vpmuludq        32*0-128($ap), $B1, $ACC0
260          vpbroadcastq   32*1-128($ap), $B2
261          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
262         vpmuludq        $B1, $ACC1, $ACC1
263          vmovdqu        $ACC9, 32*10-448($tp1)
264         vpmuludq        $B1, $ACC2, $ACC2
265          vmovdqu        $ACC9, 32*11-448($tp1)
266         vpmuludq        $B1, $ACC3, $ACC3
267          vmovdqu        $ACC9, 32*12-448($tp1)
268         vpmuludq        $B1, $ACC4, $ACC4
269          vmovdqu        $ACC9, 32*13-448($tp1)
270         vpmuludq        $B1, $ACC5, $ACC5
271          vmovdqu        $ACC9, 32*14-448($tp1)
272         vpmuludq        $B1, $ACC6, $ACC6
273          vmovdqu        $ACC9, 32*15-448($tp1)
274         vpmuludq        $B1, $ACC7, $ACC7
275          vmovdqu        $ACC9, 32*16-448($tp1)
276         vpmuludq        $B1, $ACC8, $ACC8
277          vpbroadcastq   32*2-128($ap), $B1
278          vmovdqu        $ACC9, 32*17-448($tp1)
279
280         mov     $ap, $tpa
281         mov     \$4, $i
282         jmp     .Lsqr_entry_1024
283 ___
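# The squaring loop below follows Variant B of [3]: the doubling 2*A is done
# once up front and kept at $aap, so every cross product a[i]*a[j], i<j, is
# taken as a[i]*(2*a[j]) while the diagonal terms use A itself.  A rough
# scalar sketch of that schedule (illustration only; limb names and count are
# assumptions, not part of this module):
#
#   my @t = (0) x 72;                           # 2*36 result limbs
#   for my $i (0..35) {
#       $t[2*$i] += $a[$i] * $a[$i];            # diagonal term
#       for my $j ($i+1..35) {
#           $t[$i+$j] += $a[$i] * $aap[$j];     # $aap[$j] == 2*$a[$j]
#       }
#   }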
284 $TEMP0=$Y1;
285 $TEMP2=$Y2;
286 $code.=<<___;
287 .align  32
288 .LOOP_SQR_1024:
289          vpbroadcastq   32*1-128($tpa), $B2
290         vpmuludq        32*0-128($ap), $B1, $ACC0
291         vpaddq          32*0-192($tp0), $ACC0, $ACC0
292         vpmuludq        32*0-128($aap), $B1, $ACC1
293         vpaddq          32*1-192($tp0), $ACC1, $ACC1
294         vpmuludq        32*1-128($aap), $B1, $ACC2
295         vpaddq          32*2-192($tp0), $ACC2, $ACC2
296         vpmuludq        32*2-128($aap), $B1, $ACC3
297         vpaddq          32*3-192($tp0), $ACC3, $ACC3
298         vpmuludq        32*3-128($aap), $B1, $ACC4
299         vpaddq          32*4-192($tp0), $ACC4, $ACC4
300         vpmuludq        32*4-128($aap), $B1, $ACC5
301         vpaddq          32*5-192($tp0), $ACC5, $ACC5
302         vpmuludq        32*5-128($aap), $B1, $ACC6
303         vpaddq          32*6-192($tp0), $ACC6, $ACC6
304         vpmuludq        32*6-128($aap), $B1, $ACC7
305         vpaddq          32*7-192($tp0), $ACC7, $ACC7
306         vpmuludq        32*7-128($aap), $B1, $ACC8
307          vpbroadcastq   32*2-128($tpa), $B1
308         vpaddq          32*8-192($tp0), $ACC8, $ACC8
309 .Lsqr_entry_1024:
310         vmovdqu         $ACC0, 32*0-192($tp0)
311         vmovdqu         $ACC1, 32*1-192($tp0)
312
313         vpmuludq        32*1-128($ap), $B2, $TEMP0
314         vpaddq          $TEMP0, $ACC2, $ACC2
315         vpmuludq        32*1-128($aap), $B2, $TEMP1
316         vpaddq          $TEMP1, $ACC3, $ACC3
317         vpmuludq        32*2-128($aap), $B2, $TEMP2
318         vpaddq          $TEMP2, $ACC4, $ACC4
319         vpmuludq        32*3-128($aap), $B2, $TEMP0
320         vpaddq          $TEMP0, $ACC5, $ACC5
321         vpmuludq        32*4-128($aap), $B2, $TEMP1
322         vpaddq          $TEMP1, $ACC6, $ACC6
323         vpmuludq        32*5-128($aap), $B2, $TEMP2
324         vpaddq          $TEMP2, $ACC7, $ACC7
325         vpmuludq        32*6-128($aap), $B2, $TEMP0
326         vpaddq          $TEMP0, $ACC8, $ACC8
327         vpmuludq        32*7-128($aap), $B2, $ACC0
328          vpbroadcastq   32*3-128($tpa), $B2
329         vpaddq          32*9-192($tp0), $ACC0, $ACC0
330
331         vmovdqu         $ACC2, 32*2-192($tp0)
332         vmovdqu         $ACC3, 32*3-192($tp0)
333
334         vpmuludq        32*2-128($ap), $B1, $TEMP2
335         vpaddq          $TEMP2, $ACC4, $ACC4
336         vpmuludq        32*2-128($aap), $B1, $TEMP0
337         vpaddq          $TEMP0, $ACC5, $ACC5
338         vpmuludq        32*3-128($aap), $B1, $TEMP1
339         vpaddq          $TEMP1, $ACC6, $ACC6
340         vpmuludq        32*4-128($aap), $B1, $TEMP2
341         vpaddq          $TEMP2, $ACC7, $ACC7
342         vpmuludq        32*5-128($aap), $B1, $TEMP0
343         vpaddq          $TEMP0, $ACC8, $ACC8
344         vpmuludq        32*6-128($aap), $B1, $TEMP1
345         vpaddq          $TEMP1, $ACC0, $ACC0
346         vpmuludq        32*7-128($aap), $B1, $ACC1
347          vpbroadcastq   32*4-128($tpa), $B1
348         vpaddq          32*10-448($tp1), $ACC1, $ACC1
349
350         vmovdqu         $ACC4, 32*4-192($tp0)
351         vmovdqu         $ACC5, 32*5-192($tp0)
352
353         vpmuludq        32*3-128($ap), $B2, $TEMP0
354         vpaddq          $TEMP0, $ACC6, $ACC6
355         vpmuludq        32*3-128($aap), $B2, $TEMP1
356         vpaddq          $TEMP1, $ACC7, $ACC7
357         vpmuludq        32*4-128($aap), $B2, $TEMP2
358         vpaddq          $TEMP2, $ACC8, $ACC8
359         vpmuludq        32*5-128($aap), $B2, $TEMP0
360         vpaddq          $TEMP0, $ACC0, $ACC0
361         vpmuludq        32*6-128($aap), $B2, $TEMP1
362         vpaddq          $TEMP1, $ACC1, $ACC1
363         vpmuludq        32*7-128($aap), $B2, $ACC2
364          vpbroadcastq   32*5-128($tpa), $B2
365         vpaddq          32*11-448($tp1), $ACC2, $ACC2   
366
367         vmovdqu         $ACC6, 32*6-192($tp0)
368         vmovdqu         $ACC7, 32*7-192($tp0)
369
370         vpmuludq        32*4-128($ap), $B1, $TEMP0
371         vpaddq          $TEMP0, $ACC8, $ACC8
372         vpmuludq        32*4-128($aap), $B1, $TEMP1
373         vpaddq          $TEMP1, $ACC0, $ACC0
374         vpmuludq        32*5-128($aap), $B1, $TEMP2
375         vpaddq          $TEMP2, $ACC1, $ACC1
376         vpmuludq        32*6-128($aap), $B1, $TEMP0
377         vpaddq          $TEMP0, $ACC2, $ACC2
378         vpmuludq        32*7-128($aap), $B1, $ACC3
379          vpbroadcastq   32*6-128($tpa), $B1
380         vpaddq          32*12-448($tp1), $ACC3, $ACC3
381
382         vmovdqu         $ACC8, 32*8-192($tp0)
383         vmovdqu         $ACC0, 32*9-192($tp0)
384         lea             8($tp0), $tp0
385
386         vpmuludq        32*5-128($ap), $B2, $TEMP2
387         vpaddq          $TEMP2, $ACC1, $ACC1
388         vpmuludq        32*5-128($aap), $B2, $TEMP0
389         vpaddq          $TEMP0, $ACC2, $ACC2
390         vpmuludq        32*6-128($aap), $B2, $TEMP1
391         vpaddq          $TEMP1, $ACC3, $ACC3
392         vpmuludq        32*7-128($aap), $B2, $ACC4
393          vpbroadcastq   32*7-128($tpa), $B2
394         vpaddq          32*13-448($tp1), $ACC4, $ACC4
395
396         vmovdqu         $ACC1, 32*10-448($tp1)
397         vmovdqu         $ACC2, 32*11-448($tp1)
398
399         vpmuludq        32*6-128($ap), $B1, $TEMP0
400         vpaddq          $TEMP0, $ACC3, $ACC3
401         vpmuludq        32*6-128($aap), $B1, $TEMP1
402          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
403         vpaddq          $TEMP1, $ACC4, $ACC4
404         vpmuludq        32*7-128($aap), $B1, $ACC5
405          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
406         vpaddq          32*14-448($tp1), $ACC5, $ACC5
407
408         vmovdqu         $ACC3, 32*12-448($tp1)
409         vmovdqu         $ACC4, 32*13-448($tp1)
410         lea             8($tpa), $tpa
411
412         vpmuludq        32*7-128($ap), $B2, $TEMP0
413         vpaddq          $TEMP0, $ACC5, $ACC5
414         vpmuludq        32*7-128($aap), $B2, $ACC6
415         vpaddq          32*15-448($tp1), $ACC6, $ACC6
416
417         vpmuludq        32*8-128($ap), $ACC0, $ACC7
418         vmovdqu         $ACC5, 32*14-448($tp1)
419         vpaddq          32*16-448($tp1), $ACC7, $ACC7
420         vmovdqu         $ACC6, 32*15-448($tp1)
421         vmovdqu         $ACC7, 32*16-448($tp1)
422         lea             8($tp1), $tp1
423
424         dec     $i        
425         jnz     .LOOP_SQR_1024
426 ___
427 $ZERO = $ACC9;
428 $TEMP0 = $B1;
429 $TEMP2 = $B2;
430 $TEMP3 = $Y1;
431 $TEMP4 = $Y2;
432 $code.=<<___;
433         # we need to fix indexes 32-39 to avoid overflow
434         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
435         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
436         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
437         lea             192(%rsp), $tp0                 # 64+128=192
438
439         vpsrlq          \$29, $ACC8, $TEMP1
440         vpand           $AND_MASK, $ACC8, $ACC8
441         vpsrlq          \$29, $ACC1, $TEMP2
442         vpand           $AND_MASK, $ACC1, $ACC1
443
444         vpermq          \$0x93, $TEMP1, $TEMP1
445         vpxor           $ZERO, $ZERO, $ZERO
446         vpermq          \$0x93, $TEMP2, $TEMP2
447
448         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
449         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
450         vpaddq          $TEMP0, $ACC8, $ACC8
451         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
452         vpaddq          $TEMP1, $ACC1, $ACC1
453         vpaddq          $TEMP2, $ACC2, $ACC2
454         vmovdqu         $ACC1, 32*9-192($tp0)
455         vmovdqu         $ACC2, 32*10-192($tp0)
456
457         mov     (%rsp), %rax
458         mov     8(%rsp), $r1
459         mov     16(%rsp), $r2
460         mov     24(%rsp), $r3
461         vmovdqu 32*1(%rsp), $ACC1
462         vmovdqu 32*2-192($tp0), $ACC2
463         vmovdqu 32*3-192($tp0), $ACC3
464         vmovdqu 32*4-192($tp0), $ACC4
465         vmovdqu 32*5-192($tp0), $ACC5
466         vmovdqu 32*6-192($tp0), $ACC6
467         vmovdqu 32*7-192($tp0), $ACC7
468
469         mov     %rax, $r0
470         imull   $n0, %eax
471         and     \$0x1fffffff, %eax
472         vmovd   %eax, $Y1
473
474         mov     %rax, %rdx
475         imulq   -128($np), %rax
476          vpbroadcastq   $Y1, $Y1
477         add     %rax, $r0
478         mov     %rdx, %rax
479         imulq   8-128($np), %rax
480         shr     \$29, $r0
481         add     %rax, $r1
482         mov     %rdx, %rax
483         imulq   16-128($np), %rax
484         add     $r0, $r1
485         add     %rax, $r2
486         imulq   24-128($np), %rdx
487         add     %rdx, $r3
488
489         mov     $r1, %rax
490         imull   $n0, %eax
491         and     \$0x1fffffff, %eax
492
493         mov \$9, $i
494         jmp .LOOP_REDUCE_1024
495
496 .align  32
497 .LOOP_REDUCE_1024:
498         vmovd   %eax, $Y2
499         vpbroadcastq    $Y2, $Y2
500
501         vpmuludq        32*1-128($np), $Y1, $TEMP0
502          mov    %rax, %rdx
503          imulq  -128($np), %rax
504         vpaddq          $TEMP0, $ACC1, $ACC1
505          add    %rax, $r1
506         vpmuludq        32*2-128($np), $Y1, $TEMP1
507          mov    %rdx, %rax
508          imulq  8-128($np), %rax
509         vpaddq          $TEMP1, $ACC2, $ACC2
510         vpmuludq        32*3-128($np), $Y1, $TEMP2
511          .byte  0x67
512          add    %rax, $r2
513          .byte  0x67
514          mov    %rdx, %rax
515          imulq  16-128($np), %rax
516          shr    \$29, $r1
517         vpaddq          $TEMP2, $ACC3, $ACC3
518         vpmuludq        32*4-128($np), $Y1, $TEMP0
519          add    %rax, $r3
520          add    $r1, $r2
521         vpaddq          $TEMP0, $ACC4, $ACC4
522         vpmuludq        32*5-128($np), $Y1, $TEMP1
523          mov    $r2, %rax
524          imull  $n0, %eax
525         vpaddq          $TEMP1, $ACC5, $ACC5
526         vpmuludq        32*6-128($np), $Y1, $TEMP2
527          and    \$0x1fffffff, %eax
528         vpaddq          $TEMP2, $ACC6, $ACC6
529         vpmuludq        32*7-128($np), $Y1, $TEMP0
530         vpaddq          $TEMP0, $ACC7, $ACC7
531         vpmuludq        32*8-128($np), $Y1, $TEMP1
532          vmovd  %eax, $Y1
533          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
534         vpaddq          $TEMP1, $ACC8, $ACC8
535          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
536          vpbroadcastq   $Y1, $Y1
537
538         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
539         vmovdqu         32*3-8-128($np), $TEMP1
540          mov    %rax, %rdx
541          imulq  -128($np), %rax
542         vpaddq          $TEMP2, $ACC1, $ACC1
543         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
544         vmovdqu         32*4-8-128($np), $TEMP2
545          add    %rax, $r2
546          mov    %rdx, %rax
547          imulq  8-128($np), %rax
548         vpaddq          $TEMP0, $ACC2, $ACC2
549          add    $r3, %rax
550          shr    \$29, $r2
551         vpmuludq        $Y2, $TEMP1, $TEMP1
552         vmovdqu         32*5-8-128($np), $TEMP0
553          add    $r2, %rax
554         vpaddq          $TEMP1, $ACC3, $ACC3
555         vpmuludq        $Y2, $TEMP2, $TEMP2
556         vmovdqu         32*6-8-128($np), $TEMP1
557          .byte  0x67
558          mov    %rax, $r3
559          imull  $n0, %eax
560         vpaddq          $TEMP2, $ACC4, $ACC4
561         vpmuludq        $Y2, $TEMP0, $TEMP0
562         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
563          and    \$0x1fffffff, %eax
564         vpaddq          $TEMP0, $ACC5, $ACC5
565         vpmuludq        $Y2, $TEMP1, $TEMP1
566         vmovdqu         32*8-8-128($np), $TEMP0
567         vpaddq          $TEMP1, $ACC6, $ACC6
568         vpmuludq        $Y2, $TEMP2, $TEMP2
569         vmovdqu         32*9-8-128($np), $ACC9
570          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
571          imulq  -128($np), %rax
572         vpaddq          $TEMP2, $ACC7, $ACC7
573         vpmuludq        $Y2, $TEMP0, $TEMP0
574          vmovdqu        32*1-16-128($np), $TEMP1
575          vpbroadcastq   $ACC0, $ACC0
576         vpaddq          $TEMP0, $ACC8, $ACC8
577         vpmuludq        $Y2, $ACC9, $ACC9
578          vmovdqu        32*2-16-128($np), $TEMP2
579          add    %rax, $r3
580
581 ___
582 ($ACC0,$Y2)=($Y2,$ACC0);
583 $code.=<<___;
584          vmovdqu        32*1-24-128($np), $ACC0
585         vpmuludq        $Y1, $TEMP1, $TEMP1
586         vmovdqu         32*3-16-128($np), $TEMP0
587         vpaddq          $TEMP1, $ACC1, $ACC1
588          vpmuludq       $Y2, $ACC0, $ACC0
589         vpmuludq        $Y1, $TEMP2, $TEMP2
590         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
591          vpaddq         $ACC1, $ACC0, $ACC0
592         vpaddq          $TEMP2, $ACC2, $ACC2
593         vpmuludq        $Y1, $TEMP0, $TEMP0
594         vmovdqu         32*5-16-128($np), $TEMP2
595          .byte  0x67
596          vmovq          $ACC0, %rax
597          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
598         vpaddq          $TEMP0, $ACC3, $ACC3
599         vpmuludq        $Y1, $TEMP1, $TEMP1
600         vmovdqu         32*6-16-128($np), $TEMP0
601         vpaddq          $TEMP1, $ACC4, $ACC4
602         vpmuludq        $Y1, $TEMP2, $TEMP2
603         vmovdqu         32*7-16-128($np), $TEMP1
604         vpaddq          $TEMP2, $ACC5, $ACC5
605         vpmuludq        $Y1, $TEMP0, $TEMP0
606         vmovdqu         32*8-16-128($np), $TEMP2
607         vpaddq          $TEMP0, $ACC6, $ACC6
608         vpmuludq        $Y1, $TEMP1, $TEMP1
609          shr    \$29, $r3
610         vmovdqu         32*9-16-128($np), $TEMP0
611          add    $r3, %rax
612         vpaddq          $TEMP1, $ACC7, $ACC7
613         vpmuludq        $Y1, $TEMP2, $TEMP2
614          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
615          mov    %rax, $r0
616          imull  $n0, %eax
617         vpaddq          $TEMP2, $ACC8, $ACC8
618         vpmuludq        $Y1, $TEMP0, $TEMP0
619          and    \$0x1fffffff, %eax
620          vmovd  %eax, $Y1
621          vmovdqu        32*3-24-128($np), $TEMP2
622         .byte   0x67
623         vpaddq          $TEMP0, $ACC9, $ACC9
624          vpbroadcastq   $Y1, $Y1
625
626         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
627         vmovdqu         32*4-24-128($np), $TEMP0
628          mov    %rax, %rdx
629          imulq  -128($np), %rax
630          mov    8(%rsp), $r1
631         vpaddq          $TEMP1, $ACC2, $ACC1
632         vpmuludq        $Y2, $TEMP2, $TEMP2
633         vmovdqu         32*5-24-128($np), $TEMP1
634          add    %rax, $r0
635          mov    %rdx, %rax
636          imulq  8-128($np), %rax
637          .byte  0x67
638          shr    \$29, $r0
639          mov    16(%rsp), $r2
640         vpaddq          $TEMP2, $ACC3, $ACC2
641         vpmuludq        $Y2, $TEMP0, $TEMP0
642         vmovdqu         32*6-24-128($np), $TEMP2
643          add    %rax, $r1
644          mov    %rdx, %rax
645          imulq  16-128($np), %rax
646         vpaddq          $TEMP0, $ACC4, $ACC3
647         vpmuludq        $Y2, $TEMP1, $TEMP1
648         vmovdqu         32*7-24-128($np), $TEMP0
649          imulq  24-128($np), %rdx               # future $r3
650          add    %rax, $r2
651          lea    ($r0,$r1), %rax
652         vpaddq          $TEMP1, $ACC5, $ACC4
653         vpmuludq        $Y2, $TEMP2, $TEMP2
654         vmovdqu         32*8-24-128($np), $TEMP1
655          mov    %rax, $r1
656          imull  $n0, %eax
657         vpmuludq        $Y2, $TEMP0, $TEMP0
658         vpaddq          $TEMP2, $ACC6, $ACC5
659         vmovdqu         32*9-24-128($np), $TEMP2
660          and    \$0x1fffffff, %eax
661         vpaddq          $TEMP0, $ACC7, $ACC6
662         vpmuludq        $Y2, $TEMP1, $TEMP1
663          add    24(%rsp), %rdx
664         vpaddq          $TEMP1, $ACC8, $ACC7
665         vpmuludq        $Y2, $TEMP2, $TEMP2
666         vpaddq          $TEMP2, $ACC9, $ACC8
667          vmovq  $r3, $ACC9
668          mov    %rdx, $r3
669
670         dec     $i
671         jnz     .LOOP_REDUCE_1024
672 ___
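# The loop above implements Montgomery-style reduction of the redundant
# result in base 2^29: for each result word in turn, y = (acc*n0) mod 2^29
# is computed by the interleaved scalar code (imull $n0 / and 0x1fffffff),
# broadcast to $Y1/$Y2, and y*N is accumulated so that the lowest limb can
# be retired.  Scalar sketch of one such step (illustration only; assumes
# n0 == -N^{-1} mod 2^29):
#
#   my $y = ($acc[0] * $n0) & 0x1fffffff;
#   $acc[$_] += $y * $n[$_] for 0 .. $#n;       # now $acc[0] % 2**29 == 0
#   $acc[1] += $acc[0] >> 29;                   # carry the surviving top part
#   shift @acc;                                 # retire the zeroed low limb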
673 ($ACC0,$Y2)=($Y2,$ACC0);
674 $code.=<<___;
675         lea     448(%rsp), $tp1                 # size optimization
676         vpaddq  $ACC9, $Y2, $ACC0
677         vpxor   $ZERO, $ZERO, $ZERO
678
679         vpaddq          32*9-192($tp0), $ACC0, $ACC0
680         vpaddq          32*10-448($tp1), $ACC1, $ACC1
681         vpaddq          32*11-448($tp1), $ACC2, $ACC2
682         vpaddq          32*12-448($tp1), $ACC3, $ACC3
683         vpaddq          32*13-448($tp1), $ACC4, $ACC4
684         vpaddq          32*14-448($tp1), $ACC5, $ACC5
685         vpaddq          32*15-448($tp1), $ACC6, $ACC6
686         vpaddq          32*16-448($tp1), $ACC7, $ACC7
687         vpaddq          32*17-448($tp1), $ACC8, $ACC8
688
689         vpsrlq          \$29, $ACC0, $TEMP1
690         vpand           $AND_MASK, $ACC0, $ACC0
691         vpsrlq          \$29, $ACC1, $TEMP2
692         vpand           $AND_MASK, $ACC1, $ACC1
693         vpsrlq          \$29, $ACC2, $TEMP3
694         vpermq          \$0x93, $TEMP1, $TEMP1
695         vpand           $AND_MASK, $ACC2, $ACC2
696         vpsrlq          \$29, $ACC3, $TEMP4
697         vpermq          \$0x93, $TEMP2, $TEMP2
698         vpand           $AND_MASK, $ACC3, $ACC3
699         vpermq          \$0x93, $TEMP3, $TEMP3
700
701         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
702         vpermq          \$0x93, $TEMP4, $TEMP4
703         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
704         vpaddq          $TEMP0, $ACC0, $ACC0
705         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
706         vpaddq          $TEMP1, $ACC1, $ACC1
707         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
708         vpaddq          $TEMP2, $ACC2, $ACC2
709         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
710         vpaddq          $TEMP3, $ACC3, $ACC3
711         vpaddq          $TEMP4, $ACC4, $ACC4
712
713         vpsrlq          \$29, $ACC0, $TEMP1
714         vpand           $AND_MASK, $ACC0, $ACC0
715         vpsrlq          \$29, $ACC1, $TEMP2
716         vpand           $AND_MASK, $ACC1, $ACC1
717         vpsrlq          \$29, $ACC2, $TEMP3
718         vpermq          \$0x93, $TEMP1, $TEMP1
719         vpand           $AND_MASK, $ACC2, $ACC2
720         vpsrlq          \$29, $ACC3, $TEMP4
721         vpermq          \$0x93, $TEMP2, $TEMP2
722         vpand           $AND_MASK, $ACC3, $ACC3
723         vpermq          \$0x93, $TEMP3, $TEMP3
724
725         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
726         vpermq          \$0x93, $TEMP4, $TEMP4
727         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
728         vpaddq          $TEMP0, $ACC0, $ACC0
729         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
730         vpaddq          $TEMP1, $ACC1, $ACC1
731         vmovdqu         $ACC0, 32*0-128($rp)
732         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
733         vpaddq          $TEMP2, $ACC2, $ACC2
734         vmovdqu         $ACC1, 32*1-128($rp)
735         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
736         vpaddq          $TEMP3, $ACC3, $ACC3
737         vmovdqu         $ACC2, 32*2-128($rp)
738         vpaddq          $TEMP4, $ACC4, $ACC4
739         vmovdqu         $ACC3, 32*3-128($rp)
740 ___
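# The two vpsrlq/vpand/vpermq/vpblendd rounds above propagate carries in the
# redundant representation: each 64-bit lane keeps its low 29 bits and the
# excess moves one limb up; vpermq with immediate 0x93 rotates the carry
# vector by one qword, and vpblendd with immediate 3 feeds zero (or the
# wrapped carry of the neighbouring register) into the vacated lane.
# Scalar equivalent of one full pass (illustration only):
#
#   for my $k (0 .. $#limb - 1) {
#       $limb[$k+1] += $limb[$k] >> 29;
#       $limb[$k]   &= 0x1fffffff;
#   }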
741 $TEMP5=$ACC0;
742 $code.=<<___;
743         vpsrlq          \$29, $ACC4, $TEMP1
744         vpand           $AND_MASK, $ACC4, $ACC4
745         vpsrlq          \$29, $ACC5, $TEMP2
746         vpand           $AND_MASK, $ACC5, $ACC5
747         vpsrlq          \$29, $ACC6, $TEMP3
748         vpermq          \$0x93, $TEMP1, $TEMP1
749         vpand           $AND_MASK, $ACC6, $ACC6
750         vpsrlq          \$29, $ACC7, $TEMP4
751         vpermq          \$0x93, $TEMP2, $TEMP2
752         vpand           $AND_MASK, $ACC7, $ACC7
753         vpsrlq          \$29, $ACC8, $TEMP5
754         vpermq          \$0x93, $TEMP3, $TEMP3
755         vpand           $AND_MASK, $ACC8, $ACC8
756         vpermq          \$0x93, $TEMP4, $TEMP4
757
758         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
759         vpermq          \$0x93, $TEMP5, $TEMP5
760         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
761         vpaddq          $TEMP0, $ACC4, $ACC4
762         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
763         vpaddq          $TEMP1, $ACC5, $ACC5
764         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
765         vpaddq          $TEMP2, $ACC6, $ACC6
766         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
767         vpaddq          $TEMP3, $ACC7, $ACC7
768         vpaddq          $TEMP4, $ACC8, $ACC8
769      
770         vpsrlq          \$29, $ACC4, $TEMP1
771         vpand           $AND_MASK, $ACC4, $ACC4
772         vpsrlq          \$29, $ACC5, $TEMP2
773         vpand           $AND_MASK, $ACC5, $ACC5
774         vpsrlq          \$29, $ACC6, $TEMP3
775         vpermq          \$0x93, $TEMP1, $TEMP1
776         vpand           $AND_MASK, $ACC6, $ACC6
777         vpsrlq          \$29, $ACC7, $TEMP4
778         vpermq          \$0x93, $TEMP2, $TEMP2
779         vpand           $AND_MASK, $ACC7, $ACC7
780         vpsrlq          \$29, $ACC8, $TEMP5
781         vpermq          \$0x93, $TEMP3, $TEMP3
782         vpand           $AND_MASK, $ACC8, $ACC8
783         vpermq          \$0x93, $TEMP4, $TEMP4
784
785         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
786         vpermq          \$0x93, $TEMP5, $TEMP5
787         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
788         vpaddq          $TEMP0, $ACC4, $ACC4
789         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
790         vpaddq          $TEMP1, $ACC5, $ACC5
791         vmovdqu         $ACC4, 32*4-128($rp)
792         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
793         vpaddq          $TEMP2, $ACC6, $ACC6
794         vmovdqu         $ACC5, 32*5-128($rp)
795         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
796         vpaddq          $TEMP3, $ACC7, $ACC7
797         vmovdqu         $ACC6, 32*6-128($rp)
798         vpaddq          $TEMP4, $ACC8, $ACC8
799         vmovdqu         $ACC7, 32*7-128($rp)
800         vmovdqu         $ACC8, 32*8-128($rp)
801
802         mov     $rp, $ap
803         dec     $rep
804         jne     .LOOP_GRANDE_SQR_1024
805
806         vzeroall
807         mov     %rbp, %rax
808 ___
809 $code.=<<___ if ($win64);
810         movaps  -0xd8(%rax),%xmm6
811         movaps  -0xc8(%rax),%xmm7
812         movaps  -0xb8(%rax),%xmm8
813         movaps  -0xa8(%rax),%xmm9
814         movaps  -0x98(%rax),%xmm10
815         movaps  -0x88(%rax),%xmm11
816         movaps  -0x78(%rax),%xmm12
817         movaps  -0x68(%rax),%xmm13
818         movaps  -0x58(%rax),%xmm14
819         movaps  -0x48(%rax),%xmm15
820 ___
821 $code.=<<___;
822         mov     -48(%rax),%r15
823         mov     -40(%rax),%r14
824         mov     -32(%rax),%r13
825         mov     -24(%rax),%r12
826         mov     -16(%rax),%rbp
827         mov     -8(%rax),%rbx
828         lea     (%rax),%rsp             # restore %rsp
829 .Lsqr_1024_epilogue:
830         ret
831 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
832 ___
833 }
834
835 { # void AMM_WW(
836 my $rp="%rdi";  # BN_ULONG *rp,
837 my $ap="%rsi";  # const BN_ULONG *ap,
838 my $bp="%rdx";  # const BN_ULONG *bp,
839 my $np="%rcx";  # const BN_ULONG *np,
840 my $n0="%r8d";  # unsigned int n0);
841
842 # The registers that hold the accumulated redundant result
843 # The AMM works on 1024 bit operands, and redundant word size is 29
844 # Therefore: ceil(1024/29)/4 = 9
845 my $ACC0="%ymm0";
846 my $ACC1="%ymm1";
847 my $ACC2="%ymm2";
848 my $ACC3="%ymm3";
849 my $ACC4="%ymm4";
850 my $ACC5="%ymm5";
851 my $ACC6="%ymm6";
852 my $ACC7="%ymm7";
853 my $ACC8="%ymm8";
854 my $ACC9="%ymm9";
855
856 # Registers that hold the broadcasted words of multiplier, currently used
857 my $Bi="%ymm10";
858 my $Yi="%ymm11";
859
860 # Helper registers
861 my $TEMP0=$ACC0;
862 my $TEMP1="%ymm12";
863 my $TEMP2="%ymm13";
864 my $ZERO="%ymm14";
865 my $AND_MASK="%ymm15";
866
867 # alu registers that hold the first words of the ACC
868 my $r0="%r9";
869 my $r1="%r10";
870 my $r2="%r11";
871 my $r3="%r12";
872
873 my $i="%r14d";
874 my $tmp="%r15";
875
876 $bp="%r13";     # reassigned argument
877
878 $code.=<<___;
879 .globl  rsaz_1024_mul_avx2
880 .type   rsaz_1024_mul_avx2,\@function,5
881 .align  64
882 rsaz_1024_mul_avx2:
883         lea     (%rsp), %rax
884         push    %rbx
885         push    %rbp
886         push    %r12
887         push    %r13
888         push    %r14
889         push    %r15
890 ___
891 $code.=<<___ if ($win64);
892         vzeroupper
893         lea     -0xa8(%rsp),%rsp
894         vmovaps %xmm6,-0xd8(%rax)
895         vmovaps %xmm7,-0xc8(%rax)
896         vmovaps %xmm8,-0xb8(%rax)
897         vmovaps %xmm9,-0xa8(%rax)
898         vmovaps %xmm10,-0x98(%rax)
899         vmovaps %xmm11,-0x88(%rax)
900         vmovaps %xmm12,-0x78(%rax)
901         vmovaps %xmm13,-0x68(%rax)
902         vmovaps %xmm14,-0x58(%rax)
903         vmovaps %xmm15,-0x48(%rax)
904 .Lmul_1024_body:
905 ___
906 $code.=<<___;
907         mov     %rax,%rbp
908         vzeroall
909         mov     %rdx, $bp       # reassigned argument
910         sub     \$64,%rsp
911
912         # an unaligned 256-bit load that crosses a page boundary can
913         # cause severe performance degradation here, so if $ap does
914         # cross a page boundary, swap it with $bp [meaning that the
915         # caller is advised to lay $ap and $bp down next to each
916         # other, so that only one of them can cross a page boundary].
917         .byte   0x67,0x67
918         mov     $ap, $tmp
919         and     \$4095, $tmp
920         add     \$32*10, $tmp
921         shr     \$12, $tmp
922         mov     $ap, $tmp
923         cmovnz  $bp, $ap
924         cmovnz  $tmp, $bp
925
926         mov     $np, $tmp
927         sub     \$-128,$ap      # size optimization
928         sub     \$-128,$np
929         sub     \$-128,$rp
930
931         and     \$4095, $tmp    # see if $np crosses page
932         add     \$32*10, $tmp
933         .byte   0x67,0x67
934         shr     \$12, $tmp
935         jz      .Lmul_1024_no_n_copy
936
937         # an unaligned 256-bit load that crosses a page boundary can
938         # cause severe performance degradation here, so if $np does
939         # cross a page boundary, copy it to the stack and make sure
940         # the stack frame doesn't cross one either
941         sub             \$32*10,%rsp
942         vmovdqu         32*0-128($np), $ACC0
943         and             \$-512, %rsp
944         vmovdqu         32*1-128($np), $ACC1
945         vmovdqu         32*2-128($np), $ACC2
946         vmovdqu         32*3-128($np), $ACC3
947         vmovdqu         32*4-128($np), $ACC4
948         vmovdqu         32*5-128($np), $ACC5
949         vmovdqu         32*6-128($np), $ACC6
950         vmovdqu         32*7-128($np), $ACC7
951         vmovdqu         32*8-128($np), $ACC8
952         lea             64+128(%rsp),$np
953         vmovdqu         $ACC0, 32*0-128($np)
954         vpxor           $ACC0, $ACC0, $ACC0
955         vmovdqu         $ACC1, 32*1-128($np)
956         vpxor           $ACC1, $ACC1, $ACC1
957         vmovdqu         $ACC2, 32*2-128($np)
958         vpxor           $ACC2, $ACC2, $ACC2
959         vmovdqu         $ACC3, 32*3-128($np)
960         vpxor           $ACC3, $ACC3, $ACC3
961         vmovdqu         $ACC4, 32*4-128($np)
962         vpxor           $ACC4, $ACC4, $ACC4
963         vmovdqu         $ACC5, 32*5-128($np)
964         vpxor           $ACC5, $ACC5, $ACC5
965         vmovdqu         $ACC6, 32*6-128($np)
966         vpxor           $ACC6, $ACC6, $ACC6
967         vmovdqu         $ACC7, 32*7-128($np)
968         vpxor           $ACC7, $ACC7, $ACC7
969         vmovdqu         $ACC8, 32*8-128($np)
970         vmovdqa         $ACC0, $ACC8
971         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
972 .Lmul_1024_no_n_copy:
973         and     \$-64,%rsp
974
975         mov     ($bp), %rbx
976         vpbroadcastq ($bp), $Bi
977         vmovdqu $ACC0, (%rsp)                   # clear top of stack
978         xor     $r0, $r0
979         .byte   0x67
980         xor     $r1, $r1
981         xor     $r2, $r2
982         xor     $r3, $r3
983
984         vmovdqu .Land_mask(%rip), $AND_MASK
985         mov     \$9, $i
986         jmp     .Loop_mul_1024
987
988 .align  32
989 .Loop_mul_1024:
990          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
991         mov     %rbx, %rax
992         imulq   -128($ap), %rax
993         add     $r0, %rax
994         mov     %rbx, $r1
995         imulq   8-128($ap), $r1
996         add     8(%rsp), $r1
997
998         mov     %rax, $r0
999         imull   $n0, %eax
1000         and     \$0x1fffffff, %eax
1001
1002          mov    %rbx, $r2
1003          imulq  16-128($ap), $r2
1004          add    16(%rsp), $r2
1005
1006          mov    %rbx, $r3
1007          imulq  24-128($ap), $r3
1008          add    24(%rsp), $r3
1009         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1010          vmovd          %eax, $Yi
1011         vpaddq          $TEMP0,$ACC1,$ACC1
1012         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1013          vpbroadcastq   $Yi, $Yi
1014         vpaddq          $TEMP1,$ACC2,$ACC2
1015         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1016          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1017         vpaddq          $TEMP2,$ACC3,$ACC3
1018         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1019         vpaddq          $TEMP0,$ACC4,$ACC4
1020         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1021         vpaddq          $TEMP1,$ACC5,$ACC5
1022         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1023         vpaddq          $TEMP2,$ACC6,$ACC6
1024         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1025          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1026         vpaddq          $TEMP0,$ACC7,$ACC7
1027         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1028          vpbroadcastq   8($bp), $Bi
1029         vpaddq          $TEMP1,$ACC8,$ACC8
1030
1031         mov     %rax,%rdx
1032         imulq   -128($np),%rax
1033         add     %rax,$r0
1034         mov     %rdx,%rax
1035         imulq   8-128($np),%rax
1036         add     %rax,$r1
1037         mov     %rdx,%rax
1038         imulq   16-128($np),%rax
1039         add     %rax,$r2
1040         shr     \$29, $r0
1041         imulq   24-128($np),%rdx
1042         add     %rdx,$r3
1043         add     $r0, $r1
1044
1045         vpmuludq        32*1-128($np),$Yi,$TEMP2
1046          vmovq          $Bi, %rbx
1047         vpaddq          $TEMP2,$ACC1,$ACC1
1048         vpmuludq        32*2-128($np),$Yi,$TEMP0
1049         vpaddq          $TEMP0,$ACC2,$ACC2
1050         vpmuludq        32*3-128($np),$Yi,$TEMP1
1051         vpaddq          $TEMP1,$ACC3,$ACC3
1052         vpmuludq        32*4-128($np),$Yi,$TEMP2
1053         vpaddq          $TEMP2,$ACC4,$ACC4
1054         vpmuludq        32*5-128($np),$Yi,$TEMP0
1055         vpaddq          $TEMP0,$ACC5,$ACC5
1056         vpmuludq        32*6-128($np),$Yi,$TEMP1
1057         vpaddq          $TEMP1,$ACC6,$ACC6
1058         vpmuludq        32*7-128($np),$Yi,$TEMP2
1059          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1060         vpaddq          $TEMP2,$ACC7,$ACC7
1061         vpmuludq        32*8-128($np),$Yi,$TEMP0
1062          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1063         vpaddq          $TEMP0,$ACC8,$ACC8
1064
1065         mov     %rbx, %rax
1066         imulq   -128($ap),%rax
1067         add     %rax,$r1
1068          vmovdqu        -8+32*1-128($ap),$TEMP1
1069         mov     %rbx, %rax
1070         imulq   8-128($ap),%rax
1071         add     %rax,$r2
1072          vmovdqu        -8+32*2-128($ap),$TEMP2
1073
1074         mov     $r1, %rax
1075         imull   $n0, %eax
1076         and     \$0x1fffffff, %eax
1077
1078          imulq  16-128($ap),%rbx
1079          add    %rbx,$r3
1080         vpmuludq        $Bi,$TEMP1,$TEMP1
1081          vmovd          %eax, $Yi
1082         vmovdqu         -8+32*3-128($ap),$TEMP0
1083         vpaddq          $TEMP1,$ACC1,$ACC1
1084         vpmuludq        $Bi,$TEMP2,$TEMP2
1085          vpbroadcastq   $Yi, $Yi
1086         vmovdqu         -8+32*4-128($ap),$TEMP1
1087         vpaddq          $TEMP2,$ACC2,$ACC2
1088         vpmuludq        $Bi,$TEMP0,$TEMP0
1089         vmovdqu         -8+32*5-128($ap),$TEMP2
1090         vpaddq          $TEMP0,$ACC3,$ACC3
1091         vpmuludq        $Bi,$TEMP1,$TEMP1
1092         vmovdqu         -8+32*6-128($ap),$TEMP0
1093         vpaddq          $TEMP1,$ACC4,$ACC4
1094         vpmuludq        $Bi,$TEMP2,$TEMP2
1095         vmovdqu         -8+32*7-128($ap),$TEMP1
1096         vpaddq          $TEMP2,$ACC5,$ACC5
1097         vpmuludq        $Bi,$TEMP0,$TEMP0
1098         vmovdqu         -8+32*8-128($ap),$TEMP2
1099         vpaddq          $TEMP0,$ACC6,$ACC6
1100         vpmuludq        $Bi,$TEMP1,$TEMP1
1101         vmovdqu         -8+32*9-128($ap),$ACC9
1102         vpaddq          $TEMP1,$ACC7,$ACC7
1103         vpmuludq        $Bi,$TEMP2,$TEMP2
1104         vpaddq          $TEMP2,$ACC8,$ACC8
1105         vpmuludq        $Bi,$ACC9,$ACC9
1106          vpbroadcastq   16($bp), $Bi
1107
1108         mov     %rax,%rdx
1109         imulq   -128($np),%rax
1110         add     %rax,$r1
1111          vmovdqu        -8+32*1-128($np),$TEMP0
1112         mov     %rdx,%rax
1113         imulq   8-128($np),%rax
1114         add     %rax,$r2
1115          vmovdqu        -8+32*2-128($np),$TEMP1
1116         shr     \$29, $r1
1117         imulq   16-128($np),%rdx
1118         add     %rdx,$r3
1119         add     $r1, $r2
1120
1121         vpmuludq        $Yi,$TEMP0,$TEMP0
1122          vmovq          $Bi, %rbx
1123         vmovdqu         -8+32*3-128($np),$TEMP2
1124         vpaddq          $TEMP0,$ACC1,$ACC1
1125         vpmuludq        $Yi,$TEMP1,$TEMP1
1126         vmovdqu         -8+32*4-128($np),$TEMP0
1127         vpaddq          $TEMP1,$ACC2,$ACC2
1128         vpmuludq        $Yi,$TEMP2,$TEMP2
1129         vmovdqu         -8+32*5-128($np),$TEMP1
1130         vpaddq          $TEMP2,$ACC3,$ACC3
1131         vpmuludq        $Yi,$TEMP0,$TEMP0
1132         vmovdqu         -8+32*6-128($np),$TEMP2
1133         vpaddq          $TEMP0,$ACC4,$ACC4
1134         vpmuludq        $Yi,$TEMP1,$TEMP1
1135         vmovdqu         -8+32*7-128($np),$TEMP0
1136         vpaddq          $TEMP1,$ACC5,$ACC5
1137         vpmuludq        $Yi,$TEMP2,$TEMP2
1138         vmovdqu         -8+32*8-128($np),$TEMP1
1139         vpaddq          $TEMP2,$ACC6,$ACC6
1140         vpmuludq        $Yi,$TEMP0,$TEMP0
1141         vmovdqu         -8+32*9-128($np),$TEMP2
1142         vpaddq          $TEMP0,$ACC7,$ACC7
1143         vpmuludq        $Yi,$TEMP1,$TEMP1
1144         vpaddq          $TEMP1,$ACC8,$ACC8
1145         vpmuludq        $Yi,$TEMP2,$TEMP2
1146         vpaddq          $TEMP2,$ACC9,$ACC9
1147
1148          vmovdqu        -16+32*1-128($ap),$TEMP0
1149         mov     %rbx,%rax
1150         imulq   -128($ap),%rax
1151         add     $r2,%rax
1152
1153          vmovdqu        -16+32*2-128($ap),$TEMP1
1154         mov     %rax,$r2
1155         imull   $n0, %eax
1156         and     \$0x1fffffff, %eax
1157
1158          imulq  8-128($ap),%rbx
1159          add    %rbx,$r3
1160         vpmuludq        $Bi,$TEMP0,$TEMP0
1161          vmovd          %eax, $Yi
1162         vmovdqu         -16+32*3-128($ap),$TEMP2
1163         vpaddq          $TEMP0,$ACC1,$ACC1
1164         vpmuludq        $Bi,$TEMP1,$TEMP1
1165          vpbroadcastq   $Yi, $Yi
1166         vmovdqu         -16+32*4-128($ap),$TEMP0
1167         vpaddq          $TEMP1,$ACC2,$ACC2
1168         vpmuludq        $Bi,$TEMP2,$TEMP2
1169         vmovdqu         -16+32*5-128($ap),$TEMP1
1170         vpaddq          $TEMP2,$ACC3,$ACC3
1171         vpmuludq        $Bi,$TEMP0,$TEMP0
1172         vmovdqu         -16+32*6-128($ap),$TEMP2
1173         vpaddq          $TEMP0,$ACC4,$ACC4
1174         vpmuludq        $Bi,$TEMP1,$TEMP1
1175         vmovdqu         -16+32*7-128($ap),$TEMP0
1176         vpaddq          $TEMP1,$ACC5,$ACC5
1177         vpmuludq        $Bi,$TEMP2,$TEMP2
1178         vmovdqu         -16+32*8-128($ap),$TEMP1
1179         vpaddq          $TEMP2,$ACC6,$ACC6
1180         vpmuludq        $Bi,$TEMP0,$TEMP0
1181         vmovdqu         -16+32*9-128($ap),$TEMP2
1182         vpaddq          $TEMP0,$ACC7,$ACC7
1183         vpmuludq        $Bi,$TEMP1,$TEMP1
1184         vpaddq          $TEMP1,$ACC8,$ACC8
1185         vpmuludq        $Bi,$TEMP2,$TEMP2
1186          vpbroadcastq   24($bp), $Bi
1187         vpaddq          $TEMP2,$ACC9,$ACC9
1188
1189          vmovdqu        -16+32*1-128($np),$TEMP0
1190         mov     %rax,%rdx
1191         imulq   -128($np),%rax
1192         add     %rax,$r2
1193          vmovdqu        -16+32*2-128($np),$TEMP1
1194         imulq   8-128($np),%rdx
1195         add     %rdx,$r3
1196         shr     \$29, $r2
1197
1198         vpmuludq        $Yi,$TEMP0,$TEMP0
1199          vmovq          $Bi, %rbx
1200         vmovdqu         -16+32*3-128($np),$TEMP2
1201         vpaddq          $TEMP0,$ACC1,$ACC1
1202         vpmuludq        $Yi,$TEMP1,$TEMP1
1203         vmovdqu         -16+32*4-128($np),$TEMP0
1204         vpaddq          $TEMP1,$ACC2,$ACC2
1205         vpmuludq        $Yi,$TEMP2,$TEMP2
1206         vmovdqu         -16+32*5-128($np),$TEMP1
1207         vpaddq          $TEMP2,$ACC3,$ACC3
1208         vpmuludq        $Yi,$TEMP0,$TEMP0
1209         vmovdqu         -16+32*6-128($np),$TEMP2
1210         vpaddq          $TEMP0,$ACC4,$ACC4
1211         vpmuludq        $Yi,$TEMP1,$TEMP1
1212         vmovdqu         -16+32*7-128($np),$TEMP0
1213         vpaddq          $TEMP1,$ACC5,$ACC5
1214         vpmuludq        $Yi,$TEMP2,$TEMP2
1215         vmovdqu         -16+32*8-128($np),$TEMP1
1216         vpaddq          $TEMP2,$ACC6,$ACC6
1217         vpmuludq        $Yi,$TEMP0,$TEMP0
1218         vmovdqu         -16+32*9-128($np),$TEMP2
1219         vpaddq          $TEMP0,$ACC7,$ACC7
1220         vpmuludq        $Yi,$TEMP1,$TEMP1
1221          vmovdqu        -24+32*1-128($ap),$TEMP0
1222         vpaddq          $TEMP1,$ACC8,$ACC8
1223         vpmuludq        $Yi,$TEMP2,$TEMP2
1224          vmovdqu        -24+32*2-128($ap),$TEMP1
1225         vpaddq          $TEMP2,$ACC9,$ACC9
1226
1227         add     $r2, $r3
1228         imulq   -128($ap),%rbx
1229         add     %rbx,$r3
1230
1231         mov     $r3, %rax
1232         imull   $n0, %eax
1233         and     \$0x1fffffff, %eax
1234
1235         vpmuludq        $Bi,$TEMP0,$TEMP0
1236          vmovd          %eax, $Yi
1237         vmovdqu         -24+32*3-128($ap),$TEMP2
1238         vpaddq          $TEMP0,$ACC1,$ACC1
1239         vpmuludq        $Bi,$TEMP1,$TEMP1
1240          vpbroadcastq   $Yi, $Yi
1241         vmovdqu         -24+32*4-128($ap),$TEMP0
1242         vpaddq          $TEMP1,$ACC2,$ACC2
1243         vpmuludq        $Bi,$TEMP2,$TEMP2
1244         vmovdqu         -24+32*5-128($ap),$TEMP1
1245         vpaddq          $TEMP2,$ACC3,$ACC3
1246         vpmuludq        $Bi,$TEMP0,$TEMP0
1247         vmovdqu         -24+32*6-128($ap),$TEMP2
1248         vpaddq          $TEMP0,$ACC4,$ACC4
1249         vpmuludq        $Bi,$TEMP1,$TEMP1
1250         vmovdqu         -24+32*7-128($ap),$TEMP0
1251         vpaddq          $TEMP1,$ACC5,$ACC5
1252         vpmuludq        $Bi,$TEMP2,$TEMP2
1253         vmovdqu         -24+32*8-128($ap),$TEMP1
1254         vpaddq          $TEMP2,$ACC6,$ACC6
1255         vpmuludq        $Bi,$TEMP0,$TEMP0
1256         vmovdqu         -24+32*9-128($ap),$TEMP2
1257         vpaddq          $TEMP0,$ACC7,$ACC7
1258         vpmuludq        $Bi,$TEMP1,$TEMP1
1259         vpaddq          $TEMP1,$ACC8,$ACC8
1260         vpmuludq        $Bi,$TEMP2,$TEMP2
1261          vpbroadcastq   32($bp), $Bi
1262         vpaddq          $TEMP2,$ACC9,$ACC9
1263          add            \$32, $bp                       # $bp++
1264
1265         vmovdqu         -24+32*1-128($np),$TEMP0
1266         imulq   -128($np),%rax
1267         add     %rax,$r3
1268         shr     \$29, $r3
1269
1270         vmovdqu         -24+32*2-128($np),$TEMP1
1271         vpmuludq        $Yi,$TEMP0,$TEMP0
1272          vmovq          $Bi, %rbx
1273         vmovdqu         -24+32*3-128($np),$TEMP2
1274         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1275         vpmuludq        $Yi,$TEMP1,$TEMP1
1276          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1277         vpaddq          $TEMP1,$ACC2,$ACC1
1278         vmovdqu         -24+32*4-128($np),$TEMP0
1279         vpmuludq        $Yi,$TEMP2,$TEMP2
1280         vmovdqu         -24+32*5-128($np),$TEMP1
1281         vpaddq          $TEMP2,$ACC3,$ACC2
1282         vpmuludq        $Yi,$TEMP0,$TEMP0
1283         vmovdqu         -24+32*6-128($np),$TEMP2
1284         vpaddq          $TEMP0,$ACC4,$ACC3
1285         vpmuludq        $Yi,$TEMP1,$TEMP1
1286         vmovdqu         -24+32*7-128($np),$TEMP0
1287         vpaddq          $TEMP1,$ACC5,$ACC4
1288         vpmuludq        $Yi,$TEMP2,$TEMP2
1289         vmovdqu         -24+32*8-128($np),$TEMP1
1290         vpaddq          $TEMP2,$ACC6,$ACC5
1291         vpmuludq        $Yi,$TEMP0,$TEMP0
1292         vmovdqu         -24+32*9-128($np),$TEMP2
1293          mov    $r3, $r0
1294         vpaddq          $TEMP0,$ACC7,$ACC6
1295         vpmuludq        $Yi,$TEMP1,$TEMP1
1296          add    (%rsp), $r0
1297         vpaddq          $TEMP1,$ACC8,$ACC7
1298         vpmuludq        $Yi,$TEMP2,$TEMP2
1299          vmovq  $r3, $TEMP1
1300         vpaddq          $TEMP2,$ACC9,$ACC8
1301
1302         dec     $i
1303         jnz     .Loop_mul_1024
1304 ___
1305
1306 # (*)   The original implementation corrected ACC1-ACC3 for overflow
1307 #       after 7 loop runs, or 28 iterations, or 56 additions. But since
1308 #       we underutilize resources, it is possible to correct in each
1309 #       iteration at only marginal performance loss; and since the
1310 #       correction is then done in every iteration, fewer digits need
1311 #       correcting, which avoids the penalty completely. Also note that
1312 #       we correct only three digits out of four; this works because
1313 #       the most significant digit is subjected to fewer additions.
1314
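# A rough headroom estimate (informal, not part of the original notes):
# limbs stay below 2^29, so each vpmuludq product is below 2^58, leaving
# 6 spare bits per 64-bit lane; on the order of 2^6 = 64 such products can
# be accumulated before a lane wraps, which is why deferring the correction
# for 56 additions was still within bounds.
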
1315 $TEMP0 = $ACC9;
1316 $TEMP3 = $Bi;
1317 $TEMP4 = $Yi;
1318 $code.=<<___;
1319         vpermq          \$0, $AND_MASK, $AND_MASK
1320         vpaddq          (%rsp), $TEMP1, $ACC0
1321
1322         vpsrlq          \$29, $ACC0, $TEMP1
1323         vpand           $AND_MASK, $ACC0, $ACC0
1324         vpsrlq          \$29, $ACC1, $TEMP2
1325         vpand           $AND_MASK, $ACC1, $ACC1
1326         vpsrlq          \$29, $ACC2, $TEMP3
1327         vpermq          \$0x93, $TEMP1, $TEMP1
1328         vpand           $AND_MASK, $ACC2, $ACC2
1329         vpsrlq          \$29, $ACC3, $TEMP4
1330         vpermq          \$0x93, $TEMP2, $TEMP2
1331         vpand           $AND_MASK, $ACC3, $ACC3
1332
1333         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1334         vpermq          \$0x93, $TEMP3, $TEMP3
1335         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1336         vpermq          \$0x93, $TEMP4, $TEMP4
1337         vpaddq          $TEMP0, $ACC0, $ACC0
1338         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1339         vpaddq          $TEMP1, $ACC1, $ACC1
1340         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1341         vpaddq          $TEMP2, $ACC2, $ACC2
1342         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1343         vpaddq          $TEMP3, $ACC3, $ACC3
1344         vpaddq          $TEMP4, $ACC4, $ACC4
1345
1346         vpsrlq          \$29, $ACC0, $TEMP1
1347         vpand           $AND_MASK, $ACC0, $ACC0
1348         vpsrlq          \$29, $ACC1, $TEMP2
1349         vpand           $AND_MASK, $ACC1, $ACC1
1350         vpsrlq          \$29, $ACC2, $TEMP3
1351         vpermq          \$0x93, $TEMP1, $TEMP1
1352         vpand           $AND_MASK, $ACC2, $ACC2
1353         vpsrlq          \$29, $ACC3, $TEMP4
1354         vpermq          \$0x93, $TEMP2, $TEMP2
1355         vpand           $AND_MASK, $ACC3, $ACC3
1356         vpermq          \$0x93, $TEMP3, $TEMP3
1357
1358         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1359         vpermq          \$0x93, $TEMP4, $TEMP4
1360         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1361         vpaddq          $TEMP0, $ACC0, $ACC0
1362         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1363         vpaddq          $TEMP1, $ACC1, $ACC1
1364         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1365         vpaddq          $TEMP2, $ACC2, $ACC2
1366         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1367         vpaddq          $TEMP3, $ACC3, $ACC3
1368         vpaddq          $TEMP4, $ACC4, $ACC4
1369
1370         vmovdqu         $ACC0, 0-128($rp)
1371         vmovdqu         $ACC1, 32-128($rp)
1372         vmovdqu         $ACC2, 64-128($rp)
1373         vmovdqu         $ACC3, 96-128($rp)
1374 ___
1375
1376 $TEMP5=$ACC0;
1377 $code.=<<___;
1378         vpsrlq          \$29, $ACC4, $TEMP1
1379         vpand           $AND_MASK, $ACC4, $ACC4
1380         vpsrlq          \$29, $ACC5, $TEMP2
1381         vpand           $AND_MASK, $ACC5, $ACC5
1382         vpsrlq          \$29, $ACC6, $TEMP3
1383         vpermq          \$0x93, $TEMP1, $TEMP1
1384         vpand           $AND_MASK, $ACC6, $ACC6
1385         vpsrlq          \$29, $ACC7, $TEMP4
1386         vpermq          \$0x93, $TEMP2, $TEMP2
1387         vpand           $AND_MASK, $ACC7, $ACC7
1388         vpsrlq          \$29, $ACC8, $TEMP5
1389         vpermq          \$0x93, $TEMP3, $TEMP3
1390         vpand           $AND_MASK, $ACC8, $ACC8
1391         vpermq          \$0x93, $TEMP4, $TEMP4
1392
1393         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1394         vpermq          \$0x93, $TEMP5, $TEMP5
1395         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1396         vpaddq          $TEMP0, $ACC4, $ACC4
1397         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1398         vpaddq          $TEMP1, $ACC5, $ACC5
1399         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1400         vpaddq          $TEMP2, $ACC6, $ACC6
1401         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1402         vpaddq          $TEMP3, $ACC7, $ACC7
1403         vpaddq          $TEMP4, $ACC8, $ACC8
1404
1405         vpsrlq          \$29, $ACC4, $TEMP1
1406         vpand           $AND_MASK, $ACC4, $ACC4
1407         vpsrlq          \$29, $ACC5, $TEMP2
1408         vpand           $AND_MASK, $ACC5, $ACC5
1409         vpsrlq          \$29, $ACC6, $TEMP3
1410         vpermq          \$0x93, $TEMP1, $TEMP1
1411         vpand           $AND_MASK, $ACC6, $ACC6
1412         vpsrlq          \$29, $ACC7, $TEMP4
1413         vpermq          \$0x93, $TEMP2, $TEMP2
1414         vpand           $AND_MASK, $ACC7, $ACC7
1415         vpsrlq          \$29, $ACC8, $TEMP5
1416         vpermq          \$0x93, $TEMP3, $TEMP3
1417         vpand           $AND_MASK, $ACC8, $ACC8
1418         vpermq          \$0x93, $TEMP4, $TEMP4
1419
1420         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1421         vpermq          \$0x93, $TEMP5, $TEMP5
1422         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1423         vpaddq          $TEMP0, $ACC4, $ACC4
1424         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1425         vpaddq          $TEMP1, $ACC5, $ACC5
1426         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1427         vpaddq          $TEMP2, $ACC6, $ACC6
1428         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1429         vpaddq          $TEMP3, $ACC7, $ACC7
1430         vpaddq          $TEMP4, $ACC8, $ACC8
1431
1432         vmovdqu         $ACC4, 128-128($rp)
1433         vmovdqu         $ACC5, 160-128($rp)
1434         vmovdqu         $ACC6, 192-128($rp)
1435         vmovdqu         $ACC7, 224-128($rp)
1436         vmovdqu         $ACC8, 256-128($rp)
1437         vzeroupper
1438
1439         mov     %rbp, %rax
1440 ___
1441 $code.=<<___ if ($win64);
1442         movaps  -0xd8(%rax),%xmm6
1443         movaps  -0xc8(%rax),%xmm7
1444         movaps  -0xb8(%rax),%xmm8
1445         movaps  -0xa8(%rax),%xmm9
1446         movaps  -0x98(%rax),%xmm10
1447         movaps  -0x88(%rax),%xmm11
1448         movaps  -0x78(%rax),%xmm12
1449         movaps  -0x68(%rax),%xmm13
1450         movaps  -0x58(%rax),%xmm14
1451         movaps  -0x48(%rax),%xmm15
1452 ___
1453 $code.=<<___;
1454         mov     -48(%rax),%r15
1455         mov     -40(%rax),%r14
1456         mov     -32(%rax),%r13
1457         mov     -24(%rax),%r12
1458         mov     -16(%rax),%rbp
1459         mov     -8(%rax),%rbx
1460         lea     (%rax),%rsp             # restore %rsp
1461 .Lmul_1024_epilogue:
1462         ret
1463 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1464 ___
1465 }
1466 {
1467 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1468 my @T = map("%r$_",(8..11));
1469
1470 $code.=<<___;
1471 .globl  rsaz_1024_red2norm_avx2
1472 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1473 .align  32
1474 rsaz_1024_red2norm_avx2:
1475         sub     \$-128,$inp     # size optimization
1476         xor     %rax,%rax
1477 ___
1478
1479 for ($j=0,$i=0; $i<16; $i++) {
1480     my $k=0;
1481     while (29*$j<64*($i+1)) {   # load data till boundary
1482         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1483         $j++; $k++; push(@T,shift(@T));
1484     }
1485     $l=$k;
1486     while ($k>1) {              # shift all loaded values but the last
1487         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1488         $k--;
1489     }
1490     $code.=<<___;               # shift last value
1491         mov     @T[-1], @T[0]
1492         shl     \$`29*($j-1)`, @T[-1]
1493         shr     \$`-29*($j-1)`, @T[0]
1494 ___
1495     while ($l) {                # accumulate all values
1496         $code.="        add     @T[-$l], %rax\n";
1497         $l--;
1498     }
1499         $code.=<<___;
1500         adc     \$0, @T[0]      # absorb the carry, if any
1501         mov     %rax, 8*$i($out)
1502         mov     @T[0], %rax
1503 ___
1504     push(@T,shift(@T));
1505 }
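# For reference only - this helper is our addition, is never called by the
# generator, and assumes a 64-bit perl. It models what the generated
# red2norm code computes, under the extra assumption that the input digits
# are fully reduced below 2^29, so that a plain OR suffices; the assembly
# above uses add/adc instead, which also absorbs carries coming from a
# redundant, not fully reduced input.
sub _red2norm_ref {
	my @in = @_;				# at least 36 29-bit digits
	my @out;				# 16 64-bit output words
	my ($acc, $bits) = (0, 0);
	for (my $j=0; @out<16; $j++) {
		$acc |= $in[$j] << $bits;	# place the next digit
		if ($bits+29 >= 64) {		# crossed a 64-bit boundary
			push @out, $acc;
			$acc  = $in[$j] >> (64-$bits);
			$bits -= 64-29;
		} else {
			$bits += 29;
		}
	}
	return @out;
}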
1506 $code.=<<___;
1507         ret
1508 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1509
1510 .globl  rsaz_1024_norm2red_avx2
1511 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1512 .align  32
1513 rsaz_1024_norm2red_avx2:
1514         sub     \$-128,$out     # size optimization
1515         mov     ($inp),@T[0]
1516         mov     \$0x1fffffff,%eax
1517 ___
1518 for ($j=0,$i=0; $i<16; $i++) {
1519     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1520     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1521     my $k=1;
1522     while (29*($j+1)<64*($i+1)) {
1523         $code.=<<___;
1524         mov     @T[0],@T[-$k]
1525         shr     \$`29*$j`,@T[-$k]
1526         and     %rax,@T[-$k]                            # &0x1fffffff
1527         mov     @T[-$k],`8*$j-128`($out)
1528 ___
1529         $j++; $k++;
1530     }
1531     $code.=<<___;
1532         shrd    \$`29*$j`,@T[1],@T[0]
1533         and     %rax,@T[0]
1534         mov     @T[0],`8*$j-128`($out)
1535 ___
1536     $j++;
1537     push(@T,shift(@T));
1538 }
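# The inverse conversion, again as an unused reference sketch (our addition,
# 64-bit perl assumed): split 16 64-bit words into the 36 29-bit digits that
# cover the 1024 bits, followed by four zero digits of padding, matching the
# layout produced by the code generated above.
sub _norm2red_ref {
	my @in = @_;				# exactly 16 64-bit words
	my @out;
	for (my $j=0; 29*$j < 64*16; $j++) {
		my $word = int(29*$j/64);	# which input word
		my $off  = 29*$j%64;		# bit offset within it
		my $dig  = $in[$word] >> $off;
		$dig |= $in[$word+1] << (64-$off)	# digit straddles two words
			if ($off > 64-29 && $word < 15);
		push @out, $dig & 0x1fffffff;
	}
	push @out, (0) x 4;			# zero padding
	return @out;
}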
1539 $code.=<<___;
1540         mov     @T[0],`8*$j-128`($out)                  # zero
1541         mov     @T[0],`8*($j+1)-128`($out)
1542         mov     @T[0],`8*($j+2)-128`($out)
1543         mov     @T[0],`8*($j+3)-128`($out)
1544         ret
1545 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1546 ___
1547 }
1548 {
1549 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1550
1551 $code.=<<___;
1552 .globl  rsaz_1024_scatter5_avx2
1553 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1554 .align  32
1555 rsaz_1024_scatter5_avx2:
1556         vzeroupper
1557         vmovdqu .Lscatter_permd(%rip),%ymm5
1558         shl     \$4,$power
1559         lea     ($out,$power),$out
1560         mov     \$9,%eax
1561         jmp     .Loop_scatter_1024
1562
1563 .align  32
1564 .Loop_scatter_1024:
1565         vmovdqu         ($inp),%ymm0
1566         lea             32($inp),$inp
1567         vpermd          %ymm0,%ymm5,%ymm0
1568         vmovdqu         %xmm0,($out)
1569         lea             16*32($out),$out
1570         dec     %eax
1571         jnz     .Loop_scatter_1024
1572
1573         vzeroupper
1574         ret
1575 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1576
1577 .globl  rsaz_1024_gather5_avx2
1578 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1579 .align  32
1580 rsaz_1024_gather5_avx2:
1581 ___
1582 $code.=<<___ if ($win64);
1583         lea     -0x88(%rsp),%rax
1584         vzeroupper
1585 .LSEH_begin_rsaz_1024_gather5:
1586         # I can't trust assembler to use specific encoding:-(
1587         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1588         .byte   0xc5,0xf8,0x29,0x70,0xe0        #vmovaps %xmm6,-0x20(%rax)
1589         .byte   0xc5,0xf8,0x29,0x78,0xf0        #vmovaps %xmm7,-0x10(%rax)
1590         .byte   0xc5,0x78,0x29,0x40,0x00        #vmovaps %xmm8,0(%rax)
1591         .byte   0xc5,0x78,0x29,0x48,0x10        #vmovaps %xmm9,0x10(%rax)
1592         .byte   0xc5,0x78,0x29,0x50,0x20        #vmovaps %xmm10,0x20(%rax)
1593         .byte   0xc5,0x78,0x29,0x58,0x30        #vmovaps %xmm11,0x30(%rax)
1594         .byte   0xc5,0x78,0x29,0x60,0x40        #vmovaps %xmm12,0x40(%rax)
1595         .byte   0xc5,0x78,0x29,0x68,0x50        #vmovaps %xmm13,0x50(%rax)
1596         .byte   0xc5,0x78,0x29,0x70,0x60        #vmovaps %xmm14,0x60(%rax)
1597         .byte   0xc5,0x78,0x29,0x78,0x70        #vmovaps %xmm15,0x70(%rax)
1598 ___
1599 $code.=<<___;
1600         lea     .Lgather_table(%rip),%r11
1601         mov     $power,%eax
1602         and     \$3,$power
1603         shr     \$2,%eax                        # cache line number
1604         shl     \$4,$power                      # offset within cache line
1605
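	# exactly one of xmm8-xmm15 is loaded with an all-ones mask, namely
	# the one whose index matches the cache line holding the entry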
1606         vmovdqu         -32(%r11),%ymm7         # .Lgather_permd
1607         vpbroadcastb    8(%r11,%rax), %xmm8
1608         vpbroadcastb    7(%r11,%rax), %xmm9
1609         vpbroadcastb    6(%r11,%rax), %xmm10
1610         vpbroadcastb    5(%r11,%rax), %xmm11
1611         vpbroadcastb    4(%r11,%rax), %xmm12
1612         vpbroadcastb    3(%r11,%rax), %xmm13
1613         vpbroadcastb    2(%r11,%rax), %xmm14
1614         vpbroadcastb    1(%r11,%rax), %xmm15
1615
1616         lea     64($inp,$power),$inp
1617         mov     \$64,%r11                       # size optimization
1618         mov     \$9,%eax
1619         jmp     .Loop_gather_1024
1620
1621 .align  32
1622 .Loop_gather_1024:
1623         vpand           -64($inp),              %xmm8,%xmm0
1624         vpand           ($inp),                 %xmm9,%xmm1
1625         vpand           64($inp),               %xmm10,%xmm2
1626         vpand           ($inp,%r11,2),          %xmm11,%xmm3
1627          vpor                                   %xmm0,%xmm1,%xmm1
1628         vpand           64($inp,%r11,2),        %xmm12,%xmm4
1629          vpor                                   %xmm2,%xmm3,%xmm3
1630         vpand           ($inp,%r11,4),          %xmm13,%xmm5
1631          vpor                                   %xmm1,%xmm3,%xmm3
1632         vpand           64($inp,%r11,4),        %xmm14,%xmm6
1633          vpor                                   %xmm4,%xmm5,%xmm5
1634         vpand           -128($inp,%r11,8),      %xmm15,%xmm2
1635         lea             ($inp,%r11,8),$inp
1636          vpor                                   %xmm3,%xmm5,%xmm5
1637          vpor                                   %xmm2,%xmm6,%xmm6
1638          vpor                                   %xmm5,%xmm6,%xmm6
1639         vpermd          %ymm6,%ymm7,%ymm6
1640         vmovdqu         %ymm6,($out)
1641         lea             32($out),$out
1642         dec     %eax
1643         jnz     .Loop_gather_1024
1644
1645         vpxor   %ymm0,%ymm0,%ymm0
1646         vmovdqu %ymm0,($out)
1647         vzeroupper
1648 ___
1649 $code.=<<___ if ($win64);
1650         movaps  (%rsp),%xmm6
1651         movaps  0x10(%rsp),%xmm7
1652         movaps  0x20(%rsp),%xmm8
1653         movaps  0x30(%rsp),%xmm9
1654         movaps  0x40(%rsp),%xmm10
1655         movaps  0x50(%rsp),%xmm11
1656         movaps  0x60(%rsp),%xmm12
1657         movaps  0x70(%rsp),%xmm13
1658         movaps  0x80(%rsp),%xmm14
1659         movaps  0x90(%rsp),%xmm15
1660         lea     0xa8(%rsp),%rsp
1661 .LSEH_end_rsaz_1024_gather5:
1662 ___
1663 $code.=<<___;
1664         ret
1665 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1666 ___
1667 }
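# The gather loop above selects one of eight cache lines without a
# data-dependent branch: each candidate is ANDed with a byte mask that is
# all-ones for exactly one line and all-zeros for the others, and the
# results are ORed together. A scalar model of that selection step follows;
# it is our illustration only and is not used by the generator.
sub _masked_select_ref {
	my ($idx, @cand) = @_;
	my $r = 0;
	for (my $i=0; $i<@cand; $i++) {
		my $mask = ($i == $idx) ? ~0 : 0;	# all-ones or all-zeros
		$r |= $cand[$i] & $mask;		# contributes only if selected
	}
	return $r;
}
# Note that the offset within the selected cache line is still added to the
# load address directly, so only the cache-line index is hidden by the
# mask-and-or selection.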
1668
1669 $code.=<<___;
1670 .extern OPENSSL_ia32cap_P
1671 .globl  rsaz_avx2_eligible
1672 .type   rsaz_avx2_eligible,\@abi-omnipotent
1673 .align  32
1674 rsaz_avx2_eligible:
1675         mov     OPENSSL_ia32cap_P+8(%rip),%eax
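	# bit 5 of the third OPENSSL_ia32cap_P dword is the AVX2 feature
	# flag (CPUID leaf 7, EBX)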
1676         and     \$`1<<5`,%eax
1677         shr     \$5,%eax
1678         ret
1679 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1680
1681 .align  64
1682 .Land_mask:
1683         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1684 .Lscatter_permd:
1685         .long   0,2,4,6,7,7,7,7
1686 .Lgather_permd:
1687         .long   0,7,1,7,2,7,3,7
1688 .Lgather_table:
1689         .byte   0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
1690 .align  64
1691 ___
1692
1693 if ($win64) {
1694 $rec="%rcx";
1695 $frame="%rdx";
1696 $context="%r8";
1697 $disp="%r9";
1698
1699 $code.=<<___;
1700 .extern __imp_RtlVirtualUnwind
1701 .type   rsaz_se_handler,\@abi-omnipotent
1702 .align  16
1703 rsaz_se_handler:
1704         push    %rsi
1705         push    %rdi
1706         push    %rbx
1707         push    %rbp
1708         push    %r12
1709         push    %r13
1710         push    %r14
1711         push    %r15
1712         pushfq
1713         sub     \$64,%rsp
1714
1715         mov     120($context),%rax      # pull context->Rax
1716         mov     248($context),%rbx      # pull context->Rip
1717
1718         mov     8($disp),%rsi           # disp->ImageBase
1719         mov     56($disp),%r11          # disp->HandlerData
1720
1721         mov     0(%r11),%r10d           # HandlerData[0]
1722         lea     (%rsi,%r10),%r10        # prologue label
1723         cmp     %r10,%rbx               # context->Rip<prologue label
1724         jb      .Lcommon_seh_tail
1725
1726         mov     152($context),%rax      # pull context->Rsp
1727
1728         mov     4(%r11),%r10d           # HandlerData[1]
1729         lea     (%rsi,%r10),%r10        # epilogue label
1730         cmp     %r10,%rbx               # context->Rip>=epilogue label
1731         jae     .Lcommon_seh_tail
1732
1733         mov     160($context),%rax      # pull context->Rbp
1734
1735         mov     -48(%rax),%r15
1736         mov     -40(%rax),%r14
1737         mov     -32(%rax),%r13
1738         mov     -24(%rax),%r12
1739         mov     -16(%rax),%rbp
1740         mov     -8(%rax),%rbx
1741         mov     %r15,240($context)
1742         mov     %r14,232($context)
1743         mov     %r13,224($context)
1744         mov     %r12,216($context)
1745         mov     %rbp,160($context)
1746         mov     %rbx,144($context)
1747
1748         lea     -0xd8(%rax),%rsi        # %xmm save area
1749         lea     512($context),%rdi      # & context.Xmm6
1750         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1751         .long   0xa548f3fc              # cld; rep movsq
1752
1753 .Lcommon_seh_tail:
1754         mov     8(%rax),%rdi
1755         mov     16(%rax),%rsi
1756         mov     %rax,152($context)      # restore context->Rsp
1757         mov     %rsi,168($context)      # restore context->Rsi
1758         mov     %rdi,176($context)      # restore context->Rdi
1759
1760         mov     40($disp),%rdi          # disp->ContextRecord
1761         mov     $context,%rsi           # context
1762         mov     \$154,%ecx              # sizeof(CONTEXT)/sizeof(%rax)
1763         .long   0xa548f3fc              # cld; rep movsq
1764
1765         mov     $disp,%rsi
1766         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1767         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1768         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1769         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1770         mov     40(%rsi),%r10           # disp->ContextRecord
1771         lea     56(%rsi),%r11           # &disp->HandlerData
1772         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1773         mov     %r10,32(%rsp)           # arg5
1774         mov     %r11,40(%rsp)           # arg6
1775         mov     %r12,48(%rsp)           # arg7
1776         mov     %rcx,56(%rsp)           # arg8, (NULL)
1777         call    *__imp_RtlVirtualUnwind(%rip)
1778
1779         mov     \$1,%eax                # ExceptionContinueSearch
1780         add     \$64,%rsp
1781         popfq
1782         pop     %r15
1783         pop     %r14
1784         pop     %r13
1785         pop     %r12
1786         pop     %rbp
1787         pop     %rbx
1788         pop     %rdi
1789         pop     %rsi
1790         ret
1791 .size   rsaz_se_handler,.-rsaz_se_handler
1792
1793 .section        .pdata
1794 .align  4
1795         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1796         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1797         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1798
1799         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1800         .rva    .LSEH_end_rsaz_1024_mul_avx2
1801         .rva    .LSEH_info_rsaz_1024_mul_avx2
1802
1803         .rva    .LSEH_begin_rsaz_1024_gather5
1804         .rva    .LSEH_end_rsaz_1024_gather5
1805         .rva    .LSEH_info_rsaz_1024_gather5
1806 .section        .xdata
1807 .align  8
1808 .LSEH_info_rsaz_1024_sqr_avx2:
1809         .byte   9,0,0,0
1810         .rva    rsaz_se_handler
1811         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue
1812 .LSEH_info_rsaz_1024_mul_avx2:
1813         .byte   9,0,0,0
1814         .rva    rsaz_se_handler
1815         .rva    .Lmul_1024_body,.Lmul_1024_epilogue
1816 .LSEH_info_rsaz_1024_gather5:
1817         .byte   0x01,0x33,0x16,0x00
1818         .byte   0x36,0xf8,0x09,0x00     #vmovaps 0x90(rsp),xmm15
1819         .byte   0x31,0xe8,0x08,0x00     #vmovaps 0x80(rsp),xmm14
1820         .byte   0x2c,0xd8,0x07,0x00     #vmovaps 0x70(rsp),xmm13
1821         .byte   0x27,0xc8,0x06,0x00     #vmovaps 0x60(rsp),xmm12
1822         .byte   0x22,0xb8,0x05,0x00     #vmovaps 0x50(rsp),xmm11
1823         .byte   0x1d,0xa8,0x04,0x00     #vmovaps 0x40(rsp),xmm10
1824         .byte   0x18,0x98,0x03,0x00     #vmovaps 0x30(rsp),xmm9
1825         .byte   0x13,0x88,0x02,0x00     #vmovaps 0x20(rsp),xmm8
1826         .byte   0x0e,0x78,0x01,0x00     #vmovaps 0x10(rsp),xmm7
1827         .byte   0x09,0x68,0x00,0x00     #vmovaps 0x00(rsp),xmm6
1828         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
1829 ___
1830 }
1831
1832 foreach (split("\n",$code)) {
1833         s/\`([^\`]*)\`/eval($1)/ge;
1834
1835         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1836
1837         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1838         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1839         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1840         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1841         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1842         print $_,"\n";
1843 }
1844
1845 }}} else {{{
1846 print <<___;    # assembler is too old
1847 .text
1848
1849 .globl  rsaz_avx2_eligible
1850 .type   rsaz_avx2_eligible,\@abi-omnipotent
1851 rsaz_avx2_eligible:
1852         xor     %eax,%eax
1853         ret
1854 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1855
1856 .globl  rsaz_1024_sqr_avx2
1857 .globl  rsaz_1024_mul_avx2
1858 .globl  rsaz_1024_norm2red_avx2
1859 .globl  rsaz_1024_red2norm_avx2
1860 .globl  rsaz_1024_scatter5_avx2
1861 .globl  rsaz_1024_gather5_avx2
1862 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1863 rsaz_1024_sqr_avx2:
1864 rsaz_1024_mul_avx2:
1865 rsaz_1024_norm2red_avx2:
1866 rsaz_1024_red2norm_avx2:
1867 rsaz_1024_scatter5_avx2:
1868 rsaz_1024_gather5_avx2:
1869         .byte   0x0f,0x0b       # ud2
1870         ret
1871 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1872 ___
1873 }}}
1874
1875 close STDOUT;