1 #!/usr/bin/env perl
2
3 #******************************************************************************
4 #* Copyright(c) 2012, Intel Corp.                                             
5 #* Developers and authors:                                                    
6 #* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   
7 #* (1) Intel Corporation, Israel Development Center, Haifa, Israel
8 #* (2) University of Haifa, Israel                                              
9 #******************************************************************************
10 #* LICENSE:                                                                
11 #* This submission to OpenSSL is to be made available under the OpenSSL  
12 #* license, and only to the OpenSSL project, in order to allow integration    
13 #* into the publicly distributed code. 
14 #* The use of this code, or portions of this code, or concepts embedded in
15 #* this code, or modification of this code and/or algorithm(s) in it, or the
16 #* use of this code for any other purpose than stated above, requires special
17 #* licensing.                                                                  
18 #******************************************************************************
19 #* DISCLAIMER:                                                                
20 #* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS     
21 #* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
22 #* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
23 #* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT
24 #* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 
25 #* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF    
26 #* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS   
27 #* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN    
28 #* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)    
29 #* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
30 #* POSSIBILITY OF SUCH DAMAGE.                                                
31 #******************************************************************************
32 #* Reference:                                                                 
33 #* [1]  S. Gueron, V. Krasnov: "Software Implementation of Modular
34 #*      Exponentiation,  Using Advanced Vector Instructions Architectures",
35 #*      F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
36 #*      pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
37 #* [2]  S. Gueron: "Efficient Software Implementations of Modular
38 #*      Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
39 #* [3]  S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
40 #*      Proceedings of 9th International Conference on Information Technology:
41 #*      New Generations (ITNG 2012), pp.821-823 (2012)
42 #* [4]  S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
43 #*      resistant 1024-bit modular exponentiation, for optimizing RSA2048
44 #*      on AVX2 capable x86_64 platforms",
45 #*      http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
46 #******************************************************************************
47
48 # +10% improvement by <appro@openssl.org>
49 #
50 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
51 # 2GHz Haswell          544             632/+16%        947/+74%
52 #
53 # (*)   if system doesn't support AVX2, for reference purposes;
54
55 $flavour = shift;
56 $output  = shift;
57 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
58
59 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
60
61 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
62 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
63 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
64 die "can't locate x86_64-xlate.pl";
65
66 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
67                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
68         $avx = ($1>=2.19) + ($1>=2.22);
69 }
70
71 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
72             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
73         $avx = ($1>=2.09) + ($1>=2.11);
74 }
75
76 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
77             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
78         $avx = ($1>=10) + ($1>=11);
79 }
80
81 open OUT,"| $^X $xlate $flavour $output";
82 *STDOUT = *OUT;
83
84 if ($avx>1) {{{
85 { # void AMS_WW(
86 my $rp="%rdi";  # BN_ULONG *rp,
87 my $ap="%rsi";  # const BN_ULONG *ap,
88 my $np="%rdx";  # const BN_ULONG *np,
89 my $n0="%ecx";  # const BN_ULONG n0,
90 my $rep="%r8d"; # int repeat);
91
92 # The registers that hold the accumulated redundant result
93 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
94 # Therefore: ceil(1024/29) = 36 digits, four per %ymm register => 36/4 = 9 registers
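#
# An illustrative sketch (not part of the build) of how a 1024-bit value maps
# onto this redundant 29-bit representation.  Math::BigInt and the variable
# names are assumptions made for the example only:
#
#   use Math::BigInt;
#   my $x = Math::BigInt->new($some_1024_bit_value);
#   my @digit;
#   for my $k (0 .. 35) {                           # ceil(1024/29) = 36 digits
#       push @digit, ($x >> (29*$k)) & 0x1fffffff;  # each digit fits in 29 bits
#   }
#   # 36 digits, four 64-bit lanes per %ymm register => 9 accumulators ACC0..ACC8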
95 my $ACC0="%ymm0";
96 my $ACC1="%ymm1";
97 my $ACC2="%ymm2";
98 my $ACC3="%ymm3";
99 my $ACC4="%ymm4";
100 my $ACC5="%ymm5";
101 my $ACC6="%ymm6";
102 my $ACC7="%ymm7";
103 my $ACC8="%ymm8";
104 my $ACC9="%ymm9";
105 # Registers that hold the broadcasted words of bp, currently used
106 my $B1="%ymm10";
107 my $B2="%ymm11";
108 # Registers that hold the broadcasted words of Y, currently used
109 my $Y1="%ymm12";
110 my $Y2="%ymm13";
111 # Helper registers
112 my $TEMP1="%ymm14";
113 my $AND_MASK="%ymm15";
114 # alu registers that hold the first words of the ACC
115 my $r0="%r9";
116 my $r1="%r10";
117 my $r2="%r11";
118 my $r3="%r12";
119
120 my $i="%r14d";                  # loop counter
121 my $tmp = "%r15";
122
123 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
124
125 my $aap=$r0;
126 my $tp0="%rbx";
127 my $tp1=$r3;
128
129 $np="%r13";                     # reassigned argument
130
131 $code.=<<___;
132 .globl  rsaz_1024_sqr_avx2
133 .type   rsaz_1024_sqr_avx2,\@function,5
134 .align  64
135 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
136         lea     (%rsp), %rax
137         push    %rbx
138         push    %rbp
139         push    %r12
140         push    %r13
141         push    %r14
142         push    %r15
143 ___
144 $code.=<<___ if ($win64);
145         lea     -0xa8(%rsp),%rsp
146         movaps  %xmm6,-0xd8(%rax)
147         movaps  %xmm7,-0xc8(%rax)
148         movaps  %xmm8,-0xb8(%rax)
149         movaps  %xmm9,-0xa8(%rax)
150         movaps  %xmm10,-0x98(%rax)
151         movaps  %xmm11,-0x88(%rax)
152         movaps  %xmm12,-0x78(%rax)
153         movaps  %xmm13,-0x68(%rax)
154         movaps  %xmm14,-0x58(%rax)
155         movaps  %xmm15,-0x48(%rax)
156 .Lsqr_1024_body:
157 ___
158 $code.=<<___;
159         mov     %rax,%rbp
160         vzeroall
161         mov     %rdx, $np                       # reassigned argument
162         sub     \$$FrameSize, %rsp
163         mov     $np, $tmp
164         sub     \$-128, $rp                     # size optimization
165         sub     \$-128, $ap
166         sub     \$-128, $np
167
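        # ($tmp below is non-zero iff ($np & 4095) + 32*10 >= 4096, i.e. the ten
        #  32-byte vectors starting at $np would reach into the next 4KB page)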
168         and     \$4095, $tmp                    # see if $np crosses page
169         add     \$32*10, $tmp
170         shr     \$12, $tmp
171         jz      .Lsqr_1024_no_n_copy
172
173         # unaligned 256-bit load that crosses page boundary can
174         # cause >2x performance degradation here, so if $np does
175         # cross page boundary, copy it to stack and make sure stack
176         # frame doesn't...
177         sub             \$32*10,%rsp
178         vmovdqu         32*0-128($np), $ACC0
179         and             \$-2048, %rsp
180         vmovdqu         32*1-128($np), $ACC1
181         vmovdqu         32*2-128($np), $ACC2
182         vmovdqu         32*3-128($np), $ACC3
183         vmovdqu         32*4-128($np), $ACC4
184         vmovdqu         32*5-128($np), $ACC5
185         vmovdqu         32*6-128($np), $ACC6
186         vmovdqu         32*7-128($np), $ACC7
187         vmovdqu         32*8-128($np), $ACC8
188         lea             $FrameSize+128(%rsp),$np
189         vmovdqu         $ACC0, 32*0-128($np)
190         vmovdqu         $ACC1, 32*1-128($np)
191         vmovdqu         $ACC2, 32*2-128($np)
192         vmovdqu         $ACC3, 32*3-128($np)
193         vmovdqu         $ACC4, 32*4-128($np)
194         vmovdqu         $ACC5, 32*5-128($np)
195         vmovdqu         $ACC6, 32*6-128($np)
196         vmovdqu         $ACC7, 32*7-128($np)
197         vmovdqu         $ACC8, 32*8-128($np)
198         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
199
200 .Lsqr_1024_no_n_copy:
201         and             \$-1024, %rsp
202
203         vmovdqu         32*1-128($ap), $ACC1
204         vmovdqu         32*2-128($ap), $ACC2
205         vmovdqu         32*3-128($ap), $ACC3
206         vmovdqu         32*4-128($ap), $ACC4
207         vmovdqu         32*5-128($ap), $ACC5
208         vmovdqu         32*6-128($ap), $ACC6
209         vmovdqu         32*7-128($ap), $ACC7
210         vmovdqu         32*8-128($ap), $ACC8
211
212         lea     192(%rsp), $tp0                 # 64+128=192
213         vpbroadcastq    .Land_mask(%rip), $AND_MASK
214         jmp     .LOOP_GRANDE_SQR_1024
215
216 .align  32
217 .LOOP_GRANDE_SQR_1024:
218         lea     32*18+128(%rsp), $aap           # size optimization
219         lea     448(%rsp), $tp1                 # 64+128+256=448
220
221         # the squaring is performed as described in Variant B of
222         # "Speeding up Big-Number Squaring", so start by calculating
223         # the A*2=A+A vector
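        # (sketch of the idea: writing A = sum_i a_i*2^(29*i),
        #  A^2 = sum_i a_i*a_i*2^(58*i) + sum_{i<j} a_i*(2*a_j)*2^(29*(i+j)),
        #  so storing 2*A once lets every cross product be a single vpmuludq)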
224         vpaddq          $ACC1, $ACC1, $ACC1
225          vpbroadcastq   32*0-128($ap), $B1
226         vpaddq          $ACC2, $ACC2, $ACC2
227         vmovdqa         $ACC1, 32*0-128($aap)
228         vpaddq          $ACC3, $ACC3, $ACC3
229         vmovdqa         $ACC2, 32*1-128($aap)
230         vpaddq          $ACC4, $ACC4, $ACC4
231         vmovdqa         $ACC3, 32*2-128($aap)
232         vpaddq          $ACC5, $ACC5, $ACC5
233         vmovdqa         $ACC4, 32*3-128($aap)
234         vpaddq          $ACC6, $ACC6, $ACC6
235         vmovdqa         $ACC5, 32*4-128($aap)
236         vpaddq          $ACC7, $ACC7, $ACC7
237         vmovdqa         $ACC6, 32*5-128($aap)
238         vpaddq          $ACC8, $ACC8, $ACC8
239         vmovdqa         $ACC7, 32*6-128($aap)
240         vpxor           $ACC9, $ACC9, $ACC9
241         vmovdqa         $ACC8, 32*7-128($aap)
242
243         vpmuludq        32*0-128($ap), $B1, $ACC0
244          vpbroadcastq   32*1-128($ap), $B2
245          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
246         vpmuludq        $B1, $ACC1, $ACC1
247          vmovdqu        $ACC9, 32*10-448($tp1)
248         vpmuludq        $B1, $ACC2, $ACC2
249          vmovdqu        $ACC9, 32*11-448($tp1)
250         vpmuludq        $B1, $ACC3, $ACC3
251          vmovdqu        $ACC9, 32*12-448($tp1)
252         vpmuludq        $B1, $ACC4, $ACC4
253          vmovdqu        $ACC9, 32*13-448($tp1)
254         vpmuludq        $B1, $ACC5, $ACC5
255          vmovdqu        $ACC9, 32*14-448($tp1)
256         vpmuludq        $B1, $ACC6, $ACC6
257          vmovdqu        $ACC9, 32*15-448($tp1)
258         vpmuludq        $B1, $ACC7, $ACC7
259          vmovdqu        $ACC9, 32*16-448($tp1)
260         vpmuludq        $B1, $ACC8, $ACC8
261          vpbroadcastq   32*2-128($ap), $B1
262          vmovdqu        $ACC9, 32*17-448($tp1)
263
264         xor     $tmp, $tmp
265         mov     \$4, $i
266         jmp     .Lentry_1024
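        # (the first pass enters the loop at .Lentry_1024 below, skipping the
        #  loads that fold previously accumulated partial products back in)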
267 ___
268 $TEMP0=$Y1;
269 $TEMP2=$Y2;
270 $code.=<<___;
271 .align  32
272 .LOOP_SQR_1024:
273         vmovdqu         32*0(%rsp,$tmp), $TEMP0 # 32*0-192($tp0,$tmp)
274         vmovdqu         32*1(%rsp,$tmp), $TEMP1 # 32*1-192($tp0,$tmp)
275          vpbroadcastq   32*1-128($ap,$tmp), $B2
276         vpmuludq        32*0-128($ap), $B1, $ACC0
277         vmovdqu         32*2-192($tp0,$tmp), $TEMP2
278         vpaddq          $TEMP0, $ACC0, $ACC0
279         vpmuludq        32*0-128($aap), $B1, $ACC1
280         vmovdqu         32*3-192($tp0,$tmp), $TEMP0
281         vpaddq          $TEMP1, $ACC1, $ACC1
282         vpmuludq        32*1-128($aap), $B1, $ACC2
283         vmovdqu         32*4-192($tp0,$tmp), $TEMP1
284         vpaddq          $TEMP2, $ACC2, $ACC2
285         vpmuludq        32*2-128($aap), $B1, $ACC3
286         vmovdqu         32*5-192($tp0,$tmp), $TEMP2
287         vpaddq          $TEMP0, $ACC3, $ACC3
288         vpmuludq        32*3-128($aap), $B1, $ACC4
289         vmovdqu         32*6-192($tp0,$tmp), $TEMP0
290         vpaddq          $TEMP1, $ACC4, $ACC4
291         vpmuludq        32*4-128($aap), $B1, $ACC5
292         vmovdqu         32*7-192($tp0,$tmp), $TEMP1
293         vpaddq          $TEMP2, $ACC5, $ACC5
294         vpmuludq        32*5-128($aap), $B1, $ACC6
295         vmovdqu         32*8-192($tp0,$tmp), $TEMP2
296         vpaddq          $TEMP0, $ACC6, $ACC6
297         vpmuludq        32*6-128($aap), $B1, $ACC7
298         vpaddq          $TEMP1, $ACC7, $ACC7
299         vpmuludq        32*7-128($aap), $B1, $ACC8
300          vpbroadcastq   32*2-128($ap,$tmp), $B1
301         vpaddq          $TEMP2, $ACC8, $ACC8
302 .Lentry_1024:
303         vmovdqu         $ACC0, 32*0(%rsp,$tmp)  # 32*0-192($tp0,$tmp)
304         vmovdqu         $ACC1, 32*1(%rsp,$tmp)  # 32*1-192($tp0,$tmp)
305
306         vpmuludq        32*1-128($ap), $B2, $TEMP0
307         vpaddq          $TEMP0, $ACC2, $ACC2
308         vpmuludq        32*1-128($aap), $B2, $TEMP1
309         vpaddq          $TEMP1, $ACC3, $ACC3
310         vpmuludq        32*2-128($aap), $B2, $TEMP2
311         vpaddq          $TEMP2, $ACC4, $ACC4
312         vpmuludq        32*3-128($aap), $B2, $TEMP0
313         vpaddq          $TEMP0, $ACC5, $ACC5
314         vpmuludq        32*4-128($aap), $B2, $TEMP1
315         vpaddq          $TEMP1, $ACC6, $ACC6
316         vpmuludq        32*5-128($aap), $B2, $TEMP2
317         vmovdqu         32*9-192($tp0,$tmp), $TEMP1
318         vpaddq          $TEMP2, $ACC7, $ACC7
319         vpmuludq        32*6-128($aap), $B2, $TEMP0
320         vpaddq          $TEMP0, $ACC8, $ACC8
321         vpmuludq        32*7-128($aap), $B2, $ACC0
322          vpbroadcastq   32*3-128($ap,$tmp), $B2
323         vpaddq          $TEMP1, $ACC0, $ACC0
324
325         vmovdqu         $ACC2, 32*2-192($tp0,$tmp)
326         vmovdqu         $ACC3, 32*3-192($tp0,$tmp)
327
328         vpmuludq        32*2-128($ap), $B1, $TEMP2
329         vpaddq          $TEMP2, $ACC4, $ACC4
330         vpmuludq        32*2-128($aap), $B1, $TEMP0
331         vpaddq          $TEMP0, $ACC5, $ACC5
332         vpmuludq        32*3-128($aap), $B1, $TEMP1
333         vpaddq          $TEMP1, $ACC6, $ACC6
334         vpmuludq        32*4-128($aap), $B1, $TEMP2
335         vpaddq          $TEMP2, $ACC7, $ACC7
336         vpmuludq        32*5-128($aap), $B1, $TEMP0
337         vmovdqu         32*10-448($tp1,$tmp), $TEMP2
338         vpaddq          $TEMP0, $ACC8, $ACC8
339         vpmuludq        32*6-128($aap), $B1, $TEMP1
340         vpaddq          $TEMP1, $ACC0, $ACC0
341         vpmuludq        32*7-128($aap), $B1, $ACC1
342          vpbroadcastq   32*4-128($ap,$tmp), $B1
343         vpaddq          $TEMP2, $ACC1, $ACC1
344
345         vmovdqu         $ACC4, 32*4-192($tp0,$tmp)
346         vmovdqu         $ACC5, 32*5-192($tp0,$tmp)
347
348         vpmuludq        32*3-128($ap), $B2, $TEMP0
349         vpaddq          $TEMP0, $ACC6, $ACC6
350         vpmuludq        32*3-128($aap), $B2, $TEMP1
351         vpaddq          $TEMP1, $ACC7, $ACC7
352         vpmuludq        32*4-128($aap), $B2, $TEMP2
353         vpaddq          $TEMP2, $ACC8, $ACC8
354         vpmuludq        32*5-128($aap), $B2, $TEMP0
355         vmovdqu         32*11-448($tp1,$tmp), $TEMP2
356         vpaddq          $TEMP0, $ACC0, $ACC0
357         vpmuludq        32*6-128($aap), $B2, $TEMP1
358         vpaddq          $TEMP1, $ACC1, $ACC1
359         vpmuludq        32*7-128($aap), $B2, $ACC2
360          vpbroadcastq   32*5-128($ap,$tmp), $B2
361         vpaddq          $TEMP2, $ACC2, $ACC2    
362
363         vmovdqu         $ACC6, 32*6-192($tp0,$tmp)
364         vmovdqu         $ACC7, 32*7-192($tp0,$tmp)
365
366         vpmuludq        32*4-128($ap), $B1, $TEMP0
367         vpaddq          $TEMP0, $ACC8, $ACC8
368         vpmuludq        32*4-128($aap), $B1, $TEMP1
369         vpaddq          $TEMP1, $ACC0, $ACC0
370         vpmuludq        32*5-128($aap), $B1, $TEMP2
371         vmovdqu         32*12-448($tp1,$tmp), $TEMP1
372         vpaddq          $TEMP2, $ACC1, $ACC1
373         vpmuludq        32*6-128($aap), $B1, $TEMP0
374         vpaddq          $TEMP0, $ACC2, $ACC2
375         vpmuludq        32*7-128($aap), $B1, $ACC3
376          vpbroadcastq   32*6-128($ap,$tmp), $B1
377         vpaddq          $TEMP1, $ACC3, $ACC3
378
379         vmovdqu         $ACC8, 32*8-192($tp0,$tmp)
380         vmovdqu         $ACC0, 32*9-192($tp0,$tmp)
381
382         vpmuludq        32*5-128($ap), $B2, $TEMP2
383         vpaddq          $TEMP2, $ACC1, $ACC1
384         vpmuludq        32*5-128($aap), $B2, $TEMP0
385         vmovdqu         32*13-448($tp1,$tmp), $TEMP2
386         vpaddq          $TEMP0, $ACC2, $ACC2
387         vpmuludq        32*6-128($aap), $B2, $TEMP1
388         vpaddq          $TEMP1, $ACC3, $ACC3
389         vpmuludq        32*7-128($aap), $B2, $ACC4
390          vpbroadcastq   32*7-128($ap,$tmp), $B2
391         vpaddq          $TEMP2, $ACC4, $ACC4
392
393         vmovdqu         $ACC1, 32*10-448($tp1,$tmp)
394         vmovdqu         $ACC2, 32*11-448($tp1,$tmp)
395
396         vpmuludq        32*6-128($ap), $B1, $TEMP0
397         vmovdqu         32*14-448($tp1,$tmp), $TEMP2
398         vpaddq          $TEMP0, $ACC3, $ACC3
399         vpmuludq        32*6-128($aap), $B1, $TEMP1
400          vpbroadcastq   32*8-128($ap,$tmp), $ACC0       # borrow $ACC0 for $B1
401         vpaddq          $TEMP1, $ACC4, $ACC4
402         vpmuludq        32*7-128($aap), $B1, $ACC5
403          vpbroadcastq   32*0+8-128($ap,$tmp), $B1       # for next iteration
404         vpaddq          $TEMP2, $ACC5, $ACC5
405         vmovdqu         32*15-448($tp1,$tmp), $TEMP1
406
407         vmovdqu         $ACC3, 32*12-448($tp1,$tmp)
408         vmovdqu         $ACC4, 32*13-448($tp1,$tmp)
409
410         vpmuludq        32*7-128($ap), $B2, $TEMP0
411         vmovdqu         32*16-448($tp1,$tmp), $TEMP2
412         vpaddq          $TEMP0, $ACC5, $ACC5
413         vpmuludq        32*7-128($aap), $B2, $ACC6
414         vpaddq          $TEMP1, $ACC6, $ACC6
415
416         vpmuludq        32*8-128($ap), $ACC0, $ACC7
417         vmovdqu         $ACC5, 32*14-448($tp1,$tmp)
418         vpaddq          $TEMP2, $ACC7, $ACC7
419         vmovdqu         $ACC6, 32*15-448($tp1,$tmp)
420         vmovdqu         $ACC7, 32*16-448($tp1,$tmp)
421
422         lea     8($tmp), $tmp
423         dec     $i        
424         jnz     .LOOP_SQR_1024
425 ___
426 $ZERO = $ACC9;
427 $TEMP0 = $B1;
428 $TEMP2 = $B2;
429 $TEMP3 = $Y1;
430 $TEMP4 = $Y2;
431 $code.=<<___;
432         # we need to fix indexes 32-39 to avoid overflow
433         vmovdqu         32*8-192($tp0), $ACC8
434         vmovdqu         32*9-192($tp0), $ACC1
435         vmovdqu         32*10-448($tp1), $ACC2
436
437         vpsrlq          \$29, $ACC8, $TEMP1
438         vpand           $AND_MASK, $ACC8, $ACC8
439         vpsrlq          \$29, $ACC1, $TEMP2
440         vpand           $AND_MASK, $ACC1, $ACC1
441
442         vpermq          \$0x93, $TEMP1, $TEMP1
443         vpxor           $ZERO, $ZERO, $ZERO
444         vpermq          \$0x93, $TEMP2, $TEMP2
445
446         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
447         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
448         vpaddq          $TEMP0, $ACC8, $ACC8
449         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
450         vpaddq          $TEMP1, $ACC1, $ACC1
451         vpaddq          $TEMP2, $ACC2, $ACC2
452         vmovdqu         $ACC1, 32*9-192($tp0)
453         vmovdqu         $ACC2, 32*10-448($tp1)
454
455         mov     (%rsp), %rax
456         mov     8(%rsp), $r1
457         mov     16(%rsp), $r2
458         mov     24(%rsp), $r3
459         vmovdqu 32*1(%rsp), $ACC1
460         vmovdqu 32*2-192($tp0), $ACC2
461         vmovdqu 32*3-192($tp0), $ACC3
462         vmovdqu 32*4-192($tp0), $ACC4
463         vmovdqu 32*5-192($tp0), $ACC5
464         vmovdqu 32*6-192($tp0), $ACC6
465         vmovdqu 32*7-192($tp0), $ACC7
466
467         mov     %rax, $r0
468         imull   $n0, %eax
469         and     \$0x1fffffff, %eax
470         vmovd   %eax, $Y1
471
472         mov     %rax, %rdx
473         imulq   -128($np), %rax
474          vpbroadcastq   $Y1, $Y1
475         add     %rax, $r0
476         mov     %rdx, %rax
477         imulq   8-128($np), %rax
478         shr     \$29, $r0
479         add     %rax, $r1
480         mov     %rdx, %rax
481         imulq   16-128($np), %rax
482         add     $r0, $r1
483         add     %rax, $r2
484         imulq   24-128($np), %rdx
485         add     %rdx, $r3
486
487         mov     $r1, %rax
488         imull   $n0, %eax
489         and     \$0x1fffffff, %eax
490
491         mov \$9, $i
492         jmp .LOOP_REDUCE_1024
493
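        # (word-by-word almost-Montgomery reduction: each step derives y = digit*n0
        #  mod 2^29 in %eax, then adds y times the modulus so that the corresponding
        #  29-bit digit of the accumulator becomes divisible by 2^29)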
494 .align  32
495 .LOOP_REDUCE_1024:
496         vmovd   %eax, $Y2
497         vpbroadcastq    $Y2, $Y2
498
499         vpmuludq        32*1-128($np), $Y1, $TEMP0
500          mov    %rax, %rdx
501          imulq  -128($np), %rax
502         vpaddq          $TEMP0, $ACC1, $ACC1
503         vpmuludq        32*2-128($np), $Y1, $TEMP1
504          add    %rax, $r1
505          mov    %rdx, %rax
506          imulq  8-128($np), %rax
507         vpaddq          $TEMP1, $ACC2, $ACC2
508         vpmuludq        32*3-128($np), $Y1, $TEMP2
509          add    %rax, $r2
510          mov    %rdx, %rax
511          imulq  16-128($np), %rax
512          shr    \$29, $r1
513         vpaddq          $TEMP2, $ACC3, $ACC3
514         vpmuludq        32*4-128($np), $Y1, $TEMP0
515          add    %rax, $r3
516          add    $r1, $r2
517         vpaddq          $TEMP0, $ACC4, $ACC4
518         vpmuludq        32*5-128($np), $Y1, $TEMP1
519          mov    $r2, %rax
520          imull  $n0, %eax
521         vpaddq          $TEMP1, $ACC5, $ACC5
522         vpmuludq        32*6-128($np), $Y1, $TEMP2
523          and    \$0x1fffffff, %eax
524         vpaddq          $TEMP2, $ACC6, $ACC6
525         vpmuludq        32*7-128($np), $Y1, $TEMP0
526         vpaddq          $TEMP0, $ACC7, $ACC7
527         vpmuludq        32*8-128($np), $Y1, $TEMP1
528          vmovd  %eax, $Y1
529          vmovdqu        32*1-8-128($np), $TEMP2
530         vpaddq          $TEMP1, $ACC8, $ACC8
531          vmovdqu        32*2-8-128($np), $TEMP0
532          vpbroadcastq   $Y1, $Y1
533
534         vpmuludq        $Y2, $TEMP2, $TEMP2
535         vmovdqu         32*3-8-128($np), $TEMP1
536          mov    %rax, %rdx
537          imulq  -128($np), %rax
538         vpaddq          $TEMP2, $ACC1, $ACC1
539         vpmuludq        $Y2, $TEMP0, $TEMP0
540         vmovdqu         32*4-8-128($np), $TEMP2
541          add    %rax, $r2
542          mov    %rdx, %rax
543          imulq  8-128($np), %rax
544         vpaddq          $TEMP0, $ACC2, $ACC2
545          add    $r3, %rax
546          shr    \$29, $r2
547         vpmuludq        $Y2, $TEMP1, $TEMP1
548         vmovdqu         32*5-8-128($np), $TEMP0
549          add    $r2, %rax
550         vpaddq          $TEMP1, $ACC3, $ACC3
551         vpmuludq        $Y2, $TEMP2, $TEMP2
552         vmovdqu         32*6-8-128($np), $TEMP1
553          mov    %rax, $r3
554          imull  $n0, %eax
555         vpaddq          $TEMP2, $ACC4, $ACC4
556         vpmuludq        $Y2, $TEMP0, $TEMP0
557         vmovdqu         32*7-8-128($np), $TEMP2
558          and    \$0x1fffffff, %eax
559         vpaddq          $TEMP0, $ACC5, $ACC5
560         vpmuludq        $Y2, $TEMP1, $TEMP1
561         vmovdqu         32*8-8-128($np), $TEMP0
562         vpaddq          $TEMP1, $ACC6, $ACC6
563         vpmuludq        $Y2, $TEMP2, $TEMP2
564         vmovdqu         32*9-8-128($np), $ACC9
565          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
566          imulq  -128($np), %rax
567         vpaddq          $TEMP2, $ACC7, $ACC7
568         vpmuludq        $Y2, $TEMP0, $TEMP0
569          vmovdqu        32*1-16-128($np), $TEMP1
570          vpbroadcastq   $ACC0, $ACC0
571         vpaddq          $TEMP0, $ACC8, $ACC8
572         vpmuludq        $Y2, $ACC9, $ACC9
573          vmovdqu        32*2-16-128($np), $TEMP2
574          add    %rax, $r3
575
576 ___
577 ($ACC0,$Y2)=($Y2,$ACC0);
578 $code.=<<___;
579          vmovdqu        32*1-24-128($np), $ACC0
580         vpmuludq        $Y1, $TEMP1, $TEMP1
581         vmovdqu         32*3-16-128($np), $TEMP0
582         vpaddq          $TEMP1, $ACC1, $ACC1
583          vpmuludq       $Y2, $ACC0, $ACC0
584         vpmuludq        $Y1, $TEMP2, $TEMP2
585         vmovdqu         32*4-16-128($np), $TEMP1
586          vpaddq         $ACC1, $ACC0, $ACC0
587         vpaddq          $TEMP2, $ACC2, $ACC2
588         vpmuludq        $Y1, $TEMP0, $TEMP0
589         vmovdqu         32*5-16-128($np), $TEMP2
590          vmovq          $ACC0, %rax
591          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
592         vpaddq          $TEMP0, $ACC3, $ACC3
593         vpmuludq        $Y1, $TEMP1, $TEMP1
594         vmovdqu         32*6-16-128($np), $TEMP0
595         vpaddq          $TEMP1, $ACC4, $ACC4
596         vpmuludq        $Y1, $TEMP2, $TEMP2
597         vmovdqu         32*7-16-128($np), $TEMP1
598         vpaddq          $TEMP2, $ACC5, $ACC5
599         vpmuludq        $Y1, $TEMP0, $TEMP0
600         vmovdqu         32*8-16-128($np), $TEMP2
601         vpaddq          $TEMP0, $ACC6, $ACC6
602         vpmuludq        $Y1, $TEMP1, $TEMP1
603         vmovdqu         32*9-16-128($np), $TEMP0
604          shr    \$29, $r3
605         vpaddq          $TEMP1, $ACC7, $ACC7
606         vpmuludq        $Y1, $TEMP2, $TEMP2
607          vmovdqu        32*2-24-128($np), $TEMP1
608          add    $r3, %rax
609          mov    %rax, $r0
610          imull  $n0, %eax
611         vpaddq          $TEMP2, $ACC8, $ACC8
612         vpmuludq        $Y1, $TEMP0, $TEMP0
613          and    \$0x1fffffff, %eax
614          vmovd  %eax, $Y1
615          vmovdqu        32*3-24-128($np), $TEMP2
616         vpaddq          $TEMP0, $ACC9, $ACC9
617          vpbroadcastq   $Y1, $Y1
618
619         vpmuludq        $Y2, $TEMP1, $TEMP1
620         vmovdqu         32*4-24-128($np), $TEMP0
621          mov    %rax, %rdx
622          imulq  -128($np), %rax
623          mov    8(%rsp), $r1
624         vpaddq          $TEMP1, $ACC2, $ACC1
625         vpmuludq        $Y2, $TEMP2, $TEMP2
626         vmovdqu         32*5-24-128($np), $TEMP1
627          add    %rax, $r0
628          mov    %rdx, %rax
629          imulq  8-128($np), %rax
630          shr    \$29, $r0
631          mov    16(%rsp), $r2
632         vpaddq          $TEMP2, $ACC3, $ACC2
633         vpmuludq        $Y2, $TEMP0, $TEMP0
634         vmovdqu         32*6-24-128($np), $TEMP2
635          add    %rax, $r1
636          mov    %rdx, %rax
637          imulq  16-128($np), %rax
638         vpaddq          $TEMP0, $ACC4, $ACC3
639         vpmuludq        $Y2, $TEMP1, $TEMP1
640         vmovdqu         32*7-24-128($np), $TEMP0
641          imulq  24-128($np), %rdx               # future $r3
642          add    %rax, $r2
643          lea    ($r0,$r1), %rax
644         vpaddq          $TEMP1, $ACC5, $ACC4
645         vpmuludq        $Y2, $TEMP2, $TEMP2
646         vmovdqu         32*8-24-128($np), $TEMP1
647          mov    %rax, $r1
648          imull  $n0, %eax
649         vpaddq          $TEMP2, $ACC6, $ACC5
650         vpmuludq        $Y2, $TEMP0, $TEMP0
651         vmovdqu         32*9-24-128($np), $TEMP2
652          and    \$0x1fffffff, %eax
653         vpaddq          $TEMP0, $ACC7, $ACC6
654         vpmuludq        $Y2, $TEMP1, $TEMP1
655          add    24(%rsp), %rdx
656         vpaddq          $TEMP1, $ACC8, $ACC7
657         vpmuludq        $Y2, $TEMP2, $TEMP2
658         vpaddq          $TEMP2, $ACC9, $ACC8
659          vmovq  $r3, $ACC9
660          mov    %rdx, $r3
661
662         dec     $i
663         jnz     .LOOP_REDUCE_1024
664 ___
665 ($ACC0,$Y2)=($Y2,$ACC0);
666 $code.=<<___;
667         lea     448(%rsp), $tp1                 # size optimization
668         vpaddq  $ACC9, $Y2, $ACC0
669         vpxor   $ZERO, $ZERO, $ZERO
670
671         vpaddq          32*9-192($tp0), $ACC0, $ACC0
672         vpaddq          32*10-448($tp1), $ACC1, $ACC1
673         vpaddq          32*11-448($tp1), $ACC2, $ACC2
674         vpaddq          32*12-448($tp1), $ACC3, $ACC3
675         vpaddq          32*13-448($tp1), $ACC4, $ACC4
676         vpaddq          32*14-448($tp1), $ACC5, $ACC5
677         vpaddq          32*15-448($tp1), $ACC6, $ACC6
678         vpaddq          32*16-448($tp1), $ACC7, $ACC7
679         vpaddq          32*17-448($tp1), $ACC8, $ACC8
680
681         vpsrlq          \$29, $ACC0, $TEMP1
682         vpand           $AND_MASK, $ACC0, $ACC0
683         vpsrlq          \$29, $ACC1, $TEMP2
684         vpand           $AND_MASK, $ACC1, $ACC1
685         vpsrlq          \$29, $ACC2, $TEMP3
686         vpermq          \$0x93, $TEMP1, $TEMP1
687         vpand           $AND_MASK, $ACC2, $ACC2
688         vpsrlq          \$29, $ACC3, $TEMP4
689         vpermq          \$0x93, $TEMP2, $TEMP2
690         vpand           $AND_MASK, $ACC3, $ACC3
691         vpermq          \$0x93, $TEMP3, $TEMP3
692
693         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
694         vpermq          \$0x93, $TEMP4, $TEMP4
695         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
696         vpaddq          $TEMP0, $ACC0, $ACC0
697         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
698         vpaddq          $TEMP1, $ACC1, $ACC1
699         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
700         vpaddq          $TEMP2, $ACC2, $ACC2
701         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
702         vpaddq          $TEMP3, $ACC3, $ACC3
703         vpaddq          $TEMP4, $ACC4, $ACC4
704
705         vpsrlq          \$29, $ACC0, $TEMP1
706         vpand           $AND_MASK, $ACC0, $ACC0
707         vpsrlq          \$29, $ACC1, $TEMP2
708         vpand           $AND_MASK, $ACC1, $ACC1
709         vpsrlq          \$29, $ACC2, $TEMP3
710         vpermq          \$0x93, $TEMP1, $TEMP1
711         vpand           $AND_MASK, $ACC2, $ACC2
712         vpsrlq          \$29, $ACC3, $TEMP4
713         vpermq          \$0x93, $TEMP2, $TEMP2
714         vpand           $AND_MASK, $ACC3, $ACC3
715         vpermq          \$0x93, $TEMP3, $TEMP3
716
717         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
718         vpermq          \$0x93, $TEMP4, $TEMP4
719         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
720         vpaddq          $TEMP0, $ACC0, $ACC0
721         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
722         vpaddq          $TEMP1, $ACC1, $ACC1
723         vmovdqu         $ACC0, 32*0-128($rp)
724         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
725         vpaddq          $TEMP2, $ACC2, $ACC2
726         vmovdqu         $ACC1, 32*1-128($rp)
727         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
728         vpaddq          $TEMP3, $ACC3, $ACC3
729         vmovdqu         $ACC2, 32*2-128($rp)
730         vpaddq          $TEMP4, $ACC4, $ACC4
731         vmovdqu         $ACC3, 32*3-128($rp)
732 ___
733 $TEMP5=$ACC0;
734 $code.=<<___;
735         vpsrlq          \$29, $ACC4, $TEMP1
736         vpand           $AND_MASK, $ACC4, $ACC4
737         vpsrlq          \$29, $ACC5, $TEMP2
738         vpand           $AND_MASK, $ACC5, $ACC5
739         vpsrlq          \$29, $ACC6, $TEMP3
740         vpermq          \$0x93, $TEMP1, $TEMP1
741         vpand           $AND_MASK, $ACC6, $ACC6
742         vpsrlq          \$29, $ACC7, $TEMP4
743         vpermq          \$0x93, $TEMP2, $TEMP2
744         vpand           $AND_MASK, $ACC7, $ACC7
745         vpsrlq          \$29, $ACC8, $TEMP5
746         vpermq          \$0x93, $TEMP3, $TEMP3
747         vpand           $AND_MASK, $ACC8, $ACC8
748         vpermq          \$0x93, $TEMP4, $TEMP4
749
750         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
751         vpermq          \$0x93, $TEMP5, $TEMP5
752         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
753         vpaddq          $TEMP0, $ACC4, $ACC4
754         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
755         vpaddq          $TEMP1, $ACC5, $ACC5
756         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
757         vpaddq          $TEMP2, $ACC6, $ACC6
758         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
759         vpaddq          $TEMP3, $ACC7, $ACC7
760         vpaddq          $TEMP4, $ACC8, $ACC8
761      
762         vpsrlq          \$29, $ACC4, $TEMP1
763         vpand           $AND_MASK, $ACC4, $ACC4
764         vpsrlq          \$29, $ACC5, $TEMP2
765         vpand           $AND_MASK, $ACC5, $ACC5
766         vpsrlq          \$29, $ACC6, $TEMP3
767         vpermq          \$0x93, $TEMP1, $TEMP1
768         vpand           $AND_MASK, $ACC6, $ACC6
769         vpsrlq          \$29, $ACC7, $TEMP4
770         vpermq          \$0x93, $TEMP2, $TEMP2
771         vpand           $AND_MASK, $ACC7, $ACC7
772         vpsrlq          \$29, $ACC8, $TEMP5
773         vpermq          \$0x93, $TEMP3, $TEMP3
774         vpand           $AND_MASK, $ACC8, $ACC8
775         vpermq          \$0x93, $TEMP4, $TEMP4
776
777         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
778         vpermq          \$0x93, $TEMP5, $TEMP5
779         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
780         vpaddq          $TEMP0, $ACC4, $ACC4
781         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
782         vpaddq          $TEMP1, $ACC5, $ACC5
783         vmovdqu         $ACC4, 32*4-128($rp)
784         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
785         vpaddq          $TEMP2, $ACC6, $ACC6
786         vmovdqu         $ACC5, 32*5-128($rp)
787         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
788         vpaddq          $TEMP3, $ACC7, $ACC7
789         vmovdqu         $ACC6, 32*6-128($rp)
790         vpaddq          $TEMP4, $ACC8, $ACC8
791         vmovdqu         $ACC7, 32*7-128($rp)
792         vmovdqu         $ACC8, 32*8-128($rp)
793
794         mov     $rp, $ap
795         dec     $rep
796         jne     .LOOP_GRANDE_SQR_1024
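        # (the freshly written result becomes the input of the next pass, i.e. the
        #  routine performs $rep back-to-back almost-Montgomery squarings)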
797
798         vzeroall
799         mov     %rbp, %rax
800 ___
801 $code.=<<___ if ($win64);
802         movaps  -0xd8(%rax),%xmm6
803         movaps  -0xc8(%rax),%xmm7
804         movaps  -0xb8(%rax),%xmm8
805         movaps  -0xa8(%rax),%xmm9
806         movaps  -0x98(%rax),%xmm10
807         movaps  -0x88(%rax),%xmm11
808         movaps  -0x78(%rax),%xmm12
809         movaps  -0x68(%rax),%xmm13
810         movaps  -0x58(%rax),%xmm14
811         movaps  -0x48(%rax),%xmm15
812 ___
813 $code.=<<___;
814         mov     -48(%rax),%r15
815         mov     -40(%rax),%r14
816         mov     -32(%rax),%r13
817         mov     -24(%rax),%r12
818         mov     -16(%rax),%rbp
819         mov     -8(%rax),%rbx
820         lea     (%rax),%rsp             # restore %rsp
821 .Lsqr_1024_epilogue:
822         ret
823 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
824 ___
825 }
826
827 { # void AMM_WW(
828 my $rp="%rdi";  # BN_ULONG *rp,
829 my $ap="%rsi";  # const BN_ULONG *ap,
830 my $bp="%rdx";  # const BN_ULONG *bp,
831 my $np="%rcx";  # const BN_ULONG *np,
832 my $n0="%r8d";  # unsigned int n0);
833
834 # The registers that hold the accumulated redundant result
835 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
836 # Therefore: ceil(1024/29) = 36 digits, four per %ymm register => 36/4 = 9 registers
837 my $ACC0="%ymm0";
838 my $ACC1="%ymm1";
839 my $ACC2="%ymm2";
840 my $ACC3="%ymm3";
841 my $ACC4="%ymm4";
842 my $ACC5="%ymm5";
843 my $ACC6="%ymm6";
844 my $ACC7="%ymm7";
845 my $ACC8="%ymm8";
846 my $ACC9="%ymm9";
847
848 # Registers that hold the broadcasted words of multiplier, currently used
849 my $Bi="%ymm10";
850 my $Yi="%ymm11";
851
852 # Helper registers
853 my $TEMP0=$ACC0;
854 my $TEMP1="%ymm12";
855 my $TEMP2="%ymm13";
856 my $ZERO="%ymm14";
857 my $AND_MASK="%ymm15";
858
859 # alu registers that hold the first words of the ACC
860 my $r0="%r9";
861 my $r1="%r10";
862 my $r2="%r11";
863 my $r3="%r12";
864
865 my $i="%r14d";
866 my $tmp="%r15";
867
868 $bp="%r13";     # reassigned argument
869
870 $code.=<<___;
871 .globl  rsaz_1024_mul_avx2
872 .type   rsaz_1024_mul_avx2,\@function,5
873 .align  64
874 rsaz_1024_mul_avx2:
875         lea     (%rsp), %rax
876         push    %rbx
877         push    %rbp
878         push    %r12
879         push    %r13
880         push    %r14
881         push    %r15
882 ___
883 $code.=<<___ if ($win64);
884         lea     -0xa8(%rsp),%rsp
885         movaps  %xmm6,-0xd8(%rax)
886         movaps  %xmm7,-0xc8(%rax)
887         movaps  %xmm8,-0xb8(%rax)
888         movaps  %xmm9,-0xa8(%rax)
889         movaps  %xmm10,-0x98(%rax)
890         movaps  %xmm11,-0x88(%rax)
891         movaps  %xmm12,-0x78(%rax)
892         movaps  %xmm13,-0x68(%rax)
893         movaps  %xmm14,-0x58(%rax)
894         movaps  %xmm15,-0x48(%rax)
895 .Lmul_1024_body:
896 ___
897 $code.=<<___;
898         mov     %rax,%rbp
899         vzeroall
900         mov     %rdx, $bp       # reassigned argument
901         sub     \$64,%rsp
902
903         # unaligned 256-bit load that crosses page boundary can
904         # cause severe performance degradation here, so if $ap does
905         # cross page boundary, swap it with $bp [meaning that caller
906         # is advised to lay down $ap and $bp next to each other, so
907         # that only one can cross page boundary].
908         mov     $ap, $tmp
909         and     \$4095, $tmp
910         add     \$32*10, $tmp
911         shr     \$12, $tmp
912         mov     $ap, $tmp
913         cmovnz  $bp, $ap
914         cmovnz  $tmp, $bp
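        # (the ZF left by the shr above drives both cmovnz instructions: if $ap
        #  straddles a 4KB page, $ap and $bp are swapped; mov does not alter flags)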
915
916         mov     $np, $tmp
917         sub     \$-128,$ap      # size optimization
918         sub     \$-128,$np
919         sub     \$-128,$rp
920
921         and     \$4095, $tmp    # see if $np crosses page
922         add     \$32*10, $tmp
923         shr     \$12, $tmp
924         jz      .Lmul_1024_no_n_copy
925
926         # unaligned 256-bit load that crosses page boundary can
927         # cause severe performance degradation here, so if $np does
928         # cross page boundary, copy it to stack and make sure stack
929         # frame doesn't...
930         sub             \$32*10,%rsp
931         vmovdqu         32*0-128($np), $ACC0
932         and             \$-512, %rsp
933         vmovdqu         32*1-128($np), $ACC1
934         vmovdqu         32*2-128($np), $ACC2
935         vmovdqu         32*3-128($np), $ACC3
936         vmovdqu         32*4-128($np), $ACC4
937         vmovdqu         32*5-128($np), $ACC5
938         vmovdqu         32*6-128($np), $ACC6
939         vmovdqu         32*7-128($np), $ACC7
940         vmovdqu         32*8-128($np), $ACC8
941         lea             64+128(%rsp),$np
942         vmovdqu         $ACC0, 32*0-128($np)
943         vpxor           $ACC0, $ACC0, $ACC0
944         vmovdqu         $ACC1, 32*1-128($np)
945         vpxor           $ACC1, $ACC1, $ACC1
946         vmovdqu         $ACC2, 32*2-128($np)
947         vpxor           $ACC2, $ACC2, $ACC2
948         vmovdqu         $ACC3, 32*3-128($np)
949         vpxor           $ACC3, $ACC3, $ACC3
950         vmovdqu         $ACC4, 32*4-128($np)
951         vpxor           $ACC4, $ACC4, $ACC4
952         vmovdqu         $ACC5, 32*5-128($np)
953         vpxor           $ACC5, $ACC5, $ACC5
954         vmovdqu         $ACC6, 32*6-128($np)
955         vpxor           $ACC6, $ACC6, $ACC6
956         vmovdqu         $ACC7, 32*7-128($np)
957         vpxor           $ACC7, $ACC7, $ACC7
958         vmovdqu         $ACC8, 32*8-128($np)
959         vmovdqa         $ACC0, $ACC8
960         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
961 .Lmul_1024_no_n_copy:
962         and     \$-64,%rsp
963
964         mov     ($bp), %rbx
965         vpbroadcastq ($bp), $Bi
966         vmovdqu $ACC0, (%rsp)                   # clear top of stack
967         xor     $r0, $r0
968         xor     $r1, $r1
969         xor     $r2, $r2
970         xor     $r3, $r3
971
972         vmovdqu .Land_mask(%rip), $AND_MASK
973         mov     \$9, $i
974         jmp     .Loop_mul_1024
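        # (each pass of .Loop_mul_1024 consumes four 64-bit words of $bp, i.e. four
        #  29-bit digits of the multiplier; 9 passes cover all 36 digits)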
975
976 .align  32
977 .Loop_mul_1024:
978          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
979         mov     %rbx, %rax
980         imulq   -128($ap), %rax
981         add     $r0, %rax
982         mov     %rbx, $r1
983         imulq   8-128($ap), $r1
984         add     8(%rsp), $r1
985
986         mov     %rax, $r0
987         imull   $n0, %eax
988         and     \$0x1fffffff, %eax
989
990          mov    %rbx, $r2
991          imulq  16-128($ap), $r2
992          add    16(%rsp), $r2
993
994          mov    %rbx, $r3
995          imulq  24-128($ap), $r3
996          add    24(%rsp), $r3
997         vpmuludq        32*1-128($ap),$Bi,$TEMP0
998          vmovd          %eax, $Yi
999         vpaddq          $TEMP0,$ACC1,$ACC1
1000         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1001          vpbroadcastq   $Yi, $Yi
1002         vpaddq          $TEMP1,$ACC2,$ACC2
1003         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1004          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1005         vpaddq          $TEMP2,$ACC3,$ACC3
1006         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1007         vpaddq          $TEMP0,$ACC4,$ACC4
1008         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1009         vpaddq          $TEMP1,$ACC5,$ACC5
1010         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1011         vpaddq          $TEMP2,$ACC6,$ACC6
1012         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1013          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1014         vpaddq          $TEMP0,$ACC7,$ACC7
1015         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1016          vpbroadcastq   8($bp), $Bi
1017         vpaddq          $TEMP1,$ACC8,$ACC8
1018
1019         mov     %rax,%rdx
1020         imulq   -128($np),%rax
1021         add     %rax,$r0
1022         mov     %rdx,%rax
1023         imulq   8-128($np),%rax
1024         add     %rax,$r1
1025         mov     %rdx,%rax
1026         imulq   16-128($np),%rax
1027         add     %rax,$r2
1028         shr     \$29, $r0
1029         imulq   24-128($np),%rdx
1030         add     %rdx,$r3
1031         add     $r0, $r1
1032
1033         vpmuludq        32*1-128($np),$Yi,$TEMP2
1034          vmovq          $Bi, %rbx
1035         vpaddq          $TEMP2,$ACC1,$ACC1
1036         vpmuludq        32*2-128($np),$Yi,$TEMP0
1037         vpaddq          $TEMP0,$ACC2,$ACC2
1038         vpmuludq        32*3-128($np),$Yi,$TEMP1
1039         vpaddq          $TEMP1,$ACC3,$ACC3
1040         vpmuludq        32*4-128($np),$Yi,$TEMP2
1041         vpaddq          $TEMP2,$ACC4,$ACC4
1042         vpmuludq        32*5-128($np),$Yi,$TEMP0
1043         vpaddq          $TEMP0,$ACC5,$ACC5
1044         vpmuludq        32*6-128($np),$Yi,$TEMP1
1045         vpaddq          $TEMP1,$ACC6,$ACC6
1046         vpmuludq        32*7-128($np),$Yi,$TEMP2
1047          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1048         vpaddq          $TEMP2,$ACC7,$ACC7
1049         vpmuludq        32*8-128($np),$Yi,$TEMP0
1050          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1051         vpaddq          $TEMP0,$ACC8,$ACC8
1052
1053         mov     %rbx, %rax
1054         imulq   -128($ap),%rax
1055         add     %rax,$r1
1056          vmovdqu        -8+32*1-128($ap),$TEMP1
1057         mov     %rbx, %rax
1058         imulq   8-128($ap),%rax
1059         add     %rax,$r2
1060          vmovdqu        -8+32*2-128($ap),$TEMP2
1061
1062         mov     $r1, %rax
1063         imull   $n0, %eax
1064         and     \$0x1fffffff, %eax
1065
1066          imulq  16-128($ap),%rbx
1067          add    %rbx,$r3
1068         vpmuludq        $Bi,$TEMP1,$TEMP1
1069          vmovd          %eax, $Yi
1070         vmovdqu         -8+32*3-128($ap),$TEMP0
1071         vpaddq          $TEMP1,$ACC1,$ACC1
1072         vpmuludq        $Bi,$TEMP2,$TEMP2
1073          vpbroadcastq   $Yi, $Yi
1074         vmovdqu         -8+32*4-128($ap),$TEMP1
1075         vpaddq          $TEMP2,$ACC2,$ACC2
1076         vpmuludq        $Bi,$TEMP0,$TEMP0
1077         vmovdqu         -8+32*5-128($ap),$TEMP2
1078         vpaddq          $TEMP0,$ACC3,$ACC3
1079         vpmuludq        $Bi,$TEMP1,$TEMP1
1080         vmovdqu         -8+32*6-128($ap),$TEMP0
1081         vpaddq          $TEMP1,$ACC4,$ACC4
1082         vpmuludq        $Bi,$TEMP2,$TEMP2
1083         vmovdqu         -8+32*7-128($ap),$TEMP1
1084         vpaddq          $TEMP2,$ACC5,$ACC5
1085         vpmuludq        $Bi,$TEMP0,$TEMP0
1086         vmovdqu         -8+32*8-128($ap),$TEMP2
1087         vpaddq          $TEMP0,$ACC6,$ACC6
1088         vpmuludq        $Bi,$TEMP1,$TEMP1
1089         vmovdqu         -8+32*9-128($ap),$ACC9
1090         vpaddq          $TEMP1,$ACC7,$ACC7
1091         vpmuludq        $Bi,$TEMP2,$TEMP2
1092         vpaddq          $TEMP2,$ACC8,$ACC8
1093         vpmuludq        $Bi,$ACC9,$ACC9
1094          vpbroadcastq   16($bp), $Bi
1095
1096         mov     %rax,%rdx
1097         imulq   -128($np),%rax
1098         add     %rax,$r1
1099          vmovdqu        -8+32*1-128($np),$TEMP0
1100         mov     %rdx,%rax
1101         imulq   8-128($np),%rax
1102         add     %rax,$r2
1103          vmovdqu        -8+32*2-128($np),$TEMP1
1104         shr     \$29, $r1
1105         imulq   16-128($np),%rdx
1106         add     %rdx,$r3
1107         add     $r1, $r2
1108
1109         vpmuludq        $Yi,$TEMP0,$TEMP0
1110          vmovq          $Bi, %rbx
1111         vmovdqu         -8+32*3-128($np),$TEMP2
1112         vpaddq          $TEMP0,$ACC1,$ACC1
1113         vpmuludq        $Yi,$TEMP1,$TEMP1
1114         vmovdqu         -8+32*4-128($np),$TEMP0
1115         vpaddq          $TEMP1,$ACC2,$ACC2
1116         vpmuludq        $Yi,$TEMP2,$TEMP2
1117         vmovdqu         -8+32*5-128($np),$TEMP1
1118         vpaddq          $TEMP2,$ACC3,$ACC3
1119         vpmuludq        $Yi,$TEMP0,$TEMP0
1120         vmovdqu         -8+32*6-128($np),$TEMP2
1121         vpaddq          $TEMP0,$ACC4,$ACC4
1122         vpmuludq        $Yi,$TEMP1,$TEMP1
1123         vmovdqu         -8+32*7-128($np),$TEMP0
1124         vpaddq          $TEMP1,$ACC5,$ACC5
1125         vpmuludq        $Yi,$TEMP2,$TEMP2
1126         vmovdqu         -8+32*8-128($np),$TEMP1
1127         vpaddq          $TEMP2,$ACC6,$ACC6
1128         vpmuludq        $Yi,$TEMP0,$TEMP0
1129         vmovdqu         -8+32*9-128($np),$TEMP2
1130         vpaddq          $TEMP0,$ACC7,$ACC7
1131         vpmuludq        $Yi,$TEMP1,$TEMP1
1132         vpaddq          $TEMP1,$ACC8,$ACC8
1133         vpmuludq        $Yi,$TEMP2,$TEMP2
1134         vpaddq          $TEMP2,$ACC9,$ACC9
1135
1136          vmovdqu        -16+32*1-128($ap),$TEMP0
1137         mov     %rbx,%rax
1138         imulq   -128($ap),%rax
1139         add     $r2,%rax
1140
1141          vmovdqu        -16+32*2-128($ap),$TEMP1
1142         mov     %rax,$r2
1143         imull   $n0, %eax
1144         and     \$0x1fffffff, %eax
1145
1146          imulq  8-128($ap),%rbx
1147          add    %rbx,$r3
1148         vpmuludq        $Bi,$TEMP0,$TEMP0
1149          vmovd          %eax, $Yi
1150         vmovdqu         -16+32*3-128($ap),$TEMP2
1151         vpaddq          $TEMP0,$ACC1,$ACC1
1152         vpmuludq        $Bi,$TEMP1,$TEMP1
1153          vpbroadcastq   $Yi, $Yi
1154         vmovdqu         -16+32*4-128($ap),$TEMP0
1155         vpaddq          $TEMP1,$ACC2,$ACC2
1156         vpmuludq        $Bi,$TEMP2,$TEMP2
1157         vmovdqu         -16+32*5-128($ap),$TEMP1
1158         vpaddq          $TEMP2,$ACC3,$ACC3
1159         vpmuludq        $Bi,$TEMP0,$TEMP0
1160         vmovdqu         -16+32*6-128($ap),$TEMP2
1161         vpaddq          $TEMP0,$ACC4,$ACC4
1162         vpmuludq        $Bi,$TEMP1,$TEMP1
1163         vmovdqu         -16+32*7-128($ap),$TEMP0
1164         vpaddq          $TEMP1,$ACC5,$ACC5
1165         vpmuludq        $Bi,$TEMP2,$TEMP2
1166         vmovdqu         -16+32*8-128($ap),$TEMP1
1167         vpaddq          $TEMP2,$ACC6,$ACC6
1168         vpmuludq        $Bi,$TEMP0,$TEMP0
1169         vmovdqu         -16+32*9-128($ap),$TEMP2
1170         vpaddq          $TEMP0,$ACC7,$ACC7
1171         vpmuludq        $Bi,$TEMP1,$TEMP1
1172         vpaddq          $TEMP1,$ACC8,$ACC8
1173         vpmuludq        $Bi,$TEMP2,$TEMP2
1174          vpbroadcastq   24($bp), $Bi
1175         vpaddq          $TEMP2,$ACC9,$ACC9
1176
1177          vmovdqu        -16+32*1-128($np),$TEMP0
1178         mov     %rax,%rdx
1179         imulq   -128($np),%rax
1180         add     %rax,$r2
1181          vmovdqu        -16+32*2-128($np),$TEMP1
1182         imulq   8-128($np),%rdx
1183         add     %rdx,$r3
1184         shr     \$29, $r2
1185
1186         vpmuludq        $Yi,$TEMP0,$TEMP0
1187          vmovq          $Bi, %rbx
1188         vmovdqu         -16+32*3-128($np),$TEMP2
1189         vpaddq          $TEMP0,$ACC1,$ACC1
1190         vpmuludq        $Yi,$TEMP1,$TEMP1
1191         vmovdqu         -16+32*4-128($np),$TEMP0
1192         vpaddq          $TEMP1,$ACC2,$ACC2
1193         vpmuludq        $Yi,$TEMP2,$TEMP2
1194         vmovdqu         -16+32*5-128($np),$TEMP1
1195         vpaddq          $TEMP2,$ACC3,$ACC3
1196         vpmuludq        $Yi,$TEMP0,$TEMP0
1197         vmovdqu         -16+32*6-128($np),$TEMP2
1198         vpaddq          $TEMP0,$ACC4,$ACC4
1199         vpmuludq        $Yi,$TEMP1,$TEMP1
1200         vmovdqu         -16+32*7-128($np),$TEMP0
1201         vpaddq          $TEMP1,$ACC5,$ACC5
1202         vpmuludq        $Yi,$TEMP2,$TEMP2
1203         vmovdqu         -16+32*8-128($np),$TEMP1
1204         vpaddq          $TEMP2,$ACC6,$ACC6
1205         vpmuludq        $Yi,$TEMP0,$TEMP0
1206         vmovdqu         -16+32*9-128($np),$TEMP2
1207         vpaddq          $TEMP0,$ACC7,$ACC7
1208         vpmuludq        $Yi,$TEMP1,$TEMP1
1209          vmovdqu        -24+32*1-128($ap),$TEMP0
1210         vpaddq          $TEMP1,$ACC8,$ACC8
1211         vpmuludq        $Yi,$TEMP2,$TEMP2
1212          vmovdqu        -24+32*2-128($ap),$TEMP1
1213         vpaddq          $TEMP2,$ACC9,$ACC9
1214
1215         add     $r2, $r3
1216         imulq   -128($ap),%rbx
1217         add     %rbx,$r3
1218
1219         mov     $r3, %rax
1220         imull   $n0, %eax
1221         and     \$0x1fffffff, %eax
1222
1223         vpmuludq        $Bi,$TEMP0,$TEMP0
1224          vmovd          %eax, $Yi
1225         vmovdqu         -24+32*3-128($ap),$TEMP2
1226         vpaddq          $TEMP0,$ACC1,$ACC1
1227         vpmuludq        $Bi,$TEMP1,$TEMP1
1228          vpbroadcastq   $Yi, $Yi
1229         vmovdqu         -24+32*4-128($ap),$TEMP0
1230         vpaddq          $TEMP1,$ACC2,$ACC2
1231         vpmuludq        $Bi,$TEMP2,$TEMP2
1232         vmovdqu         -24+32*5-128($ap),$TEMP1
1233         vpaddq          $TEMP2,$ACC3,$ACC3
1234         vpmuludq        $Bi,$TEMP0,$TEMP0
1235         vmovdqu         -24+32*6-128($ap),$TEMP2
1236         vpaddq          $TEMP0,$ACC4,$ACC4
1237         vpmuludq        $Bi,$TEMP1,$TEMP1
1238         vmovdqu         -24+32*7-128($ap),$TEMP0
1239         vpaddq          $TEMP1,$ACC5,$ACC5
1240         vpmuludq        $Bi,$TEMP2,$TEMP2
1241         vmovdqu         -24+32*8-128($ap),$TEMP1
1242         vpaddq          $TEMP2,$ACC6,$ACC6
1243         vpmuludq        $Bi,$TEMP0,$TEMP0
1244         vmovdqu         -24+32*9-128($ap),$TEMP2
1245         vpaddq          $TEMP0,$ACC7,$ACC7
1246         vpmuludq        $Bi,$TEMP1,$TEMP1
1247         vpaddq          $TEMP1,$ACC8,$ACC8
1248         vpmuludq        $Bi,$TEMP2,$TEMP2
1249          vpbroadcastq   32($bp), $Bi
1250         vpaddq          $TEMP2,$ACC9,$ACC9
1251          add            \$32, $bp                       # $bp++
1252
1253         vmovdqu         -24+32*1-128($np),$TEMP0
1254         imulq   -128($np),%rax
1255         add     %rax,$r3
1256         shr     \$29, $r3
1257
1258         vmovdqu         -24+32*2-128($np),$TEMP1
1259         vpmuludq        $Yi,$TEMP0,$TEMP0
1260          vmovq          $Bi, %rbx
1261         vmovdqu         -24+32*3-128($np),$TEMP2
1262         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1263         vpmuludq        $Yi,$TEMP1,$TEMP1
1264          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1265         vpaddq          $TEMP1,$ACC2,$ACC1
1266         vmovdqu         -24+32*4-128($np),$TEMP0
1267         vpmuludq        $Yi,$TEMP2,$TEMP2
1268         vmovdqu         -24+32*5-128($np),$TEMP1
1269         vpaddq          $TEMP2,$ACC3,$ACC2
1270         vpmuludq        $Yi,$TEMP0,$TEMP0
1271         vmovdqu         -24+32*6-128($np),$TEMP2
1272         vpaddq          $TEMP0,$ACC4,$ACC3
1273         vpmuludq        $Yi,$TEMP1,$TEMP1
1274         vmovdqu         -24+32*7-128($np),$TEMP0
1275         vpaddq          $TEMP1,$ACC5,$ACC4
1276         vpmuludq        $Yi,$TEMP2,$TEMP2
1277         vmovdqu         -24+32*8-128($np),$TEMP1
1278         vpaddq          $TEMP2,$ACC6,$ACC5
1279         vpmuludq        $Yi,$TEMP0,$TEMP0
1280         vmovdqu         -24+32*9-128($np),$TEMP2
1281          mov    $r3, $r0
1282         vpaddq          $TEMP0,$ACC7,$ACC6
1283         vpmuludq        $Yi,$TEMP1,$TEMP1
1284          add    (%rsp), $r0
1285         vpaddq          $TEMP1,$ACC8,$ACC7
1286         vpmuludq        $Yi,$TEMP2,$TEMP2
1287          vmovq  $r3, $TEMP1
1288         vpaddq          $TEMP2,$ACC9,$ACC8
1289
1290         dec     $i
1291         jnz     .Loop_mul_1024
1292 ___
1293
1294 # (*)   Original implementation was correcting ACC1-ACC3 for overflow
1295 #       after 7 loop runs, or after 28 iterations, or 56 additions.
1296 #       But as we underutilize resources, it's possible to correct in
1297 #       each iteration with marginal performance loss. But then, as
1298 #       we do it in each iteration, we can correct fewer digits, and
1299 #       avoid performance penalties completely. Also note that we
1300 #       correct only three digits out of four. This works because
1301 #       the most significant digit is subjected to fewer additions.
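#       The correction itself is the usual carry propagation for this format:
#       mask each digit to 29 bits (vpand with .Land_mask), shift the carries
#       out (vpsrlq by 29) and add them into the next digit, rotated up one
#       64-bit lane with vpermq/vpblendd.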
1302
1303 $TEMP0 = $ACC9;
1304 $TEMP3 = $Bi;
1305 $TEMP4 = $Yi;
1306 $code.=<<___;
1307         vpermq          \$0, $AND_MASK, $AND_MASK
1308         vpaddq          (%rsp), $TEMP1, $ACC0
1309
1310         vpsrlq          \$29, $ACC0, $TEMP1
1311         vpand           $AND_MASK, $ACC0, $ACC0
1312         vpsrlq          \$29, $ACC1, $TEMP2
1313         vpand           $AND_MASK, $ACC1, $ACC1
1314         vpsrlq          \$29, $ACC2, $TEMP3
1315         vpermq          \$0x93, $TEMP1, $TEMP1
1316         vpand           $AND_MASK, $ACC2, $ACC2
1317         vpsrlq          \$29, $ACC3, $TEMP4
1318         vpermq          \$0x93, $TEMP2, $TEMP2
1319         vpand           $AND_MASK, $ACC3, $ACC3
1320
1321         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1322         vpermq          \$0x93, $TEMP3, $TEMP3
1323         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1324         vpermq          \$0x93, $TEMP4, $TEMP4
1325         vpaddq          $TEMP0, $ACC0, $ACC0
1326         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1327         vpaddq          $TEMP1, $ACC1, $ACC1
1328         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1329         vpaddq          $TEMP2, $ACC2, $ACC2
1330         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1331         vpaddq          $TEMP3, $ACC3, $ACC3
1332         vpaddq          $TEMP4, $ACC4, $ACC4
1333
1334         vpsrlq          \$29, $ACC0, $TEMP1
1335         vpand           $AND_MASK, $ACC0, $ACC0
1336         vpsrlq          \$29, $ACC1, $TEMP2
1337         vpand           $AND_MASK, $ACC1, $ACC1
1338         vpsrlq          \$29, $ACC2, $TEMP3
1339         vpermq          \$0x93, $TEMP1, $TEMP1
1340         vpand           $AND_MASK, $ACC2, $ACC2
1341         vpsrlq          \$29, $ACC3, $TEMP4
1342         vpermq          \$0x93, $TEMP2, $TEMP2
1343         vpand           $AND_MASK, $ACC3, $ACC3
1344         vpermq          \$0x93, $TEMP3, $TEMP3
1345
1346         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1347         vpermq          \$0x93, $TEMP4, $TEMP4
1348         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1349         vpaddq          $TEMP0, $ACC0, $ACC0
1350         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1351         vpaddq          $TEMP1, $ACC1, $ACC1
1352         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1353         vpaddq          $TEMP2, $ACC2, $ACC2
1354         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1355         vpaddq          $TEMP3, $ACC3, $ACC3
1356         vpaddq          $TEMP4, $ACC4, $ACC4
1357
1358         vmovdqu         $ACC0, 0-128($rp)
1359         vmovdqu         $ACC1, 32-128($rp)
1360         vmovdqu         $ACC2, 64-128($rp)
1361         vmovdqu         $ACC3, 96-128($rp)
1362 ___
1363
1364 $TEMP5=$ACC0;
1365 $code.=<<___;
1366         vpsrlq          \$29, $ACC4, $TEMP1
1367         vpand           $AND_MASK, $ACC4, $ACC4
1368         vpsrlq          \$29, $ACC5, $TEMP2
1369         vpand           $AND_MASK, $ACC5, $ACC5
1370         vpsrlq          \$29, $ACC6, $TEMP3
1371         vpermq          \$0x93, $TEMP1, $TEMP1
1372         vpand           $AND_MASK, $ACC6, $ACC6
1373         vpsrlq          \$29, $ACC7, $TEMP4
1374         vpermq          \$0x93, $TEMP2, $TEMP2
1375         vpand           $AND_MASK, $ACC7, $ACC7
1376         vpsrlq          \$29, $ACC8, $TEMP5
1377         vpermq          \$0x93, $TEMP3, $TEMP3
1378         vpand           $AND_MASK, $ACC8, $ACC8
1379         vpermq          \$0x93, $TEMP4, $TEMP4
1380
1381         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1382         vpermq          \$0x93, $TEMP5, $TEMP5
1383         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1384         vpaddq          $TEMP0, $ACC4, $ACC4
1385         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1386         vpaddq          $TEMP1, $ACC5, $ACC5
1387         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1388         vpaddq          $TEMP2, $ACC6, $ACC6
1389         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1390         vpaddq          $TEMP3, $ACC7, $ACC7
1391         vpaddq          $TEMP4, $ACC8, $ACC8
1392
1393         vpsrlq          \$29, $ACC4, $TEMP1
1394         vpand           $AND_MASK, $ACC4, $ACC4
1395         vpsrlq          \$29, $ACC5, $TEMP2
1396         vpand           $AND_MASK, $ACC5, $ACC5
1397         vpsrlq          \$29, $ACC6, $TEMP3
1398         vpermq          \$0x93, $TEMP1, $TEMP1
1399         vpand           $AND_MASK, $ACC6, $ACC6
1400         vpsrlq          \$29, $ACC7, $TEMP4
1401         vpermq          \$0x93, $TEMP2, $TEMP2
1402         vpand           $AND_MASK, $ACC7, $ACC7
1403         vpsrlq          \$29, $ACC8, $TEMP5
1404         vpermq          \$0x93, $TEMP3, $TEMP3
1405         vpand           $AND_MASK, $ACC8, $ACC8
1406         vpermq          \$0x93, $TEMP4, $TEMP4
1407
1408         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1409         vpermq          \$0x93, $TEMP5, $TEMP5
1410         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1411         vpaddq          $TEMP0, $ACC4, $ACC4
1412         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1413         vpaddq          $TEMP1, $ACC5, $ACC5
1414         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1415         vpaddq          $TEMP2, $ACC6, $ACC6
1416         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1417         vpaddq          $TEMP3, $ACC7, $ACC7
1418         vpaddq          $TEMP4, $ACC8, $ACC8
1419
1420         vmovdqu         $ACC4, 128-128($rp)
1421         vmovdqu         $ACC5, 160-128($rp)    
1422         vmovdqu         $ACC6, 192-128($rp)
1423         vmovdqu         $ACC7, 224-128($rp)
1424         vmovdqu         $ACC8, 256-128($rp)
1425         vzeroupper
1426
1427         mov     %rbp, %rax
1428 ___
1429 $code.=<<___ if ($win64);
1430         movaps  -0xd8(%rax),%xmm6
1431         movaps  -0xc8(%rax),%xmm7
1432         movaps  -0xb8(%rax),%xmm8
1433         movaps  -0xa8(%rax),%xmm9
1434         movaps  -0x98(%rax),%xmm10
1435         movaps  -0x88(%rax),%xmm11
1436         movaps  -0x78(%rax),%xmm12
1437         movaps  -0x68(%rax),%xmm13
1438         movaps  -0x58(%rax),%xmm14
1439         movaps  -0x48(%rax),%xmm15
1440 ___
1441 $code.=<<___;
1442         mov     -48(%rax),%r15
1443         mov     -40(%rax),%r14
1444         mov     -32(%rax),%r13
1445         mov     -24(%rax),%r12
1446         mov     -16(%rax),%rbp
1447         mov     -8(%rax),%rbx
1448         lea     (%rax),%rsp             # restore %rsp
1449 .Lmul_1024_epilogue:
1450         ret
1451 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1452 ___
1453 }
1454 {
1455 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1456 my @T = map("%r$_",(8..11));
1457
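# rsaz_1024_red2norm_avx2 converts a value from the redundant representation
# used by the AVX2 code (29-bit digits, one per 64-bit word) back to the
# normal form of sixteen 64-bit words. As a minimal sketch of what the
# conversion computes, in plain Perl with Math::BigInt and hypothetical
# @d (input digits) and @words (output) arrays; the code generated below
# does the same job word by word with shifts and adds, without any
# big-number library:
#
#       use Math::BigInt;
#       my $N = Math::BigInt->bzero();
#       $N += Math::BigInt->new($d[$_]) << (29 * $_) for 0 .. $#d;
#       my @words = map { ($N >> (64 * $_)) & 0xffffffffffffffff } 0 .. 15;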
1458 $code.=<<___;
1459 .globl  rsaz_1024_red2norm_avx2
1460 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1461 .align  32
1462 rsaz_1024_red2norm_avx2:
1463         sub     \$-128,$inp     # size optimization
1464         xor     %rax,%rax
1465 ___
1466
1467 for ($j=0,$i=0; $i<16; $i++) {
1468     my $k=0;
1469     while (29*$j<64*($i+1)) {   # load data till boundary
1470         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1471         $j++; $k++; push(@T,shift(@T));
1472     }
1473     $l=$k;
1474     while ($k>1) {              # shift all loaded values but the last
1475         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1476         $k--;
1477     }
1478     $code.=<<___;               # shift last value
1479         mov     @T[-1], @T[0]
1480         shl     \$`29*($j-1)`, @T[-1]
1481         shr     \$`-29*($j-1)`, @T[0]
1482 ___
1483     while ($l) {                # accumulate all values
1484         $code.="        add     @T[-$l], %rax\n";
1485         $l--;
1486     }
1487         $code.=<<___;
1488         adc     \$0, @T[0]      # absorb any pending carry
1489         mov     %rax, 8*$i($out)
1490         mov     @T[0], %rax
1491 ___
1492     push(@T,shift(@T));
1493 }
1494 $code.=<<___;
1495         ret
1496 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1497
1498 .globl  rsaz_1024_norm2red_avx2
1499 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1500 .align  32
1501 rsaz_1024_norm2red_avx2:
1502         sub     \$-128,$out     # size optimization
1503         mov     ($inp),@T[0]
1504         mov     \$0x1fffffff,%eax
1505 ___
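# The loop below generates the inverse conversion: the 1024-bit input is cut
# into ceil(1024/29) = 36 digits of 29 bits each, one digit per output
# quadword, followed by a few quadwords of zero padding. A minimal sketch in
# plain Perl with Math::BigInt, assuming a hypothetical 1024-bit $N (the
# generated code works on plain 64-bit words with shrd instead):
#
#       my $mask = 0x1fffffff;                               # 2^29 - 1
#       my @d = map { ($N >> (29 * $_)) & $mask } 0 .. 35;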
1506 for ($j=0,$i=0; $i<16; $i++) {
1507     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1508     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1509     my $k=1;
1510     while (29*($j+1)<64*($i+1)) {
1511         $code.=<<___;
1512         mov     @T[0],@T[-$k]
1513         shr     \$`29*$j`,@T[-$k]
1514         and     %rax,@T[-$k]                            # &0x1fffffff
1515         mov     @T[-$k],`8*$j-128`($out)
1516 ___
1517         $j++; $k++;
1518     }
1519     $code.=<<___;
1520         shrd    \$`29*$j`,@T[1],@T[0]
1521         and     %rax,@T[0]
1522         mov     @T[0],`8*$j-128`($out)
1523 ___
1524     $j++;
1525     push(@T,shift(@T));
1526 }
1527 $code.=<<___;
1528         mov     @T[0],`8*$j-128`($out)                  # zero
1529         mov     @T[0],`8*($j+1)-128`($out)
1530         mov     @T[0],`8*($j+2)-128`($out)
1531         mov     @T[0],`8*($j+3)-128`($out)
1532         ret
1533 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1534 ___
1535 }
1536 {
1537 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1538
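# rsaz_1024_scatter5_avx2 stores one 1024-bit value, held as nine 32-byte
# vectors of 29-bit digits, into a table indexed by a 5-bit window value
# ($power, 0..31). Each vector is compressed to 16 bytes (the vpermd below
# keeps only the meaningful low dwords of each 64-bit lane), and piece $k of
# entry $power ends up at
#
#       offset = 16 * $power + 512 * $k          # $k = 0 .. 8
#
# i.e. all 32 entries of a given piece share one 512-byte region, which is
# what lets the gather routine below read them back in a fixed access pattern.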
1539 $code.=<<___;
1540 .globl  rsaz_1024_scatter5_avx2
1541 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1542 .align  32
1543 rsaz_1024_scatter5_avx2:
1544         vzeroupper
1545         vmovdqu .Lscatter_permd(%rip),%ymm5
1546         shl     \$4,$power
1547         lea     ($out,$power),$out
1548         mov     \$9,%eax
1549         jmp     .Loop_scatter_1024
1550
1551 .align  32
1552 .Loop_scatter_1024:
1553         vmovdqu         ($inp),%ymm0
1554         lea             32($inp),$inp
1555         vpermd          %ymm0,%ymm5,%ymm0
1556         vmovdqu         %xmm0,($out)
1557         lea             16*32($out),$out
1558         dec     %eax
1559         jnz     .Loop_scatter_1024
1560
1561         vzeroupper
1562         ret
1563 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1564
1565 .globl  rsaz_1024_gather5_avx2
1566 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1567 .align  32
1568 rsaz_1024_gather5_avx2:
1569 ___
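# rsaz_1024_gather5_avx2 reads table entry $power back in a cache-timing
# hardened way. The entry index only selects a 16-byte slot within a 64-byte
# cache line ($power%4), while the choice among the eight candidate cache
# lines ($power/4) is made by masking: exactly one of %xmm8..%xmm15 below is
# broadcast to all-ones, and the loop loads all eight lines of each 512-byte
# piece every time, ANDs each with its mask and ORs the results together.
# Roughly, per piece (hypothetical @line/@mask arrays):
#
#       my $piece = 0;
#       $piece |= $line[$_] & $mask[$_] for 0 .. 7;    # one mask is all-ones
#
# so which cache lines are touched does not depend on $power.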
1570 $code.=<<___ if ($win64);
1571         lea     -0x88(%rsp),%rax
1572 .LSEH_begin_rsaz_1024_gather5:
1573         # I can't trust the assembler to use a specific encoding :-(
1574         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1575         .byte   0x0f,0x29,0x70,0xe0             #movaps %xmm6,-0x20(%rax)
1576         .byte   0x0f,0x29,0x78,0xf0             #movaps %xmm7,-0x10(%rax)
1577         .byte   0x44,0x0f,0x29,0x00             #movaps %xmm8,0(%rax)
1578         .byte   0x44,0x0f,0x29,0x48,0x10        #movaps %xmm9,0x10(%rax)
1579         .byte   0x44,0x0f,0x29,0x50,0x20        #movaps %xmm10,0x20(%rax)
1580         .byte   0x44,0x0f,0x29,0x58,0x30        #movaps %xmm11,0x30(%rax)
1581         .byte   0x44,0x0f,0x29,0x60,0x40        #movaps %xmm12,0x40(%rax)
1582         .byte   0x44,0x0f,0x29,0x68,0x50        #movaps %xmm13,0x50(%rax)
1583         .byte   0x44,0x0f,0x29,0x70,0x60        #movaps %xmm14,0x60(%rax)
1584         .byte   0x44,0x0f,0x29,0x78,0x70        #movaps %xmm15,0x70(%rax)
1585 ___
1586 $code.=<<___;
1587         vzeroupper
1588         lea     .Lgather_table(%rip),%r11
1589         mov     $power,%eax
1590         and     \$3,$power
1591         shr     \$2,%eax                        # cache line number
1592         shl     \$4,$power                      # offset within cache line
1593
1594         vmovdqu         -32(%r11),%ymm7         # .Lgather_permd
1595         vpbroadcastb    8(%r11,%rax), %xmm8
1596         vpbroadcastb    7(%r11,%rax), %xmm9
1597         vpbroadcastb    6(%r11,%rax), %xmm10
1598         vpbroadcastb    5(%r11,%rax), %xmm11
1599         vpbroadcastb    4(%r11,%rax), %xmm12
1600         vpbroadcastb    3(%r11,%rax), %xmm13
1601         vpbroadcastb    2(%r11,%rax), %xmm14
1602         vpbroadcastb    1(%r11,%rax), %xmm15
1603
1604         lea     ($inp,$power),$inp
1605         mov     \$64,%r11                       # size optimization
1606         mov     \$9,%eax
1607         jmp     .Loop_gather_1024
1608
1609 .align  32
1610 .Loop_gather_1024:
1611         vpand           ($inp),                 %xmm8,%xmm0
1612         vpand           ($inp,%r11),            %xmm9,%xmm1
1613         vpand           ($inp,%r11,2),          %xmm10,%xmm2
1614         vpand           64($inp,%r11,2),        %xmm11,%xmm3
1615          vpor                                   %xmm0,%xmm1,%xmm1
1616         vpand           ($inp,%r11,4),          %xmm12,%xmm4
1617          vpor                                   %xmm2,%xmm3,%xmm3
1618         vpand           64($inp,%r11,4),        %xmm13,%xmm5
1619          vpor                                   %xmm1,%xmm3,%xmm3
1620         vpand           -128($inp,%r11,8),      %xmm14,%xmm6
1621          vpor                                   %xmm4,%xmm5,%xmm5
1622         vpand           -64($inp,%r11,8),       %xmm15,%xmm2
1623         lea             ($inp,%r11,8),$inp
1624          vpor                                   %xmm3,%xmm5,%xmm5
1625          vpor                                   %xmm2,%xmm6,%xmm6
1626          vpor                                   %xmm5,%xmm6,%xmm6
1627         vpermd          %ymm6,%ymm7,%ymm6
1628         vmovdqu         %ymm6,($out)
1629         lea             32($out),$out
1630         dec     %eax
1631         jnz     .Loop_gather_1024
1632
1633         vpxor   %ymm0,%ymm0,%ymm0
1634         vmovdqu %ymm0,($out)
1635         vzeroupper
1636 ___
1637 $code.=<<___ if ($win64);
1638         movaps  (%rsp),%xmm6
1639         movaps  0x10(%rsp),%xmm7
1640         movaps  0x20(%rsp),%xmm8
1641         movaps  0x30(%rsp),%xmm9
1642         movaps  0x40(%rsp),%xmm10
1643         movaps  0x50(%rsp),%xmm11
1644         movaps  0x60(%rsp),%xmm12
1645         movaps  0x70(%rsp),%xmm13
1646         movaps  0x80(%rsp),%xmm14
1647         movaps  0x90(%rsp),%xmm15
1648         lea     0xa8(%rsp),%rsp
1649 .LSEH_end_rsaz_1024_gather5:
1650 ___
1651 $code.=<<___;
1652         ret
1653 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1654 ___
1655 }
1656
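# rsaz_avx2_eligible reports whether the AVX2 code may be used at all: it
# tests bit 5 of the third 32-bit word of OPENSSL_ia32cap_P, which OpenSSL
# fills from CPUID leaf 7 EBX, i.e. the AVX2 feature flag, and returns 0 or 1.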
1657 $code.=<<___;
1658 .extern OPENSSL_ia32cap_P
1659 .globl  rsaz_avx2_eligible
1660 .type   rsaz_avx2_eligible,\@abi-omnipotent
1661 .align  32
1662 rsaz_avx2_eligible:
1663         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1664         and     \$`1<<5`,%eax
1665         shr     \$5,%eax
1666         ret
1667 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1668
1669 .align  64
1670 .Land_mask:
1671         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1672 .Lscatter_permd:
1673         .long   0,2,4,6,7,7,7,7
1674 .Lgather_permd:
1675         .long   0,7,1,7,2,7,3,7
1676 .Lgather_table:
1677         .byte   0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
1678 .align  64
1679 ___
1680
1681 if ($win64) {
1682 $rec="%rcx";
1683 $frame="%rdx";
1684 $context="%r8";
1685 $disp="%r9";
1686
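# rsaz_se_handler is the Win64 structured-exception handler shared by the
# sqr and mul routines: if the faulting RIP lies between the prologue and
# epilogue labels recorded in HandlerData, it recovers the saved
# %rbx/%rbp/%r12-%r15 and the ten saved xmm registers from the frame pointed
# to by context->Rbp, then passes the rest of the job to RtlVirtualUnwind.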
1687 $code.=<<___
1688 .extern __imp_RtlVirtualUnwind
1689 .type   rsaz_se_handler,\@abi-omnipotent
1690 .align  16
1691 rsaz_se_handler:
1692         push    %rsi
1693         push    %rdi
1694         push    %rbx
1695         push    %rbp
1696         push    %r12
1697         push    %r13
1698         push    %r14
1699         push    %r15
1700         pushfq
1701         sub     \$64,%rsp
1702
1703         mov     120($context),%rax      # pull context->Rax
1704         mov     248($context),%rbx      # pull context->Rip
1705
1706         mov     8($disp),%rsi           # disp->ImageBase
1707         mov     56($disp),%r11          # disp->HandlerData
1708
1709         mov     0(%r11),%r10d           # HandlerData[0]
1710         lea     (%rsi,%r10),%r10        # prologue label
1711         cmp     %r10,%rbx               # context->Rip<prologue label
1712         jb      .Lcommon_seh_tail
1713
1714         mov     152($context),%rax      # pull context->Rsp
1715
1716         mov     4(%r11),%r10d           # HandlerData[1]
1717         lea     (%rsi,%r10),%r10        # epilogue label
1718         cmp     %r10,%rbx               # context->Rip>=epilogue label
1719         jae     .Lcommon_seh_tail
1720
1721         mov     160($context),%rax      # pull context->Rbp
1722
1723         mov     -48(%rax),%r15
1724         mov     -40(%rax),%r14
1725         mov     -32(%rax),%r13
1726         mov     -24(%rax),%r12
1727         mov     -16(%rax),%rbp
1728         mov     -8(%rax),%rbx
1729         mov     %r15,240($context)
1730         mov     %r14,232($context)
1731         mov     %r13,224($context)
1732         mov     %r12,216($context)
1733         mov     %rbp,160($context)
1734         mov     %rbx,144($context)
1735
1736         lea     -0xd8(%rax),%rsi        # %xmm save area
1737         lea     512($context),%rdi      # & context.Xmm6
1738         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1739         .long   0xa548f3fc              # cld; rep movsq
1740
1741 .Lcommon_seh_tail:
1742         mov     8(%rax),%rdi
1743         mov     16(%rax),%rsi
1744         mov     %rax,152($context)      # restore context->Rsp
1745         mov     %rsi,168($context)      # restore context->Rsi
1746         mov     %rdi,176($context)      # restore context->Rdi
1747
1748         mov     40($disp),%rdi          # disp->ContextRecord
1749         mov     $context,%rsi           # context
1750         mov     \$154,%ecx              # sizeof(CONTEXT)
1751         .long   0xa548f3fc              # cld; rep movsq
1752
1753         mov     $disp,%rsi
1754         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1755         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1756         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1757         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1758         mov     40(%rsi),%r10           # disp->ContextRecord
1759         lea     56(%rsi),%r11           # &disp->HandlerData
1760         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1761         mov     %r10,32(%rsp)           # arg5
1762         mov     %r11,40(%rsp)           # arg6
1763         mov     %r12,48(%rsp)           # arg7
1764         mov     %rcx,56(%rsp)           # arg8, (NULL)
1765         call    *__imp_RtlVirtualUnwind(%rip)
1766
1767         mov     \$1,%eax                # ExceptionContinueSearch
1768         add     \$64,%rsp
1769         popfq
1770         pop     %r15
1771         pop     %r14
1772         pop     %r13
1773         pop     %r12
1774         pop     %rbp
1775         pop     %rbx
1776         pop     %rdi
1777         pop     %rsi
1778         ret
1779 .size   rsaz_se_handler,.-rsaz_se_handler
1780
1781 .section        .pdata
1782 .align  4
1783         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1784         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1785         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1786
1787         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1788         .rva    .LSEH_end_rsaz_1024_mul_avx2
1789         .rva    .LSEH_info_rsaz_1024_mul_avx2
1790
1791         .rva    .LSEH_begin_rsaz_1024_gather5
1792         .rva    .LSEH_end_rsaz_1024_gather5
1793         .rva    .LSEH_info_rsaz_1024_gather5
1794 .section        .xdata
1795 .align  8
1796 .LSEH_info_rsaz_1024_sqr_avx2:
1797         .byte   9,0,0,0
1798         .rva    rsaz_se_handler
1799         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue
1800 .LSEH_info_rsaz_1024_mul_avx2:
1801         .byte   9,0,0,0
1802         .rva    rsaz_se_handler
1803         .rva    .Lmul_1024_body,.Lmul_1024_epilogue
1804 .LSEH_info_rsaz_1024_gather5:
1805         .byte   0x01,0x33,0x16,0x00
1806         .byte   0x33,0xf8,0x09,0x00     #movaps 0x90(rsp),xmm15
1807         .byte   0x2e,0xe8,0x08,0x00     #movaps 0x80(rsp),xmm14
1808         .byte   0x29,0xd8,0x07,0x00     #movaps 0x70(rsp),xmm13
1809         .byte   0x24,0xc8,0x06,0x00     #movaps 0x60(rsp),xmm12
1810         .byte   0x1f,0xb8,0x05,0x00     #movaps 0x50(rsp),xmm11
1811         .byte   0x1a,0xa8,0x04,0x00     #movaps 0x40(rsp),xmm10
1812         .byte   0x15,0x98,0x03,0x00     #movaps 0x30(rsp),xmm9
1813         .byte   0x10,0x88,0x02,0x00     #movaps 0x20(rsp),xmm8
1814         .byte   0x0c,0x78,0x01,0x00     #movaps 0x10(rsp),xmm7
1815         .byte   0x08,0x68,0x00,0x00     #movaps 0x00(rsp),xmm6
1816         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
1817 ___
1818 }
1819
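# Post-processing of the generated text: evaluate the `...` arithmetic first,
# then reduce shift counts modulo 64 (the generators above can emit negative
# counts), and finally rewrite %ymm operands as %xmm, including the %x%ymm
# shorthand, for instructions that only accept 128-bit registers in those
# positions (vmovd/vmovq, vpinsr*, vpextr*, the vpbroadcast source operand).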
1820 foreach (split("\n",$code)) {
1821         s/\`([^\`]*)\`/eval($1)/ge;
1822
1823         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1824
1825         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1826         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1827         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1828         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1829         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1830         print $_,"\n";
1831 }
1832
1833 }}} else {{{
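# If the assembler is too old to handle AVX2, only stubs are emitted:
# rsaz_avx2_eligible returns 0, so the other entry points should never be
# reached, and each of them starts with ud2 so that an accidental call
# faults immediately instead of falling through into garbage.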
1834 print <<___;    # assembler is too old
1835 .text
1836
1837 .globl  rsaz_avx2_eligible
1838 .type   rsaz_avx2_eligible,\@abi-omnipotent
1839 rsaz_avx2_eligible:
1840         xor     %eax,%eax
1841         ret
1842 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1843
1844 .globl  rsaz_1024_sqr_avx2
1845 .globl  rsaz_1024_mul_avx2
1846 .globl  rsaz_1024_norm2red_avx2
1847 .globl  rsaz_1024_red2norm_avx2
1848 .globl  rsaz_1024_scatter5_avx2
1849 .globl  rsaz_1024_gather5_avx2
1850 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1851 rsaz_1024_sqr_avx2:
1852 rsaz_1024_mul_avx2:
1853 rsaz_1024_norm2red_avx2:
1854 rsaz_1024_red2norm_avx2:
1855 rsaz_1024_scatter5_avx2:
1856 rsaz_1024_gather5_avx2:
1857         .byte   0x0f,0x0b       # ud2
1858         ret
1859 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1860 ___
1861 }}}
1862
1863 close STDOUT;