2 # Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 ##############################################################################
12 # Copyright (c) 2012, Intel Corporation #
14 # All rights reserved. #
16 # Redistribution and use in source and binary forms, with or without #
17 # modification, are permitted provided that the following conditions are #
20 # * Redistributions of source code must retain the above copyright #
21 # notice, this list of conditions and the following disclaimer. #
23 # * Redistributions in binary form must reproduce the above copyright #
24 # notice, this list of conditions and the following disclaimer in the #
25 # documentation and/or other materials provided with the #
28 # * Neither the name of the Intel Corporation nor the names of its #
29 # contributors may be used to endorse or promote products derived from #
30 # this software without specific prior written permission. #
34 # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY #
34 # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
35 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
36 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
37 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
38 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
39 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
40 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
41 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
42 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
43 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
45 ##############################################################################
46 # Developers and authors: #
47 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
48 # (1) Intel Corporation, Israel Development Center, Haifa, Israel #
49 # (2) University of Haifa, Israel #
50 ##############################################################################
52 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular #
53 # Exponentiation, Using Advanced Vector Instructions Architectures", #
54 # F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, #
55 # pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012 #
56 # [2] S. Gueron: "Efficient Software Implementations of Modular #
57 # Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). #
58 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE #
59 # Proceedings of 9th International Conference on Information Technology: #
60 # New Generations (ITNG 2012), pp. 821-823 (2012) #
61 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
62 # resistant 1024-bit modular exponentiation, for optimizing RSA2048 #
63 # on AVX2 capable x86_64 platforms", #
64 # http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
65 ##############################################################################
67 # +13% improvement over original submission by <appro@openssl.org>
69 # rsa2048 sign/sec OpenSSL 1.0.1 scalar(*) this
70 # 2.3GHz Haswell 621 765/+23% 1113/+79%
71 # 2.3GHz Broadwell(**) 688 1200(***)/+74% 1120/+63%
73 # (*) the scalar code path, used if the system doesn't support AVX2; shown for reference;
74 # (**) scaled to 2.3GHz to simplify comparison;
75 # (***) scalar AD*X code is faster than AVX2 and is the preferred code path on Broadwell;
80 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
82 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
84 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
85 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
86 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
87 die "can't locate x86_64-xlate.pl";
89 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
90 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
91 $avx = ($1>=2.19) + ($1>=2.22);
95 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
96 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
97 $avx = ($1>=2.09) + ($1>=2.10);
101 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
102 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
103 $avx = ($1>=10) + ($1>=11);
107 if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
108 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
109 $avx = ($ver>=3.0) + ($ver>=3.01);
110 $addx = ($ver>=3.03);
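# At this point $avx is 0 (no usable AVX), 1 (assembler understands AVX) or
# 2 (assembler understands AVX2); the AVX2 code below is only emitted for the
# higher setting, otherwise the "assembler is too old" stub at the end is
# printed. $addx records whether the assembler also understands BMI2/ADX,
# which only matters for the run-time probe in rsaz_avx2_eligible.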
113 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
118 my $rp="%rdi"; # BN_ULONG *rp,
119 my $ap="%rsi"; # const BN_ULONG *ap,
120 my $np="%rdx"; # const BN_ULONG *np,
121 my $n0="%ecx"; # const BN_ULONG n0,
122 my $rep="%r8d"; # int repeat);
124 # The registers that hold the accumulated redundant result
125 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
126 # Therefore: ceil(1024/29) = 36 words, kept 4 per ymm register, i.e. 9 registers.
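# A minimal C sketch of the digit split (names a[]/red[] are illustrative
# only; this is not the code that rsaz_1024_norm2red_avx2 further down
# actually emits):
#
#	uint64_t red[36];			/* 36 digits, 29 bits each   */
#	for (int i = 0; i < 36; i++) {
#		int bit = 29*i, limb = bit/64, off = bit%64;
#		uint64_t d = a[limb] >> off;	/* a[] is 16 x 64-bit limbs  */
#		if (off > 35 && limb < 15)	/* digit straddles two limbs */
#			d |= a[limb+1] << (64-off);
#		red[i] = d & 0x1fffffff;
#	}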
137 # Registers that hold the broadcasted words of bp, currently used
140 # Registers that hold the broadcasted words of Y, currently used
145 my $AND_MASK="%ymm15";
146 # alu registers that hold the first words of the ACC
152 my $i="%r14d"; # loop counter
155 my $FrameSize=32*18+32*8; # place for A^2 and 2*A
162 $np="%r13"; # reassigned argument
167 .globl rsaz_1024_sqr_avx2
168 .type rsaz_1024_sqr_avx2,\@function,5
170 rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
180 $code.=<<___ if ($win64);
182 vmovaps %xmm6,-0xd8(%rax)
183 vmovaps %xmm7,-0xc8(%rax)
184 vmovaps %xmm8,-0xb8(%rax)
185 vmovaps %xmm9,-0xa8(%rax)
186 vmovaps %xmm10,-0x98(%rax)
187 vmovaps %xmm11,-0x88(%rax)
188 vmovaps %xmm12,-0x78(%rax)
189 vmovaps %xmm13,-0x68(%rax)
190 vmovaps %xmm14,-0x58(%rax)
191 vmovaps %xmm15,-0x48(%rax)
196 mov %rdx, $np # reassigned argument
197 sub \$$FrameSize, %rsp
199 sub \$-128, $rp # size optimization
203 and \$4095, $tmp # see if $np crosses page
206 vpxor $ACC9,$ACC9,$ACC9
207 jz .Lsqr_1024_no_n_copy
209 # an unaligned 256-bit load that crosses a page boundary can
210 # cause >2x performance degradation here, so if $np does
211 # cross a page boundary, copy it to the stack, making sure the copy itself can't cross one
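# In C terms the check above amounts to roughly the following (a sketch only,
# names are illustrative; span is the total number of bytes the 256-bit loads
# below touch):
#
#	if (((uintptr_t)np & 4095) + span >= 4096)
#		memcpy(stack_copy, np, span);	/* and point np at the copy */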
214 vmovdqu 32*0-128($np), $ACC0
216 vmovdqu 32*1-128($np), $ACC1
217 vmovdqu 32*2-128($np), $ACC2
218 vmovdqu 32*3-128($np), $ACC3
219 vmovdqu 32*4-128($np), $ACC4
220 vmovdqu 32*5-128($np), $ACC5
221 vmovdqu 32*6-128($np), $ACC6
222 vmovdqu 32*7-128($np), $ACC7
223 vmovdqu 32*8-128($np), $ACC8
224 lea $FrameSize+128(%rsp),$np
225 vmovdqu $ACC0, 32*0-128($np)
226 vmovdqu $ACC1, 32*1-128($np)
227 vmovdqu $ACC2, 32*2-128($np)
228 vmovdqu $ACC3, 32*3-128($np)
229 vmovdqu $ACC4, 32*4-128($np)
230 vmovdqu $ACC5, 32*5-128($np)
231 vmovdqu $ACC6, 32*6-128($np)
232 vmovdqu $ACC7, 32*7-128($np)
233 vmovdqu $ACC8, 32*8-128($np)
234 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero
236 .Lsqr_1024_no_n_copy:
239 vmovdqu 32*1-128($ap), $ACC1
240 vmovdqu 32*2-128($ap), $ACC2
241 vmovdqu 32*3-128($ap), $ACC3
242 vmovdqu 32*4-128($ap), $ACC4
243 vmovdqu 32*5-128($ap), $ACC5
244 vmovdqu 32*6-128($ap), $ACC6
245 vmovdqu 32*7-128($ap), $ACC7
246 vmovdqu 32*8-128($ap), $ACC8
248 lea 192(%rsp), $tp0 # 64+128=192
249 vpbroadcastq .Land_mask(%rip), $AND_MASK
250 jmp .LOOP_GRANDE_SQR_1024
253 .LOOP_GRANDE_SQR_1024:
254 lea 32*18+128(%rsp), $aap # size optimization
255 lea 448(%rsp), $tp1 # 64+128+256=448
257 # the squaring is performed as described in Variant B of
258 # "Speeding up Big-Number Squaring", so start by calculating the doubled input 2*A
260 vpaddq $ACC1, $ACC1, $ACC1
261 vpbroadcastq 32*0-128($ap), $B1
262 vpaddq $ACC2, $ACC2, $ACC2
263 vmovdqa $ACC1, 32*0-128($aap)
264 vpaddq $ACC3, $ACC3, $ACC3
265 vmovdqa $ACC2, 32*1-128($aap)
266 vpaddq $ACC4, $ACC4, $ACC4
267 vmovdqa $ACC3, 32*2-128($aap)
268 vpaddq $ACC5, $ACC5, $ACC5
269 vmovdqa $ACC4, 32*3-128($aap)
270 vpaddq $ACC6, $ACC6, $ACC6
271 vmovdqa $ACC5, 32*4-128($aap)
272 vpaddq $ACC7, $ACC7, $ACC7
273 vmovdqa $ACC6, 32*5-128($aap)
274 vpaddq $ACC8, $ACC8, $ACC8
275 vmovdqa $ACC7, 32*6-128($aap)
276 vpxor $ACC9, $ACC9, $ACC9
277 vmovdqa $ACC8, 32*7-128($aap)
279 vpmuludq 32*0-128($ap), $B1, $ACC0
280 vpbroadcastq 32*1-128($ap), $B2
281 vmovdqu $ACC9, 32*9-192($tp0) # zero upper half
282 vpmuludq $B1, $ACC1, $ACC1
283 vmovdqu $ACC9, 32*10-448($tp1)
284 vpmuludq $B1, $ACC2, $ACC2
285 vmovdqu $ACC9, 32*11-448($tp1)
286 vpmuludq $B1, $ACC3, $ACC3
287 vmovdqu $ACC9, 32*12-448($tp1)
288 vpmuludq $B1, $ACC4, $ACC4
289 vmovdqu $ACC9, 32*13-448($tp1)
290 vpmuludq $B1, $ACC5, $ACC5
291 vmovdqu $ACC9, 32*14-448($tp1)
292 vpmuludq $B1, $ACC6, $ACC6
293 vmovdqu $ACC9, 32*15-448($tp1)
294 vpmuludq $B1, $ACC7, $ACC7
295 vmovdqu $ACC9, 32*16-448($tp1)
296 vpmuludq $B1, $ACC8, $ACC8
297 vpbroadcastq 32*2-128($ap), $B1
298 vmovdqu $ACC9, 32*17-448($tp1)
309 vpbroadcastq 32*1-128($tpa), $B2
310 vpmuludq 32*0-128($ap), $B1, $ACC0
311 vpaddq 32*0-192($tp0), $ACC0, $ACC0
312 vpmuludq 32*0-128($aap), $B1, $ACC1
313 vpaddq 32*1-192($tp0), $ACC1, $ACC1
314 vpmuludq 32*1-128($aap), $B1, $ACC2
315 vpaddq 32*2-192($tp0), $ACC2, $ACC2
316 vpmuludq 32*2-128($aap), $B1, $ACC3
317 vpaddq 32*3-192($tp0), $ACC3, $ACC3
318 vpmuludq 32*3-128($aap), $B1, $ACC4
319 vpaddq 32*4-192($tp0), $ACC4, $ACC4
320 vpmuludq 32*4-128($aap), $B1, $ACC5
321 vpaddq 32*5-192($tp0), $ACC5, $ACC5
322 vpmuludq 32*5-128($aap), $B1, $ACC6
323 vpaddq 32*6-192($tp0), $ACC6, $ACC6
324 vpmuludq 32*6-128($aap), $B1, $ACC7
325 vpaddq 32*7-192($tp0), $ACC7, $ACC7
326 vpmuludq 32*7-128($aap), $B1, $ACC8
327 vpbroadcastq 32*2-128($tpa), $B1
328 vpaddq 32*8-192($tp0), $ACC8, $ACC8
330 vmovdqu $ACC0, 32*0-192($tp0)
331 vmovdqu $ACC1, 32*1-192($tp0)
333 vpmuludq 32*1-128($ap), $B2, $TEMP0
334 vpaddq $TEMP0, $ACC2, $ACC2
335 vpmuludq 32*1-128($aap), $B2, $TEMP1
336 vpaddq $TEMP1, $ACC3, $ACC3
337 vpmuludq 32*2-128($aap), $B2, $TEMP2
338 vpaddq $TEMP2, $ACC4, $ACC4
339 vpmuludq 32*3-128($aap), $B2, $TEMP0
340 vpaddq $TEMP0, $ACC5, $ACC5
341 vpmuludq 32*4-128($aap), $B2, $TEMP1
342 vpaddq $TEMP1, $ACC6, $ACC6
343 vpmuludq 32*5-128($aap), $B2, $TEMP2
344 vpaddq $TEMP2, $ACC7, $ACC7
345 vpmuludq 32*6-128($aap), $B2, $TEMP0
346 vpaddq $TEMP0, $ACC8, $ACC8
347 vpmuludq 32*7-128($aap), $B2, $ACC0
348 vpbroadcastq 32*3-128($tpa), $B2
349 vpaddq 32*9-192($tp0), $ACC0, $ACC0
351 vmovdqu $ACC2, 32*2-192($tp0)
352 vmovdqu $ACC3, 32*3-192($tp0)
354 vpmuludq 32*2-128($ap), $B1, $TEMP2
355 vpaddq $TEMP2, $ACC4, $ACC4
356 vpmuludq 32*2-128($aap), $B1, $TEMP0
357 vpaddq $TEMP0, $ACC5, $ACC5
358 vpmuludq 32*3-128($aap), $B1, $TEMP1
359 vpaddq $TEMP1, $ACC6, $ACC6
360 vpmuludq 32*4-128($aap), $B1, $TEMP2
361 vpaddq $TEMP2, $ACC7, $ACC7
362 vpmuludq 32*5-128($aap), $B1, $TEMP0
363 vpaddq $TEMP0, $ACC8, $ACC8
364 vpmuludq 32*6-128($aap), $B1, $TEMP1
365 vpaddq $TEMP1, $ACC0, $ACC0
366 vpmuludq 32*7-128($aap), $B1, $ACC1
367 vpbroadcastq 32*4-128($tpa), $B1
368 vpaddq 32*10-448($tp1), $ACC1, $ACC1
370 vmovdqu $ACC4, 32*4-192($tp0)
371 vmovdqu $ACC5, 32*5-192($tp0)
373 vpmuludq 32*3-128($ap), $B2, $TEMP0
374 vpaddq $TEMP0, $ACC6, $ACC6
375 vpmuludq 32*3-128($aap), $B2, $TEMP1
376 vpaddq $TEMP1, $ACC7, $ACC7
377 vpmuludq 32*4-128($aap), $B2, $TEMP2
378 vpaddq $TEMP2, $ACC8, $ACC8
379 vpmuludq 32*5-128($aap), $B2, $TEMP0
380 vpaddq $TEMP0, $ACC0, $ACC0
381 vpmuludq 32*6-128($aap), $B2, $TEMP1
382 vpaddq $TEMP1, $ACC1, $ACC1
383 vpmuludq 32*7-128($aap), $B2, $ACC2
384 vpbroadcastq 32*5-128($tpa), $B2
385 vpaddq 32*11-448($tp1), $ACC2, $ACC2
387 vmovdqu $ACC6, 32*6-192($tp0)
388 vmovdqu $ACC7, 32*7-192($tp0)
390 vpmuludq 32*4-128($ap), $B1, $TEMP0
391 vpaddq $TEMP0, $ACC8, $ACC8
392 vpmuludq 32*4-128($aap), $B1, $TEMP1
393 vpaddq $TEMP1, $ACC0, $ACC0
394 vpmuludq 32*5-128($aap), $B1, $TEMP2
395 vpaddq $TEMP2, $ACC1, $ACC1
396 vpmuludq 32*6-128($aap), $B1, $TEMP0
397 vpaddq $TEMP0, $ACC2, $ACC2
398 vpmuludq 32*7-128($aap), $B1, $ACC3
399 vpbroadcastq 32*6-128($tpa), $B1
400 vpaddq 32*12-448($tp1), $ACC3, $ACC3
402 vmovdqu $ACC8, 32*8-192($tp0)
403 vmovdqu $ACC0, 32*9-192($tp0)
406 vpmuludq 32*5-128($ap), $B2, $TEMP2
407 vpaddq $TEMP2, $ACC1, $ACC1
408 vpmuludq 32*5-128($aap), $B2, $TEMP0
409 vpaddq $TEMP0, $ACC2, $ACC2
410 vpmuludq 32*6-128($aap), $B2, $TEMP1
411 vpaddq $TEMP1, $ACC3, $ACC3
412 vpmuludq 32*7-128($aap), $B2, $ACC4
413 vpbroadcastq 32*7-128($tpa), $B2
414 vpaddq 32*13-448($tp1), $ACC4, $ACC4
416 vmovdqu $ACC1, 32*10-448($tp1)
417 vmovdqu $ACC2, 32*11-448($tp1)
419 vpmuludq 32*6-128($ap), $B1, $TEMP0
420 vpaddq $TEMP0, $ACC3, $ACC3
421 vpmuludq 32*6-128($aap), $B1, $TEMP1
422 vpbroadcastq 32*8-128($tpa), $ACC0 # borrow $ACC0 for $B1
423 vpaddq $TEMP1, $ACC4, $ACC4
424 vpmuludq 32*7-128($aap), $B1, $ACC5
425 vpbroadcastq 32*0+8-128($tpa), $B1 # for next iteration
426 vpaddq 32*14-448($tp1), $ACC5, $ACC5
428 vmovdqu $ACC3, 32*12-448($tp1)
429 vmovdqu $ACC4, 32*13-448($tp1)
432 vpmuludq 32*7-128($ap), $B2, $TEMP0
433 vpaddq $TEMP0, $ACC5, $ACC5
434 vpmuludq 32*7-128($aap), $B2, $ACC6
435 vpaddq 32*15-448($tp1), $ACC6, $ACC6
437 vpmuludq 32*8-128($ap), $ACC0, $ACC7
438 vmovdqu $ACC5, 32*14-448($tp1)
439 vpaddq 32*16-448($tp1), $ACC7, $ACC7
440 vmovdqu $ACC6, 32*15-448($tp1)
441 vmovdqu $ACC7, 32*16-448($tp1)
453 # we need to fix indices 32-39 to avoid overflow
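# The fix is ordinary carry propagation for this base-2^29 form; per digit it
# is simply (C sketch, illustration only):
#
#	carry     = acc[i] >> 29;
#	acc[i]   &= 0x1fffffff;
#	acc[i+1] += carry;
#
# Below it is done four digits at a time: vpsrlq/vpand split digit and carry,
# vpermq 0x93 rotates the four carries up one lane (the top one wrapping to
# the bottom), and vpblendd splices the carry that falls off the top of one
# register into the bottom lane of the next.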
454 vmovdqu 32*8(%rsp), $ACC8 # 32*8-192($tp0),
455 vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0)
456 vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0)
457 lea 192(%rsp), $tp0 # 64+128=192
459 vpsrlq \$29, $ACC8, $TEMP1
460 vpand $AND_MASK, $ACC8, $ACC8
461 vpsrlq \$29, $ACC1, $TEMP2
462 vpand $AND_MASK, $ACC1, $ACC1
464 vpermq \$0x93, $TEMP1, $TEMP1
465 vpxor $ZERO, $ZERO, $ZERO
466 vpermq \$0x93, $TEMP2, $TEMP2
468 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
469 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
470 vpaddq $TEMP0, $ACC8, $ACC8
471 vpblendd \$3, $TEMP2, $ZERO, $TEMP2
472 vpaddq $TEMP1, $ACC1, $ACC1
473 vpaddq $TEMP2, $ACC2, $ACC2
474 vmovdqu $ACC1, 32*9-192($tp0)
475 vmovdqu $ACC2, 32*10-192($tp0)
481 vmovdqu 32*1(%rsp), $ACC1
482 vmovdqu 32*2-192($tp0), $ACC2
483 vmovdqu 32*3-192($tp0), $ACC3
484 vmovdqu 32*4-192($tp0), $ACC4
485 vmovdqu 32*5-192($tp0), $ACC5
486 vmovdqu 32*6-192($tp0), $ACC6
487 vmovdqu 32*7-192($tp0), $ACC7
491 and \$0x1fffffff, %eax
495 imulq -128($np), %rax
496 vpbroadcastq $Y1, $Y1
499 imulq 8-128($np), %rax
503 imulq 16-128($np), %rax
506 imulq 24-128($np), %rdx
511 and \$0x1fffffff, %eax
514 jmp .LOOP_REDUCE_1024
519 vpbroadcastq $Y2, $Y2
521 vpmuludq 32*1-128($np), $Y1, $TEMP0
523 imulq -128($np), %rax
524 vpaddq $TEMP0, $ACC1, $ACC1
526 vpmuludq 32*2-128($np), $Y1, $TEMP1
528 imulq 8-128($np), %rax
529 vpaddq $TEMP1, $ACC2, $ACC2
530 vpmuludq 32*3-128($np), $Y1, $TEMP2
535 imulq 16-128($np), %rax
537 vpaddq $TEMP2, $ACC3, $ACC3
538 vpmuludq 32*4-128($np), $Y1, $TEMP0
541 vpaddq $TEMP0, $ACC4, $ACC4
542 vpmuludq 32*5-128($np), $Y1, $TEMP1
545 vpaddq $TEMP1, $ACC5, $ACC5
546 vpmuludq 32*6-128($np), $Y1, $TEMP2
547 and \$0x1fffffff, %eax
548 vpaddq $TEMP2, $ACC6, $ACC6
549 vpmuludq 32*7-128($np), $Y1, $TEMP0
550 vpaddq $TEMP0, $ACC7, $ACC7
551 vpmuludq 32*8-128($np), $Y1, $TEMP1
553 #vmovdqu 32*1-8-128($np), $TEMP2 # moved below
554 vpaddq $TEMP1, $ACC8, $ACC8
555 #vmovdqu 32*2-8-128($np), $TEMP0 # moved below
556 vpbroadcastq $Y1, $Y1
558 vpmuludq 32*1-8-128($np), $Y2, $TEMP2 # see above
559 vmovdqu 32*3-8-128($np), $TEMP1
561 imulq -128($np), %rax
562 vpaddq $TEMP2, $ACC1, $ACC1
563 vpmuludq 32*2-8-128($np), $Y2, $TEMP0 # see above
564 vmovdqu 32*4-8-128($np), $TEMP2
567 imulq 8-128($np), %rax
568 vpaddq $TEMP0, $ACC2, $ACC2
571 vpmuludq $Y2, $TEMP1, $TEMP1
572 vmovdqu 32*5-8-128($np), $TEMP0
574 vpaddq $TEMP1, $ACC3, $ACC3
575 vpmuludq $Y2, $TEMP2, $TEMP2
576 vmovdqu 32*6-8-128($np), $TEMP1
580 vpaddq $TEMP2, $ACC4, $ACC4
581 vpmuludq $Y2, $TEMP0, $TEMP0
582 .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 # vmovdqu 32*7-8-128($np), $TEMP2
583 and \$0x1fffffff, %eax
584 vpaddq $TEMP0, $ACC5, $ACC5
585 vpmuludq $Y2, $TEMP1, $TEMP1
586 vmovdqu 32*8-8-128($np), $TEMP0
587 vpaddq $TEMP1, $ACC6, $ACC6
588 vpmuludq $Y2, $TEMP2, $TEMP2
589 vmovdqu 32*9-8-128($np), $ACC9
590 vmovd %eax, $ACC0 # borrow ACC0 for Y2
591 imulq -128($np), %rax
592 vpaddq $TEMP2, $ACC7, $ACC7
593 vpmuludq $Y2, $TEMP0, $TEMP0
594 vmovdqu 32*1-16-128($np), $TEMP1
595 vpbroadcastq $ACC0, $ACC0
596 vpaddq $TEMP0, $ACC8, $ACC8
597 vpmuludq $Y2, $ACC9, $ACC9
598 vmovdqu 32*2-16-128($np), $TEMP2
602 ($ACC0,$Y2)=($Y2,$ACC0);
604 vmovdqu 32*1-24-128($np), $ACC0
605 vpmuludq $Y1, $TEMP1, $TEMP1
606 vmovdqu 32*3-16-128($np), $TEMP0
607 vpaddq $TEMP1, $ACC1, $ACC1
608 vpmuludq $Y2, $ACC0, $ACC0
609 vpmuludq $Y1, $TEMP2, $TEMP2
610 .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff # vmovdqu 32*4-16-128($np), $TEMP1
611 vpaddq $ACC1, $ACC0, $ACC0
612 vpaddq $TEMP2, $ACC2, $ACC2
613 vpmuludq $Y1, $TEMP0, $TEMP0
614 vmovdqu 32*5-16-128($np), $TEMP2
617 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
618 vpaddq $TEMP0, $ACC3, $ACC3
619 vpmuludq $Y1, $TEMP1, $TEMP1
620 vmovdqu 32*6-16-128($np), $TEMP0
621 vpaddq $TEMP1, $ACC4, $ACC4
622 vpmuludq $Y1, $TEMP2, $TEMP2
623 vmovdqu 32*7-16-128($np), $TEMP1
624 vpaddq $TEMP2, $ACC5, $ACC5
625 vpmuludq $Y1, $TEMP0, $TEMP0
626 vmovdqu 32*8-16-128($np), $TEMP2
627 vpaddq $TEMP0, $ACC6, $ACC6
628 vpmuludq $Y1, $TEMP1, $TEMP1
630 vmovdqu 32*9-16-128($np), $TEMP0
632 vpaddq $TEMP1, $ACC7, $ACC7
633 vpmuludq $Y1, $TEMP2, $TEMP2
634 #vmovdqu 32*2-24-128($np), $TEMP1 # moved below
637 vpaddq $TEMP2, $ACC8, $ACC8
638 vpmuludq $Y1, $TEMP0, $TEMP0
639 and \$0x1fffffff, %eax
641 vmovdqu 32*3-24-128($np), $TEMP2
643 vpaddq $TEMP0, $ACC9, $ACC9
644 vpbroadcastq $Y1, $Y1
646 vpmuludq 32*2-24-128($np), $Y2, $TEMP1 # see above
647 vmovdqu 32*4-24-128($np), $TEMP0
649 imulq -128($np), %rax
651 vpaddq $TEMP1, $ACC2, $ACC1
652 vpmuludq $Y2, $TEMP2, $TEMP2
653 vmovdqu 32*5-24-128($np), $TEMP1
656 imulq 8-128($np), %rax
660 vpaddq $TEMP2, $ACC3, $ACC2
661 vpmuludq $Y2, $TEMP0, $TEMP0
662 vmovdqu 32*6-24-128($np), $TEMP2
665 imulq 16-128($np), %rax
666 vpaddq $TEMP0, $ACC4, $ACC3
667 vpmuludq $Y2, $TEMP1, $TEMP1
668 vmovdqu 32*7-24-128($np), $TEMP0
669 imulq 24-128($np), %rdx # future $r3
672 vpaddq $TEMP1, $ACC5, $ACC4
673 vpmuludq $Y2, $TEMP2, $TEMP2
674 vmovdqu 32*8-24-128($np), $TEMP1
677 vpmuludq $Y2, $TEMP0, $TEMP0
678 vpaddq $TEMP2, $ACC6, $ACC5
679 vmovdqu 32*9-24-128($np), $TEMP2
680 and \$0x1fffffff, %eax
681 vpaddq $TEMP0, $ACC7, $ACC6
682 vpmuludq $Y2, $TEMP1, $TEMP1
684 vpaddq $TEMP1, $ACC8, $ACC7
685 vpmuludq $Y2, $TEMP2, $TEMP2
686 vpaddq $TEMP2, $ACC9, $ACC8
691 jnz .LOOP_REDUCE_1024
693 ($ACC0,$Y2)=($Y2,$ACC0);
695 lea 448(%rsp), $tp1 # size optimization
696 vpaddq $ACC9, $Y2, $ACC0
697 vpxor $ZERO, $ZERO, $ZERO
699 vpaddq 32*9-192($tp0), $ACC0, $ACC0
700 vpaddq 32*10-448($tp1), $ACC1, $ACC1
701 vpaddq 32*11-448($tp1), $ACC2, $ACC2
702 vpaddq 32*12-448($tp1), $ACC3, $ACC3
703 vpaddq 32*13-448($tp1), $ACC4, $ACC4
704 vpaddq 32*14-448($tp1), $ACC5, $ACC5
705 vpaddq 32*15-448($tp1), $ACC6, $ACC6
706 vpaddq 32*16-448($tp1), $ACC7, $ACC7
707 vpaddq 32*17-448($tp1), $ACC8, $ACC8
709 vpsrlq \$29, $ACC0, $TEMP1
710 vpand $AND_MASK, $ACC0, $ACC0
711 vpsrlq \$29, $ACC1, $TEMP2
712 vpand $AND_MASK, $ACC1, $ACC1
713 vpsrlq \$29, $ACC2, $TEMP3
714 vpermq \$0x93, $TEMP1, $TEMP1
715 vpand $AND_MASK, $ACC2, $ACC2
716 vpsrlq \$29, $ACC3, $TEMP4
717 vpermq \$0x93, $TEMP2, $TEMP2
718 vpand $AND_MASK, $ACC3, $ACC3
719 vpermq \$0x93, $TEMP3, $TEMP3
721 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
722 vpermq \$0x93, $TEMP4, $TEMP4
723 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
724 vpaddq $TEMP0, $ACC0, $ACC0
725 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
726 vpaddq $TEMP1, $ACC1, $ACC1
727 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
728 vpaddq $TEMP2, $ACC2, $ACC2
729 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
730 vpaddq $TEMP3, $ACC3, $ACC3
731 vpaddq $TEMP4, $ACC4, $ACC4
733 vpsrlq \$29, $ACC0, $TEMP1
734 vpand $AND_MASK, $ACC0, $ACC0
735 vpsrlq \$29, $ACC1, $TEMP2
736 vpand $AND_MASK, $ACC1, $ACC1
737 vpsrlq \$29, $ACC2, $TEMP3
738 vpermq \$0x93, $TEMP1, $TEMP1
739 vpand $AND_MASK, $ACC2, $ACC2
740 vpsrlq \$29, $ACC3, $TEMP4
741 vpermq \$0x93, $TEMP2, $TEMP2
742 vpand $AND_MASK, $ACC3, $ACC3
743 vpermq \$0x93, $TEMP3, $TEMP3
745 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
746 vpermq \$0x93, $TEMP4, $TEMP4
747 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
748 vpaddq $TEMP0, $ACC0, $ACC0
749 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
750 vpaddq $TEMP1, $ACC1, $ACC1
751 vmovdqu $ACC0, 32*0-128($rp)
752 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
753 vpaddq $TEMP2, $ACC2, $ACC2
754 vmovdqu $ACC1, 32*1-128($rp)
755 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
756 vpaddq $TEMP3, $ACC3, $ACC3
757 vmovdqu $ACC2, 32*2-128($rp)
758 vpaddq $TEMP4, $ACC4, $ACC4
759 vmovdqu $ACC3, 32*3-128($rp)
763 vpsrlq \$29, $ACC4, $TEMP1
764 vpand $AND_MASK, $ACC4, $ACC4
765 vpsrlq \$29, $ACC5, $TEMP2
766 vpand $AND_MASK, $ACC5, $ACC5
767 vpsrlq \$29, $ACC6, $TEMP3
768 vpermq \$0x93, $TEMP1, $TEMP1
769 vpand $AND_MASK, $ACC6, $ACC6
770 vpsrlq \$29, $ACC7, $TEMP4
771 vpermq \$0x93, $TEMP2, $TEMP2
772 vpand $AND_MASK, $ACC7, $ACC7
773 vpsrlq \$29, $ACC8, $TEMP5
774 vpermq \$0x93, $TEMP3, $TEMP3
775 vpand $AND_MASK, $ACC8, $ACC8
776 vpermq \$0x93, $TEMP4, $TEMP4
778 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
779 vpermq \$0x93, $TEMP5, $TEMP5
780 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
781 vpaddq $TEMP0, $ACC4, $ACC4
782 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
783 vpaddq $TEMP1, $ACC5, $ACC5
784 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
785 vpaddq $TEMP2, $ACC6, $ACC6
786 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
787 vpaddq $TEMP3, $ACC7, $ACC7
788 vpaddq $TEMP4, $ACC8, $ACC8
790 vpsrlq \$29, $ACC4, $TEMP1
791 vpand $AND_MASK, $ACC4, $ACC4
792 vpsrlq \$29, $ACC5, $TEMP2
793 vpand $AND_MASK, $ACC5, $ACC5
794 vpsrlq \$29, $ACC6, $TEMP3
795 vpermq \$0x93, $TEMP1, $TEMP1
796 vpand $AND_MASK, $ACC6, $ACC6
797 vpsrlq \$29, $ACC7, $TEMP4
798 vpermq \$0x93, $TEMP2, $TEMP2
799 vpand $AND_MASK, $ACC7, $ACC7
800 vpsrlq \$29, $ACC8, $TEMP5
801 vpermq \$0x93, $TEMP3, $TEMP3
802 vpand $AND_MASK, $ACC8, $ACC8
803 vpermq \$0x93, $TEMP4, $TEMP4
805 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
806 vpermq \$0x93, $TEMP5, $TEMP5
807 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
808 vpaddq $TEMP0, $ACC4, $ACC4
809 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
810 vpaddq $TEMP1, $ACC5, $ACC5
811 vmovdqu $ACC4, 32*4-128($rp)
812 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
813 vpaddq $TEMP2, $ACC6, $ACC6
814 vmovdqu $ACC5, 32*5-128($rp)
815 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
816 vpaddq $TEMP3, $ACC7, $ACC7
817 vmovdqu $ACC6, 32*6-128($rp)
818 vpaddq $TEMP4, $ACC8, $ACC8
819 vmovdqu $ACC7, 32*7-128($rp)
820 vmovdqu $ACC8, 32*8-128($rp)
824 jne .LOOP_GRANDE_SQR_1024
829 $code.=<<___ if ($win64);
831 movaps -0xd8(%rax),%xmm6
832 movaps -0xc8(%rax),%xmm7
833 movaps -0xb8(%rax),%xmm8
834 movaps -0xa8(%rax),%xmm9
835 movaps -0x98(%rax),%xmm10
836 movaps -0x88(%rax),%xmm11
837 movaps -0x78(%rax),%xmm12
838 movaps -0x68(%rax),%xmm13
839 movaps -0x58(%rax),%xmm14
840 movaps -0x48(%rax),%xmm15
849 lea (%rax),%rsp # restore %rsp
852 .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
857 my $rp="%rdi"; # BN_ULONG *rp,
858 my $ap="%rsi"; # const BN_ULONG *ap,
859 my $bp="%rdx"; # const BN_ULONG *bp,
860 my $np="%rcx"; # const BN_ULONG *np,
861 my $n0="%r8d"; # unsigned int n0);
863 # The registers that hold the accumulated redundant result
864 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
865 # Therefore: ceil(1024/29) = 36 words, kept 4 per ymm register, i.e. 9 registers.
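# For reference, one digit-iteration of the AMM as a C sketch (names are
# hypothetical; n0 plays the usual Montgomery role of -n^-1, of which only
# the low 29 bits matter here; the real code below handles four digits of
# bp per loop pass, and the result is only "almost" reduced, hence the name):
#
#	for (i = 0; i < 36; i++) {
#		for (j = 0; j < 36; j++)		/* acc += b[i]*a            */
#			acc[j] += (uint64_t)b[i]*a[j];
#		y = (acc[0]*n0) & 0x1fffffff;
#		for (j = 0; j < 36; j++)		/* acc += y*n               */
#			acc[j] += (uint64_t)y*n[j];
#		carry = acc[0] >> 29;			/* acc[0] is now 0 mod 2^29 */
#		for (j = 0; j < 35; j++)		/* shift down one digit     */
#			acc[j] = acc[j+1];
#		acc[35] = 0;
#		acc[0] += carry;
#	}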
877 # Registers that hold the broadcasted words of multiplier, currently used
886 my $AND_MASK="%ymm15";
888 # alu registers that hold the first words of the ACC
897 $bp="%r13"; # reassigned argument
900 .globl rsaz_1024_mul_avx2
901 .type rsaz_1024_mul_avx2,\@function,5
912 $code.=<<___ if ($win64);
915 vmovaps %xmm6,-0xd8(%rax)
916 vmovaps %xmm7,-0xc8(%rax)
917 vmovaps %xmm8,-0xb8(%rax)
918 vmovaps %xmm9,-0xa8(%rax)
919 vmovaps %xmm10,-0x98(%rax)
920 vmovaps %xmm11,-0x88(%rax)
921 vmovaps %xmm12,-0x78(%rax)
922 vmovaps %xmm13,-0x68(%rax)
923 vmovaps %xmm14,-0x58(%rax)
924 vmovaps %xmm15,-0x48(%rax)
930 mov %rdx, $bp # reassigned argument
933 # an unaligned 256-bit load that crosses a page boundary can
934 # cause severe performance degradation here, so if $ap does
935 # cross a page boundary, swap it with $bp [meaning that the caller
936 # is advised to lay $ap and $bp down next to each other, so
937 # that only one of them can cross a page boundary].
948 sub \$-128,$ap # size optimization
952 and \$4095, $tmp # see if $np crosses page
956 jz .Lmul_1024_no_n_copy
958 # an unaligned 256-bit load that crosses a page boundary can
959 # cause severe performance degradation here, so if $np does
960 # cross a page boundary, copy it to the stack, making sure the copy itself can't cross one
963 vmovdqu 32*0-128($np), $ACC0
965 vmovdqu 32*1-128($np), $ACC1
966 vmovdqu 32*2-128($np), $ACC2
967 vmovdqu 32*3-128($np), $ACC3
968 vmovdqu 32*4-128($np), $ACC4
969 vmovdqu 32*5-128($np), $ACC5
970 vmovdqu 32*6-128($np), $ACC6
971 vmovdqu 32*7-128($np), $ACC7
972 vmovdqu 32*8-128($np), $ACC8
974 vmovdqu $ACC0, 32*0-128($np)
975 vpxor $ACC0, $ACC0, $ACC0
976 vmovdqu $ACC1, 32*1-128($np)
977 vpxor $ACC1, $ACC1, $ACC1
978 vmovdqu $ACC2, 32*2-128($np)
979 vpxor $ACC2, $ACC2, $ACC2
980 vmovdqu $ACC3, 32*3-128($np)
981 vpxor $ACC3, $ACC3, $ACC3
982 vmovdqu $ACC4, 32*4-128($np)
983 vpxor $ACC4, $ACC4, $ACC4
984 vmovdqu $ACC5, 32*5-128($np)
985 vpxor $ACC5, $ACC5, $ACC5
986 vmovdqu $ACC6, 32*6-128($np)
987 vpxor $ACC6, $ACC6, $ACC6
988 vmovdqu $ACC7, 32*7-128($np)
989 vpxor $ACC7, $ACC7, $ACC7
990 vmovdqu $ACC8, 32*8-128($np)
992 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero after vzeroall
993 .Lmul_1024_no_n_copy:
997 vpbroadcastq ($bp), $Bi
998 vmovdqu $ACC0, (%rsp) # clear top of stack
1005 vmovdqu .Land_mask(%rip), $AND_MASK
1007 vmovdqu $ACC9, 32*9-128($rp) # $ACC9 is zero after vzeroall
1012 vpsrlq \$29, $ACC3, $ACC9 # correct $ACC3(*)
1014 imulq -128($ap), %rax
1017 imulq 8-128($ap), $r1
1022 and \$0x1fffffff, %eax
1025 imulq 16-128($ap), $r2
1029 imulq 24-128($ap), $r3
1031 vpmuludq 32*1-128($ap),$Bi,$TEMP0
1033 vpaddq $TEMP0,$ACC1,$ACC1
1034 vpmuludq 32*2-128($ap),$Bi,$TEMP1
1035 vpbroadcastq $Yi, $Yi
1036 vpaddq $TEMP1,$ACC2,$ACC2
1037 vpmuludq 32*3-128($ap),$Bi,$TEMP2
1038 vpand $AND_MASK, $ACC3, $ACC3 # correct $ACC3
1039 vpaddq $TEMP2,$ACC3,$ACC3
1040 vpmuludq 32*4-128($ap),$Bi,$TEMP0
1041 vpaddq $TEMP0,$ACC4,$ACC4
1042 vpmuludq 32*5-128($ap),$Bi,$TEMP1
1043 vpaddq $TEMP1,$ACC5,$ACC5
1044 vpmuludq 32*6-128($ap),$Bi,$TEMP2
1045 vpaddq $TEMP2,$ACC6,$ACC6
1046 vpmuludq 32*7-128($ap),$Bi,$TEMP0
1047 vpermq \$0x93, $ACC9, $ACC9 # correct $ACC3
1048 vpaddq $TEMP0,$ACC7,$ACC7
1049 vpmuludq 32*8-128($ap),$Bi,$TEMP1
1050 vpbroadcastq 8($bp), $Bi
1051 vpaddq $TEMP1,$ACC8,$ACC8
1054 imulq -128($np),%rax
1057 imulq 8-128($np),%rax
1060 imulq 16-128($np),%rax
1063 imulq 24-128($np),%rdx
1067 vpmuludq 32*1-128($np),$Yi,$TEMP2
1069 vpaddq $TEMP2,$ACC1,$ACC1
1070 vpmuludq 32*2-128($np),$Yi,$TEMP0
1071 vpaddq $TEMP0,$ACC2,$ACC2
1072 vpmuludq 32*3-128($np),$Yi,$TEMP1
1073 vpaddq $TEMP1,$ACC3,$ACC3
1074 vpmuludq 32*4-128($np),$Yi,$TEMP2
1075 vpaddq $TEMP2,$ACC4,$ACC4
1076 vpmuludq 32*5-128($np),$Yi,$TEMP0
1077 vpaddq $TEMP0,$ACC5,$ACC5
1078 vpmuludq 32*6-128($np),$Yi,$TEMP1
1079 vpaddq $TEMP1,$ACC6,$ACC6
1080 vpmuludq 32*7-128($np),$Yi,$TEMP2
1081 vpblendd \$3, $ZERO, $ACC9, $ACC9 # correct $ACC3
1082 vpaddq $TEMP2,$ACC7,$ACC7
1083 vpmuludq 32*8-128($np),$Yi,$TEMP0
1084 vpaddq $ACC9, $ACC3, $ACC3 # correct $ACC3
1085 vpaddq $TEMP0,$ACC8,$ACC8
1088 imulq -128($ap),%rax
1090 vmovdqu -8+32*1-128($ap),$TEMP1
1092 imulq 8-128($ap),%rax
1094 vmovdqu -8+32*2-128($ap),$TEMP2
1098 and \$0x1fffffff, %eax
1100 imulq 16-128($ap),%rbx
1102 vpmuludq $Bi,$TEMP1,$TEMP1
1104 vmovdqu -8+32*3-128($ap),$TEMP0
1105 vpaddq $TEMP1,$ACC1,$ACC1
1106 vpmuludq $Bi,$TEMP2,$TEMP2
1107 vpbroadcastq $Yi, $Yi
1108 vmovdqu -8+32*4-128($ap),$TEMP1
1109 vpaddq $TEMP2,$ACC2,$ACC2
1110 vpmuludq $Bi,$TEMP0,$TEMP0
1111 vmovdqu -8+32*5-128($ap),$TEMP2
1112 vpaddq $TEMP0,$ACC3,$ACC3
1113 vpmuludq $Bi,$TEMP1,$TEMP1
1114 vmovdqu -8+32*6-128($ap),$TEMP0
1115 vpaddq $TEMP1,$ACC4,$ACC4
1116 vpmuludq $Bi,$TEMP2,$TEMP2
1117 vmovdqu -8+32*7-128($ap),$TEMP1
1118 vpaddq $TEMP2,$ACC5,$ACC5
1119 vpmuludq $Bi,$TEMP0,$TEMP0
1120 vmovdqu -8+32*8-128($ap),$TEMP2
1121 vpaddq $TEMP0,$ACC6,$ACC6
1122 vpmuludq $Bi,$TEMP1,$TEMP1
1123 vmovdqu -8+32*9-128($ap),$ACC9
1124 vpaddq $TEMP1,$ACC7,$ACC7
1125 vpmuludq $Bi,$TEMP2,$TEMP2
1126 vpaddq $TEMP2,$ACC8,$ACC8
1127 vpmuludq $Bi,$ACC9,$ACC9
1128 vpbroadcastq 16($bp), $Bi
1131 imulq -128($np),%rax
1133 vmovdqu -8+32*1-128($np),$TEMP0
1135 imulq 8-128($np),%rax
1137 vmovdqu -8+32*2-128($np),$TEMP1
1139 imulq 16-128($np),%rdx
1143 vpmuludq $Yi,$TEMP0,$TEMP0
1145 vmovdqu -8+32*3-128($np),$TEMP2
1146 vpaddq $TEMP0,$ACC1,$ACC1
1147 vpmuludq $Yi,$TEMP1,$TEMP1
1148 vmovdqu -8+32*4-128($np),$TEMP0
1149 vpaddq $TEMP1,$ACC2,$ACC2
1150 vpmuludq $Yi,$TEMP2,$TEMP2
1151 vmovdqu -8+32*5-128($np),$TEMP1
1152 vpaddq $TEMP2,$ACC3,$ACC3
1153 vpmuludq $Yi,$TEMP0,$TEMP0
1154 vmovdqu -8+32*6-128($np),$TEMP2
1155 vpaddq $TEMP0,$ACC4,$ACC4
1156 vpmuludq $Yi,$TEMP1,$TEMP1
1157 vmovdqu -8+32*7-128($np),$TEMP0
1158 vpaddq $TEMP1,$ACC5,$ACC5
1159 vpmuludq $Yi,$TEMP2,$TEMP2
1160 vmovdqu -8+32*8-128($np),$TEMP1
1161 vpaddq $TEMP2,$ACC6,$ACC6
1162 vpmuludq $Yi,$TEMP0,$TEMP0
1163 vmovdqu -8+32*9-128($np),$TEMP2
1164 vpaddq $TEMP0,$ACC7,$ACC7
1165 vpmuludq $Yi,$TEMP1,$TEMP1
1166 vpaddq $TEMP1,$ACC8,$ACC8
1167 vpmuludq $Yi,$TEMP2,$TEMP2
1168 vpaddq $TEMP2,$ACC9,$ACC9
1170 vmovdqu -16+32*1-128($ap),$TEMP0
1172 imulq -128($ap),%rax
1175 vmovdqu -16+32*2-128($ap),$TEMP1
1178 and \$0x1fffffff, %eax
1180 imulq 8-128($ap),%rbx
1182 vpmuludq $Bi,$TEMP0,$TEMP0
1184 vmovdqu -16+32*3-128($ap),$TEMP2
1185 vpaddq $TEMP0,$ACC1,$ACC1
1186 vpmuludq $Bi,$TEMP1,$TEMP1
1187 vpbroadcastq $Yi, $Yi
1188 vmovdqu -16+32*4-128($ap),$TEMP0
1189 vpaddq $TEMP1,$ACC2,$ACC2
1190 vpmuludq $Bi,$TEMP2,$TEMP2
1191 vmovdqu -16+32*5-128($ap),$TEMP1
1192 vpaddq $TEMP2,$ACC3,$ACC3
1193 vpmuludq $Bi,$TEMP0,$TEMP0
1194 vmovdqu -16+32*6-128($ap),$TEMP2
1195 vpaddq $TEMP0,$ACC4,$ACC4
1196 vpmuludq $Bi,$TEMP1,$TEMP1
1197 vmovdqu -16+32*7-128($ap),$TEMP0
1198 vpaddq $TEMP1,$ACC5,$ACC5
1199 vpmuludq $Bi,$TEMP2,$TEMP2
1200 vmovdqu -16+32*8-128($ap),$TEMP1
1201 vpaddq $TEMP2,$ACC6,$ACC6
1202 vpmuludq $Bi,$TEMP0,$TEMP0
1203 vmovdqu -16+32*9-128($ap),$TEMP2
1204 vpaddq $TEMP0,$ACC7,$ACC7
1205 vpmuludq $Bi,$TEMP1,$TEMP1
1206 vpaddq $TEMP1,$ACC8,$ACC8
1207 vpmuludq $Bi,$TEMP2,$TEMP2
1208 vpbroadcastq 24($bp), $Bi
1209 vpaddq $TEMP2,$ACC9,$ACC9
1211 vmovdqu -16+32*1-128($np),$TEMP0
1213 imulq -128($np),%rax
1215 vmovdqu -16+32*2-128($np),$TEMP1
1216 imulq 8-128($np),%rdx
1220 vpmuludq $Yi,$TEMP0,$TEMP0
1222 vmovdqu -16+32*3-128($np),$TEMP2
1223 vpaddq $TEMP0,$ACC1,$ACC1
1224 vpmuludq $Yi,$TEMP1,$TEMP1
1225 vmovdqu -16+32*4-128($np),$TEMP0
1226 vpaddq $TEMP1,$ACC2,$ACC2
1227 vpmuludq $Yi,$TEMP2,$TEMP2
1228 vmovdqu -16+32*5-128($np),$TEMP1
1229 vpaddq $TEMP2,$ACC3,$ACC3
1230 vpmuludq $Yi,$TEMP0,$TEMP0
1231 vmovdqu -16+32*6-128($np),$TEMP2
1232 vpaddq $TEMP0,$ACC4,$ACC4
1233 vpmuludq $Yi,$TEMP1,$TEMP1
1234 vmovdqu -16+32*7-128($np),$TEMP0
1235 vpaddq $TEMP1,$ACC5,$ACC5
1236 vpmuludq $Yi,$TEMP2,$TEMP2
1237 vmovdqu -16+32*8-128($np),$TEMP1
1238 vpaddq $TEMP2,$ACC6,$ACC6
1239 vpmuludq $Yi,$TEMP0,$TEMP0
1240 vmovdqu -16+32*9-128($np),$TEMP2
1241 vpaddq $TEMP0,$ACC7,$ACC7
1242 vpmuludq $Yi,$TEMP1,$TEMP1
1243 vmovdqu -24+32*1-128($ap),$TEMP0
1244 vpaddq $TEMP1,$ACC8,$ACC8
1245 vpmuludq $Yi,$TEMP2,$TEMP2
1246 vmovdqu -24+32*2-128($ap),$TEMP1
1247 vpaddq $TEMP2,$ACC9,$ACC9
1250 imulq -128($ap),%rbx
1255 and \$0x1fffffff, %eax
1257 vpmuludq $Bi,$TEMP0,$TEMP0
1259 vmovdqu -24+32*3-128($ap),$TEMP2
1260 vpaddq $TEMP0,$ACC1,$ACC1
1261 vpmuludq $Bi,$TEMP1,$TEMP1
1262 vpbroadcastq $Yi, $Yi
1263 vmovdqu -24+32*4-128($ap),$TEMP0
1264 vpaddq $TEMP1,$ACC2,$ACC2
1265 vpmuludq $Bi,$TEMP2,$TEMP2
1266 vmovdqu -24+32*5-128($ap),$TEMP1
1267 vpaddq $TEMP2,$ACC3,$ACC3
1268 vpmuludq $Bi,$TEMP0,$TEMP0
1269 vmovdqu -24+32*6-128($ap),$TEMP2
1270 vpaddq $TEMP0,$ACC4,$ACC4
1271 vpmuludq $Bi,$TEMP1,$TEMP1
1272 vmovdqu -24+32*7-128($ap),$TEMP0
1273 vpaddq $TEMP1,$ACC5,$ACC5
1274 vpmuludq $Bi,$TEMP2,$TEMP2
1275 vmovdqu -24+32*8-128($ap),$TEMP1
1276 vpaddq $TEMP2,$ACC6,$ACC6
1277 vpmuludq $Bi,$TEMP0,$TEMP0
1278 vmovdqu -24+32*9-128($ap),$TEMP2
1279 vpaddq $TEMP0,$ACC7,$ACC7
1280 vpmuludq $Bi,$TEMP1,$TEMP1
1281 vpaddq $TEMP1,$ACC8,$ACC8
1282 vpmuludq $Bi,$TEMP2,$TEMP2
1283 vpbroadcastq 32($bp), $Bi
1284 vpaddq $TEMP2,$ACC9,$ACC9
1285 add \$32, $bp # $bp++
1287 vmovdqu -24+32*1-128($np),$TEMP0
1288 imulq -128($np),%rax
1292 vmovdqu -24+32*2-128($np),$TEMP1
1293 vpmuludq $Yi,$TEMP0,$TEMP0
1295 vmovdqu -24+32*3-128($np),$TEMP2
1296 vpaddq $TEMP0,$ACC1,$ACC0 # $ACC0==$TEMP0
1297 vpmuludq $Yi,$TEMP1,$TEMP1
1298 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
1299 vpaddq $TEMP1,$ACC2,$ACC1
1300 vmovdqu -24+32*4-128($np),$TEMP0
1301 vpmuludq $Yi,$TEMP2,$TEMP2
1302 vmovdqu -24+32*5-128($np),$TEMP1
1303 vpaddq $TEMP2,$ACC3,$ACC2
1304 vpmuludq $Yi,$TEMP0,$TEMP0
1305 vmovdqu -24+32*6-128($np),$TEMP2
1306 vpaddq $TEMP0,$ACC4,$ACC3
1307 vpmuludq $Yi,$TEMP1,$TEMP1
1308 vmovdqu -24+32*7-128($np),$TEMP0
1309 vpaddq $TEMP1,$ACC5,$ACC4
1310 vpmuludq $Yi,$TEMP2,$TEMP2
1311 vmovdqu -24+32*8-128($np),$TEMP1
1312 vpaddq $TEMP2,$ACC6,$ACC5
1313 vpmuludq $Yi,$TEMP0,$TEMP0
1314 vmovdqu -24+32*9-128($np),$TEMP2
1316 vpaddq $TEMP0,$ACC7,$ACC6
1317 vpmuludq $Yi,$TEMP1,$TEMP1
1319 vpaddq $TEMP1,$ACC8,$ACC7
1320 vpmuludq $Yi,$TEMP2,$TEMP2
1322 vpaddq $TEMP2,$ACC9,$ACC8
1328 # (*) The original implementation corrected ACC1-ACC3 for overflow
1329 # only after 7 loop runs, i.e. after 28 iterations, or 56 additions.
1330 # But as we underutilize resources, it's possible to correct in
1331 # each iteration with only marginal performance loss; and since we
1332 # then correct in every iteration, we can correct fewer digits and
1333 # avoid the performance penalty completely. Also note that we
1334 # correct only three digits out of four. This works because the
1335 # most significant digit is subjected to fewer additions.
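# A rough bound, for the record: every vpmuludq term is a product of two
# 29-bit digits, i.e. below 2^58, so a 64-bit lane has room for about
# 2^(64-58) = 64 such additions. 28 iterations with two additions each
# (a*b and y*n) give 56, just under that limit; correcting every iteration
# keeps the per-digit count in the low single digits, with ample headroom.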
1341 vpermq \$0, $AND_MASK, $AND_MASK
1342 vpaddq (%rsp), $TEMP1, $ACC0
1344 vpsrlq \$29, $ACC0, $TEMP1
1345 vpand $AND_MASK, $ACC0, $ACC0
1346 vpsrlq \$29, $ACC1, $TEMP2
1347 vpand $AND_MASK, $ACC1, $ACC1
1348 vpsrlq \$29, $ACC2, $TEMP3
1349 vpermq \$0x93, $TEMP1, $TEMP1
1350 vpand $AND_MASK, $ACC2, $ACC2
1351 vpsrlq \$29, $ACC3, $TEMP4
1352 vpermq \$0x93, $TEMP2, $TEMP2
1353 vpand $AND_MASK, $ACC3, $ACC3
1355 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1356 vpermq \$0x93, $TEMP3, $TEMP3
1357 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1358 vpermq \$0x93, $TEMP4, $TEMP4
1359 vpaddq $TEMP0, $ACC0, $ACC0
1360 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1361 vpaddq $TEMP1, $ACC1, $ACC1
1362 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1363 vpaddq $TEMP2, $ACC2, $ACC2
1364 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1365 vpaddq $TEMP3, $ACC3, $ACC3
1366 vpaddq $TEMP4, $ACC4, $ACC4
1368 vpsrlq \$29, $ACC0, $TEMP1
1369 vpand $AND_MASK, $ACC0, $ACC0
1370 vpsrlq \$29, $ACC1, $TEMP2
1371 vpand $AND_MASK, $ACC1, $ACC1
1372 vpsrlq \$29, $ACC2, $TEMP3
1373 vpermq \$0x93, $TEMP1, $TEMP1
1374 vpand $AND_MASK, $ACC2, $ACC2
1375 vpsrlq \$29, $ACC3, $TEMP4
1376 vpermq \$0x93, $TEMP2, $TEMP2
1377 vpand $AND_MASK, $ACC3, $ACC3
1378 vpermq \$0x93, $TEMP3, $TEMP3
1380 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1381 vpermq \$0x93, $TEMP4, $TEMP4
1382 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1383 vpaddq $TEMP0, $ACC0, $ACC0
1384 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1385 vpaddq $TEMP1, $ACC1, $ACC1
1386 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1387 vpaddq $TEMP2, $ACC2, $ACC2
1388 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1389 vpaddq $TEMP3, $ACC3, $ACC3
1390 vpaddq $TEMP4, $ACC4, $ACC4
1392 vmovdqu $ACC0, 0-128($rp)
1393 vmovdqu $ACC1, 32-128($rp)
1394 vmovdqu $ACC2, 64-128($rp)
1395 vmovdqu $ACC3, 96-128($rp)
1400 vpsrlq \$29, $ACC4, $TEMP1
1401 vpand $AND_MASK, $ACC4, $ACC4
1402 vpsrlq \$29, $ACC5, $TEMP2
1403 vpand $AND_MASK, $ACC5, $ACC5
1404 vpsrlq \$29, $ACC6, $TEMP3
1405 vpermq \$0x93, $TEMP1, $TEMP1
1406 vpand $AND_MASK, $ACC6, $ACC6
1407 vpsrlq \$29, $ACC7, $TEMP4
1408 vpermq \$0x93, $TEMP2, $TEMP2
1409 vpand $AND_MASK, $ACC7, $ACC7
1410 vpsrlq \$29, $ACC8, $TEMP5
1411 vpermq \$0x93, $TEMP3, $TEMP3
1412 vpand $AND_MASK, $ACC8, $ACC8
1413 vpermq \$0x93, $TEMP4, $TEMP4
1415 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1416 vpermq \$0x93, $TEMP5, $TEMP5
1417 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1418 vpaddq $TEMP0, $ACC4, $ACC4
1419 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1420 vpaddq $TEMP1, $ACC5, $ACC5
1421 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1422 vpaddq $TEMP2, $ACC6, $ACC6
1423 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1424 vpaddq $TEMP3, $ACC7, $ACC7
1425 vpaddq $TEMP4, $ACC8, $ACC8
1427 vpsrlq \$29, $ACC4, $TEMP1
1428 vpand $AND_MASK, $ACC4, $ACC4
1429 vpsrlq \$29, $ACC5, $TEMP2
1430 vpand $AND_MASK, $ACC5, $ACC5
1431 vpsrlq \$29, $ACC6, $TEMP3
1432 vpermq \$0x93, $TEMP1, $TEMP1
1433 vpand $AND_MASK, $ACC6, $ACC6
1434 vpsrlq \$29, $ACC7, $TEMP4
1435 vpermq \$0x93, $TEMP2, $TEMP2
1436 vpand $AND_MASK, $ACC7, $ACC7
1437 vpsrlq \$29, $ACC8, $TEMP5
1438 vpermq \$0x93, $TEMP3, $TEMP3
1439 vpand $AND_MASK, $ACC8, $ACC8
1440 vpermq \$0x93, $TEMP4, $TEMP4
1442 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1443 vpermq \$0x93, $TEMP5, $TEMP5
1444 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1445 vpaddq $TEMP0, $ACC4, $ACC4
1446 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1447 vpaddq $TEMP1, $ACC5, $ACC5
1448 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1449 vpaddq $TEMP2, $ACC6, $ACC6
1450 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1451 vpaddq $TEMP3, $ACC7, $ACC7
1452 vpaddq $TEMP4, $ACC8, $ACC8
1454 vmovdqu $ACC4, 128-128($rp)
1455 vmovdqu $ACC5, 160-128($rp)
1456 vmovdqu $ACC6, 192-128($rp)
1457 vmovdqu $ACC7, 224-128($rp)
1458 vmovdqu $ACC8, 256-128($rp)
1463 $code.=<<___ if ($win64);
1465 movaps -0xd8(%rax),%xmm6
1466 movaps -0xc8(%rax),%xmm7
1467 movaps -0xb8(%rax),%xmm8
1468 movaps -0xa8(%rax),%xmm9
1469 movaps -0x98(%rax),%xmm10
1470 movaps -0x88(%rax),%xmm11
1471 movaps -0x78(%rax),%xmm12
1472 movaps -0x68(%rax),%xmm13
1473 movaps -0x58(%rax),%xmm14
1474 movaps -0x48(%rax),%xmm15
1483 lea (%rax),%rsp # restore %rsp
1484 .Lmul_1024_epilogue:
1486 .size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1490 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1491 my @T = map("%r$_",(8..11));
1494 .globl rsaz_1024_red2norm_avx2
1495 .type rsaz_1024_red2norm_avx2,\@abi-omnipotent
1497 rsaz_1024_red2norm_avx2:
1498 sub \$-128,$inp # size optimization
1502 for ($j=0,$i=0; $i<16; $i++) {
1504 while (29*$j<64*($i+1)) { # load data till boundary
1505 $code.=" mov `8*$j-128`($inp), @T[0]\n";
1506 $j++; $k++; push(@T,shift(@T));
1509 while ($k>1) { # shift all loaded values but the last
1510 $code.=" shl \$`29*($j-$k)`,@T[-$k]\n";
1513 $code.=<<___; # shift last value
1515 shl \$`29*($j-1)`, @T[-1]
1516 shr \$`-29*($j-1)`, @T[0]
1518 while ($l) { # accumulate all values
1519 $code.=" add @T[-$l], %rax\n";
1523 adc \$0, @T[0] # absorb any carry
1524 mov %rax, 8*$i($out)
1531 .size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
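# For reference, the conversion done above in C (a sketch only, using the
# compiler's 128-bit integer for brevity; red[] holds the 36 29-bit digits,
# out[] the 16 64-bit limbs):
#
#	unsigned __int128 acc = 0;
#	int bits = 0, j = 0;
#	for (i = 0; i < 16; i++) {
#		while (bits < 64) {	/* pull in digits until the limb is covered */
#			acc += (unsigned __int128)red[j++] << bits;
#			bits += 29;
#		}
#		out[i] = (uint64_t)acc;
#		acc >>= 64;
#		bits  -= 64;
#	}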
1533 .globl rsaz_1024_norm2red_avx2
1534 .type rsaz_1024_norm2red_avx2,\@abi-omnipotent
1536 rsaz_1024_norm2red_avx2:
1537 sub \$-128,$out # size optimization
1539 mov \$0x1fffffff,%eax
1541 for ($j=0,$i=0; $i<16; $i++) {
1542 $code.=" mov `8*($i+1)`($inp),@T[1]\n" if ($i<15);
1543 $code.=" xor @T[1],@T[1]\n" if ($i==15);
1545 while (29*($j+1)<64*($i+1)) {
1548 shr \$`29*$j`,@T[-$k]
1549 and %rax,@T[-$k] # &0x1fffffff
1550 mov @T[-$k],`8*$j-128`($out)
1555 shrd \$`29*$j`,@T[1],@T[0]
1557 mov @T[0],`8*$j-128`($out)
1563 mov @T[0],`8*$j-128`($out) # zero
1564 mov @T[0],`8*($j+1)-128`($out)
1565 mov @T[0],`8*($j+2)-128`($out)
1566 mov @T[0],`8*($j+3)-128`($out)
1568 .size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1572 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1575 .globl rsaz_1024_scatter5_avx2
1576 .type rsaz_1024_scatter5_avx2,\@abi-omnipotent
1578 rsaz_1024_scatter5_avx2:
1580 vmovdqu .Lscatter_permd(%rip),%ymm5
1582 lea ($out,$power),$out
1584 jmp .Loop_scatter_1024
1588 vmovdqu ($inp),%ymm0
1590 vpermd %ymm0,%ymm5,%ymm0
1591 vmovdqu %xmm0,($out)
1592 lea 16*32($out),$out
1594 jnz .Loop_scatter_1024
1598 .size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1600 .globl rsaz_1024_gather5_avx2
1601 .type rsaz_1024_gather5_avx2,\@abi-omnipotent
1603 rsaz_1024_gather5_avx2:
1607 $code.=<<___ if ($win64);
1608 lea -0x88(%rsp),%rax
1609 .LSEH_begin_rsaz_1024_gather5:
1610 # I can't trust assembler to use specific encoding:-(
1611 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax),%rsp
1612 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6,-0x20(%rax)
1613 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7,-0x10(%rax)
1614 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8,0(%rax)
1615 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9,0x10(%rax)
1616 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10,0x20(%rax)
1617 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11,0x30(%rax)
1618 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12,0x40(%rax)
1619 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13,0x50(%rax)
1620 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14,0x60(%rax)
1621 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15,0x70(%rax)
1624 lea -0x100(%rsp),%rsp
1626 lea .Linc(%rip), %r10
1627 lea -128(%rsp),%rax # control u-op density
1630 vmovdqa (%r10),%ymm0
1631 vmovdqa 32(%r10),%ymm1
1632 vmovdqa 64(%r10),%ymm5
1633 vpbroadcastd %xmm4,%ymm4
1635 vpaddd %ymm5, %ymm0, %ymm2
1636 vpcmpeqd %ymm4, %ymm0, %ymm0
1637 vpaddd %ymm5, %ymm1, %ymm3
1638 vpcmpeqd %ymm4, %ymm1, %ymm1
1639 vmovdqa %ymm0, 32*0+128(%rax)
1640 vpaddd %ymm5, %ymm2, %ymm0
1641 vpcmpeqd %ymm4, %ymm2, %ymm2
1642 vmovdqa %ymm1, 32*1+128(%rax)
1643 vpaddd %ymm5, %ymm3, %ymm1
1644 vpcmpeqd %ymm4, %ymm3, %ymm3
1645 vmovdqa %ymm2, 32*2+128(%rax)
1646 vpaddd %ymm5, %ymm0, %ymm2
1647 vpcmpeqd %ymm4, %ymm0, %ymm0
1648 vmovdqa %ymm3, 32*3+128(%rax)
1649 vpaddd %ymm5, %ymm1, %ymm3
1650 vpcmpeqd %ymm4, %ymm1, %ymm1
1651 vmovdqa %ymm0, 32*4+128(%rax)
1652 vpaddd %ymm5, %ymm2, %ymm8
1653 vpcmpeqd %ymm4, %ymm2, %ymm2
1654 vmovdqa %ymm1, 32*5+128(%rax)
1655 vpaddd %ymm5, %ymm3, %ymm9
1656 vpcmpeqd %ymm4, %ymm3, %ymm3
1657 vmovdqa %ymm2, 32*6+128(%rax)
1658 vpaddd %ymm5, %ymm8, %ymm10
1659 vpcmpeqd %ymm4, %ymm8, %ymm8
1660 vmovdqa %ymm3, 32*7+128(%rax)
1661 vpaddd %ymm5, %ymm9, %ymm11
1662 vpcmpeqd %ymm4, %ymm9, %ymm9
1663 vpaddd %ymm5, %ymm10, %ymm12
1664 vpcmpeqd %ymm4, %ymm10, %ymm10
1665 vpaddd %ymm5, %ymm11, %ymm13
1666 vpcmpeqd %ymm4, %ymm11, %ymm11
1667 vpaddd %ymm5, %ymm12, %ymm14
1668 vpcmpeqd %ymm4, %ymm12, %ymm12
1669 vpaddd %ymm5, %ymm13, %ymm15
1670 vpcmpeqd %ymm4, %ymm13, %ymm13
1671 vpcmpeqd %ymm4, %ymm14, %ymm14
1672 vpcmpeqd %ymm4, %ymm15, %ymm15
1674 vmovdqa -32(%r10),%ymm7 # .Lgather_permd
1679 vmovdqa 32*0-128($inp), %ymm0
1680 vmovdqa 32*1-128($inp), %ymm1
1681 vmovdqa 32*2-128($inp), %ymm2
1682 vmovdqa 32*3-128($inp), %ymm3
1683 vpand 32*0+128(%rax), %ymm0, %ymm0
1684 vpand 32*1+128(%rax), %ymm1, %ymm1
1685 vpand 32*2+128(%rax), %ymm2, %ymm2
1686 vpor %ymm0, %ymm1, %ymm4
1687 vpand 32*3+128(%rax), %ymm3, %ymm3
1688 vmovdqa 32*4-128($inp), %ymm0
1689 vmovdqa 32*5-128($inp), %ymm1
1690 vpor %ymm2, %ymm3, %ymm5
1691 vmovdqa 32*6-128($inp), %ymm2
1692 vmovdqa 32*7-128($inp), %ymm3
1693 vpand 32*4+128(%rax), %ymm0, %ymm0
1694 vpand 32*5+128(%rax), %ymm1, %ymm1
1695 vpand 32*6+128(%rax), %ymm2, %ymm2
1696 vpor %ymm0, %ymm4, %ymm4
1697 vpand 32*7+128(%rax), %ymm3, %ymm3
1698 vpand 32*8-128($inp), %ymm8, %ymm0
1699 vpor %ymm1, %ymm5, %ymm5
1700 vpand 32*9-128($inp), %ymm9, %ymm1
1701 vpor %ymm2, %ymm4, %ymm4
1702 vpand 32*10-128($inp),%ymm10, %ymm2
1703 vpor %ymm3, %ymm5, %ymm5
1704 vpand 32*11-128($inp),%ymm11, %ymm3
1705 vpor %ymm0, %ymm4, %ymm4
1706 vpand 32*12-128($inp),%ymm12, %ymm0
1707 vpor %ymm1, %ymm5, %ymm5
1708 vpand 32*13-128($inp),%ymm13, %ymm1
1709 vpor %ymm2, %ymm4, %ymm4
1710 vpand 32*14-128($inp),%ymm14, %ymm2
1711 vpor %ymm3, %ymm5, %ymm5
1712 vpand 32*15-128($inp),%ymm15, %ymm3
1713 lea 32*16($inp), $inp
1714 vpor %ymm0, %ymm4, %ymm4
1715 vpor %ymm1, %ymm5, %ymm5
1716 vpor %ymm2, %ymm4, %ymm4
1717 vpor %ymm3, %ymm5, %ymm5
1719 vpor %ymm5, %ymm4, %ymm4
1720 vextracti128 \$1, %ymm4, %xmm5 # upper half is cleared
1721 vpor %xmm4, %xmm5, %xmm5
1722 vpermd %ymm5,%ymm7,%ymm5
1723 vmovdqu %ymm5,($out)
1726 jnz .Loop_gather_1024
1728 vpxor %ymm0,%ymm0,%ymm0
1729 vmovdqu %ymm0,($out)
1732 $code.=<<___ if ($win64);
1733 movaps -0xa8(%r11),%xmm6
1734 movaps -0x98(%r11),%xmm7
1735 movaps -0x88(%r11),%xmm8
1736 movaps -0x78(%r11),%xmm9
1737 movaps -0x68(%r11),%xmm10
1738 movaps -0x58(%r11),%xmm11
1739 movaps -0x48(%r11),%xmm12
1740 movaps -0x38(%r11),%xmm13
1741 movaps -0x28(%r11),%xmm14
1742 movaps -0x18(%r11),%xmm15
1747 .LSEH_end_rsaz_1024_gather5:
1748 .size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
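# The mask dance above implements a constant-time table lookup: all 32 table
# entries are read on every pass and the wanted one is selected with vpcmpeqd
# masks and vpand/vpor, so the memory access pattern is independent of the
# (secret) value of power. Conceptually (C sketch, names illustrative only):
#
#	for (i = 0; i < 32; i++) {
#		uint32_t mask = (i == power) ? 0xffffffff : 0;	/* branch-free via cmpeq */
#		for (k = 0; k < WORDS; k++)
#			out[k] |= table[i][k] & mask;
#	}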
1753 .extern OPENSSL_ia32cap_P
1754 .globl rsaz_avx2_eligible
1755 .type rsaz_avx2_eligible,\@abi-omnipotent
1758 mov OPENSSL_ia32cap_P+8(%rip),%eax
1760 $code.=<<___ if ($addx);
1761 mov \$`1<<8|1<<19`,%ecx
1764 cmp \$`1<<8|1<<19`,%ecx # check for BMI2+AD*X
1771 .size rsaz_avx2_eligible,.-rsaz_avx2_eligible
1775 .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
1777 .long 0,2,4,6,7,7,7,7
1779 .long 0,7,1,7,2,7,3,7
1781 .long 0,0,0,0, 1,1,1,1
1782 .long 2,2,2,2, 3,3,3,3
1783 .long 4,4,4,4, 4,4,4,4
1794 .extern __imp_RtlVirtualUnwind
1795 .type rsaz_se_handler,\@abi-omnipotent
1809 mov 120($context),%rax # pull context->Rax
1810 mov 248($context),%rbx # pull context->Rip
1812 mov 8($disp),%rsi # disp->ImageBase
1813 mov 56($disp),%r11 # disp->HandlerData
1815 mov 0(%r11),%r10d # HandlerData[0]
1816 lea (%rsi,%r10),%r10 # prologue label
1817 cmp %r10,%rbx # context->Rip<prologue label
1818 jb .Lcommon_seh_tail
1820 mov 4(%r11),%r10d # HandlerData[1]
1821 lea (%rsi,%r10),%r10 # epilogue label
1822 cmp %r10,%rbx # context->Rip>=epilogue label
1823 jae .Lcommon_seh_tail
1825 mov 160($context),%rbp # pull context->Rbp
1827 mov 8(%r11),%r10d # HandlerData[2]
1828 lea (%rsi,%r10),%r10 # "in tail" label
1829 cmp %r10,%rbx # context->Rip>="in tail" label
1838 mov %r15,240($context)
1839 mov %r14,232($context)
1840 mov %r13,224($context)
1841 mov %r12,216($context)
1842 mov %rbp,160($context)
1843 mov %rbx,144($context)
1845 lea -0xd8(%rax),%rsi # %xmm save area
1846 lea 512($context),%rdi # & context.Xmm6
1847 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
1848 .long 0xa548f3fc # cld; rep movsq
1853 mov %rax,152($context) # restore context->Rsp
1854 mov %rsi,168($context) # restore context->Rsi
1855 mov %rdi,176($context) # restore context->Rdi
1857 mov 40($disp),%rdi # disp->ContextRecord
1858 mov $context,%rsi # context
1859 mov \$154,%ecx # sizeof(CONTEXT) in qwords
1860 .long 0xa548f3fc # cld; rep movsq
1863 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1864 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1865 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1866 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1867 mov 40(%rsi),%r10 # disp->ContextRecord
1868 lea 56(%rsi),%r11 # &disp->HandlerData
1869 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1870 mov %r10,32(%rsp) # arg5
1871 mov %r11,40(%rsp) # arg6
1872 mov %r12,48(%rsp) # arg7
1873 mov %rcx,56(%rsp) # arg8, (NULL)
1874 call *__imp_RtlVirtualUnwind(%rip)
1876 mov \$1,%eax # ExceptionContinueSearch
1888 .size rsaz_se_handler,.-rsaz_se_handler
1892 .rva .LSEH_begin_rsaz_1024_sqr_avx2
1893 .rva .LSEH_end_rsaz_1024_sqr_avx2
1894 .rva .LSEH_info_rsaz_1024_sqr_avx2
1896 .rva .LSEH_begin_rsaz_1024_mul_avx2
1897 .rva .LSEH_end_rsaz_1024_mul_avx2
1898 .rva .LSEH_info_rsaz_1024_mul_avx2
1900 .rva .LSEH_begin_rsaz_1024_gather5
1901 .rva .LSEH_end_rsaz_1024_gather5
1902 .rva .LSEH_info_rsaz_1024_gather5
1905 .LSEH_info_rsaz_1024_sqr_avx2:
1907 .rva rsaz_se_handler
1908 .rva .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
1910 .LSEH_info_rsaz_1024_mul_avx2:
1912 .rva rsaz_se_handler
1913 .rva .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
1915 .LSEH_info_rsaz_1024_gather5:
1916 .byte 0x01,0x36,0x17,0x0b
1917 .byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
1918 .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
1919 .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
1920 .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
1921 .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
1922 .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
1923 .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
1924 .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
1925 .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
1926 .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
1927 .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
1928 .byte 0x00,0xb3,0x00,0x00 # set_frame r11
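# Post-process the generated code for the assembler's benefit: evaluate the
# backquoted compile-time arithmetic, reduce shift counts modulo 64 (some are
# emitted as negative values above), and rewrite the %ymm operand of
# instructions that exist only in their %xmm form (vmovd/vmovq, vpinsr,
# vpextr, register-source vpbroadcast) to the corresponding %xmm register,
# since the code above refers to all registers by their %ymm names.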
1932 foreach (split("\n",$code)) {
1933 s/\`([^\`]*)\`/eval($1)/ge;
1935 s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge or
1937 s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1938 s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
1939 s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1940 s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1941 s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1946 print <<___; # assembler is too old
1949 .globl rsaz_avx2_eligible
1950 .type rsaz_avx2_eligible,\@abi-omnipotent
1954 .size rsaz_avx2_eligible,.-rsaz_avx2_eligible
1956 .globl rsaz_1024_sqr_avx2
1957 .globl rsaz_1024_mul_avx2
1958 .globl rsaz_1024_norm2red_avx2
1959 .globl rsaz_1024_red2norm_avx2
1960 .globl rsaz_1024_scatter5_avx2
1961 .globl rsaz_1024_gather5_avx2
1962 .type rsaz_1024_sqr_avx2,\@abi-omnipotent
1965 rsaz_1024_norm2red_avx2:
1966 rsaz_1024_red2norm_avx2:
1967 rsaz_1024_scatter5_avx2:
1968 rsaz_1024_gather5_avx2:
1969 .byte 0x0f,0x0b # ud2
1971 .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2