bn/asm/x86_64-mont.pl: minor optimization [for Decoded ICache].
#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives modest
# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
# than twice (>2x) as fast. The most common case, rsa1024 sign, is
# improved by a respectable 50%. It remains to be seen whether loop
# unrolling and a dedicated squaring routine can provide further
# improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule inner loops in such a manner that they
# are "fallen through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# June 2013.
#
# Optimize reduction in squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).

# August 2013.
#
# Add MULX/ADOX/ADCX code path.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.23);
}

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$addx = ($1>=2.10);
}

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$addx = ($1>=11);
}

# int bn_mul_mont(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

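# Before diving into the assembly, here is a compact C model of the
# word-by-word (CIOS) Montgomery multiplication that bn_mul_mont below
# implements. It is an illustrative sketch only, not part of the build;
# the name bn_mul_mont_ref, the VLA, and the use of unsigned __int128
# are this note's assumptions, not OpenSSL API.
#
#	/* rp[] = ap[] * bp[] * 2^(-64*num) mod np[] */
#	int bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap,
#			const BN_ULONG *bp, const BN_ULONG *np,
#			const BN_ULONG *n0, int num)
#	{
#		BN_ULONG tp[num + 2];			/* t[], plus two overflow words */
#		for (int k = 0; k < num + 2; k++) tp[k] = 0;
#
#		for (int i = 0; i < num; i++) {
#			unsigned __int128 c = 0;
#			for (int j = 0; j < num; j++) {	/* tp[] += ap[]*bp[i] */
#				c += (unsigned __int128)ap[j] * bp[i] + tp[j];
#				tp[j] = (BN_ULONG)c;
#				c >>= 64;
#			}
#			c += tp[num];
#			tp[num] = (BN_ULONG)c;
#			tp[num + 1] = (BN_ULONG)(c >> 64);
#
#			BN_ULONG m = tp[0] * n0[0];	/* "tp[0]"*n0 mod 2^64 */
#			c = (unsigned __int128)np[0] * m + tp[0];
#			c >>= 64;			/* low word discarded */
#			for (int j = 1; j < num; j++) {	/* tp[] = (tp[]+np[]*m)>>64 */
#				c += (unsigned __int128)np[j] * m + tp[j];
#				tp[j - 1] = (BN_ULONG)c;
#				c >>= 64;
#			}
#			c += tp[num];
#			tp[num - 1] = (BN_ULONG)c;
#			tp[num] = tp[num + 1] + (BN_ULONG)(c >> 64);
#			tp[num + 1] = 0;
#		}
#		/* final conditional subtraction: see the sketch after
#		 * bn_mul_mont's body below */
#		return 1;
#	}
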
$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont
.type	bn_mul_mont,\@function,6
.align	16
bn_mul_mont:
	test	\$3,${num}d
	jnz	.Lmul_enter
	cmp	\$8,${num}d
	jb	.Lmul_enter
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	cmp	$ap,$bp
	jne	.Lmul4x_enter
	test	\$7,${num}d
	jz	.Lsqr8x_enter
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	2($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	mov	$bp,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$lo0
	mov	($np),%rax

	imulq	$lo0,$m1		# "tp[0]"*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.L1st_enter

.align	16
.L1st:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$lo0,$hi0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.L1st_enter:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	lea	1($j),$j		# j++
	mov	%rdx,$lo0

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.L1st

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1
	mov	$lo0,$hi0

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	jmp	.Louter
.align	16
.Louter:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	$n0,$m1
	mov	(%rsp),$lo0
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$lo0,$m1		# tp[0]*n0
	mov	%rdx,$hi0

	mulq	$m1			# np[0]*m1
	add	%rax,$lo0		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	8(%rsp),$lo0		# tp[1]
	mov	%rdx,$hi1

	lea	1($j),$j		# j++
	jmp	.Linner_enter

.align	16
.Linner:
	add	%rax,$hi1
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

.Linner_enter:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$hi0
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	mov	%rdx,$hi0
	adc	\$0,$hi0
	lea	1($j),$j		# j++

	mulq	$m1			# np[j]*m1
	cmp	$num,$j
	jne	.Linner

	add	%rax,$hi1
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$j,8),$lo0
	adc	\$0,%rdx
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$hi1

	xor	%rdx,%rdx
	add	$hi0,$hi1
	adc	\$0,%rdx
	add	$lo0,$hi1		# pull upmost overflow bit
	adc	\$0,%rdx
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
	cmp	$num,$i
	jl	.Louter

	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
	mov	$num,$j			# j=num
	jmp	.Lsub
.align	16
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	lea	1($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub

	sbb	\$0,%rax		# handle upmost overflow bit
	xor	$i,$i
	and	%rax,$ap
	not	%rax
	mov	$rp,$np
	and	%rax,$np
	mov	$num,$j			# j=num
	or	$np,$ap			# ap=borrow?tp:rp
.align	16
.Lcopy:					# copy or in-place refresh
	mov	($ap,$i,8),%rax
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	lea	1($i),$i
	sub	\$1,$j
	jnz	.Lcopy

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul_epilogue:
	ret
.size	bn_mul_mont,.-bn_mul_mont
___
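# The .Lsub/.Lcopy tail above is the usual branch-free post-condition:
# compute rp[] = tp[] - np[], then use the final borrow (folded with the
# top overflow word by "sbb \$0,%rax") as an all-ones/all-zeroes mask to
# pick tp[] or rp[] without a data-dependent branch. The assembly masks
# the two *pointers* and copies from the survivor; an equivalent per-word
# C sketch (variable names are this note's own) is:
#
#	BN_ULONG mask = 0 - borrow;	/* all ones if tp[] < np[] */
#	for (int j = 0; j < num; j++) {
#		rp[j] = (tp[j] & mask) | (rp[j] & ~mask);
#		tp[j] = 0;	/* clear the temporary vector, as .Lcopy does */
#	}
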
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
$code.=<<___;
.type	bn_mul4x_mont,\@function,6
.align	16
bn_mul4x_mont:
.Lmul4x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lmulx4x_enter
___
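# The gate above takes the MULX/ADOX/ADCX path only when both BMI2 (MULX)
# and ADX (ADOX/ADCX) are available: OPENSSL_ia32cap_P+8 caches
# CPUID(7,0).EBX, where bit 8 is BMI2 and bit 19 is ADX, hence the
# 0x80100 mask. An equivalent C test (the array indexing is a sketch):
#
#	unsigned int ebx = OPENSSL_ia32cap_P[2];	/* CPUID(7,0).EBX */
#	if ((ebx & 0x80100) == 0x80100)
#		/* take .Lmulx4x_enter: MULX multiplies without touching
#		 * flags, ADCX/ADOX run two independent carry chains */;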
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	mov	${num}d,${num}d
	lea	4($num),%r10
	mov	%rsp,%r11
	neg	%r10
	lea	(%rsp,%r10,8),%rsp	# tp=alloca(8*(num+4))
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%r11,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul4x_body:
	mov	$rp,16(%rsp,$num,8)	# tp[num+2]=$rp
	mov	%rdx,%r12		# reassign $bp
___
		$bp="%r12";
$code.=<<___;
	mov	($n0),$n0		# pull n0[0] value
	mov	($bp),$m0		# m0=bp[0]
	mov	($ap),%rax

	xor	$i,$i			# i=0
	xor	$j,$j			# j=0

	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[0]
	mov	%rax,$A[0]
	mov	($np),%rax

	imulq	$A[0],$m1		# "tp[0]"*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)
	mov	%rdx,$N[0]
	jmp	.L1st4x
.align	16
.L1st4x:
	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jl	.L1st4x

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[0]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	lea	1($i),$i		# i++
.align	4
.Louter4x:
	mov	($bp,$i,8),$m0		# m0=bp[i]
	xor	$j,$j			# j=0
	mov	(%rsp),$A[0]
	mov	$n0,$m1
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	mov	($np),%rax
	adc	\$0,%rdx

	imulq	$A[0],$m1		# tp[0]*n0
	mov	%rdx,$A[1]

	mulq	$m1			# np[0]*m1
	add	%rax,$A[0]		# "$N[0]", discarded
	mov	8($ap),%rax
	adc	\$0,%rdx
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np),%rax
	adc	\$0,%rdx
	add	8(%rsp),$A[1]		# +tp[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	16($ap),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4($j),$j		# j++
	adc	\$0,%rdx
	mov	$N[1],(%rsp)		# tp[j-1]
	mov	%rdx,$N[0]
	jmp	.Linner4x
.align	16
.Linner4x:
	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	($np,$j,8),%rax
	adc	\$0,%rdx
	add	(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-8(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	8($np,$j,8),%rax
	adc	\$0,%rdx
	add	8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	4($j),$j		# j++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	-16($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-32(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]
	cmp	$num,$j
	jl	.Linner4x

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[0]
	mov	-16($np,$j,8),%rax
	adc	\$0,%rdx
	add	-16(%rsp,$j,8),$A[0]	# ap[j]*bp[i]+tp[j]
	adc	\$0,%rdx
	mov	%rdx,$A[1]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[0]
	mov	-8($ap,$j,8),%rax
	adc	\$0,%rdx
	add	$A[0],$N[0]
	adc	\$0,%rdx
	mov	$N[0],-24(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[1]

	mulq	$m0			# ap[j]*bp[i]
	add	%rax,$A[1]
	mov	-8($np,$j,8),%rax
	adc	\$0,%rdx
	add	-8(%rsp,$j,8),$A[1]
	adc	\$0,%rdx
	lea	1($i),$i		# i++
	mov	%rdx,$A[0]

	mulq	$m1			# np[j]*m1
	add	%rax,$N[1]
	mov	($ap),%rax		# ap[0]
	adc	\$0,%rdx
	add	$A[1],$N[1]
	adc	\$0,%rdx
	mov	$N[1],-16(%rsp,$j,8)	# tp[j-1]
	mov	%rdx,$N[0]

	xor	$N[1],$N[1]
	add	$A[0],$N[0]
	adc	\$0,$N[1]
	add	(%rsp,$num,8),$N[0]	# pull upmost overflow bit
	adc	\$0,$N[1]
	mov	$N[0],-8(%rsp,$j,8)
	mov	$N[1],(%rsp,$j,8)	# store upmost overflow bit

	cmp	$num,$i
	jl	.Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
	mov	16(%rsp,$num,8),$rp	# restore $rp
	mov	0(%rsp),@ri[0]		# tp[0]
	pxor	%xmm0,%xmm0
	mov	8(%rsp),@ri[1]		# tp[1]
	shr	\$2,$num		# num/=4
	lea	(%rsp),$ap		# borrow ap for tp
	xor	$i,$i			# i=0 and clear CF!

	sub	0($np),@ri[0]
	mov	16($ap),@ri[2]		# tp[2]
	mov	24($ap),@ri[3]		# tp[3]
	sbb	8($np),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsub4x
.align	16
.Lsub4x:
	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	16($np,$i,8),@ri[2]
	mov	32($ap,$i,8),@ri[0]	# tp[i+1]
	mov	40($ap,$i,8),@ri[1]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	32($np,$i,8),@ri[0]
	mov	48($ap,$i,8),@ri[2]
	mov	56($ap,$i,8),@ri[3]
	sbb	40($np,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsub4x

	mov	@ri[0],0($rp,$i,8)	# rp[i]=tp[i]-np[i]
	mov	32($ap,$i,8),@ri[0]	# load overflow bit
	sbb	16($np,$i,8),@ri[2]
	mov	@ri[1],8($rp,$i,8)	# rp[i]=tp[i]-np[i]
	sbb	24($np,$i,8),@ri[3]
	mov	@ri[2],16($rp,$i,8)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rp,$i,8)	# rp[i]=tp[i]-np[i]
	xor	$i,$i			# i=0
	and	@ri[0],$ap
	not	@ri[0]
	mov	$rp,$np
	and	@ri[0],$np
	lea	-1($num),$j
	or	$np,$ap			# ap=borrow?tp:rp

	movdqu	($ap),%xmm1
	movdqa	%xmm0,(%rsp)
	movdqu	%xmm1,($rp)
	jmp	.Lcopy4x
.align	16
.Lcopy4x:				# copy or in-place refresh
	movdqu	16($ap,$i),%xmm2
	movdqu	32($ap,$i),%xmm1
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
	movdqa	%xmm0,32(%rsp,$i)
	movdqu	%xmm1,32($rp,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lcopy4x

	shl	\$2,$num
	movdqu	16($ap,$i),%xmm2
	movdqa	%xmm0,16(%rsp,$i)
	movdqu	%xmm2,16($rp,$i)
___
}
$code.=<<___;
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmul4x_epilogue:
	ret
.size	bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
\f{{{
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi";	# const BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___;
.type	bn_sqr8x_mont,\@function,6
.align	32
bn_sqr8x_mont:
.Lsqr8x_enter:
___
$code.=<<___ if ($addx);
	and	\$0x80100,%r11d
	cmp	\$0x80100,%r11d
	je	.Lsqrx8x_enter
___
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	xor	%r10,%r10
	mov	%rsp,%r11		# put aside %rsp
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10,2),%rsp	# alloca(frame+2*$num)
	and	\$-1024,%rsp		# minimize TLB usage
	##############################################################
	# Stack layout
	#
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +32	saved $rptr
	# +40	saved $nptr
	# +48	saved *n0
	# +56	saved %rsp
	# +64	t[2*$num]
	#
	mov	$rptr,32(%rsp)		# save $rptr
	mov	$nptr,40(%rsp)
	mov	$n0,  48(%rsp)
	mov	%r11, 56(%rsp)		# save original %rsp
.Lsqr8x_body:
	##############################################################
	# Squaring part:
	#
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	#
	##############################################################
	#                                                     a[1]a[0]
	#                                                 a[2]a[0]
	#                                             a[3]a[0]
	#                                             a[2]a[1]
	#                                         a[4]a[0]
	#                                         a[3]a[1]
	#                                     a[5]a[0]
	#                                     a[4]a[1]
	#                                     a[3]a[2]
	#                                 a[6]a[0]
	#                                 a[5]a[1]
	#                                 a[4]a[2]
	#                             a[7]a[0]
	#                             a[6]a[1]
	#                             a[5]a[2]
	#                             a[4]a[3]
	#                         a[7]a[1]
	#                         a[6]a[2]
	#                         a[5]a[3]
	#                     a[7]a[2]
	#                     a[6]a[3]
	#                     a[5]a[4]
	#                 a[7]a[3]
	#                 a[6]a[4]
	#             a[7]a[4]
	#             a[6]a[5]
	#         a[7]a[5]
	#     a[7]a[6]
	#                                                     a[1]a[0]
	#                                                 a[2]a[0]
	#                                             a[3]a[0]
	#                                         a[4]a[0]
	#                                     a[5]a[0]
	#                                 a[6]a[0]
	#                             a[7]a[0]
	#                                             a[2]a[1]
	#                                         a[3]a[1]
	#                                     a[4]a[1]
	#                                 a[5]a[1]
	#                             a[6]a[1]
	#                         a[7]a[1]
	#                                     a[3]a[2]
	#                                 a[4]a[2]
	#                             a[5]a[2]
	#                         a[6]a[2]
	#                     a[7]a[2]
	#                             a[4]a[3]
	#                         a[5]a[3]
	#                     a[6]a[3]
	#                 a[7]a[3]
	#                     a[5]a[4]
	#                 a[6]a[4]
	#             a[7]a[4]
	#             a[6]a[5]
	#         a[7]a[5]
	#     a[7]a[6]
	#                                                         a[0]a[0]
	#                                                 a[1]a[1]
	#                                         a[2]a[2]
	#                                 a[3]a[3]
	#                         a[4]a[4]
	#                 a[5]a[5]
	#         a[6]a[6]
	# a[7]a[7]
860         lea     32(%r10),$i             # $i=-($num-32)
861         lea     ($aptr,$num),$aptr      # end of a[] buffer, ($aptr,$i)=&ap[2]
862
863         mov     $num,$j                 # $j=$num
864
865                                         # comments apply to $num==8 case
866         mov     -32($aptr,$i),$a0       # a[0]
867         lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
868         mov     -24($aptr,$i),%rax      # a[1]
869         lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
870         mov     -16($aptr,$i),$ai       # a[2]
871         mov     %rax,$a1
872
873         mul     $a0                     # a[1]*a[0]
874         mov     %rax,$A0[0]             # a[1]*a[0]
875          mov    $ai,%rax                # a[2]
876         mov     %rdx,$A0[1]
877         mov     $A0[0],-24($tptr,$i)    # t[1]
878
879         mul     $a0                     # a[2]*a[0]
880         add     %rax,$A0[1]
881          mov    $ai,%rax
882         adc     \$0,%rdx
883         mov     $A0[1],-16($tptr,$i)    # t[2]
884         mov     %rdx,$A0[0]
885
886         lea     -16($i),$j              # j=-16
887
888
889          mov    8($aptr,$j),$ai         # a[3]
890         mul     $a1                     # a[2]*a[1]
891         mov     %rax,$A1[0]             # a[2]*a[1]+t[3]
892          mov    $ai,%rax
893         mov     %rdx,$A1[1]
894
895          lea    16($j),$j
896         mul     $a0                     # a[3]*a[0]
897         add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
898          mov    $ai,%rax
899         mov     %rdx,$A0[1]
900         adc     \$0,$A0[1]
901         add     $A1[0],$A0[0]
902         adc     \$0,$A0[1]
903         mov     $A0[0],-8($tptr,$j)     # t[3]
904         jmp     .Lsqr4x_1st
905
906 .align  32
907 .Lsqr4x_1st:
908          mov    ($aptr,$j),$ai          # a[4]
909         mul     $a1                     # a[3]*a[1]
910         add     %rax,$A1[1]             # a[3]*a[1]+t[4]
911          mov    $ai,%rax
912         mov     %rdx,$A1[0]
913         adc     \$0,$A1[0]
914
915         mul     $a0                     # a[4]*a[0]
916         add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
917          mov    $ai,%rax                # a[3]
918          mov    8($aptr,$j),$ai         # a[5]
919         mov     %rdx,$A0[0]
920         adc     \$0,$A0[0]
921         add     $A1[1],$A0[1]
922         adc     \$0,$A0[0]
923
924
925         mul     $a1                     # a[4]*a[3]
926         add     %rax,$A1[0]             # a[4]*a[3]+t[5]
927          mov    $ai,%rax
928          mov    $A0[1],($tptr,$j)       # t[4]
929         mov     %rdx,$A1[1]
930         adc     \$0,$A1[1]
931
932         mul     $a0                     # a[5]*a[2]
933         add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
934          mov    $ai,%rax
935          mov    16($aptr,$j),$ai        # a[6]
936         mov     %rdx,$A0[1]
937         adc     \$0,$A0[1]
938         add     $A1[0],$A0[0]
939         adc     \$0,$A0[1]
940
941         mul     $a1                     # a[5]*a[3]
942         add     %rax,$A1[1]             # a[5]*a[3]+t[6]
943          mov    $ai,%rax
944          mov    $A0[0],8($tptr,$j)      # t[5]
945         mov     %rdx,$A1[0]
946         adc     \$0,$A1[0]
947
948         mul     $a0                     # a[6]*a[2]
949         add     %rax,$A0[1]             # a[6]*a[2]+a[5]*a[3]+t[6]
950          mov    $ai,%rax                # a[3]
951          mov    24($aptr,$j),$ai        # a[7]
952         mov     %rdx,$A0[0]
953         adc     \$0,$A0[0]
954         add     $A1[1],$A0[1]
955         adc     \$0,$A0[0]
956
957
958         mul     $a1                     # a[6]*a[5]
959         add     %rax,$A1[0]             # a[6]*a[5]+t[7]
960          mov    $ai,%rax
961          mov    $A0[1],16($tptr,$j)     # t[6]
962         mov     %rdx,$A1[1]
963         adc     \$0,$A1[1]
964
965         mul     $a0                     # a[7]*a[4]
966         add     %rax,$A0[0]             # a[7]*a[4]+a[6]*a[5]+t[6]
967          mov    $ai,%rax
968          lea    32($j),$j
969         mov     %rdx,$A0[1]
970         adc     \$0,$A0[1]
971         add     $A1[0],$A0[0]
972         adc     \$0,$A0[1]
973         mov     $A0[0],-8($tptr,$j)     # t[7]
974
975         cmp     \$0,$j
976         jne     .Lsqr4x_1st
977
978         mul     $a1                     # a[7]*a[5]
979         add     %rax,$A1[1]
980         lea     16($i),$i
981         adc     \$0,%rdx
982         add     $A0[1],$A1[1]
983         adc     \$0,%rdx
984
985         mov     $A1[1],($tptr)          # t[8]
986         mov     %rdx,$A1[0]
987         mov     %rdx,8($tptr)           # t[9]
988         jmp     .Lsqr4x_outer
989
990 .align  32
991 .Lsqr4x_outer:                          # comments apply to $num==6 case
992         mov     -32($aptr,$i),$a0       # a[0]
993         lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
994         mov     -24($aptr,$i),%rax      # a[1]
995         lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
996         mov     -16($aptr,$i),$ai       # a[2]
997         mov     %rax,$a1
998
999         mov     -24($tptr,$i),$A0[0]    # t[1]
1000         mul     $a0                     # a[1]*a[0]
1001         add     %rax,$A0[0]             # a[1]*a[0]+t[1]
1002          mov    $ai,%rax                # a[2]
1003         adc     \$0,%rdx
1004         mov     $A0[0],-24($tptr,$i)    # t[1]
1005         mov     %rdx,$A0[1]
1006
1007         mul     $a0                     # a[2]*a[0]
1008         add     %rax,$A0[1]
1009          mov    $ai,%rax
1010         adc     \$0,%rdx
1011         add     -16($tptr,$i),$A0[1]    # a[2]*a[0]+t[2]
1012         mov     %rdx,$A0[0]
1013         adc     \$0,$A0[0]
1014         mov     $A0[1],-16($tptr,$i)    # t[2]
1015
1016         lea     -16($i),$j              # j=-16
1017         xor     $A1[0],$A1[0]
1018
1019
1020          mov    8($aptr,$j),$ai         # a[3]
1021         mul     $a1                     # a[2]*a[1]
1022         add     %rax,$A1[0]             # a[2]*a[1]+t[3]
1023          mov    $ai,%rax
1024         adc     \$0,%rdx
1025         add     8($tptr,$j),$A1[0]
1026         mov     %rdx,$A1[1]
1027         adc     \$0,$A1[1]
1028
1029         mul     $a0                     # a[3]*a[0]
1030         add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
1031          mov    $ai,%rax
1032         adc     \$0,%rdx
1033         add     $A1[0],$A0[0]
1034         mov     %rdx,$A0[1]
1035         adc     \$0,$A0[1]
1036         mov     $A0[0],8($tptr,$j)      # t[3]
1037
1038         lea     16($j),$j
1039         jmp     .Lsqr4x_inner
1040
1041 .align  32
1042 .Lsqr4x_inner:
1043          mov    ($aptr,$j),$ai          # a[4]
1044         mul     $a1                     # a[3]*a[1]
1045         add     %rax,$A1[1]             # a[3]*a[1]+t[4]
1046          mov    $ai,%rax
1047         mov     %rdx,$A1[0]
1048         adc     \$0,$A1[0]
1049         add     ($tptr,$j),$A1[1]
1050         adc     \$0,$A1[0]
1051
1052         mul     $a0                     # a[4]*a[0]
1053         add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
1054          mov    $ai,%rax                # a[3]
1055          mov    8($aptr,$j),$ai         # a[5]
1056         mov     %rdx,$A0[0]
1057         adc     \$0,$A0[0]
1058         add     $A1[1],$A0[1]
1059         adc     \$0,$A0[0]
1060
1061         mul     $a1                     # a[4]*a[3]
1062         add     %rax,$A1[0]             # a[4]*a[3]+t[5]
1063         mov     $A0[1],($tptr,$j)       # t[4]
1064          mov    $ai,%rax
1065         mov     %rdx,$A1[1]
1066         adc     \$0,$A1[1]
1067         add     8($tptr,$j),$A1[0]
1068         lea     16($j),$j               # j++
1069         adc     \$0,$A1[1]
1070
1071         mul     $a0                     # a[5]*a[2]
1072         add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
1073          mov    $ai,%rax
1074         adc     \$0,%rdx
1075         add     $A1[0],$A0[0]
1076         mov     %rdx,$A0[1]
1077         adc     \$0,$A0[1]
1078         mov     $A0[0],-8($tptr,$j)     # t[5], "preloaded t[1]" below
1079
1080         cmp     \$0,$j
1081         jne     .Lsqr4x_inner
1082
1083         mul     $a1                     # a[5]*a[3]
1084         add     %rax,$A1[1]
1085         adc     \$0,%rdx
1086         add     $A0[1],$A1[1]
1087         adc     \$0,%rdx
1088
1089         mov     $A1[1],($tptr)          # t[6], "preloaded t[2]" below
1090         mov     %rdx,$A1[0]
1091         mov     %rdx,8($tptr)           # t[7], "preloaded t[3]" below
1092
1093         add     \$16,$i
1094         jnz     .Lsqr4x_outer
1095
1096                                         # comments apply to $num==4 case
1097         mov     -32($aptr),$a0          # a[0]
1098         lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
1099         mov     -24($aptr),%rax         # a[1]
1100         lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
1101         mov     -16($aptr),$ai          # a[2]
1102         mov     %rax,$a1
1103
1104         mul     $a0                     # a[1]*a[0]
1105         add     %rax,$A0[0]             # a[1]*a[0]+t[1], preloaded t[1]
1106          mov    $ai,%rax                # a[2]
1107         mov     %rdx,$A0[1]
1108         adc     \$0,$A0[1]
1109
1110         mul     $a0                     # a[2]*a[0]
1111         add     %rax,$A0[1]
1112          mov    $ai,%rax
1113          mov    $A0[0],-24($tptr)       # t[1]
1114         mov     %rdx,$A0[0]
1115         adc     \$0,$A0[0]
1116         add     $A1[1],$A0[1]           # a[2]*a[0]+t[2], preloaded t[2]
1117          mov    -8($aptr),$ai           # a[3]
1118         adc     \$0,$A0[0]
1119
1120         mul     $a1                     # a[2]*a[1]
1121         add     %rax,$A1[0]             # a[2]*a[1]+t[3], preloaded t[3]
1122          mov    $ai,%rax
1123          mov    $A0[1],-16($tptr)       # t[2]
1124         mov     %rdx,$A1[1]
1125         adc     \$0,$A1[1]
1126
1127         mul     $a0                     # a[3]*a[0]
1128         add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
1129          mov    $ai,%rax
1130         mov     %rdx,$A0[1]
1131         adc     \$0,$A0[1]
1132         add     $A1[0],$A0[0]
1133         adc     \$0,$A0[1]
1134         mov     $A0[0],-8($tptr)        # t[3]
1135
1136         mul     $a1                     # a[3]*a[1]
1137         add     %rax,$A1[1]
1138          mov    -16($aptr),%rax         # a[2]
1139         adc     \$0,%rdx
1140         add     $A0[1],$A1[1]
1141         adc     \$0,%rdx
1142
1143         mov     $A1[1],($tptr)          # t[4]
1144         mov     %rdx,$A1[0]
1145         mov     %rdx,8($tptr)           # t[5]
1146
1147         mul     $ai                     # a[2]*a[3]
1148 ___
1149 {
1150 my ($shift,$carry)=($a0,$a1);
1151 my @S=(@A1,$ai,$n0);
1152 $code.=<<___;
1153          add    \$16,$i
1154          xor    $shift,$shift
1155          sub    $num,$i                 # $i=16-$num
1156          xor    $carry,$carry
1157
1158         add     $A1[0],%rax             # t[5]
1159         adc     \$0,%rdx
1160         mov     %rax,8($tptr)           # t[5]
1161         mov     %rdx,16($tptr)          # t[6]
1162         mov     $carry,24($tptr)        # t[7]
1163
1164          mov    -16($aptr,$i),%rax      # a[0]
1165         lea     64(%rsp),$tptr
1166          xor    $A0[0],$A0[0]           # t[0]
1167          mov    8($tptr),$A0[1]         # t[1]
1168
1169         lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1170         shr     \$63,$A0[0]
1171         lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
1172         shr     \$63,$A0[1]
1173         or      $A0[0],$S[1]            # | t[2*i]>>63
1174          mov    16($tptr),$A0[0]        # t[2*i+2]      # prefetch
1175         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1176         mul     %rax                    # a[i]*a[i]
1177         neg     $carry                  # mov $carry,cf
1178          mov    24($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1179         adc     %rax,$S[0]
1180          mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
1181         mov     $S[0],($tptr)
1182         adc     %rdx,$S[1]
1183
1184         lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1185          mov    $S[1],8($tptr)
1186          sbb    $carry,$carry           # mov cf,$carry
1187         shr     \$63,$A0[0]
1188         lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
1189         shr     \$63,$A0[1]
1190         or      $A0[0],$S[3]            # | t[2*i]>>63
1191          mov    32($tptr),$A0[0]        # t[2*i+2]      # prefetch
1192         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1193         mul     %rax                    # a[i]*a[i]
1194         neg     $carry                  # mov $carry,cf
1195          mov    40($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1196         adc     %rax,$S[2]
1197          mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
1198         mov     $S[2],16($tptr)
1199         adc     %rdx,$S[3]
1200         lea     16($i),$i
1201         mov     $S[3],24($tptr)
1202         sbb     $carry,$carry           # mov cf,$carry
1203         lea     64($tptr),$tptr
1204         jmp     .Lsqr4x_shift_n_add
1205
1206 .align  32
1207 .Lsqr4x_shift_n_add:
1208         lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1209         shr     \$63,$A0[0]
1210         lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
1211         shr     \$63,$A0[1]
1212         or      $A0[0],$S[1]            # | t[2*i]>>63
1213          mov    -16($tptr),$A0[0]       # t[2*i+2]      # prefetch
1214         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1215         mul     %rax                    # a[i]*a[i]
1216         neg     $carry                  # mov $carry,cf
1217          mov    -8($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1218         adc     %rax,$S[0]
1219          mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
1220         mov     $S[0],-32($tptr)
1221         adc     %rdx,$S[1]
1222
1223         lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1224          mov    $S[1],-24($tptr)
1225          sbb    $carry,$carry           # mov cf,$carry
1226         shr     \$63,$A0[0]
1227         lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
1228         shr     \$63,$A0[1]
1229         or      $A0[0],$S[3]            # | t[2*i]>>63
1230          mov    0($tptr),$A0[0]         # t[2*i+2]      # prefetch
1231         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1232         mul     %rax                    # a[i]*a[i]
1233         neg     $carry                  # mov $carry,cf
1234          mov    8($tptr),$A0[1]         # t[2*i+2+1]    # prefetch
1235         adc     %rax,$S[2]
1236          mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
1237         mov     $S[2],-16($tptr)
1238         adc     %rdx,$S[3]
1239
1240         lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1241          mov    $S[3],-8($tptr)
1242          sbb    $carry,$carry           # mov cf,$carry
1243         shr     \$63,$A0[0]
1244         lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
1245         shr     \$63,$A0[1]
1246         or      $A0[0],$S[1]            # | t[2*i]>>63
1247          mov    16($tptr),$A0[0]        # t[2*i+2]      # prefetch
1248         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1249         mul     %rax                    # a[i]*a[i]
1250         neg     $carry                  # mov $carry,cf
1251          mov    24($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1252         adc     %rax,$S[0]
1253          mov    8($aptr,$i),%rax        # a[i+1]        # prefetch
1254         mov     $S[0],0($tptr)
1255         adc     %rdx,$S[1]
1256
1257         lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1258          mov    $S[1],8($tptr)
1259          sbb    $carry,$carry           # mov cf,$carry
1260         shr     \$63,$A0[0]
1261         lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
1262         shr     \$63,$A0[1]
1263         or      $A0[0],$S[3]            # | t[2*i]>>63
1264          mov    32($tptr),$A0[0]        # t[2*i+2]      # prefetch
1265         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1266         mul     %rax                    # a[i]*a[i]
1267         neg     $carry                  # mov $carry,cf
1268          mov    40($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1269         adc     %rax,$S[2]
1270          mov    16($aptr,$i),%rax       # a[i+1]        # prefetch
1271         mov     $S[2],16($tptr)
1272         adc     %rdx,$S[3]
1273         mov     $S[3],24($tptr)
1274         sbb     $carry,$carry           # mov cf,$carry
1275         lea     64($tptr),$tptr
1276         add     \$32,$i
1277         jnz     .Lsqr4x_shift_n_add
1278
1279         lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1280         shr     \$63,$A0[0]
1281         lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
1282         shr     \$63,$A0[1]
1283         or      $A0[0],$S[1]            # | t[2*i]>>63
1284          mov    -16($tptr),$A0[0]       # t[2*i+2]      # prefetch
1285         mov     $A0[1],$shift           # shift=t[2*i+1]>>63
1286         mul     %rax                    # a[i]*a[i]
1287         neg     $carry                  # mov $carry,cf
1288          mov    -8($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
1289         adc     %rax,$S[0]
1290          mov    -8($aptr),%rax          # a[i+1]        # prefetch
1291         mov     $S[0],-32($tptr)
1292         adc     %rdx,$S[1]
1293
1294         lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1295          mov    $S[1],-24($tptr)
1296          sbb    $carry,$carry           # mov cf,$carry
1297         shr     \$63,$A0[0]
1298         lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
1299         shr     \$63,$A0[1]
1300         or      $A0[0],$S[3]            # | t[2*i]>>63
1301         mul     %rax                    # a[i]*a[i]
1302         neg     $carry                  # mov $carry,cf
1303         adc     %rax,$S[2]
1304         adc     %rdx,$S[3]
1305         mov     $S[2],-16($tptr)
1306         mov     $S[3],-8($tptr)
1307 ___
}\f
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
# Vinodh Gopal...
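#
# In C terms (a sketch; variable names are this note's own): given the
# 2*num-word square t[], each step computes m = t[i]*n0 mod 2^64 and
# adds m*n[] at offset i, which zeroes t[i]; after num steps the result,
# still subject to one final subtraction, sits in t[num..2*num-1] plus a
# top-most carry word, which is what the 8x-batched loops below keep in
# %r8-%r15.
#
#	for (int i = 0; i < num; i++) {
#		unsigned long long m = t[i] * n0;
#		unsigned __int128 c = 0;
#		for (int j = 0; j < num; j++) {
#			c += (unsigned __int128)m * n[j] + t[i+j];
#			t[i+j] = (unsigned long long)c;	/* t[i] becomes 0 */
#			c >>= 64;
#		}
#		for (int k = i + num; c != 0; k++) {	/* propagate carry; */
#			c += t[k];			/* t[] has 2*num+1 words */
#			t[k] = (unsigned long long)c;
#			c >>= 64;
#		}
#	}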
1315 {
1316 my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1317
1318 $code.=<<___;
1319         mov     40(%rsp),$nptr          # pull $nptr
1320         xor     %rax,%rax
1321         lea     ($nptr,$num),%rdx       # end of n[]
1322         lea     64(%rsp,$num,2),$tptr   # end of t[] buffer
1323         mov     %rdx,0(%rsp)
1324         mov     $tptr,8(%rsp)
1325         mov     %rax,($tptr)            # clear top-most carry bit
1326         lea     64(%rsp,$num),$tptr     # end of initial t[] window
1327         neg     $num
1328         jmp     .L8x_reduction_loop
1329
1330 .align  32
1331 .L8x_reduction_loop:
1332         lea     ($tptr,$num),$tptr      # start of current t[] window
1333         mov     8*0($tptr),$m0
1334         mov     8*1($tptr),%r9
1335         mov     8*2($tptr),%r10
1336         mov     8*3($tptr),%r11
1337         mov     8*4($tptr),%r12
1338         mov     8*5($tptr),%r13
1339         mov     8*6($tptr),%r14
1340         mov     8*7($tptr),%r15
1341         lea     8*8($tptr),$tptr
1342
1343         mov     $m0,%r8
1344         imulq   48(%rsp),$m0            # n0*a[0]
1345         mov     8*0($nptr),%rax         # n[0]
1346         mov     \$8,%ecx
1347         jmp     .L8x_reduce
1348
1349 .align  32
1350 .L8x_reduce:
1351         mulq    $m0
1352          mov    8*1($nptr),%rax         # n[1]
1353         neg     %r8
1354         mov     %rdx,%r8
1355         adc     \$0,%r8
1356
1357         mulq    $m0
1358         add     %rax,%r9
1359          mov    8*2($nptr),%rax
1360         adc     \$0,%rdx
1361         add     %r9,%r8
1362          mov    $m0,64-8(%rsp,%rcx,8)   # put aside n0*a[i]
1363         mov     %rdx,%r9
1364         adc     \$0,%r9
1365
1366         mulq    $m0
1367         add     %rax,%r10
1368          mov    8*3($nptr),%rax
1369         adc     \$0,%rdx
1370         add     %r10,%r9
1371          mov    48(%rsp),$carry         # pull n0, borrow $carry
1372         mov     %rdx,%r10
1373         adc     \$0,%r10
1374
1375         mulq    $m0
1376         add     %rax,%r11
1377          mov    8*4($nptr),%rax
1378         adc     \$0,%rdx
1379          imulq  %r8,$carry              # modulo-scheduled
1380         add     %r11,%r10
1381         mov     %rdx,%r11
1382         adc     \$0,%r11
1383
1384         mulq    $m0
1385         add     %rax,%r12
1386          mov    8*5($nptr),%rax
1387         adc     \$0,%rdx
1388         add     %r12,%r11
1389         mov     %rdx,%r12
1390         adc     \$0,%r12
1391
1392         mulq    $m0
1393         add     %rax,%r13
1394          mov    8*6($nptr),%rax
1395         adc     \$0,%rdx
1396         add     %r13,%r12
1397         mov     %rdx,%r13
1398         adc     \$0,%r13
1399
1400         mulq    $m0
1401         add     %rax,%r14
1402          mov    8*7($nptr),%rax
1403         adc     \$0,%rdx
1404         add     %r14,%r13
1405         mov     %rdx,%r14
1406         adc     \$0,%r14
1407
1408         mulq    $m0
1409          mov    $carry,$m0              # n0*a[i]
1410         add     %rax,%r15
1411          mov    8*0($nptr),%rax         # n[0]
1412         adc     \$0,%rdx
1413         add     %r15,%r14
1414         mov     %rdx,%r15
1415         adc     \$0,%r15
1416
1417         dec     %ecx
1418         jnz     .L8x_reduce
1419
1420         lea     8*8($nptr),$nptr
1421         xor     %rax,%rax
1422         mov     8(%rsp),%rdx            # pull end of t[]
1423         cmp     0(%rsp),$nptr           # end of n[]?
1424         jae     .L8x_no_tail
1425
1426         add     8*0($tptr),%r8
1427         adc     8*1($tptr),%r9
1428         adc     8*2($tptr),%r10
1429         adc     8*3($tptr),%r11
1430         adc     8*4($tptr),%r12
1431         adc     8*5($tptr),%r13
1432         adc     8*6($tptr),%r14
1433         adc     8*7($tptr),%r15
1434         sbb     $carry,$carry           # top carry
1435
1436         mov     64+56(%rsp),$m0         # pull n0*a[0]
1437         mov     \$8,%ecx
1438         mov     8*0($nptr),%rax
1439         jmp     .L8x_tail
1440
1441 .align  32
.L8x_tail:
	mulq	$m0
	add	%rax,%r8
	 mov	8*1($nptr),%rax
	 mov	%r8,($tptr)		# save result
	mov	%rdx,%r8
	adc	\$0,%r8

	mulq	$m0
	add	%rax,%r9
	 mov	8*2($nptr),%rax
	adc	\$0,%rdx
	add	%r9,%r8
	 lea	8($tptr),$tptr		# $tptr++
	mov	%rdx,%r9
	adc	\$0,%r9

	mulq	$m0
	add	%rax,%r10
	 mov	8*3($nptr),%rax
	adc	\$0,%rdx
	add	%r10,%r9
	mov	%rdx,%r10
	adc	\$0,%r10

	mulq	$m0
	add	%rax,%r11
	 mov	8*4($nptr),%rax
	adc	\$0,%rdx
	add	%r11,%r10
	mov	%rdx,%r11
	adc	\$0,%r11

	mulq	$m0
	add	%rax,%r12
	 mov	8*5($nptr),%rax
	adc	\$0,%rdx
	add	%r12,%r11
	mov	%rdx,%r12
	adc	\$0,%r12

	mulq	$m0
	add	%rax,%r13
	 mov	8*6($nptr),%rax
	adc	\$0,%rdx
	add	%r13,%r12
	mov	%rdx,%r13
	adc	\$0,%r13

	mulq	$m0
	add	%rax,%r14
	 mov	8*7($nptr),%rax
	adc	\$0,%rdx
	add	%r14,%r13
	mov	%rdx,%r14
	adc	\$0,%r14

	mulq	$m0
	 mov	64-16(%rsp,%rcx,8),$m0	# pull n0*a[i]
	add	%rax,%r15
	adc	\$0,%rdx
	add	%r15,%r14
	 mov	8*0($nptr),%rax		# pull n[0]
	mov	%rdx,%r15
	adc	\$0,%r15

	dec	%ecx
	jnz	.L8x_tail

	lea	8*8($nptr),$nptr
	mov	8(%rsp),%rdx		# pull end of t[]
	cmp	0(%rsp),$nptr		# end of n[]?
	jae	.L8x_tail_done		# break out of loop

	 mov	64+56(%rsp),$m0		# pull n0*a[0]
	neg	$carry
	 mov	8*0($nptr),%rax		# pull n[0]
	adc	8*0($tptr),%r8
	adc	8*1($tptr),%r9
	adc	8*2($tptr),%r10
	adc	8*3($tptr),%r11
	adc	8*4($tptr),%r12
	adc	8*5($tptr),%r13
	adc	8*6($tptr),%r14
	adc	8*7($tptr),%r15
	sbb	$carry,$carry		# top carry

	mov	\$8,%ecx
	jmp	.L8x_tail

.align	32
.L8x_tail_done:
	add	(%rdx),%r8		# can this overflow?
	xor	%rax,%rax

	neg	$carry
.L8x_no_tail:
	adc	8*0($tptr),%r8
	adc	8*1($tptr),%r9
	adc	8*2($tptr),%r10
	adc	8*3($tptr),%r11
	adc	8*4($tptr),%r12
	adc	8*5($tptr),%r13
	adc	8*6($tptr),%r14
	adc	8*7($tptr),%r15
	adc	\$0,%rax		# top-most carry

	mov	40(%rsp),$nptr		# restore $nptr

	mov	%r8,8*0($tptr)		# store top 512 bits
	mov	%r9,8*1($tptr)
	 mov	$nptr,$num		# $num is %r9, can't be moved upwards
	mov	%r10,8*2($tptr)
	 sub	0(%rsp),$num		# -$num
	mov	%r11,8*3($tptr)
	mov	%r12,8*4($tptr)
	mov	%r13,8*5($tptr)
	mov	%r14,8*6($tptr)
	mov	%r15,8*7($tptr)
	lea	8*8($tptr),$tptr
	mov	%rax,(%rdx)		# store top-most carry

	cmp	%rdx,$tptr		# end of t[]?
	jb	.L8x_reduction_loop

	neg	$num			# restore $num
___
}
##############################################################
# Post-condition, 4x unrolled copy from bn_mul_mont
#
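# In C terms the copy below is roughly (an illustrative sketch, not
# generated code; helper names are hypothetical):
#
#	borrow = sub_words(rp, tp, np, num);	/* rp = tp - np      */
#	src    = borrow ? tp : rp;		/* branchless below  */
#	for (i = 0; i < num; i++)
#		rp[i] = src[i], tp[i] = 0;	/* copy, zap tp[]    */
#
# The select is done without branching: the borrow is turned into an
# all-ones/all-zeroes mask that picks between the two candidate
# source pointers.
#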
{
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
$code.=<<___;
	mov	64(%rsp,$num),@ri[0]	# tp[0]
	lea	64(%rsp,$num),$tptr	# upper half of t[2*$num] holds result
	mov	40(%rsp),$nptr		# restore $nptr
	shr	\$5,$num		# num/4
	mov	8($tptr),@ri[1]		# t[1]
	xor	$i,$i			# i=0 and clear CF!

	mov	32(%rsp),$rptr		# restore $rptr
	sub	0($nptr),@ri[0]
	mov	16($tptr),@ri[2]	# t[2]
	mov	24($tptr),@ri[3]	# t[3]
	sbb	8($nptr),@ri[1]
	lea	-1($num),$j		# j=num/4-1
	jmp	.Lsqr4x_sub
.align	32
.Lsqr4x_sub:
	mov	@ri[0],0($rptr)		# rp[i]=tp[i]-np[i]
	mov	@ri[1],8($rptr)		# rp[i]=tp[i]-np[i]
	sbb	16($nptr,$i,8),@ri[2]
	mov	32($tptr,$i,8),@ri[0]	# tp[i+1]
	mov	40($tptr,$i,8),@ri[1]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr)	# rp[i]=tp[i]-np[i]
	mov	@ri[3],24($rptr)	# rp[i]=tp[i]-np[i]
	lea	32($rptr),$rptr
	sbb	32($nptr,$i,8),@ri[0]
	mov	48($tptr,$i,8),@ri[2]
	mov	56($tptr,$i,8),@ri[3]
	sbb	40($nptr,$i,8),@ri[1]
	lea	4($i),$i		# i++
	dec	$j			# doesn't affect CF!
	jnz	.Lsqr4x_sub

	mov	@ri[0],0($rptr)		# rp[i]=tp[i]-np[i]
	mov	32($tptr,$i,8),@ri[0]	# load overflow bit
	sbb	16($nptr,$i,8),@ri[2]
	mov	@ri[1],8($rptr)		# rp[i]=tp[i]-np[i]
	sbb	24($nptr,$i,8),@ri[3]
	mov	@ri[2],16($rptr)	# rp[i]=tp[i]-np[i]

	sbb	\$0,@ri[0]		# handle upmost overflow bit
	mov	@ri[3],24($rptr)	# rp[i]=tp[i]-np[i]
	mov	32(%rsp),$rptr		# restore $rptr
	xor	$i,$i			# i=0
	and	@ri[0],$tptr
	not	@ri[0]
	mov	$rptr,$nptr
	and	@ri[0],$nptr
	lea	-1($num),$j
	or	$nptr,$tptr		# tp=borrow?tp:rp

	pxor	%xmm0,%xmm0
	lea	64(%rsp,$num,8),$nptr
	movdqu	($tptr),%xmm1
	lea	($nptr,$num,8),$nptr
	movdqa	%xmm0,64(%rsp)		# zap lower half of temporary vector
	movdqa	%xmm0,($nptr)		# zap upper half of temporary vector
	movdqu	%xmm1,($rptr)
	jmp	.Lsqr4x_copy
.align	32
.Lsqr4x_copy:				# copy or in-place refresh
	movdqu	16($tptr,$i),%xmm2
	movdqu	32($tptr,$i),%xmm1
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,96(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqa	%xmm0,32($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
	movdqu	%xmm1,32($rptr,$i)
	lea	32($i),$i
	dec	$j
	jnz	.Lsqr4x_copy

	movdqu	16($tptr,$i),%xmm2
	movdqa	%xmm0,80(%rsp,$i)	# zap lower half of temporary vector
	movdqa	%xmm0,16($nptr,$i)	# zap upper half of temporary vector
	movdqu	%xmm2,16($rptr,$i)
___
}
$code.=<<___;
	mov	56(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lsqr8x_epilogue:
	ret
.size	bn_sqr8x_mont,.-bn_sqr8x_mont
___
}}}

if ($addx) {{{
my $bp="%rdx";	# original value

$code.=<<___;
.type	bn_mulx4x_mont,\@function,6
.align	32
bn_mulx4x_mont:
.Lmulx4x_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	mov	%rsp,%r11		# put aside %rsp
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-72(%rsp,%r10),%rsp	# alloca(frame+$num+8)
	lea	($bp,$num),%r10
	and	\$-128,%rsp
	##############################################################
	# Stack layout
	# +0	num
	# +8	off-loaded &b[i]
	# +16	end of b[num]
	# +24	saved n0
	# +32	saved rp
	# +40	(unused)
	# +48	inner counter
	# +56	saved %rsp
	# +64	tmp[num+1]
	#
	mov	$num,0(%rsp)		# save $num
	shr	\$5,$num
	mov	%r10,16(%rsp)		# end of b[num]
	sub	\$1,$num
	mov	$n0, 24(%rsp)		# save *n0
	mov	$rp, 32(%rsp)		# save $rp
	mov	$num,48(%rsp)		# inner counter
	mov	%r11,56(%rsp)		# save original %rsp
	jmp	.Lmulx4x_body

.align	32
.Lmulx4x_body:
___
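# The loops below keep two independent carry chains alive: ADCX updates
# only CF and ADOX only OF, so the a[]*b[i] column and the m*n[] column
# can be interleaved without the flags clobbering each other, and the
# counter is stepped with dec/jnz, which leaves CF intact. One step is
# roughly (an illustrative sketch, not generated code):
#
#	(hi, lo)       = a[j] * b[i];		# mulx: no flags touched
#	(cf, acc[j])   = acc[j]   + lo + cf;	# adcx: CF chain
#	(of, acc[j+1]) = acc[j+1] + hi + of;	# adox: OF chain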
my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $rptr=$bptr;
$code.=<<___;
	lea	8($bp),$bptr
	mov	($bp),%rdx		# b[0], $bp==%rdx actually
	lea	64+32(%rsp),$tptr
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[0]
	add	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adc	%r14,%r12
	adc	\$0,%r13

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	lea	4*8($aptr),$aptr
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00	# mulx	2*8($nptr),%rax,%r12
	mov	48(%rsp),$bptr		# counter value
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-2*8($tptr)

	jmp	.Lmulx4x_1st

.align	32
.Lmulx4x_1st:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	adcx	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 .byte	0x66,0x66
	 mov	$mi,%rdx
	adcx	%rax,%r13
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	mov	%r11,-4*8($tptr)
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_1st

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	add	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)
	jmp	.Lmulx4x_outer

.align	32
.Lmulx4x_outer:
	mov	($bptr),%rdx		# b[i]
	lea	8($bptr),$bptr
	sub	$num,$aptr		# rewind $aptr
	mov	%r15,($tptr)		# save top-most carry
	mov	64(%rsp),%r10
	lea	64(%rsp),$tptr
	sub	$num,$nptr		# rewind $nptr
	xor	$zero,$zero		# cf=0, of=0
	mov	%rdx,$bi

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[i]
	adox	%r10,$mi
	mov	1*8($tptr),%r10
	mulx	1*8($aptr),%r11,%r14	# a[1]*b[i]
	adcx	%rax,%r11
	mov	$bptr,8(%rsp)		# off-load &b[i]
	mulx	2*8($aptr),%r12,%r13	# ...
	adox	%r10,%r11
	adcx	%r14,%r12
	adox	$zero,%r12
	.byte	0x66,0x66
	adcx	$zero,%r13
	mov	2*8($tptr),%r10

	mov	$mi,$bptr		# borrow $bptr
	imulq	24(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0

	mulx	3*8($aptr),%rax,%r14
	 mov	$mi,%rdx
	adox	%r10,%r12
	adcx	%rax,%r13
	adox	3*8($tptr),%r13
	adcx	$zero,%r14
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adox	$zero,%r14

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,$bptr		# discarded
	adox	%r11,%r10
	mulx	1*8($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	%r10,-4*8($tptr)
	adcx	%rax,%r11
	adox	%r13,%r12
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-3*8($tptr)
	adcx	%rax,%r12
	adox	$zero,%r15		# of=0
	mov	48(%rsp),$bptr		# counter value
	mov	%r12,-2*8($tptr)
	.byte	0x66
	lea	4*8($nptr),$nptr

	#jmp	.Lmulx4x_inner

.align	32
.Lmulx4x_inner:
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	adox	0*8($tptr),%r14
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	%r14,%r10
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adox	%rax,%r11
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	adox	%r14,%r12
	mulx	3*8($aptr),%r13,%r14
	 mov	$mi,%rdx
	adcx	2*8($tptr),%r12
	adox	%rax,%r13
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	adox	%r15,%r10
	mulx	0*8($nptr),%rax,%r15
	adcx	%rax,%r10
	adox	%r15,%r11
	mulx	1*8($nptr),%rax,%r15
	adcx	%rax,%r11
	adox	%r15,%r12
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	adcx	%rax,%r12
	adox	%r15,%r13
	mulx	3*8($nptr),%rax,%r15
	 mov	$bi,%rdx
	mov	%r11,-4*8($tptr)
	mov	%r12,-3*8($tptr)
	adcx	%rax,%r13
	adox	$zero,%r15
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	jnz	.Lmulx4x_inner

	mov	0(%rsp),$num		# load num
	mov	8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$zero	# pull top-most carry
	adc	%r15,%r14
	sbb	%r15,%r15		# top-most carry
	mov	%r14,-1*8($tptr)

	cmp	16(%rsp),$bptr
	jne	.Lmulx4x_outer

	neg	$num
	xor	%rdx,%rdx
	mov	32(%rsp),$rptr		# restore rp
	lea	64(%rsp),$tptr

	pxor	%xmm0,%xmm0
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	neg	%r8
	jmp	.Lmulx4x_sub_entry

.align	32
.Lmulx4x_sub:
	mov	0*8($nptr,$num),%r8
	mov	1*8($nptr,$num),%r9
	not	%r8
.Lmulx4x_sub_entry:
	mov	2*8($nptr,$num),%r10
	not	%r9
	and	%r15,%r8
	mov	3*8($nptr,$num),%r11
	not	%r10
	and	%r15,%r9
	not	%r11
	and	%r15,%r10
	and	%r15,%r11

	neg	%rdx			# mov %rdx,%cf
	adc	0*8($tptr),%r8
	adc	1*8($tptr),%r9
	movdqa	%xmm0,($tptr)
	adc	2*8($tptr),%r10
	adc	3*8($tptr),%r11
	movdqa	%xmm0,16($tptr)
	lea	4*8($tptr),$tptr
	sbb	%rdx,%rdx		# mov %cf,%rdx

	mov	%r8,0*8($rptr)
	mov	%r9,1*8($rptr)
	mov	%r10,2*8($rptr)
	mov	%r11,3*8($rptr)
	lea	4*8($rptr),$rptr

	add	\$32,$num
	jnz	.Lmulx4x_sub

	mov	56(%rsp),%rsi		# restore %rsp
	mov	\$1,%rax
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lmulx4x_epilogue:
	ret
.size	bn_mulx4x_mont,.-bn_mulx4x_mont
___
}{
######################################################################
# void bn_sqrx8x_mont(
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# not used
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0,
my $num ="%r9";		# int num); has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___;
.type	bn_sqrx8x_mont,\@function,6
.align	32
bn_sqrx8x_mont:
.Lsqrx8x_enter:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	shl	\$3,${num}d		# convert $num to bytes
	.byte	0x67
	xor	%r10,%r10
	mov	%rsp,%r11		# put aside %rsp
	sub	$num,%r10		# -$num
	mov	($n0),$n0		# *n0
	lea	-64(%rsp,%r10,2),%rsp	# alloca(frame+2*$num)
	and	\$-1024,%rsp		# minimize TLB usage
	##############################################################
	# Stack layout
	#
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +16	intermediate carry bit
	# +24	top-most carry bit, used in reduction section
	# +32	saved *n0
	# +48	t[2*$num]
	#
	movq	$rptr,%xmm1		# save $rptr
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num
	movq	%r11, %xmm4		# save original %rsp
	mov	$n0,  32(%rsp)
___
$code.=<<___ if ($win64);
	jmp	.Lsqrx8x_body
.align	32
___
$code.=<<___;
.Lsqrx8x_body:
	##################################################################
	# Squaring part:
	#
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	#
	##################################################################
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
	#                                                     a[1]a[0]
	#                                                 a[2]a[0]
	#                                             a[3]a[0]
	#                                             a[2]a[1]
	#                                         a[3]a[1]
	#                                     a[3]a[2]
	#
	#                                         a[4]a[0]
	#                                     a[5]a[0]
	#                                 a[6]a[0]
	#                             a[7]a[0]
	#                                     a[4]a[1]
	#                                 a[5]a[1]
	#                             a[6]a[1]
	#                         a[7]a[1]
	#                                 a[4]a[2]
	#                             a[5]a[2]
	#                         a[6]a[2]
	#                     a[7]a[2]
	#                             a[4]a[3]
	#                         a[5]a[3]
	#                     a[6]a[3]
	#                 a[7]a[3]
	#
	#                     a[5]a[4]
	#                 a[6]a[4]
	#             a[7]a[4]
	#             a[6]a[5]
	#         a[7]a[5]
	#     a[7]a[6]
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
___
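# In C terms the two passes amount to roughly the following (an
# illustrative sketch, not generated code; add_mul() is a hypothetical
# double-width multiply-accumulate with carry propagation):
#
#	for (i = 0; i < num; i++)		/* a) cross products,    */
#		for (j = i+1; j < num; j++)	/*    each computed once */
#			add_mul(t, i+j, a[i], a[j]);
#	shift_left1(t, 2*num);			/* b) double the sum...  */
#	for (i = 0; i < num; i++)		/*    and fold in squares */
#		add_mul(t, 2*i, a[i], a[i]);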
{
my ($zero,$carry)=("%rbp","%rcx");
my $aaptr=$zero;
$code.=<<___;
	pxor	%xmm0,%xmm0
	lea	48(%rsp),$tptr
	lea	($aptr,$num),$aaptr
	mov	$num,(%rsp)		# save $num
	mov	$aaptr,8(%rsp)		# save end of $aptr
	jmp	.Lsqr8x_zero_start

.align	32
.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
.Lsqrx8x_zero:
	.byte	0x3e
	movdqa	%xmm0,0*8($tptr)
	movdqa	%xmm0,2*8($tptr)
	movdqa	%xmm0,4*8($tptr)
	movdqa	%xmm0,6*8($tptr)
.Lsqr8x_zero_start:			# aligned at 32
	movdqa	%xmm0,8*8($tptr)
	movdqa	%xmm0,10*8($tptr)
	movdqa	%xmm0,12*8($tptr)
	movdqa	%xmm0,14*8($tptr)
	lea	16*8($tptr),$tptr
	sub	\$64,$num
	jnz	.Lsqrx8x_zero

	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
	#xor	%r9,%r9			# t[1], ex-$num, zero already
	xor	%r10,%r10
	xor	%r11,%r11
	xor	%r12,%r12
	xor	%r13,%r13
	xor	%r14,%r14
	xor	%r15,%r15
	lea	48(%rsp),$tptr
	xor	$zero,$zero		# cf=0, of=0
	jmp	.Lsqrx8x_outer_loop

.align	32
.Lsqrx8x_outer_loop:
	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
	adox	%rax,%r10
	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
	adcx	%r10,%r9
	adox	%rax,%r11
	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx	3*8($aptr),%r10,%rax	# ...
	adcx	%r11,%r10
	adox	%rax,%r12
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx	4*8($aptr),%r11,%rax
	adcx	%r12,%r11
	adox	%rax,%r13
	mulx	5*8($aptr),%r12,%rax
	adcx	%r13,%r12
	adox	%rax,%r14
	mulx	6*8($aptr),%r13,%rax
	adcx	%r14,%r13
	adox	%r15,%rax
	mulx	7*8($aptr),%r14,%r15
	 mov	1*8($aptr),%rdx		# a[1]
	adcx	%rax,%r14
	adox	$zero,%r15
	adc	8*8($tptr),%r15
	mov	%r8,1*8($tptr)		# t[1]
	mov	%r9,2*8($tptr)		# t[2]
	sbb	$carry,$carry		# mov %cf,$carry
	xor	$zero,$zero		# cf=0, of=0
	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
	adcx	%r10,%r8
	adox	%rbx,%r9
	mulx	4*8($aptr),%r10,%rbx	# ...
	adcx	%r11,%r9
	adox	%rax,%r10
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx	5*8($aptr),%r11,%rax
	adcx	%r12,%r10
	adox	%rbx,%r11
	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r12,%rbx
	adcx	%r13,%r11
	adox	%r14,%r12
	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r13,%r14
	 mov	2*8($aptr),%rdx		# a[2]
	adcx	%rax,%r12
	adox	%rbx,%r13
	adcx	%r15,%r13
	adox	$zero,%r14		# of=0
	adcx	$zero,%r14		# cf=0

	mov	%r8,3*8($tptr)		# t[3]
	mov	%r9,4*8($tptr)		# t[4]

	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
	adcx	%r10,%r8
	adox	%rbx,%r9
	mulx	5*8($aptr),%r10,%rbx	# ...
	adcx	%r11,%r9
	adox	%rax,%r10
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx	6*8($aptr),%r11,%rax
	adcx	%r12,%r10
	adox	%r13,%r11
	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx	7*8($aptr),%r12,%r13
	.byte	0x3e
	 mov	3*8($aptr),%rdx		# a[3]
	adcx	%rbx,%r11
	adox	%rax,%r12
	adcx	%r14,%r12
	mov	%r8,5*8($tptr)		# t[5]
	mov	%r9,6*8($tptr)		# t[6]
	 mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
	adox	$zero,%r13		# of=0
	adcx	$zero,%r13		# cf=0

	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
	adcx	%r10,%r8
	adox	%rax,%r9
	mulx	6*8($aptr),%r10,%rax	# ...
	adcx	%r11,%r9
	adox	%r12,%r10
	mulx	7*8($aptr),%r11,%r12
	 mov	4*8($aptr),%rdx		# a[4]
	 mov	5*8($aptr),%r14		# a[5]
	adcx	%rbx,%r10
	adox	%rax,%r11
	 mov	6*8($aptr),%r15		# a[6]
	adcx	%r13,%r11
	adox	$zero,%r12		# of=0
	adcx	$zero,%r12		# cf=0

	mov	%r8,7*8($tptr)		# t[7]
	mov	%r9,8*8($tptr)		# t[8]

	mulx	%r14,%r9,%rax		# a[5]*a[4]
	 mov	7*8($aptr),%r8		# a[7]
	adcx	%r10,%r9
	mulx	%r15,%r10,%rbx		# a[6]*a[4]
	adox	%rax,%r10
	adcx	%r11,%r10
	mulx	%r8,%r11,%rax		# a[7]*a[4]
	 mov	%r14,%rdx		# a[5]
	adox	%rbx,%r11
	adcx	%r12,%r11
	#adox	$zero,%rax		# of=0
	adcx	$zero,%rax		# cf=0

	mulx	%r15,%r14,%rbx		# a[6]*a[5]
	mulx	%r8,%r12,%r13		# a[7]*a[5]
	 mov	%r15,%rdx		# a[6]
	 lea	8*8($aptr),$aptr
	adcx	%r14,%r11
	adox	%rbx,%r12
	adcx	%rax,%r12
	adox	$zero,%r13

	.byte	0x67,0x67
	mulx	%r8,%r8,%r14		# a[7]*a[6]
	adcx	%r8,%r13
	adcx	$zero,%r14

	cmp	8(%rsp),$aptr
	je	.Lsqrx8x_outer_break

	neg	$carry			# mov $carry,%cf
	mov	\$-8,%rcx
	mov	$zero,%r15
	mov	8*8($tptr),%r8
	adcx	9*8($tptr),%r9		# +=t[9]
	adcx	10*8($tptr),%r10	# ...
	adcx	11*8($tptr),%r11
	adc	12*8($tptr),%r12
	adc	13*8($tptr),%r13
	adc	14*8($tptr),%r14
	adc	15*8($tptr),%r15
	lea	($aptr),$aaptr
	lea	2*8*8($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,$carry

	mov	-64($aptr),%rdx		# a[0]
	mov	%rax,16(%rsp)		# offload $carry
	mov	$tptr,24(%rsp)

	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
	xor	%eax,%eax		# cf=0, of=0
	jmp	.Lsqrx8x_loop

.align	32
.Lsqrx8x_loop:
	mov	%r8,%rbx
	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
	adcx	%rax,%rbx		# +=t[8]
	adox	%r9,%r8

	mulx	1*8($aaptr),%rax,%r9	# ...
	adcx	%rax,%r8
	adox	%r10,%r9

	mulx	2*8($aaptr),%rax,%r10
	adcx	%rax,%r9
	adox	%r11,%r10

	mulx	3*8($aaptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11

	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	4*8($aaptr),%rax,%r12
	adcx	%rax,%r11
	adox	%r13,%r12

	mulx	5*8($aaptr),%rax,%r13
	adcx	%rax,%r12
	adox	%r14,%r13

	mulx	6*8($aaptr),%rax,%r14
	 mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
	 mov	\$0,%ebx
	adcx	%rax,%r13
	adox	%r15,%r14

	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx	7*8($aaptr),%rax,%r15
	 mov	8($aptr,%rcx,8),%rdx	# a[i]
	adcx	%rax,%r14
	adox	%rbx,%r15		# %rbx is 0, of=0
	adcx	%rbx,%r15		# cf=0

	.byte	0x67
	inc	%rcx			# of=0
	jnz	.Lsqrx8x_loop

	lea	8*8($aaptr),$aaptr
	mov	\$-8,%rcx
	cmp	8(%rsp),$aaptr		# done?
	je	.Lsqrx8x_break

	sub	16(%rsp),%rbx		# mov 16(%rsp),%cf
	.byte	0x66
	mov	-64($aptr),%rdx
	adcx	0*8($tptr),%r8
	adcx	1*8($tptr),%r9
	adc	2*8($tptr),%r10
	adc	3*8($tptr),%r11
	adc	4*8($tptr),%r12
	adc	5*8($tptr),%r13
	adc	6*8($tptr),%r14
	adc	7*8($tptr),%r15
	lea	8*8($tptr),$tptr
	.byte	0x67
	sbb	%rax,%rax		# mov %cf,%rax
	xor	%ebx,%ebx		# cf=0, of=0
	mov	%rax,16(%rsp)		# offload carry
	jmp	.Lsqrx8x_loop

.align	32
.Lsqrx8x_break:
	sub	16(%rsp),%r8		# consume last carry
	mov	24(%rsp),$carry		# initial $tptr, borrow $carry
	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
	xor	%ebp,%ebp		# xor	$zero,$zero
	mov	%r8,0*8($tptr)
	cmp	$carry,$tptr		# cf=0, of=0
	je	.Lsqrx8x_outer_loop

	mov	%r9,1*8($tptr)
	 mov	1*8($carry),%r9
	mov	%r10,2*8($tptr)
	 mov	2*8($carry),%r10
	mov	%r11,3*8($tptr)
	 mov	3*8($carry),%r11
	mov	%r12,4*8($tptr)
	 mov	4*8($carry),%r12
	mov	%r13,5*8($tptr)
	 mov	5*8($carry),%r13
	mov	%r14,6*8($tptr)
	 mov	6*8($carry),%r14
	mov	%r15,7*8($tptr)
	 mov	7*8($carry),%r15
	mov	$carry,$tptr
	jmp	.Lsqrx8x_outer_loop

.align	32
.Lsqrx8x_outer_break:
	mov	%r9,9*8($tptr)		# t[9]
	 movq	%xmm3,%rcx		# -$num
	mov	%r10,10*8($tptr)	# ...
	mov	%r11,11*8($tptr)
	mov	%r12,12*8($tptr)
	mov	%r13,13*8($tptr)
	mov	%r14,14*8($tptr)
___
}{
my $i="%rcx";
$code.=<<___;
	lea	48(%rsp),$tptr
	mov	($aptr,$i),%rdx		# a[0]

	mov	8($tptr),$A0[1]		# t[1]
	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
	mov	(%rsp),$num		# restore $num
	adox	$A0[1],$A0[1]
	 mov	16($tptr),$A1[0]	# t[2]	# prefetch
	 mov	24($tptr),$A1[1]	# t[3]	# prefetch
	nop
	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned

.align	32
.Lsqrx4x_shift_n_add:
	mulx	%rdx,%rax,%rbx
	 adox	$A1[0],$A1[0]
	adcx	$A0[0],%rax
	 .byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov	8($aptr,$i),%rdx	# a[i+1]	# prefetch
	 .byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov	32($tptr),$A0[0]	# t[2*i+4]	# prefetch
	 adox	$A1[1],$A1[1]
	adcx	$A0[1],%rbx
	 mov	40($tptr),$A0[1]	# t[2*i+4+1]	# prefetch
	mov	%rax,0($tptr)
	mov	%rbx,8($tptr)

	mulx	%rdx,%rax,%rbx
	 adox	$A0[0],$A0[0]
	adcx	$A1[0],%rax
	 mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
	 mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
	 adox	$A0[1],$A0[1]
	adcx	$A1[1],%rbx
	 mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
	mov	%rax,16($tptr)
	mov	%rbx,24($tptr)

	mulx	%rdx,%rax,%rbx
	 adox	$A1[0],$A1[0]
	adcx	$A0[0],%rax
	 mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
	 lea	32($i),$i
	 mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
	 adox	$A1[1],$A1[1]
	adcx	$A0[1],%rbx
	 mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
	mov	%rax,32($tptr)
	mov	%rbx,40($tptr)

	mulx	%rdx,%rax,%rbx
	 adox	$A0[0],$A0[0]
	adcx	$A1[0],%rax
	jrcxz	.Lsqrx4x_shift_n_add_break
	 .byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov	0($aptr,$i),%rdx	# a[i+4]	# prefetch
	 adox	$A0[1],$A0[1]
	adcx	$A1[1],%rbx
	 mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
	 mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
	mov	%rax,48($tptr)
	mov	%rbx,56($tptr)
	lea	64($tptr),$tptr
	nop
	jmp	.Lsqrx4x_shift_n_add

.align	32
.Lsqrx4x_shift_n_add_break:
	adcx	$A1[1],%rbx
	mov	%rax,48($tptr)
	mov	%rbx,56($tptr)
	lea	64($tptr),$tptr		# end of t[] buffer
___
}
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
# Vinodh Gopal...
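#
# In C terms one pass of the reduction is roughly (an illustrative
# sketch, not generated code; mul_add() is a hypothetical double-width
# multiply-accumulate returning its carry):
#
#	for (i = 0; i < num; i++) {
#		BN_ULONG m = t[i]*n0;		/* mod 2^64           */
#		carry = 0;
#		for (j = 0; j < num; j++)	/* t += m*n << 64*i,  */
#			mul_add(&t[i+j], m, n[j], &carry);
#		t[i+num] += carry;		/* t[i] is now zero   */
#	}
#	/* upper half of t[] holds the result, one conditional
#	   subtraction of n[] short of full reduction */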
{
my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");

$code.=<<___;
	movq	%xmm2,$nptr
	xor	%eax,%eax		# initial top-most carry bit
	mov	32(%rsp),%rbx		# n0
	mov	48(%rsp),%rdx		# "%r8", 8*0($tptr)
	lea	-64($nptr,$num),%rcx	# end of n[]
	#lea	48(%rsp,$num,2),$tptr	# end of t[] buffer
	mov	%rcx, 0(%rsp)		# save end of n[]
	mov	$tptr,8(%rsp)		# save end of t[]

	lea	48(%rsp),$tptr		# initial t[] window
	jmp	.Lsqrx8x_reduction_loop

.align	32
.Lsqrx8x_reduction_loop:
	mov	8*1($tptr),%r9
	mov	8*2($tptr),%r10
	mov	8*3($tptr),%r11
	mov	8*4($tptr),%r12
	mov	%rdx,%r8
	imulq	%rbx,%rdx		# n0*a[i]
	mov	8*5($tptr),%r13
	mov	8*6($tptr),%r14
	mov	8*7($tptr),%r15
	mov	%rax,24(%rsp)		# store top-most carry bit

	lea	8*8($tptr),$tptr
	xor	$carry,$carry		# cf=0,of=0
	mov	\$-8,%rcx
	jmp	.Lsqrx8x_reduce

.align	32
.Lsqrx8x_reduce:
	mov	%r8, %rbx
	mulx	8*0($nptr),%rax,%r8	# n[0]
	adcx	%rbx,%rax		# discarded
	adox	%r9,%r8

	mulx	8*1($nptr),%rbx,%r9	# n[1]
	adcx	%rbx,%r8
	adox	%r10,%r9

	mulx	8*2($nptr),%rbx,%r10
	adcx	%rbx,%r9
	adox	%r11,%r10

	mulx	8*3($nptr),%rbx,%r11
	adcx	%rbx,%r10
	adox	%r12,%r11

	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rbx,%r12
	 mov	%rdx,%rax
	 mov	%r8,%rdx
	adcx	%rbx,%r11
	adox	%r13,%r12

	 mulx	32(%rsp),%rbx,%rdx	# %rdx discarded
	 mov	%rax,%rdx
	 mov	%rax,48+64(%rsp,%rcx,8)	# put aside n0*a[i]

	mulx	8*5($nptr),%rax,%r13
	adcx	%rax,%r12
	adox	%r14,%r13

	mulx	8*6($nptr),%rax,%r14
	adcx	%rax,%r13
	adox	%r15,%r14

	mulx	8*7($nptr),%rax,%r15
	 mov	%rbx,%rdx
	adcx	%rax,%r14
	adox	$carry,%r15		# $carry is 0
	adcx	$carry,%r15		# cf=0

	.byte	0x67
	inc	%rcx			# of=0
	jnz	.Lsqrx8x_reduce

	.byte	0x66,0x67
	mov	$carry,%rax		# xor	%rax,%rax
	cmp	0(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_no_tail

	mov	48(%rsp),%rdx		# pull n0*a[0]
	add	8*0($tptr),%r8
	lea	8*8($nptr),$nptr
	mov	\$-8,%rcx
	adc	8*1($tptr),%r9
	adc	8*2($tptr),%r10
	adc	8*3($tptr),%r11
	adc	8*4($tptr),%r12
	adc	8*5($tptr),%r13
	adc	8*6($tptr),%r14
	adc	8*7($tptr),%r15
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# top carry

	xor	$carry,$carry		# of=0, cf=0
	mov	%rax,16(%rsp)
	jmp	.Lsqrx8x_tail

.align	32
.Lsqrx8x_tail:
	mov	%r8,%rbx
	mulx	8*0($nptr),%rax,%r8
	adcx	%rax,%rbx
	adox	%r9,%r8

	mulx	8*1($nptr),%rax,%r9
	adcx	%rax,%r8
	adox	%r10,%r9

	mulx	8*2($nptr),%rax,%r10
	adcx	%rax,%r9
	adox	%r11,%r10

	mulx	8*3($nptr),%rax,%r11
	adcx	%rax,%r10
	adox	%r12,%r11

	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx	8*4($nptr),%rax,%r12
	adcx	%rax,%r11
	adox	%r13,%r12

	mulx	8*5($nptr),%rax,%r13
	adcx	%rax,%r12
	adox	%r14,%r13

	mulx	8*6($nptr),%rax,%r14
	adcx	%rax,%r13
	adox	%r15,%r14

	mulx	8*7($nptr),%rax,%r15
	 mov	48+72(%rsp,%rcx,8),%rdx	# pull n0*a[i]
	adcx	%rax,%r14
	.byte	0x67
	adox	$carry,%r15
	 mov	%rbx,($tptr,%rcx,8)	# save result
	 mov	%r8,%rbx
	adcx	$carry,%r15		# cf=0

	inc	%rcx			# of=0
	jnz	.Lsqrx8x_tail

	cmp	0(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_tail_done	# break out of loop

	sub	16(%rsp),$carry		# mov 16(%rsp),%cf
	 mov	48(%rsp),%rdx		# pull n0*a[0]
	 lea	8*8($nptr),$nptr
	adc	8*0($tptr),%r8
	adc	8*1($tptr),%r9
	adc	8*2($tptr),%r10
	adc	8*3($tptr),%r11
	adc	8*4($tptr),%r12
	adc	8*5($tptr),%r13
	adc	8*6($tptr),%r14
	adc	8*7($tptr),%r15
	lea	8*8($tptr),$tptr
	mov	\$-8,%rcx
	sbb	%rax,%rax

	xor	$carry,$carry		# of=0, cf=0
	mov	%rax,16(%rsp)
	jmp	.Lsqrx8x_tail

.align	32
.Lsqrx8x_tail_done:
	add	24(%rsp),%r8		# can this overflow?
	mov	$carry,%rax		# xor	%rax,%rax

	sub	16(%rsp),$carry		# mov 16(%rsp),%cf
.Lsqrx8x_no_tail:			# %cf is 0 if jumped here
	adc	8*0($tptr),%r8
	 movq	%xmm3,%rcx
	adc	8*1($tptr),%r9
	 movq	%xmm2,$nptr		# restore $nptr
	adc	8*2($tptr),%r10
	 lea	8*8($tptr),$carry	# borrow $carry
	adc	8*3($tptr),%r11
	adc	8*4($tptr),%r12
	adc	8*5($tptr),%r13
	adc	8*6($tptr),%r14
	adc	8*7($tptr),%r15
	adc	%rax,%rax		# top-most carry

	mov	32(%rsp),%rbx		# n0
	mov	8*8($tptr,%rcx),%rdx	# modulo-scheduled "%r8"

	mov	%r8,8*0($tptr)		# store top 512 bits
	mov	%r9,8*1($tptr)
	mov	%r10,8*2($tptr)
	mov	%r11,8*3($tptr)
	mov	%r12,8*4($tptr)
	mov	%r13,8*5($tptr)
	mov	%r14,8*6($tptr)
	mov	%r15,8*7($tptr)

	lea	8*8($tptr,%rcx),$tptr	# start of current t[] window
	cmp	8(%rsp),$carry		# end of t[]?
	jb	.Lsqrx8x_reduction_loop

	mov	%rcx,%rdx		# -$num
	jmp	.Lsqrx8x_post
___
}
##############################################################
# Post-condition, 8x unrolled
#
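# Unlike the 4x copy above, this variant never branches on the borrow:
# in C terms roughly (an illustrative sketch, not generated code, with
# adc() a hypothetical add-with-carry helper):
#
#	mask  = 0 - top_carry;	/* all-ones iff n[] must be subtracted */
#	cf    = 0;
#	rp[0] = adc(tp[0], (0-np[0]) & mask, &cf);	/* -n = ~n+1   */
#	for (i = 1; i < num; i++)
#		rp[i] = adc(tp[i], ~np[i] & mask, &cf);
#
# with the temporary vector zapped as it is consumed.
#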
{
my ($rptr,$nptr,$lptr,$i)=($aptr,"%rbp","%rbx","%rcx");
my @ri=map("%r$_",(10..13));
my @ni=map("%r$_",(14..15));
$code.=<<___;
.align	32
.Lsqrx8x_post:
	neg	%rdx			# restore $num
	neg	%rax			# top-most carry as mask
	mov	0*8($nptr),%r8
	mov	1*8($nptr),%r9
	lea	($nptr,%rdx),$nptr	# end of $nptr
	lea	48(%rsp,%rdx),$lptr	# end of lower half of t[2*num]
	lea	48(%rsp,%rdx),$tptr
	.byte	0x67
	xor	%rdx,%rdx
	movq	%xmm1,$rptr		# restore $rptr

	neg	%r8
	jmp	.Lsqrx8x_sub_entry

.byte	0x66,0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
.Lsqrx8x_sub:
	mov	0*8($nptr,$i),%r8
	mov	1*8($nptr,$i),%r9
	not	%r8
.Lsqrx8x_sub_entry:			# aligned at 32
	mov	2*8($nptr,$i),%r10
	not	%r9
	and	%rax,%r8
	mov	3*8($nptr,$i),%r11
	not	%r10
	and	%rax,%r9
	mov	4*8($nptr,$i),%r12
	not	%r11
	and	%rax,%r10
	mov	5*8($nptr,$i),%r13
	not	%r12
	and	%rax,%r11
	mov	6*8($nptr,$i),%r14
	not	%r13
	and	%rax,%r12
	mov	7*8($nptr,$i),%r15
	not	%r14
	and	%rax,%r13
	movdqa	%xmm0,0*8($lptr,$i)	# zap lower half
	not	%r15
	and	%rax,%r14
	movdqa	%xmm0,2*8($lptr,$i)
	and	%rax,%r15

	neg	%edx			# mov %edx,%cf
	movdqa	%xmm0,4*8($lptr,$i)
	adc	0*8($tptr),%r8
	mov	%r8,0*8($rptr)		# result
	adc	1*8($tptr),%r9
	movdqa	%xmm0,6*8($lptr,$i)
	adc	2*8($tptr),%r10
	mov	%r9,1*8($rptr)
	adc	3*8($tptr),%r11
	movdqa	%xmm0,0*8($tptr)	# zap upper half
	adc	4*8($tptr),%r12
	mov	%r10,2*8($rptr)
	adc	5*8($tptr),%r13
	movdqa	%xmm0,2*8($tptr)
	adc	6*8($tptr),%r14
	mov	%r11,3*8($rptr)
	adc	7*8($tptr),%r15
	sbb	%edx,%edx		# mov %cf,%edx
	movdqa	%xmm0,4*8($tptr)
	movdqa	%xmm0,6*8($tptr)
	lea	8*8($tptr),$tptr
	mov	%r12,4*8($rptr)
	mov	%r13,5*8($rptr)
	mov	%r14,6*8($rptr)
	mov	%r15,7*8($rptr)
	lea	8*8($rptr),$rptr

	add	\$64,$i
	jnz	.Lsqrx8x_sub
___
}
$code.=<<___;
	movq	%xmm4,%rsi		# restore %rsp
	mov	\$1,%rax
	mov	0(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lsqrx8x_epilogue:
	ret
.size	bn_sqrx8x_mont,.-bn_sqrx8x_mont
___
}}}
$code.=<<___;
.asciz	"Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
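# When control is inside a function body (past the prologue, before the
# epilogue), the handlers below recover that frame's saved %rsp, restore
# the non-volatile registers from it, and then hand the adjusted context
# to RtlVirtualUnwind, returning ExceptionContinueSearch.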
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
.align	16
mul_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	jmp	.Lcommon_seh_tail
.size	mul_handler,.-mul_handler

.type	sqr_handler,\@abi-omnipotent
.align	16
sqr_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	56(%rax),%rax		# pull saved stack pointer
	lea	48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	sqr_handler,.-sqr_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_bn_mul_mont
	.rva	.LSEH_end_bn_mul_mont
	.rva	.LSEH_info_bn_mul_mont

	.rva	.LSEH_begin_bn_mul4x_mont
	.rva	.LSEH_end_bn_mul4x_mont
	.rva	.LSEH_info_bn_mul4x_mont

	.rva	.LSEH_begin_bn_sqr8x_mont
	.rva	.LSEH_end_bn_sqr8x_mont
	.rva	.LSEH_info_bn_sqr8x_mont
___
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont
	.rva	.LSEH_end_bn_mulx4x_mont
	.rva	.LSEH_info_bn_mulx4x_mont

	.rva	.LSEH_begin_bn_sqrx8x_mont
	.rva	.LSEH_end_bn_sqrx8x_mont
	.rva	.LSEH_info_bn_sqrx8x_mont
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_bn_mul_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul_body,.Lmul_epilogue	# HandlerData[]
.LSEH_info_bn_mul4x_mont:
	.byte	9,0,0,0
	.rva	mul_handler
	.rva	.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqr8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqr8x_body,.Lsqr8x_epilogue	# HandlerData[]
___
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
.LSEH_info_bn_sqrx8x_mont:
	.byte	9,0,0,0
	.rva	sqr_handler
	.rva	.Lsqrx8x_body,.Lsqrx8x_epilogue	# HandlerData[]
___
}

print $code;
close STDOUT;