#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# October 2005.
#
# Montgomery multiplication routine for x86_64. While it gives modest
# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common rsa1024 sign is improved by a
# respectable 50%. It remains to be seen if loop unrolling and a
# dedicated squaring routine can provide further improvement...

# July 2011.
#
# Add dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# August 2011.
#
# Unroll and modulo-schedule inner loops in such a manner that they
# are "fallen through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. Average performance improvement in comparison
# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output";

# int bn_mul_mont(
$rp="%rdi";     # BN_ULONG *rp,
$ap="%rsi";     # const BN_ULONG *ap,
$bp="%rdx";     # const BN_ULONG *bp,
$np="%rcx";     # const BN_ULONG *np,
$n0="%r8";      # const BN_ULONG *n0,
$num="%r9";     # int num);
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

$code=<<___;
.text

.globl  bn_mul_mont
.type   bn_mul_mont,\@function,6
.align  16
bn_mul_mont:
        test    \$3,${num}d
        jnz     .Lmul_enter
        cmp     \$8,${num}d
        jb      .Lmul_enter
        cmp     $ap,$bp
        jne     .Lmul4x_enter
        jmp     .Lsqr4x_enter

.align  16
.Lmul_enter:
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15

        mov     ${num}d,${num}d
        lea     2($num),%r10
        mov     %rsp,%r11
        neg     %r10
        lea     (%rsp,%r10,8),%rsp      # tp=alloca(8*(num+2))
        and     \$-1024,%rsp            # minimize TLB usage

        mov     %r11,8(%rsp,$num,8)     # tp[num+1]=%rsp
.Lmul_body:
        mov     $bp,%r12                # reassign $bp
___
                $bp="%r12";
$code.=<<___;
        mov     ($n0),$n0               # pull n0[0] value
        mov     ($bp),$m0               # m0=bp[0]
        mov     ($ap),%rax

        xor     $i,$i                   # i=0
        xor     $j,$j                   # j=0

        mov     $n0,$m1
        mulq    $m0                     # ap[0]*bp[0]
        mov     %rax,$lo0
        mov     ($np),%rax

        imulq   $lo0,$m1                # "tp[0]"*n0
        mov     %rdx,$hi0

        mulq    $m1                     # np[0]*m1
        add     %rax,$lo0               # discarded
        mov     8($ap),%rax
        adc     \$0,%rdx
        mov     %rdx,$hi1

        lea     1($j),$j                # j++
        jmp     .L1st_enter

.align  16
.L1st:
        add     %rax,$hi1
        mov     ($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $hi0,$hi1               # np[j]*m1+ap[j]*bp[0]
        mov     $lo0,$hi0
        adc     \$0,%rdx
        mov     $hi1,-16(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$hi1

.L1st_enter:
        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$hi0
        mov     ($np,$j,8),%rax
        adc     \$0,%rdx
        lea     1($j),$j                # j++
        mov     %rdx,$lo0

        mulq    $m1                     # np[j]*m1
        cmp     $num,$j
        jne     .L1st

        add     %rax,$hi1
        mov     ($ap),%rax              # ap[0]
        adc     \$0,%rdx
        add     $hi0,$hi1               # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $hi1,-16(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$hi1
        mov     $lo0,$hi0

        xor     %rdx,%rdx
        add     $hi0,$hi1
        adc     \$0,%rdx
        mov     $hi1,-8(%rsp,$num,8)
        mov     %rdx,(%rsp,$num,8)      # store upmost overflow bit
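
        # First pass (i=0) complete: tp[0..num-1] holds
        # (ap[]*bp[0]+m1*np[])/2^64 and tp[num] its carry. m1 was chosen
        # as tp[0]*n0 mod 2^64 with n0 = -np[0]^(-1) mod 2^64, so the
        # discarded low word was zero and the division by 2^64 is exact.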

        lea     1($i),$i                # i++
        jmp     .Louter
.align  16
.Louter:
        mov     ($bp,$i,8),$m0          # m0=bp[i]
        xor     $j,$j                   # j=0
        mov     $n0,$m1
        mov     (%rsp),$lo0
        mulq    $m0                     # ap[0]*bp[i]
        add     %rax,$lo0               # ap[0]*bp[i]+tp[0]
        mov     ($np),%rax
        adc     \$0,%rdx

        imulq   $lo0,$m1                # tp[0]*n0
        mov     %rdx,$hi0

        mulq    $m1                     # np[0]*m1
        add     %rax,$lo0               # discarded
        mov     8($ap),%rax
        adc     \$0,%rdx
        mov     8(%rsp),$lo0            # tp[1]
        mov     %rdx,$hi1

        lea     1($j),$j                # j++
        jmp     .Linner_enter

.align  16
.Linner:
        add     %rax,$hi1
        mov     ($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $lo0,$hi1               # np[j]*m1+ap[j]*bp[i]+tp[j]
        mov     (%rsp,$j,8),$lo0
        adc     \$0,%rdx
        mov     $hi1,-16(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$hi1

.Linner_enter:
        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$hi0
        mov     ($np,$j,8),%rax
        adc     \$0,%rdx
        add     $hi0,$lo0               # ap[j]*bp[i]+tp[j]
        mov     %rdx,$hi0
        adc     \$0,$hi0
        lea     1($j),$j                # j++

        mulq    $m1                     # np[j]*m1
        cmp     $num,$j
        jne     .Linner

        add     %rax,$hi1
        mov     ($ap),%rax              # ap[0]
        adc     \$0,%rdx
        add     $lo0,$hi1               # np[j]*m1+ap[j]*bp[i]+tp[j]
        mov     (%rsp,$j,8),$lo0
        adc     \$0,%rdx
        mov     $hi1,-16(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$hi1

        xor     %rdx,%rdx
        add     $hi0,$hi1
        adc     \$0,%rdx
        add     $lo0,$hi1               # pull upmost overflow bit
        adc     \$0,%rdx
        mov     $hi1,-8(%rsp,$num,8)
        mov     %rdx,(%rsp,$num,8)      # store upmost overflow bit

        lea     1($i),$i                # i++
        cmp     $num,$i
        jl      .Louter

        xor     $i,$i                   # i=0 and clear CF!
        mov     (%rsp),%rax             # tp[0]
        lea     (%rsp),$ap              # borrow ap for tp
        mov     $num,$j                 # j=num
        jmp     .Lsub
.align  16
.Lsub:  sbb     ($np,$i,8),%rax
        mov     %rax,($rp,$i,8)         # rp[i]=tp[i]-np[i]
        mov     8($ap,$i,8),%rax        # tp[i+1]
        lea     1($i),$i                # i++
        dec     $j                      # doesn't affect CF!
        jnz     .Lsub

        sbb     \$0,%rax                # handle upmost overflow bit
        xor     $i,$i
        and     %rax,$ap
        not     %rax
        mov     $rp,$np
        and     %rax,$np
        mov     $num,$j                 # j=num
        or      $np,$ap                 # ap=borrow?tp:rp
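
        # After .Lsub %rax is zero when tp-np consumed the stored
        # overflow bit (the subtracted value is the reduced result) and
        # all-ones when the subtraction borrowed, so and/not/or above
        # amounts to a branchless select: ap = borrow ? tp : rp.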
.align  16
.Lcopy:                                 # copy or in-place refresh
        mov     ($ap,$i,8),%rax
        mov     $i,(%rsp,$i,8)          # zap temporary vector
        mov     %rax,($rp,$i,8)         # rp[i]=tp[i]
        lea     1($i),$i
        sub     \$1,$j
        jnz     .Lcopy

        mov     8(%rsp,$num,8),%rsi     # restore %rsp
        mov     \$1,%rax
        mov     (%rsi),%r15
        mov     8(%rsi),%r14
        mov     16(%rsi),%r13
        mov     24(%rsi),%r12
        mov     32(%rsi),%rbp
        mov     40(%rsi),%rbx
        lea     48(%rsi),%rsp
.Lmul_epilogue:
        ret
.size   bn_mul_mont,.-bn_mul_mont
___
{{{
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
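#
# bn_mul4x_mont is a 4x-unrolled variant of bn_mul_mont above: each
# inner-loop iteration handles four words of ap[]/np[], and $rp is
# stashed at tp[num+2] because %rdi doubles as the scratch word @N[1].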
$code.=<<___;
.type   bn_mul4x_mont,\@function,6
.align  16
bn_mul4x_mont:
.Lmul4x_enter:
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15

        mov     ${num}d,${num}d
        lea     4($num),%r10
        mov     %rsp,%r11
        neg     %r10
        lea     (%rsp,%r10,8),%rsp      # tp=alloca(8*(num+4))
        and     \$-1024,%rsp            # minimize TLB usage

        mov     %r11,8(%rsp,$num,8)     # tp[num+1]=%rsp
.Lmul4x_body:
        mov     $rp,16(%rsp,$num,8)     # tp[num+2]=$rp
        mov     %rdx,%r12               # reassign $bp
___
                $bp="%r12";
$code.=<<___;
        mov     ($n0),$n0               # pull n0[0] value
        mov     ($bp),$m0               # m0=bp[0]
        mov     ($ap),%rax

        xor     $i,$i                   # i=0
        xor     $j,$j                   # j=0

        mov     $n0,$m1
        mulq    $m0                     # ap[0]*bp[0]
        mov     %rax,$A[0]
        mov     ($np),%rax

        imulq   $A[0],$m1               # "tp[0]"*n0
        mov     %rdx,$A[1]

        mulq    $m1                     # np[0]*m1
        add     %rax,$A[0]              # discarded
        mov     8($ap),%rax
        adc     \$0,%rdx
        mov     %rdx,$N[1]

        mulq    $m0
        add     %rax,$A[1]
        mov     8($np),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[0]

        mulq    $m1
        add     %rax,$N[1]
        mov     16($ap),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]
        lea     4($j),$j                # j++
        adc     \$0,%rdx
        mov     $N[1],(%rsp)
        mov     %rdx,$N[0]
        jmp     .L1st4x
.align  16
.L1st4x:
        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[0]
        mov     -16($np,$j,8),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     -8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[0],-24(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[1]
        mov     -8($np,$j,8),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     ($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[1],-16(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]

        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[0]
        mov     ($np,$j,8),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[0],-8(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[1]
        mov     8($np,$j,8),%rax
        adc     \$0,%rdx
        lea     4($j),$j                # j++
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     -16($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[1],-32(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]
        cmp     $num,$j
        jl      .L1st4x

        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[0]
        mov     -16($np,$j,8),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     -8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[0],-24(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[0]
        add     %rax,$A[1]
        mov     -8($np,$j,8),%rax
        adc     \$0,%rdx
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     ($ap),%rax              # ap[0]
        adc     \$0,%rdx
        add     $A[1],$N[1]             # np[j]*m1+ap[j]*bp[0]
        adc     \$0,%rdx
        mov     $N[1],-16(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]

        xor     $N[1],$N[1]
        add     $A[0],$N[0]
        adc     \$0,$N[1]
        mov     $N[0],-8(%rsp,$j,8)
        mov     $N[1],(%rsp,$j,8)       # store upmost overflow bit

        lea     1($i),$i                # i++
.align  4
.Louter4x:
        mov     ($bp,$i,8),$m0          # m0=bp[i]
        xor     $j,$j                   # j=0
        mov     (%rsp),$A[0]
        mov     $n0,$m1
        mulq    $m0                     # ap[0]*bp[i]
        add     %rax,$A[0]              # ap[0]*bp[i]+tp[0]
        mov     ($np),%rax
        adc     \$0,%rdx

        imulq   $A[0],$m1               # tp[0]*n0
        mov     %rdx,$A[1]

        mulq    $m1                     # np[0]*m1
        add     %rax,$A[0]              # "$N[0]", discarded
        mov     8($ap),%rax
        adc     \$0,%rdx
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[1]
        mov     8($np),%rax
        adc     \$0,%rdx
        add     8(%rsp),$A[1]           # +tp[1]
        adc     \$0,%rdx
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     16($ap),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]             # np[j]*m1+ap[j]*bp[i]+tp[j]
        lea     4($j),$j                # j+=4
        adc     \$0,%rdx
        mov     $N[1],(%rsp)            # tp[j-1]
        mov     %rdx,$N[0]
        jmp     .Linner4x
.align  16
.Linner4x:
        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[0]
        mov     -16($np,$j,8),%rax
        adc     \$0,%rdx
        add     -16(%rsp,$j,8),$A[0]    # ap[j]*bp[i]+tp[j]
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     -8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]
        adc     \$0,%rdx
        mov     $N[0],-24(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[1]
        mov     -8($np,$j,8),%rax
        adc     \$0,%rdx
        add     -8(%rsp,$j,8),$A[1]
        adc     \$0,%rdx
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     ($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]
        adc     \$0,%rdx
        mov     $N[1],-16(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[0]
        mov     ($np,$j,8),%rax
        adc     \$0,%rdx
        add     (%rsp,$j,8),$A[0]       # ap[j]*bp[i]+tp[j]
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]
        adc     \$0,%rdx
        mov     $N[0],-8(%rsp,$j,8)     # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[1]
        mov     8($np,$j,8),%rax
        adc     \$0,%rdx
        add     8(%rsp,$j,8),$A[1]
        adc     \$0,%rdx
        lea     4($j),$j                # j++
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     -16($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[1],$N[1]
        adc     \$0,%rdx
        mov     $N[1],-32(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]
        cmp     $num,$j
        jl      .Linner4x

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[0]
        mov     -16($np,$j,8),%rax
        adc     \$0,%rdx
        add     -16(%rsp,$j,8),$A[0]    # ap[j]*bp[i]+tp[j]
        adc     \$0,%rdx
        mov     %rdx,$A[1]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[0]
        mov     -8($ap,$j,8),%rax
        adc     \$0,%rdx
        add     $A[0],$N[0]
        adc     \$0,%rdx
        mov     $N[0],-24(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[1]

        mulq    $m0                     # ap[j]*bp[i]
        add     %rax,$A[1]
        mov     -8($np,$j,8),%rax
        adc     \$0,%rdx
        add     -8(%rsp,$j,8),$A[1]
        adc     \$0,%rdx
        lea     1($i),$i                # i++
        mov     %rdx,$A[0]

        mulq    $m1                     # np[j]*m1
        add     %rax,$N[1]
        mov     ($ap),%rax              # ap[0]
        adc     \$0,%rdx
        add     $A[1],$N[1]
        adc     \$0,%rdx
        mov     $N[1],-16(%rsp,$j,8)    # tp[j-1]
        mov     %rdx,$N[0]

        xor     $N[1],$N[1]
        add     $A[0],$N[0]
        adc     \$0,$N[1]
        add     (%rsp,$num,8),$N[0]     # pull upmost overflow bit
        adc     \$0,$N[1]
        mov     $N[0],-8(%rsp,$j,8)
        mov     $N[1],(%rsp,$j,8)       # store upmost overflow bit

        cmp     $num,$i
        jl      .Louter4x
___
{
my @ri=("%rax","%rdx",$m0,$m1);
$code.=<<___;
        mov     16(%rsp,$num,8),$rp     # restore $rp
        mov     0(%rsp),@ri[0]          # tp[0]
        pxor    %xmm0,%xmm0
        mov     8(%rsp),@ri[1]          # tp[1]
        shr     \$2,$num                # num/=4
        lea     (%rsp),$ap              # borrow ap for tp
        xor     $i,$i                   # i=0 and clear CF!

        sub     0($np),@ri[0]
        mov     16($ap),@ri[2]          # tp[2]
        mov     24($ap),@ri[3]          # tp[3]
        sbb     8($np),@ri[1]
        lea     -1($num),$j             # j=num/4-1
        jmp     .Lsub4x
.align  16
.Lsub4x:
        mov     @ri[0],0($rp,$i,8)      # rp[i]=tp[i]-np[i]
        mov     @ri[1],8($rp,$i,8)      # rp[i]=tp[i]-np[i]
        sbb     16($np,$i,8),@ri[2]
        mov     32($ap,$i,8),@ri[0]     # tp[i+1]
        mov     40($ap,$i,8),@ri[1]
        sbb     24($np,$i,8),@ri[3]
        mov     @ri[2],16($rp,$i,8)     # rp[i]=tp[i]-np[i]
        mov     @ri[3],24($rp,$i,8)     # rp[i]=tp[i]-np[i]
        sbb     32($np,$i,8),@ri[0]
        mov     48($ap,$i,8),@ri[2]
        mov     56($ap,$i,8),@ri[3]
        sbb     40($np,$i,8),@ri[1]
        lea     4($i),$i                # i++
        dec     $j                      # doesn't affect CF!
        jnz     .Lsub4x

        mov     @ri[0],0($rp,$i,8)      # rp[i]=tp[i]-np[i]
        mov     32($ap,$i,8),@ri[0]     # load overflow bit
        sbb     16($np,$i,8),@ri[2]
        mov     @ri[1],8($rp,$i,8)      # rp[i]=tp[i]-np[i]
        sbb     24($np,$i,8),@ri[3]
        mov     @ri[2],16($rp,$i,8)     # rp[i]=tp[i]-np[i]

        sbb     \$0,@ri[0]              # handle upmost overflow bit
        mov     @ri[3],24($rp,$i,8)     # rp[i]=tp[i]-np[i]
        xor     $i,$i                   # i=0
        and     @ri[0],$ap
        not     @ri[0]
        mov     $rp,$np
        and     @ri[0],$np
        lea     -1($num),$j
        or      $np,$ap                 # ap=borrow?tp:rp

        movdqu  ($ap),%xmm1
        movdqa  %xmm0,(%rsp)
        movdqu  %xmm1,($rp)
        jmp     .Lcopy4x
.align  16
.Lcopy4x:                                       # copy or in-place refresh
        movdqu  16($ap,$i),%xmm2
        movdqu  32($ap,$i),%xmm1
        movdqa  %xmm0,16(%rsp,$i)
        movdqu  %xmm2,16($rp,$i)
        movdqa  %xmm0,32(%rsp,$i)
        movdqu  %xmm1,32($rp,$i)
        lea     32($i),$i
        dec     $j
        jnz     .Lcopy4x

        shl     \$2,$num
        movdqu  16($ap,$i),%xmm2
        movdqa  %xmm0,16(%rsp,$i)
        movdqu  %xmm2,16($rp,$i)
___
}
$code.=<<___;
        mov     8(%rsp,$num,8),%rsi     # restore %rsp
        mov     \$1,%rax
        mov     (%rsi),%r15
        mov     8(%rsi),%r14
        mov     16(%rsi),%r13
        mov     24(%rsi),%r12
        mov     32(%rsi),%rbp
        mov     40(%rsi),%rbx
        lea     48(%rsi),%rsp
.Lmul4x_epilogue:
        ret
.size   bn_mul4x_mont,.-bn_mul4x_mont
___
}}}
{{{
######################################################################
# void bn_sqr4x_mont(
my $rptr="%rdi";        # const BN_ULONG *rptr,
my $aptr="%rsi";        # const BN_ULONG *aptr,
my $bptr="%rdx";        # not used
my $nptr="%rcx";        # const BN_ULONG *nptr,
my $n0  ="%r8";         # const BN_ULONG *n0,
my $num ="%r9";         # int num); has to be divisible by 4 and
                        # not less than 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

$code.=<<___;
.type   bn_sqr4x_mont,\@function,6
.align  16
bn_sqr4x_mont:
.Lsqr4x_enter:
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15

        shl     \$3,${num}d             # convert $num to bytes
        xor     %r10,%r10
        mov     %rsp,%r11               # put aside %rsp
        sub     $num,%r10               # -$num
        mov     ($n0),$n0               # *n0
        lea     -72(%rsp,%r10,2),%rsp   # alloca(frame+2*$num)
        and     \$-1024,%rsp            # minimize TLB usage
        ##############################################################
        # Stack layout
        #
        # +0    saved $num, used in reduction section
        # +8    &t[2*$num], used in reduction section
        # +32   saved $rptr
        # +40   saved $nptr
        # +48   saved *n0
        # +56   saved %rsp
        # +64   t[2*$num]
        #
        mov     $rptr,32(%rsp)          # save $rptr
        mov     $nptr,40(%rsp)
        mov     $n0,  48(%rsp)
        mov     %r11, 56(%rsp)          # save original %rsp
.Lsqr4x_body:
        ##############################################################
        # Squaring part:
        #
        # a) multiply-n-add everything but a[i]*a[i];
        # b) shift result of a) by 1 to the left and accumulate
        #    a[i]*a[i] products;
        #
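        # In other words, with 64-bit digits a = sum a[i]*2^(64*i):
        #
        #   a^2 = sum a[i]^2*2^(128*i) + 2*sum a[i]*a[j]*2^(64*(i+j)), i<j
        #
        # pass a) computes the cross products a[i]*a[j], i<j, into t[],
        # and pass b) doubles t[] one word at a time, bit 63 crossing
        # into the next word, while folding in the a[i]^2 terms.
        #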
        lea     32(%r10),$i             # $i=-($num-32)
        lea     ($aptr,$num),$aptr      # end of a[] buffer, ($aptr,$i)=&ap[2]

        mov     $num,$j                 # $j=$num

                                        # comments apply to $num==8 case
        mov     -32($aptr,$i),$a0       # a[0]
        lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
        mov     -24($aptr,$i),%rax      # a[1]
        lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
        mov     -16($aptr,$i),$ai       # a[2]
        mov     %rax,$a1

        mul     $a0                     # a[1]*a[0]
        mov     %rax,$A0[0]             # a[1]*a[0]
         mov    $ai,%rax                # a[2]
        mov     %rdx,$A0[1]
        mov     $A0[0],-24($tptr,$i)    # t[1]

        xor     $A0[0],$A0[0]
        mul     $a0                     # a[2]*a[0]
        add     %rax,$A0[1]
         mov    $ai,%rax
        adc     %rdx,$A0[0]
        mov     $A0[1],-16($tptr,$i)    # t[2]

        lea     -16($i),$j              # j=-16


         mov    8($aptr,$j),$ai         # a[3]
        mul     $a1                     # a[2]*a[1]
        mov     %rax,$A1[0]             # a[2]*a[1]+t[3]
         mov    $ai,%rax
        mov     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
         lea    16($j),$j
        adc     \$0,$A0[1]
        mul     $a0                     # a[3]*a[0]
        add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],-8($tptr,$j)     # t[3]
        jmp     .Lsqr4x_1st

.align  16
.Lsqr4x_1st:
         mov    ($aptr,$j),$ai          # a[4]
        xor     $A1[0],$A1[0]
        mul     $a1                     # a[3]*a[1]
        add     %rax,$A1[1]             # a[3]*a[1]+t[4]
         mov    $ai,%rax
        adc     %rdx,$A1[0]

        xor     $A0[0],$A0[0]
        add     $A1[1],$A0[1]
        adc     \$0,$A0[0]
        mul     $a0                     # a[4]*a[0]
        add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
         mov    $ai,%rax                # a[3]
        adc     %rdx,$A0[0]
        mov     $A0[1],($tptr,$j)       # t[4]


         mov    8($aptr,$j),$ai         # a[5]
        xor     $A1[1],$A1[1]
        mul     $a1                     # a[4]*a[3]
        add     %rax,$A1[0]             # a[4]*a[3]+t[5]
         mov    $ai,%rax
        adc     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
         lea    16($j),$j
        adc     \$0,$A0[1]
        mul     $a0                     # a[5]*a[2]
        add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],-8($tptr,$j)     # t[5]

         mov    ($aptr,$j),$ai          # a[6]
        xor     $A1[0],$A1[0]
        mul     $a1                     # a[5]*a[3]
        add     %rax,$A1[1]             # a[5]*a[3]+t[6]
         mov    $ai,%rax
        adc     %rdx,$A1[0]

        xor     $A0[0],$A0[0]
        add     $A1[1],$A0[1]
        adc     \$0,$A0[0]
        mul     $a0                     # a[6]*a[2]
        add     %rax,$A0[1]             # a[6]*a[2]+a[5]*a[3]+t[6]
         mov    $ai,%rax                # a[3]
        adc     %rdx,$A0[0]
        mov     $A0[1],($tptr,$j)       # t[6]


         mov    8($aptr,$j),$ai         # a[7]
        xor     $A1[1],$A1[1]
        mul     $a1                     # a[6]*a[5]
        add     %rax,$A1[0]             # a[6]*a[5]+t[7]
         mov    $ai,%rax
        adc     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
         lea    16($j),$j
        adc     \$0,$A0[1]
        mul     $a0                     # a[7]*a[4]
        add     %rax,$A0[0]             # a[7]*a[4]+a[6]*a[5]+t[6]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],-8($tptr,$j)     # t[7]

        cmp     \$0,$j
        jne     .Lsqr4x_1st

        xor     $A1[0],$A1[0]
        add     $A0[1],$A1[1]
        adc     \$0,$A1[0]
        mul     $a1                     # a[7]*a[5]
        add     %rax,$A1[1]
        adc     %rdx,$A1[0]

        mov     $A1[1],($tptr)          # t[8]
        lea     16($i),$i
        mov     $A1[0],8($tptr)         # t[9]
        jmp     .Lsqr4x_outer

.align  16
.Lsqr4x_outer:                          # comments apply to $num==6 case
        mov     -32($aptr,$i),$a0       # a[0]
        lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
        mov     -24($aptr,$i),%rax      # a[1]
        lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
        mov     -16($aptr,$i),$ai       # a[2]
        mov     %rax,$a1

        mov     -24($tptr,$i),$A0[0]    # t[1]
        xor     $A0[1],$A0[1]
        mul     $a0                     # a[1]*a[0]
        add     %rax,$A0[0]             # a[1]*a[0]+t[1]
         mov    $ai,%rax                # a[2]
        adc     %rdx,$A0[1]
        mov     $A0[0],-24($tptr,$i)    # t[1]

        xor     $A0[0],$A0[0]
        add     -16($tptr,$i),$A0[1]    # a[2]*a[0]+t[2]
        adc     \$0,$A0[0]
        mul     $a0                     # a[2]*a[0]
        add     %rax,$A0[1]
         mov    $ai,%rax
        adc     %rdx,$A0[0]
        mov     $A0[1],-16($tptr,$i)    # t[2]

        lea     -16($i),$j              # j=-16
        xor     $A1[0],$A1[0]


         mov    8($aptr,$j),$ai         # a[3]
        xor     $A1[1],$A1[1]
        add     8($tptr,$j),$A1[0]
        adc     \$0,$A1[1]
        mul     $a1                     # a[2]*a[1]
        add     %rax,$A1[0]             # a[2]*a[1]+t[3]
         mov    $ai,%rax
        adc     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
        adc     \$0,$A0[1]
        mul     $a0                     # a[3]*a[0]
        add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],8($tptr,$j)      # t[3]

        lea     16($j),$j
        jmp     .Lsqr4x_inner

.align  16
.Lsqr4x_inner:
         mov    ($aptr,$j),$ai          # a[4]
        xor     $A1[0],$A1[0]
        add     ($tptr,$j),$A1[1]
        adc     \$0,$A1[0]
        mul     $a1                     # a[3]*a[1]
        add     %rax,$A1[1]             # a[3]*a[1]+t[4]
         mov    $ai,%rax
        adc     %rdx,$A1[0]

        xor     $A0[0],$A0[0]
        add     $A1[1],$A0[1]
        adc     \$0,$A0[0]
        mul     $a0                     # a[4]*a[0]
        add     %rax,$A0[1]             # a[4]*a[0]+a[3]*a[1]+t[4]
         mov    $ai,%rax                # a[3]
        adc     %rdx,$A0[0]
        mov     $A0[1],($tptr,$j)       # t[4]

         mov    8($aptr,$j),$ai         # a[5]
        xor     $A1[1],$A1[1]
        add     8($tptr,$j),$A1[0]
        adc     \$0,$A1[1]
        mul     $a1                     # a[4]*a[3]
        add     %rax,$A1[0]             # a[4]*a[3]+t[5]
         mov    $ai,%rax
        adc     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
        lea     16($j),$j               # j++
        adc     \$0,$A0[1]
        mul     $a0                     # a[5]*a[2]
        add     %rax,$A0[0]             # a[5]*a[2]+a[4]*a[3]+t[5]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],-8($tptr,$j)     # t[5]

        cmp     \$0,$j
        jne     .Lsqr4x_inner

        xor     $A1[0],$A1[0]
        add     $A0[1],$A1[1]
        adc     \$0,$A1[0]
        mul     $a1                     # a[5]*a[3]
        add     %rax,$A1[1]
        adc     %rdx,$A1[0]

        mov     $A1[1],($tptr)          # t[6]
        mov     $A1[0],8($tptr)         # t[7]

        add     \$16,$i
        jnz     .Lsqr4x_outer

                                        # comments apply to $num==4 case
        mov     -32($aptr),$a0          # a[0]
        lea     64(%rsp,$num,2),$tptr   # end of tp[] buffer, &tp[2*$num]
        mov     -24($aptr),%rax         # a[1]
        lea     -32($tptr,$i),$tptr     # end of tp[] window, &tp[2*$num-"$i"]
        mov     -16($aptr),$ai          # a[2]
        mov     %rax,$a1

        mov     -24($tptr),$A0[0]       # t[1]
        xor     $A0[1],$A0[1]
        mul     $a0                     # a[1]*a[0]
        add     %rax,$A0[0]             # a[1]*a[0]+t[1]
         mov    $ai,%rax                # a[2]
        adc     %rdx,$A0[1]
        mov     $A0[0],-24($tptr)       # t[1]

        xor     $A0[0],$A0[0]
        add     -16($tptr),$A0[1]       # a[2]*a[0]+t[2]
        adc     \$0,$A0[0]
        mul     $a0                     # a[2]*a[0]
        add     %rax,$A0[1]
         mov    $ai,%rax
        adc     %rdx,$A0[0]
        mov     $A0[1],-16($tptr)       # t[2]

        xor     $A1[0],$A1[0]
         mov    -8($aptr),$ai           # a[3]
        xor     $A1[1],$A1[1]
        add     -8($tptr),$A1[0]
        adc     \$0,$A1[1]
        mul     $a1                     # a[2]*a[1]
        add     %rax,$A1[0]             # a[2]*a[1]+t[3]
         mov    $ai,%rax
        adc     %rdx,$A1[1]

        xor     $A0[1],$A0[1]
        add     $A1[0],$A0[0]
        adc     \$0,$A0[1]
        mul     $a0                     # a[3]*a[0]
        add     %rax,$A0[0]             # a[3]*a[0]+a[2]*a[1]+t[3]
         mov    $ai,%rax
        adc     %rdx,$A0[1]
        mov     $A0[0],-8($tptr)        # t[3]

        xor     $A1[0],$A1[0]
        add     $A0[1],$A1[1]
        adc     \$0,$A1[0]
        mul     $a1                     # a[3]*a[1]
        add     %rax,$A1[1]
         mov    -16($aptr),%rax         # a[2]
        adc     %rdx,$A1[0]

        mov     $A1[1],($tptr)          # t[4]
        mov     $A1[0],8($tptr)         # t[5]

        mul     $ai                     # a[2]*a[3]
___
{
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
$code.=<<___;
         add    \$16,$i
         xor    $shift,$shift
         sub    $num,$i                 # $i=16-$num
         xor    $carry,$carry

        add     $A1[0],%rax             # t[5]
        adc     \$0,%rdx
        mov     %rax,8($tptr)           # t[5]
        mov     %rdx,16($tptr)          # t[6]
        mov     $carry,24($tptr)        # t[7]

         mov    -16($aptr,$i),%rax      # a[0]
        lea     64(%rsp,$num,2),$tptr
         xor    $A0[0],$A0[0]           # t[0]
         mov    -24($tptr,$i,2),$A0[1]  # t[1]

        lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[1]            # | t[2*i]>>63
         mov    -16($tptr,$i,2),$A0[0]  # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    -8($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
        adc     %rax,$S[0]
         mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
        mov     $S[0],-32($tptr,$i,2)
        adc     %rdx,$S[1]

        lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
         mov    $S[1],-24($tptr,$i,2)
         sbb    $carry,$carry           # mov cf,$carry
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[3]            # | t[2*i]>>63
         mov    0($tptr,$i,2),$A0[0]    # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    8($tptr,$i,2),$A0[1]    # t[2*i+2+1]    # prefetch
        adc     %rax,$S[2]
         mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
        mov     $S[2],-16($tptr,$i,2)
        adc     %rdx,$S[3]
        lea     16($i),$i
        mov     $S[3],-40($tptr,$i,2)
        sbb     $carry,$carry           # mov cf,$carry
        jmp     .Lsqr4x_shift_n_add
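
        # Each .Lsqr4x_shift_n_add iteration doubles eight t[] words,
        # bit 63 of each word crossing into the next one via $shift,
        # and folds in four a[i]*a[i] products; the chain carry lives
        # in $carry and is recreated with neg/sbb around each mul,
        # because mul clobbers the flags.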

.align  16
.Lsqr4x_shift_n_add:
        lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[1]            # | t[2*i]>>63
         mov    -16($tptr,$i,2),$A0[0]  # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    -8($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
        adc     %rax,$S[0]
         mov    -8($aptr,$i),%rax       # a[i+1]        # prefetch
        mov     $S[0],-32($tptr,$i,2)
        adc     %rdx,$S[1]

        lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
         mov    $S[1],-24($tptr,$i,2)
         sbb    $carry,$carry           # mov cf,$carry
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[3]            # | t[2*i]>>63
         mov    0($tptr,$i,2),$A0[0]    # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    8($tptr,$i,2),$A0[1]    # t[2*i+2+1]    # prefetch
        adc     %rax,$S[2]
         mov    0($aptr,$i),%rax        # a[i+1]        # prefetch
        mov     $S[2],-16($tptr,$i,2)
        adc     %rdx,$S[3]

        lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
         mov    $S[3],-8($tptr,$i,2)
         sbb    $carry,$carry           # mov cf,$carry
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[1]            # | t[2*i]>>63
         mov    16($tptr,$i,2),$A0[0]   # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    24($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
        adc     %rax,$S[0]
         mov    8($aptr,$i),%rax        # a[i+1]        # prefetch
        mov     $S[0],0($tptr,$i,2)
        adc     %rdx,$S[1]

        lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
         mov    $S[1],8($tptr,$i,2)
         sbb    $carry,$carry           # mov cf,$carry
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[3]            # | t[2*i]>>63
         mov    32($tptr,$i,2),$A0[0]   # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    40($tptr,$i,2),$A0[1]   # t[2*i+2+1]    # prefetch
        adc     %rax,$S[2]
         mov    16($aptr,$i),%rax       # a[i+1]        # prefetch
        mov     $S[2],16($tptr,$i,2)
        adc     %rdx,$S[3]
        mov     $S[3],24($tptr,$i,2)
        sbb     $carry,$carry           # mov cf,$carry
        add     \$32,$i
        jnz     .Lsqr4x_shift_n_add

        lea     ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[1]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[1]            # | t[2*i]>>63
         mov    -16($tptr),$A0[0]       # t[2*i+2]      # prefetch
        mov     $A0[1],$shift           # shift=t[2*i+1]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
         mov    -8($tptr),$A0[1]        # t[2*i+2+1]    # prefetch
        adc     %rax,$S[0]
         mov    -8($aptr),%rax          # a[i+1]        # prefetch
        mov     $S[0],-32($tptr)
        adc     %rdx,$S[1]

        lea     ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
         mov    $S[1],-24($tptr)
         sbb    $carry,$carry           # mov cf,$carry
        shr     \$63,$A0[0]
        lea     ($j,$A0[1],2),$S[3]     # t[2*i+1]<<1 |
        shr     \$63,$A0[1]
        or      $A0[0],$S[3]            # | t[2*i]>>63
        mul     %rax                    # a[i]*a[i]
        neg     $carry                  # mov $carry,cf
        adc     %rax,$S[2]
        adc     %rdx,$S[3]
        mov     $S[2],-16($tptr)
        mov     $S[3],-8($tptr)
___
}
##############################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
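# A sketch of this reduction in the style of the C fragment at the top
# of the file (again an illustration only; t[] is the 2*num-word square
# plus one extra word that receives the final carry, the "topbit"
# register below):
#
#	for (i = 0; i < num; i++) {
#		m = t[i]*n0;		/* t[i]+m*np[0] = 0 mod 2^64 */
#		for (c = 0, j = 0; j < num; j++) {
#			acc    = (u128)m*np[j] + t[i+j] + c;
#			t[i+j] = (u64)acc;
#			c      = (u64)(acc>>64);
#		}
#		for (k = i+num; c; k++) {	/* ripple carry out */
#			acc  = (u128)t[k] + c;
#			t[k] = (u64)acc;
#			c    = (u64)(acc>>64);
#		}
#	}
#	/* result is t[num..2*num], < 2*np: one conditional subtraction */
#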
{
my ($topbit,$nptr)=("%rbp",$aptr);
my ($m0,$m1)=($a0,$a1);
my @Ni=("%rbx","%r9");
$code.=<<___;
        mov     40(%rsp),$nptr          # restore $nptr
        mov     48(%rsp),$n0            # restore *n0
        xor     $j,$j
        mov     $num,0(%rsp)            # save $num
        sub     $num,$j                 # $j=-$num
         mov    64(%rsp),$A0[0]         # t[0]          # modsched #
         mov    $n0,$m0                 #               # modsched #
        lea     64(%rsp,$num,2),%rax    # end of t[] buffer
        lea     64(%rsp,$num),$tptr     # end of t[] window
        mov     %rax,8(%rsp)            # save end of t[] buffer
        lea     ($nptr,$num),$nptr      # end of n[] buffer
        xor     $topbit,$topbit         # $topbit=0

        mov     0($nptr,$j),%rax        # n[0]          # modsched #
        mov     8($nptr,$j),$Ni[1]      # n[1]          # modsched #
         imulq  $A0[0],$m0              # m0=t[0]*n0    # modsched #
         mov    %rax,$Ni[0]             #               # modsched #
        jmp     .Lsqr4x_mont_outer

.align  16
.Lsqr4x_mont_outer:
        xor     $A0[1],$A0[1]
        mul     $m0                     # n[0]*m0
        add     %rax,$A0[0]             # n[0]*m0+t[0]
         mov    $Ni[1],%rax
        adc     %rdx,$A0[1]
        mov     $n0,$m1

        xor     $A0[0],$A0[0]
        add     8($tptr,$j),$A0[1]
        adc     \$0,$A0[0]
        mul     $m0                     # n[1]*m0
        add     %rax,$A0[1]             # n[1]*m0+t[1]
         mov    $Ni[0],%rax
        adc     %rdx,$A0[0]

        imulq   $A0[1],$m1

        mov     16($nptr,$j),$Ni[0]     # n[2]
        xor     $A1[1],$A1[1]
        add     $A0[1],$A1[0]
        adc     \$0,$A1[1]
        mul     $m1                     # n[0]*m1
        add     %rax,$A1[0]             # n[0]*m1+"t[1]"
         mov    $Ni[0],%rax
        adc     %rdx,$A1[1]
        mov     $A1[0],8($tptr,$j)      # "t[1]"

        xor     $A0[1],$A0[1]
        add     16($tptr,$j),$A0[0]
        adc     \$0,$A0[1]
        mul     $m0                     # n[2]*m0
        add     %rax,$A0[0]             # n[2]*m0+t[2]
         mov    $Ni[1],%rax
        adc     %rdx,$A0[1]

        mov     24($nptr,$j),$Ni[1]     # n[3]
        xor     $A1[0],$A1[0]
        add     $A0[0],$A1[1]
        adc     \$0,$A1[0]
        mul     $m1                     # n[1]*m1
        add     %rax,$A1[1]             # n[1]*m1+"t[2]"
         mov    $Ni[1],%rax
        adc     %rdx,$A1[0]
        mov     $A1[1],16($tptr,$j)     # "t[2]"

        xor     $A0[0],$A0[0]
        add     24($tptr,$j),$A0[1]
        lea     32($j),$j
        adc     \$0,$A0[0]
        mul     $m0                     # n[3]*m0
        add     %rax,$A0[1]             # n[3]*m0+t[3]
         mov    $Ni[0],%rax
        adc     %rdx,$A0[0]
        jmp     .Lsqr4x_mont_inner

.align  16
.Lsqr4x_mont_inner:
        mov     ($nptr,$j),$Ni[0]       # n[4]
        xor     $A1[1],$A1[1]
        add     $A0[1],$A1[0]
        adc     \$0,$A1[1]
        mul     $m1                     # n[2]*m1
        add     %rax,$A1[0]             # n[2]*m1+"t[3]"
         mov    $Ni[0],%rax
        adc     %rdx,$A1[1]
        mov     $A1[0],-8($tptr,$j)     # "t[3]"

        xor     $A0[1],$A0[1]
        add     ($tptr,$j),$A0[0]
        adc     \$0,$A0[1]
        mul     $m0                     # n[4]*m0
        add     %rax,$A0[0]             # n[4]*m0+t[4]
         mov    $Ni[1],%rax
        adc     %rdx,$A0[1]

        mov     8($nptr,$j),$Ni[1]      # n[5]
        xor     $A1[0],$A1[0]
        add     $A0[0],$A1[1]
        adc     \$0,$A1[0]
        mul     $m1                     # n[3]*m1
        add     %rax,$A1[1]             # n[3]*m1+"t[4]"
         mov    $Ni[1],%rax
        adc     %rdx,$A1[0]
        mov     $A1[1],($tptr,$j)       # "t[4]"

        xor     $A0[0],$A0[0]
        add     8($tptr,$j),$A0[1]
        adc     \$0,$A0[0]
        mul     $m0                     # n[5]*m0
        add     %rax,$A0[1]             # n[5]*m0+t[5]
         mov    $Ni[0],%rax
        adc     %rdx,$A0[0]


        mov     16($nptr,$j),$Ni[0]     # n[6]
        xor     $A1[1],$A1[1]
        add     $A0[1],$A1[0]
        adc     \$0,$A1[1]
        mul     $m1                     # n[4]*m1
        add     %rax,$A1[0]             # n[4]*m1+"t[5]"
         mov    $Ni[0],%rax
        adc     %rdx,$A1[1]
        mov     $A1[0],8($tptr,$j)      # "t[5]"

        xor     $A0[1],$A0[1]
        add     16($tptr,$j),$A0[0]
        adc     \$0,$A0[1]
        mul     $m0                     # n[6]*m0
        add     %rax,$A0[0]             # n[6]*m0+t[6]
         mov    $Ni[1],%rax
        adc     %rdx,$A0[1]

        mov     24($nptr,$j),$Ni[1]     # n[7]
        xor     $A1[0],$A1[0]
        add     $A0[0],$A1[1]
        adc     \$0,$A1[0]
        mul     $m1                     # n[5]*m1
        add     %rax,$A1[1]             # n[5]*m1+"t[6]"
         mov    $Ni[1],%rax
        adc     %rdx,$A1[0]
        mov     $A1[1],16($tptr,$j)     # "t[6]"

        xor     $A0[0],$A0[0]
        add     24($tptr,$j),$A0[1]
        lea     32($j),$j
        adc     \$0,$A0[0]
        mul     $m0                     # n[7]*m0
        add     %rax,$A0[1]             # n[7]*m0+t[7]
         mov    $Ni[0],%rax
        adc     %rdx,$A0[0]
        cmp     \$0,$j
        jne     .Lsqr4x_mont_inner

         sub    0(%rsp),$j              # $j=-$num      # modsched #
         mov    $n0,$m0                 #               # modsched #

        xor     $A1[1],$A1[1]
        add     $A0[1],$A1[0]
        adc     \$0,$A1[1]
        mul     $m1                     # n[6]*m1
        add     %rax,$A1[0]             # n[6]*m1+"t[7]"
        mov     $Ni[1],%rax
        adc     %rdx,$A1[1]
        mov     $A1[0],-8($tptr)        # "t[7]"

        xor     $A0[1],$A0[1]
        add     ($tptr),$A0[0]          # +t[8]
        adc     \$0,$A0[1]
         mov    0($nptr,$j),$Ni[0]      # n[0]          # modsched #
        add     $topbit,$A0[0]
        adc     \$0,$A0[1]

         imulq  16($tptr,$j),$m0        # m0=t[0]*n0    # modsched #
        xor     $A1[0],$A1[0]
         mov    8($nptr,$j),$Ni[1]      # n[1]          # modsched #
        add     $A0[0],$A1[1]
         mov    16($tptr,$j),$A0[0]     # t[0]          # modsched #
        adc     \$0,$A1[0]
        mul     $m1                     # n[7]*m1
        add     %rax,$A1[1]             # n[7]*m1+"t[8]"
         mov    $Ni[0],%rax             #               # modsched #
        adc     %rdx,$A1[0]
        mov     $A1[1],($tptr)          # "t[8]"

        xor     $topbit,$topbit
        add     8($tptr),$A1[0]         # +t[9]
        adc     $topbit,$topbit
        add     $A0[1],$A1[0]
        lea     16($tptr),$tptr         # "t[$num]>>128"
        adc     \$0,$topbit
        mov     $A1[0],-8($tptr)        # "t[9]"
        cmp     8(%rsp),$tptr           # are we done?
        jb      .Lsqr4x_mont_outer

        mov     0(%rsp),$num            # restore $num
        mov     $topbit,($tptr)         # save $topbit
___
}
##############################################################
# Post-condition, 4x unrolled copy from bn_mul_mont
#
{
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
$code.=<<___;
        mov     64(%rsp,$num),@ri[0]    # tp[0]
        lea     64(%rsp,$num),$tptr     # upper half of t[2*$num] holds result
        mov     40(%rsp),$nptr          # restore $nptr
        shr     \$5,$num                # num/4
        mov     8($tptr),@ri[1]         # t[1]
        xor     $i,$i                   # i=0 and clear CF!

        mov     32(%rsp),$rptr          # restore $rptr
        sub     0($nptr),@ri[0]
        mov     16($tptr),@ri[2]        # t[2]
        mov     24($tptr),@ri[3]        # t[3]
        sbb     8($nptr),@ri[1]
        lea     -1($num),$j             # j=num/4-1
        jmp     .Lsqr4x_sub
.align  16
.Lsqr4x_sub:
        mov     @ri[0],0($rptr,$i,8)    # rp[i]=tp[i]-np[i]
        mov     @ri[1],8($rptr,$i,8)    # rp[i]=tp[i]-np[i]
        sbb     16($nptr,$i,8),@ri[2]
        mov     32($tptr,$i,8),@ri[0]   # tp[i+1]
        mov     40($tptr,$i,8),@ri[1]
        sbb     24($nptr,$i,8),@ri[3]
        mov     @ri[2],16($rptr,$i,8)   # rp[i]=tp[i]-np[i]
        mov     @ri[3],24($rptr,$i,8)   # rp[i]=tp[i]-np[i]
        sbb     32($nptr,$i,8),@ri[0]
        mov     48($tptr,$i,8),@ri[2]
        mov     56($tptr,$i,8),@ri[3]
        sbb     40($nptr,$i,8),@ri[1]
        lea     4($i),$i                # i++
        dec     $j                      # doesn't affect CF!
        jnz     .Lsqr4x_sub

        mov     @ri[0],0($rptr,$i,8)    # rp[i]=tp[i]-np[i]
        mov     32($tptr,$i,8),@ri[0]   # load overflow bit
        sbb     16($nptr,$i,8),@ri[2]
        mov     @ri[1],8($rptr,$i,8)    # rp[i]=tp[i]-np[i]
        sbb     24($nptr,$i,8),@ri[3]
        mov     @ri[2],16($rptr,$i,8)   # rp[i]=tp[i]-np[i]

        sbb     \$0,@ri[0]              # handle upmost overflow bit
        mov     @ri[3],24($rptr,$i,8)   # rp[i]=tp[i]-np[i]
        xor     $i,$i                   # i=0
        and     @ri[0],$tptr
        not     @ri[0]
        mov     $rptr,$nptr
        and     @ri[0],$nptr
        lea     -1($num),$j
        or      $nptr,$tptr             # tp=borrow?tp:rp

        pxor    %xmm0,%xmm0
        lea     64(%rsp,$num,8),$nptr
        movdqu  ($tptr),%xmm1
        lea     ($nptr,$num,8),$nptr
        movdqa  %xmm0,64(%rsp)          # zap lower half of temporary vector
        movdqa  %xmm0,($nptr)           # zap upper half of temporary vector
        movdqu  %xmm1,($rptr)
        jmp     .Lsqr4x_copy
.align  16
.Lsqr4x_copy:                           # copy or in-place refresh
        movdqu  16($tptr,$i),%xmm2
        movdqu  32($tptr,$i),%xmm1
        movdqa  %xmm0,80(%rsp,$i)       # zap lower half of temporary vector
        movdqa  %xmm0,96(%rsp,$i)       # zap lower half of temporary vector
        movdqa  %xmm0,16($nptr,$i)      # zap upper half of temporary vector
        movdqa  %xmm0,32($nptr,$i)      # zap upper half of temporary vector
        movdqu  %xmm2,16($rptr,$i)
        movdqu  %xmm1,32($rptr,$i)
        lea     32($i),$i
        dec     $j
        jnz     .Lsqr4x_copy

        movdqu  16($tptr,$i),%xmm2
        movdqa  %xmm0,80(%rsp,$i)       # zap lower half of temporary vector
        movdqa  %xmm0,16($nptr,$i)      # zap upper half of temporary vector
        movdqu  %xmm2,16($rptr,$i)
___
}
$code.=<<___;
        mov     56(%rsp),%rsi           # restore %rsp
        mov     \$1,%rax
        mov     0(%rsi),%r15
        mov     8(%rsi),%r14
        mov     16(%rsi),%r13
        mov     24(%rsi),%r12
        mov     32(%rsi),%rbp
        mov     40(%rsi),%rbx
        lea     48(%rsi),%rsp
.Lsqr4x_epilogue:
        ret
.size   bn_sqr4x_mont,.-bn_sqr4x_mont
___
}}}
$code.=<<___;
.asciz  "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align  16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type   mul_handler,\@abi-omnipotent
.align  16
mul_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        mov     8($disp),%rsi           # disp->ImageBase
        mov     56($disp),%r11          # disp->HandlerData

        mov     0(%r11),%r10d           # HandlerData[0]
        lea     (%rsi,%r10),%r10        # end of prologue label
        cmp     %r10,%rbx               # context->Rip<end of prologue label
        jb      .Lcommon_seh_tail

        mov     152($context),%rax      # pull context->Rsp

        mov     4(%r11),%r10d           # HandlerData[1]
        lea     (%rsi,%r10),%r10        # epilogue label
        cmp     %r10,%rbx               # context->Rip>=epilogue label
        jae     .Lcommon_seh_tail

        mov     192($context),%r10      # pull $num
        mov     8(%rax,%r10,8),%rax     # pull saved stack pointer
        lea     48(%rax),%rax

        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
        mov     -32(%rax),%r13
        mov     -40(%rax),%r14
        mov     -48(%rax),%r15
        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %r12,216($context)      # restore context->R12
        mov     %r13,224($context)      # restore context->R13
        mov     %r14,232($context)      # restore context->R14
        mov     %r15,240($context)      # restore context->R15

        jmp     .Lcommon_seh_tail
.size   mul_handler,.-mul_handler

.type   sqr_handler,\@abi-omnipotent
.align  16
sqr_handler:
        push    %rsi
        push    %rdi
        push    %rbx
        push    %rbp
        push    %r12
        push    %r13
        push    %r14
        push    %r15
        pushfq
        sub     \$64,%rsp

        mov     120($context),%rax      # pull context->Rax
        mov     248($context),%rbx      # pull context->Rip

        lea     .Lsqr4x_body(%rip),%r10
        cmp     %r10,%rbx               # context->Rip<.Lsqr_body
        jb      .Lcommon_seh_tail

        mov     152($context),%rax      # pull context->Rsp

        lea     .Lsqr4x_epilogue(%rip),%r10
        cmp     %r10,%rbx               # context->Rip>=.Lsqr_epilogue
        jae     .Lcommon_seh_tail

        mov     56(%rax),%rax           # pull saved stack pointer
        lea     48(%rax),%rax

        mov     -8(%rax),%rbx
        mov     -16(%rax),%rbp
        mov     -24(%rax),%r12
        mov     -32(%rax),%r13
        mov     -40(%rax),%r14
        mov     -48(%rax),%r15
        mov     %rbx,144($context)      # restore context->Rbx
        mov     %rbp,160($context)      # restore context->Rbp
        mov     %r12,216($context)      # restore context->R12
        mov     %r13,224($context)      # restore context->R13
        mov     %r14,232($context)      # restore context->R14
        mov     %r15,240($context)      # restore context->R15

.Lcommon_seh_tail:
        mov     8(%rax),%rdi
        mov     16(%rax),%rsi
        mov     %rax,152($context)      # restore context->Rsp
        mov     %rsi,168($context)      # restore context->Rsi
        mov     %rdi,176($context)      # restore context->Rdi

        mov     40($disp),%rdi          # disp->ContextRecord
        mov     $context,%rsi           # context
        mov     \$154,%ecx              # sizeof(CONTEXT)
        .long   0xa548f3fc              # cld; rep movsq

        mov     $disp,%rsi
        xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
        mov     8(%rsi),%rdx            # arg2, disp->ImageBase
        mov     0(%rsi),%r8             # arg3, disp->ControlPc
        mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
        mov     40(%rsi),%r10           # disp->ContextRecord
        lea     56(%rsi),%r11           # &disp->HandlerData
        lea     24(%rsi),%r12           # &disp->EstablisherFrame
        mov     %r10,32(%rsp)           # arg5
        mov     %r11,40(%rsp)           # arg6
        mov     %r12,48(%rsp)           # arg7
        mov     %rcx,56(%rsp)           # arg8, (NULL)
        call    *__imp_RtlVirtualUnwind(%rip)

        mov     \$1,%eax                # ExceptionContinueSearch
        add     \$64,%rsp
        popfq
        pop     %r15
        pop     %r14
        pop     %r13
        pop     %r12
        pop     %rbp
        pop     %rbx
        pop     %rdi
        pop     %rsi
        ret
.size   sqr_handler,.-sqr_handler

.section        .pdata
.align  4
        .rva    .LSEH_begin_bn_mul_mont
        .rva    .LSEH_end_bn_mul_mont
        .rva    .LSEH_info_bn_mul_mont

        .rva    .LSEH_begin_bn_mul4x_mont
        .rva    .LSEH_end_bn_mul4x_mont
        .rva    .LSEH_info_bn_mul4x_mont

        .rva    .LSEH_begin_bn_sqr4x_mont
        .rva    .LSEH_end_bn_sqr4x_mont
        .rva    .LSEH_info_bn_sqr4x_mont

.section        .xdata
.align  8
.LSEH_info_bn_mul_mont:
        .byte   9,0,0,0
        .rva    mul_handler
        .rva    .Lmul_body,.Lmul_epilogue       # HandlerData[]
.LSEH_info_bn_mul4x_mont:
        .byte   9,0,0,0
        .rva    mul_handler
        .rva    .Lmul4x_body,.Lmul4x_epilogue   # HandlerData[]
.LSEH_info_bn_sqr4x_mont:
        .byte   9,0,0,0
        .rva    sqr_handler
___
}

print $code;
close STDOUT;