# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common case, rsa1024 sign, is improved
# by a respectable 50%. It remains to be seen whether loop unrolling
# and a dedicated squaring routine can provide further improvement...

# Add a dedicated squaring procedure. The performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.

# Unroll and modulo-schedule the inner loops in such a manner that they
# "fall through" for an input length of 8, which is critical for
# 1024-bit RSA *sign*. The average performance improvement over the
# *initial* 2005 version of this module is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
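#
# In other words, what these routines compute is Montgomery
# multiplication: with R = 2^(64*num) and n0[0] = -np^(-1) mod 2^64,
#
#	rp = ap * bp * R^(-1) mod np
#
# carried out one 64-bit word of bp at a time, so that each outer
# iteration folds one factor of 2^(-64) into the running result.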
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open STDOUT,"| $^X $xlate $flavour $output";

$rp="%rdi"; # BN_ULONG *rp,
$ap="%rsi"; # const BN_ULONG *ap,
$bp="%rdx"; # const BN_ULONG *bp,
$np="%rcx"; # const BN_ULONG *np,
$n0="%r8"; # const BN_ULONG *n0,
$num="%r9"; # int num);
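# Taken together, the assignments above document the C prototype these
# routines are written against (a sketch of the declaration, assumed here
# to return int as in OpenSSL's bn_asm.c):
#
#	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap,
#			const BN_ULONG *bp, const BN_ULONG *np,
#			const BN_ULONG *n0, int num);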
.type bn_mul_mont,\@function,6
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
mov $bp,%r12 # reassign $bp
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mulq $m0 # ap[0]*bp[0]
imulq $lo0,$m1 # "tp[0]"*n0
add %rax,$lo0 # discarded
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
mov ($bp,$i,8),$m0 # m0=bp[i]
mov ($ap),%rax # ap[0]
mulq $m0 # ap[0]*bp[i]
add %rax,$lo0 # ap[0]*bp[i]+tp[0]
imulq $lo0,$m1 # tp[0]*n0
add %rax,$lo0 # discarded
mov 8(%rsp),$lo0 # tp[1]
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
add $lo0,$hi1 # pull upmost overflow bit
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
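#
# For reference, the outer/inner loop above amounts to the following
# word-by-word (CIOS-style) multiply-and-reduce. This is only an
# illustrative C sketch (the helper name is ours), not code emitted by
# this module; it assumes a 64-bit BN_ULONG and compiler support for
# unsigned __int128, and takes n0 as the already-dereferenced n0[0]:
#
#	typedef unsigned long long BN_ULONG;
#	typedef unsigned __int128 u128;
#
#	/* leaves the not-yet-subtracted result in tp[0..num] */
#	static void mont_mul_ref(BN_ULONG *tp, const BN_ULONG *ap,
#				 const BN_ULONG *bp, const BN_ULONG *np,
#				 BN_ULONG n0, int num)
#	{
#		for (int j = 0; j <= num; j++) tp[j] = 0;
#
#		for (int i = 0; i < num; i++) {
#			BN_ULONG m = (tp[0] + ap[0]*bp[i]) * n0; /* mod 2^64 */
#			u128 ca = 0, cn = 0;
#
#			for (int j = 0; j < num; j++) {
#				ca += (u128)ap[j]*bp[i] + tp[j];
#				cn += (u128)np[j]*m + (BN_ULONG)ca;
#				if (j) tp[j-1] = (BN_ULONG)cn; /* j==0: "discarded" */
#				ca >>= 64; cn >>= 64;
#			}
#			cn += (u128)(BN_ULONG)ca + tp[num];
#			tp[num-1] = (BN_ULONG)cn;
#			tp[num]   = (BN_ULONG)(cn >> 64); /* upmost overflow bit */
#		}
#	}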
xor $i,$i # i=0 and clear CF!
mov (%rsp),%rax # tp[0]
lea (%rsp),$ap # borrow ap for tp
.Lsub: sbb ($np,$i,8),%rax
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 8($ap,$i,8),%rax # tp[i+1]
dec $j # doesn't affect CF!
sbb \$0,%rax # handle upmost overflow bit
or $np,$ap # ap=borrow?tp:rp
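#
# The post-condition above computes rp[] = tp[] - np[] while tracking the
# final borrow, then selects the source for the copy loop below: if the
# subtraction borrowed (tp < np), the original tp[] is written back out,
# otherwise rp[] already holds the reduced result. The intent, expressed
# as a C sketch (not a literal transcription of the register trick):
#
#	/* borrow is 1 if tp[] - np[] underflowed, else 0 */
#	BN_ULONG mask = 0 - borrow;		/* all-ones or zero */
#	const BN_ULONG *src =
#		(const BN_ULONG *)(((uintptr_t)tp &  mask) |
#				   ((uintptr_t)rp & ~mask));
#	for (int i = 0; i < num; i++) {
#		tp[i] = 0;		/* wipe the temporary vector */
#		rp[i] = src[i];		/* copy or refresh in place */
#	}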
.Lcopy: # copy or in-place refresh
mov $i,(%rsp,$i,8) # zap temporary vector
mov %rax,($rp,$i,8) # rp[i]=tp[i]
mov 8(%rsp,$num,8),%rsi # restore %rsp
.size bn_mul_mont,.-bn_mul_mont
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
.type bn_mul4x_mont,\@function,6
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
mov %rdx,%r12 # reassign $bp
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mulq $m0 # ap[0]*bp[0]
imulq $A[0],$m1 # "tp[0]"*n0
add %rax,$A[0] # discarded
mulq $m0 # ap[j]*bp[0]
mov -16($np,$j,8),%rax
mov -8($ap,$j,8),%rax
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -8($np,$j,8),%rax
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -16($ap,$j,8),%rax
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -16($np,$j,8),%rax
mov -8($ap,$j,8),%rax
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -8($np,$j,8),%rax
mov ($ap),%rax # ap[0]
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
mov ($bp,$i,8),$m0 # m0=bp[i]
mulq $m0 # ap[0]*bp[i]
add %rax,$A[0] # ap[0]*bp[i]+tp[0]
imulq $A[0],$m1 # tp[0]*n0
add %rax,$A[0] # "$N[0]", discarded
mulq $m0 # ap[j]*bp[i]
add 8(%rsp),$A[1] # +tp[1]
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $N[1],(%rsp) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -16($np,$j,8),%rax
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov -8($ap,$j,8),%rax
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -8($np,$j,8),%rax
add -8(%rsp,$j,8),$A[1]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add 8(%rsp,$j,8),$A[1]
mov -16($ap,$j,8),%rax
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -16($np,$j,8),%rax
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov -8($ap,$j,8),%rax
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -8($np,$j,8),%rax
add -8(%rsp,$j,8),$A[1]
mov ($ap),%rax # ap[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
add (%rsp,$num,8),$N[0] # pull upmost overflow bit
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
my @ri=("%rax","%rdx",$m0,$m1);
mov 16(%rsp,$num,8),$rp # restore $rp
mov 0(%rsp),@ri[0] # tp[0]
mov 8(%rsp),@ri[1] # tp[1]
shr \$2,$num # num/=4
lea (%rsp),$ap # borrow ap for tp
xor $i,$i # i=0 and clear CF!
mov 16($ap),@ri[2] # tp[2]
mov 24($ap),@ri[3] # tp[3]
lea -1($num),$j # j=num/4-1
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 16($np,$i,8),@ri[2]
mov 32($ap,$i,8),@ri[0] # tp[i+1]
mov 40($ap,$i,8),@ri[1]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 32($np,$i,8),@ri[0]
mov 48($ap,$i,8),@ri[2]
mov 56($ap,$i,8),@ri[3]
sbb 40($np,$i,8),@ri[1]
dec $j # doesn't affect CF!
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 32($ap,$i,8),@ri[0] # load overflow bit
sbb 16($np,$i,8),@ri[2]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb \$0,@ri[0] # handle upmost overflow bit
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
or $np,$ap # ap=borrow?tp:rp
.Lcopy4x: # copy or in-place refresh
movdqu 16($ap,$i),%xmm2
movdqu 32($ap,$i),%xmm1
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
movdqa %xmm0,32(%rsp,$i)
movdqu %xmm1,32($rp,$i)
movdqu 16($ap,$i),%xmm2
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
mov 8(%rsp,$num,8),%rsi # restore %rsp
.size bn_mul4x_mont,.-bn_mul4x_mont
######################################################################
# void bn_sqr4x_mont(
my $rptr="%rdi"; # BN_ULONG *rptr,
my $aptr="%rsi"; # const BN_ULONG *aptr,
my $bptr="%rdx"; # not used
my $nptr="%rcx"; # const BN_ULONG *nptr,
my $n0 ="%r8"; # const BN_ULONG *n0,
my $num ="%r9"; # int num); has to be divisible by 4 and not less than 8
my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
.type bn_sqr4x_mont,\@function,6
shl \$3,${num}d # convert $num to bytes
mov %rsp,%r11 # put aside %rsp
sub $num,%r10 # -$num
lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
and \$-1024,%rsp # minimize TLB usage
##############################################################
# +0 saved $num, used in reduction section
# +8 &t[2*$num], used in reduction section
mov $rptr,32(%rsp) # save $rptr
mov %r11, 56(%rsp) # save original %rsp
##############################################################
# a) multiply-n-add everything but a[i]*a[i];
# b) shift result of a) by 1 to the left and accumulate
# a[i]*a[i] products;
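#
# i.e. a^2 = 2*(sum over i<j of a[i]*a[j]*2^(64*(i+j)))
#            + sum over i of (a[i]^2)*2^(128*i)
#
# Step a) alone is plain schoolbook accumulation of the cross products.
# As an illustrative C sketch (not the code generated below; assumes a
# 64-bit BN_ULONG, unsigned __int128 spelled u128, and t[] of 2*num
# limbs, initially zero):
#
#	for (int i = 0; i < num; i++) {
#		u128 c = 0;
#		for (int j = i + 1; j < num; j++) {
#			c += (u128)a[i]*a[j] + t[i+j];
#			t[i+j] = (BN_ULONG)c; c >>= 64;
#		}
#		t[i+num] = (BN_ULONG)c;
#	}
#
# Step b) is sketched further down, next to the "shift_n_add" pass.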
lea 32(%r10),$i # $i=-($num-32)
lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
mov $num,$j # $j=$num
# comments apply to $num==8 case
mov -32($aptr,$i),$a0 # a[0]
lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
mov -24($aptr,$i),%rax # a[1]
lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
mov -16($aptr,$i),$ai # a[2]
mov %rax,$A0[0] # a[1]*a[0]
mov $A0[0],-24($tptr,$i) # t[1]
mov $A0[1],-16($tptr,$i) # t[2]
lea -16($i),$j # j=-16
mov 8($aptr,$j),$ai # a[3]
mov %rax,$A1[0] # a[2]*a[1]+t[3]
add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
mov $A0[0],-8($tptr,$j) # t[3]
mov ($aptr,$j),$ai # a[4]
add %rax,$A1[1] # a[3]*a[1]+t[4]
add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
mov $A0[1],($tptr,$j) # t[4]
mov 8($aptr,$j),$ai # a[5]
add %rax,$A1[0] # a[4]*a[3]+t[5]
add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
mov $A0[0],-8($tptr,$j) # t[5]
mov ($aptr,$j),$ai # a[6]
add %rax,$A1[1] # a[5]*a[3]+t[6]
add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
mov $A0[1],($tptr,$j) # t[6]
mov 8($aptr,$j),$ai # a[7]
add %rax,$A1[0] # a[6]*a[5]+t[7]
add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
mov $A0[0],-8($tptr,$j) # t[7]
mov $A1[1],($tptr) # t[8]
mov $A1[0],8($tptr) # t[9]
.Lsqr4x_outer: # comments apply to $num==6 case
mov -32($aptr,$i),$a0 # a[0]
lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
mov -24($aptr,$i),%rax # a[1]
lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
mov -16($aptr,$i),$ai # a[2]
mov -24($tptr,$i),$A0[0] # t[1]
add %rax,$A0[0] # a[1]*a[0]+t[1]
mov $A0[0],-24($tptr,$i) # t[1]
add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
mov $A0[1],-16($tptr,$i) # t[2]
lea -16($i),$j # j=-16
mov 8($aptr,$j),$ai # a[3]
add 8($tptr,$j),$A1[0]
add %rax,$A1[0] # a[2]*a[1]+t[3]
add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
mov $A0[0],8($tptr,$j) # t[3]
mov ($aptr,$j),$ai # a[4]
add ($tptr,$j),$A1[1]
add %rax,$A1[1] # a[3]*a[1]+t[4]
add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
mov $A0[1],($tptr,$j) # t[4]
mov 8($aptr,$j),$ai # a[5]
add 8($tptr,$j),$A1[0]
add %rax,$A1[0] # a[4]*a[3]+t[5]
add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
mov $A0[0],-8($tptr,$j) # t[5]
mov $A1[1],($tptr) # t[6]
mov $A1[0],8($tptr) # t[7]
# comments apply to $num==4 case
mov -32($aptr),$a0 # a[0]
lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
mov -24($aptr),%rax # a[1]
lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
mov -16($aptr),$ai # a[2]
mov -24($tptr),$A0[0] # t[1]
add %rax,$A0[0] # a[1]*a[0]+t[1]
mov $A0[0],-24($tptr) # t[1]
add -16($tptr),$A0[1] # a[2]*a[0]+t[2]
mov $A0[1],-16($tptr) # t[2]
mov -8($aptr),$ai # a[3]
add -8($tptr),$A1[0]
add %rax,$A1[0] # a[2]*a[1]+t[3]
add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
mov $A0[0],-8($tptr) # t[3]
mov -16($aptr),%rax # a[2]
mov $A1[1],($tptr) # t[4]
mov $A1[0],8($tptr) # t[5]
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
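#
# Step b) from the comment above: double the cross products and fold in
# the a[i]*a[i] terms, propagating the bit shifted out of each pair of
# limbs ("shift") and the addition carry ("carry"). Illustrative C sketch
# only (same assumptions as the earlier sketch), two limbs per iteration:
#
#	BN_ULONG carry = 0, shift = 0;
#	for (int i = 0; i < num; i++) {
#		BN_ULONG lo = (t[2*i]   << 1) | shift;
#		BN_ULONG hi = (t[2*i+1] << 1) | (t[2*i] >> 63);
#		shift = t[2*i+1] >> 63;
#		u128 sq = (u128)a[i]*a[i];
#		u128 s0 = (u128)lo + (BN_ULONG)sq + carry;
#		u128 s1 = (u128)hi + (BN_ULONG)(sq >> 64) + (BN_ULONG)(s0 >> 64);
#		t[2*i]   = (BN_ULONG)s0;
#		t[2*i+1] = (BN_ULONG)s1;
#		carry    = (BN_ULONG)(s1 >> 64);
#	}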
sub $num,$i # $i=16-$num
add $A1[0],%rax # t[5]
mov %rax,8($tptr) # t[5]
mov %rdx,16($tptr) # t[6]
mov $carry,24($tptr) # t[7]
mov -16($aptr,$i),%rax # a[0]
lea 64(%rsp,$num,2),$tptr
xor $A0[0],$A0[0] # t[0]
mov -24($tptr,$i,2),$A0[1] # t[1]
lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
or $A0[0],$S[1] # | t[2*i]>>63
mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov -8($aptr,$i),%rax # a[i+1] # prefetch
mov $S[0],-32($tptr,$i,2)
lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
mov $S[1],-24($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
or $A0[0],$S[3] # | t[2*i]>>63
mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov 0($aptr,$i),%rax # a[i+1] # prefetch
mov $S[2],-16($tptr,$i,2)
mov $S[3],-40($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
jmp .Lsqr4x_shift_n_add
.Lsqr4x_shift_n_add:
lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
or $A0[0],$S[1] # | t[2*i]>>63
mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov -8($aptr,$i),%rax # a[i+1] # prefetch
mov $S[0],-32($tptr,$i,2)
lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
mov $S[1],-24($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
or $A0[0],$S[3] # | t[2*i]>>63
mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov 0($aptr,$i),%rax # a[i+1] # prefetch
mov $S[2],-16($tptr,$i,2)
lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
mov $S[3],-8($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
or $A0[0],$S[1] # | t[2*i]>>63
mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov 8($aptr,$i),%rax # a[i+1] # prefetch
mov $S[0],0($tptr,$i,2)
lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
mov $S[1],8($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
or $A0[0],$S[3] # | t[2*i]>>63
mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
mov 16($aptr,$i),%rax # a[i+1] # prefetch
mov $S[2],16($tptr,$i,2)
mov $S[3],24($tptr,$i,2)
sbb $carry,$carry # mov cf,$carry
jnz .Lsqr4x_shift_n_add
lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
or $A0[0],$S[1] # | t[2*i]>>63
mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
mov $A0[1],$shift # shift=t[2*i+1]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
mov -8($aptr),%rax # a[i+1] # prefetch
mov $S[0],-32($tptr)
lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
mov $S[1],-24($tptr)
sbb $carry,$carry # mov cf,$carry
lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
or $A0[0],$S[3] # | t[2*i]>>63
mul %rax # a[i]*a[i]
neg $carry # mov $carry,cf
mov $S[2],-16($tptr)
##############################################################
# Montgomery reduction part, "word-by-word" algorithm.
my ($topbit,$nptr)=("%rbp",$aptr);
my ($m0,$m1)=($a0,$a1);
my @Ni=("%rbx","%r9");
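#
# The reduction below folds the 2*num-limb square t[] down to num limbs,
# one word at a time: for each of the num low limbs a multiplier
# m = t[k]*n0 (mod 2^64) is chosen so that adding m*n[] clears that limb,
# which is then retired. An illustrative C sketch (not the
# modulo-scheduled code below; same assumptions as the earlier sketches):
#
#	BN_ULONG topbit = 0;
#	for (int k = 0; k < num; k++) {
#		BN_ULONG m = t[k] * n0;		/* n0 = -n^(-1) mod 2^64 */
#		u128 c = 0;
#		for (int j = 0; j < num; j++) {
#			c += (u128)n[j]*m + t[k+j];
#			t[k+j] = (BN_ULONG)c; c >>= 64;	/* t[k] becomes 0 */
#		}
#		c += (u128)t[k+num] + topbit;
#		t[k+num] = (BN_ULONG)c;
#		topbit   = (BN_ULONG)(c >> 64);
#	}
#	/* result, before the final conditional subtraction, is
#	 * t[num..2*num-1] with topbit as the upmost overflow bit */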
mov 40(%rsp),$nptr # restore $nptr
mov 48(%rsp),$n0 # restore *n0
mov $num,0(%rsp) # save $num
sub $num,$j # $j=-$num
mov 64(%rsp),$A0[0] # t[0] # modsched #
mov $n0,$m0 # # modsched #
lea 64(%rsp,$num,2),%rax # end of t[] buffer
lea 64(%rsp,$num),$tptr # end of t[] window
mov %rax,8(%rsp) # save end of t[] buffer
lea ($nptr,$num),$nptr # end of n[] buffer
xor $topbit,$topbit # $topbit=0
mov 0($nptr,$j),%rax # n[0] # modsched #
mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
mov %rax,$Ni[0] # # modsched #
jmp .Lsqr4x_mont_outer
add %rax,$A0[0] # n[0]*m0+t[0]
add 8($tptr,$j),$A0[1]
add %rax,$A0[1] # n[1]*m0+t[1]
mov 16($nptr,$j),$Ni[0] # n[2]
add %rax,$A1[0] # n[0]*m1+"t[1]"
mov $A1[0],8($tptr,$j) # "t[1]"
add 16($tptr,$j),$A0[0]
add %rax,$A0[0] # n[2]*m0+t[2]
mov 24($nptr,$j),$Ni[1] # n[3]
add %rax,$A1[1] # n[1]*m1+"t[2]"
mov $A1[1],16($tptr,$j) # "t[2]"
add 24($tptr,$j),$A0[1]
add %rax,$A0[1] # n[3]*m0+t[3]
jmp .Lsqr4x_mont_inner
mov ($nptr,$j),$Ni[0] # n[4]
add %rax,$A1[0] # n[2]*m1+"t[3]"
mov $A1[0],-8($tptr,$j) # "t[3]"
add ($tptr,$j),$A0[0]
add %rax,$A0[0] # n[4]*m0+t[4]
mov 8($nptr,$j),$Ni[1] # n[5]
add %rax,$A1[1] # n[3]*m1+"t[4]"
mov $A1[1],($tptr,$j) # "t[4]"
add 8($tptr,$j),$A0[1]
add %rax,$A0[1] # n[5]*m0+t[5]
mov 16($nptr,$j),$Ni[0] # n[6]
add %rax,$A1[0] # n[4]*m1+"t[5]"
mov $A1[0],8($tptr,$j) # "t[5]"
add 16($tptr,$j),$A0[0]
add %rax,$A0[0] # n[6]*m0+t[6]
mov 24($nptr,$j),$Ni[1] # n[7]
add %rax,$A1[1] # n[5]*m1+"t[6]"
mov $A1[1],16($tptr,$j) # "t[6]"
add 24($tptr,$j),$A0[1]
add %rax,$A0[1] # n[7]*m0+t[7]
jne .Lsqr4x_mont_inner
sub 0(%rsp),$j # $j=-$num # modsched #
mov $n0,$m0 # # modsched #
add %rax,$A1[0] # n[6]*m1+"t[7]"
mov $A1[0],-8($tptr) # "t[7]"
add ($tptr),$A0[0] # +t[8]
mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
mov 16($tptr,$j),$A0[0] # t[0] # modsched #
add %rax,$A1[1] # n[7]*m1+"t[8]"
mov $Ni[0],%rax # # modsched #
mov $A1[1],($tptr) # "t[8]"
add 8($tptr),$A1[0] # +t[9]
lea 16($tptr),$tptr # "t[$num]>>128"
mov $A1[0],-8($tptr) # "t[9]"
cmp 8(%rsp),$tptr # are we done?
jb .Lsqr4x_mont_outer
mov 0(%rsp),$num # restore $num
mov $topbit,($tptr) # save $topbit
##############################################################
# Post-condition, 4x unrolled copy from bn_mul_mont
my ($tptr,$nptr)=("%rbx",$aptr);
my @ri=("%rax","%rdx","%r10","%r11");
mov 64(%rsp,$num),@ri[0] # tp[0]
lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
mov 40(%rsp),$nptr # restore $nptr
shr \$5,$num # num/4
mov 8($tptr),@ri[1] # t[1]
xor $i,$i # i=0 and clear CF!
mov 32(%rsp),$rptr # restore $rptr
mov 16($tptr),@ri[2] # t[2]
mov 24($tptr),@ri[3] # t[3]
lea -1($num),$j # j=num/4-1
mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
sbb 16($nptr,$i,8),@ri[2]
mov 32($tptr,$i,8),@ri[0] # tp[i+1]
mov 40($tptr,$i,8),@ri[1]
sbb 24($nptr,$i,8),@ri[3]
mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
sbb 32($nptr,$i,8),@ri[0]
mov 48($tptr,$i,8),@ri[2]
mov 56($tptr,$i,8),@ri[3]
sbb 40($nptr,$i,8),@ri[1]
dec $j # doesn't affect CF!
mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
mov 32($tptr,$i,8),@ri[0] # load overflow bit
sbb 16($nptr,$i,8),@ri[2]
mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
sbb 24($nptr,$i,8),@ri[3]
mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
sbb \$0,@ri[0] # handle upmost overflow bit
mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
or $nptr,$tptr # tp=borrow?tp:rp
lea 64(%rsp,$num,8),$nptr
movdqu ($tptr),%xmm1
lea ($nptr,$num,8),$nptr
movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
movdqa %xmm0,($nptr) # zap upper half of temporary vector
movdqu %xmm1,($rptr)
.Lsqr4x_copy: # copy or in-place refresh
movdqu 16($tptr,$i),%xmm2
movdqu 32($tptr,$i),%xmm1
movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
movdqu %xmm2,16($rptr,$i)
movdqu %xmm1,32($rptr,$i)
movdqu 16($tptr,$i),%xmm2
movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
movdqu %xmm2,16($rptr,$i)
mov 56(%rsp),%rsi # restore %rsp
.size bn_sqr4x_mont,.-bn_sqr4x_mont
.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern __imp_RtlVirtualUnwind
.type mul_handler,\@abi-omnipotent
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # end of prologue label
cmp %r10,%rbx # context->Rip<end of prologue label
jb .Lcommon_seh_tail
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lcommon_seh_tail
mov 192($context),%r10 # pull $num
mov 8(%rax,%r10,8),%rax # pull saved stack pointer
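#
# In C terms the two loads above amount to the following recovery of the
# caller's stack pointer (a sketch of the intent, relying on the prologue
# having stored the original %rsp at tp[num+1]):
#
#	num       = context->R9;		/* 6th argument, still live  */
#	tp        = (BN_ULONG *)context->Rsp;	/* frame pointer = tp        */
#	saved_rsp = tp[num + 1];		/* 8(%rax,%r10,8) in the code */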
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
jmp .Lcommon_seh_tail
.size mul_handler,.-mul_handler
.type sqr_handler,\@abi-omnipotent
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
lea .Lsqr4x_body(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lsqr4x_body
jb .Lcommon_seh_tail
mov 152($context),%rax # pull context->Rsp
lea .Lsqr4x_epilogue(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lsqr4x_epilogue
jae .Lcommon_seh_tail
mov 56(%rax),%rax # pull saved stack pointer
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
.size sqr_handler,.-sqr_handler
.rva .LSEH_begin_bn_mul_mont
.rva .LSEH_end_bn_mul_mont
.rva .LSEH_info_bn_mul_mont
.rva .LSEH_begin_bn_mul4x_mont
.rva .LSEH_end_bn_mul4x_mont
.rva .LSEH_info_bn_mul4x_mont
.rva .LSEH_begin_bn_sqr4x_mont
.rva .LSEH_end_bn_sqr4x_mont
.rva .LSEH_info_bn_sqr4x_mont
.LSEH_info_bn_mul_mont:
.rva .Lmul_body,.Lmul_epilogue # HandlerData[]
.LSEH_info_bn_mul4x_mont:
.rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
.LSEH_info_bn_sqr4x_mont: