# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to a powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scatter-/gathering can be tuned without
# bn_exp.c modifications.
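#
# A hedged C sketch of the scatter step (the function name and
# prototype are illustrative, not this module's actual interface):
# element j of the power stored in slot "idx" lands at stride 2^5,
# matching the interlaced layout described further below.
#
#	void scatter_model(BN_ULONG *table, const BN_ULONG *power,
#	                   int num, int idx)
#	{
#		int j;
#		for (j = 0; j < num; j++)
#			table[(j << 5) + idx] = power[j];
#	}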
# Add MULX/AD*X code paths and additional interfaces to optimize for
# the branch prediction unit. For input lengths that are multiples of 8
# the np argument is not just the modulus value, but one interleaved
# with 0. This is to optimize post-condition...
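#
# A hedged sketch of one plausible interleaved layout (assumed from
# the comment above; the exact arrangement consumed in bn_exp.c is
# not spelled out here):
#
#	/* np2 has 2*num words: modulus words interleaved with zeros */
#	for (i = 0; i < num; i++) {
#		np2[2 * i]     = np[i];
#		np2[2 * i + 1] = 0;
#	}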
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
# int bn_mul_mont_gather5(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" in $bp holding
		#		# pre-computed powers of a', interlaced
		#		# in such manner that b[0] is $bp[idx],
		#		# b[1] is $bp[2^5+idx], etc.
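#
# A hedged C model of the cache-neutral gather this layout enables
# (illustrative only; the real code uses SSE masks and "rotating"
# references rather than a scalar loop):
#
#	BN_ULONG gather_model(const BN_ULONG *bp, int idx, int j)
#	{
#		BN_ULONG r = 0;
#		int k;
#		for (k = 0; k < 32; k++) {	/* touch all 2^5 slots */
#			BN_ULONG mask = 0 - (BN_ULONG)(k == idx);
#			r |= bp[(j << 5) + k] & mask;	/* b[j] of power k */
#		}
#		return r;
#	}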
.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont_gather5
.type	bn_mul_mont_gather5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	movd	`($win64?56:8)`(%rsp),%xmm0	# load 7th argument
	lea	.Lmagic_masks(%rip),%r10
$code.=<<___ if ($win64);
	movaps	%xmm7,0x10(%rsp)
	movaps	%xmm8,0x20(%rsp)
	lea	(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2))
	and	\$-1024,%rsp		# minimize TLB usage
	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
	lea	128($bp),%r12		# reassign $bp (+size optimization)
$STRIDE=2**5*8;			# 5 is "window size"
$N=$STRIDE/4;			# should match cache line size
################################################################
# calculate mask: one of %xmm4..7 will contain 0xff..00 or
# 0x00..ff denoting which half of a quarter of the corresponding
# cache line is significant.
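#
# A rough C model of that selection (assuming a 64-byte cache line,
# so each 16-byte quarter holds two candidate limbs and the mask
# keeps exactly one; uint8_t/uint64_t from <stdint.h>):
#
#	uint64_t pick_half(const uint8_t lane[16], const uint8_t mask[16])
#	{
#		uint64_t limb = 0;
#		int b;
#		for (b = 0; b < 16; b++)	/* unwanted half is zeroed */
#			limb |= (uint64_t)(lane[b] & mask[b]) << (8 * (b & 7));
#		return limb;
#	}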
	movq	56(%r10),%xmm1		# 0b11001
	pshufd	\$0,%xmm0,%xmm0		# broadcast masked index
	movdqa	16(%r10),%xmm5
	movdqa	32(%r10),%xmm6
	movdqa	48(%r10),%xmm7
################################################################
# calculate index in 1st cache line, but in such a manner that
# if target data is in another cache line, then the relevant
# "rotating" reference lands on it...
	shr	\$1,%rdx		# idx/=2
	and	\$3,$j			# (idx-idx/4)%4
	shl	\$4,$j			# scale for xmm references
################################################################
# "rotating" references touch different cache banks in
# different cache lines, so that not only all cache lines but
# even all cache banks are referenced in each iteration.
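#
# Modeled with SSE2 intrinsics (a hedged sketch; j/m0/m1/m2 stand for
# the rotated intra-table offsets and m[] for the magic masks, all
# 16-byte aligned as the movdqa below requires; needs <emmintrin.h>):
#
#	__m128i gather16(const unsigned char *bp, size_t j, size_t m0,
#	                 size_t m1, size_t m2, const __m128i m[4])
#	{
#		__m128i x0 = _mm_load_si128((const __m128i *)(bp + 0*64 + j));
#		__m128i x1 = _mm_load_si128((const __m128i *)(bp + 1*64 + m0));
#		__m128i x2 = _mm_load_si128((const __m128i *)(bp + 2*64 + m1));
#		__m128i x3 = _mm_load_si128((const __m128i *)(bp + 3*64 + m2));
#		return _mm_or_si128(_mm_or_si128(_mm_and_si128(x0, m[0]),
#		                                 _mm_and_si128(x1, m[1])),
#		                    _mm_or_si128(_mm_and_si128(x2, m[2]),
#		                                 _mm_and_si128(x3, m[3])));
#	}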
	movdqa	`0*$STRIDE/4-128`($bp,$j),%xmm0
	movdqa	`1*$STRIDE/4-128`($bp,$m0),%xmm1
	movdqa	`2*$STRIDE/4-128`($bp,$m1),%xmm2
	movdqa	`3*$STRIDE/4-128`($bp,%rdx),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	por	%xmm1,%xmm0		# merge upper and lower halves
	movq	%xmm0,$m0		# m0=bp[0]
	mov	($n0),$n0		# pull n0[0] value
	mulq	$m0			# ap[0]*bp[0]
	imulq	$lo0,$m1		# "tp[0]"*n0
	add	%rax,$lo0		# discarded
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$num,8)	# tp[j-1]
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	movdqa	`0*$STRIDE/4-128`($bp,$j),%xmm0
	movdqa	`1*$STRIDE/4-128`($bp,$m0),%xmm1
	movdqa	`2*$STRIDE/4-128`($bp,$m1),%xmm2
	movdqa	`3*$STRIDE/4-128`($bp,%rdx),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	por	%xmm1,%xmm0		# merge upper and lower halves
	mov	($ap),%rax		# ap[0]
	movq	%xmm0,$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	imulq	$lo0,$m1		# tp[0]*n0
	add	%rax,$lo0		# discarded
	mov	8(%rsp),$lo0		# tp[1]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$num,8),$lo0
	mov	$hi1,-16(%rsp,$num,8)	# tp[j-1]
	add	$lo0,$hi1		# pull upmost overflow bit
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp
.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	dec	$j			# doesn't affect CF!
	sbb	\$0,%rax		# handle upmost overflow bit
	or	$np,$ap			# ap=borrow?tp:rp
.Lcopy:					# copy or in-place refresh
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]
	mov	8(%rsp,$num,8),%rsi	# restore %rsp
$code.=<<___ if ($win64);
	movaps	-104(%rsi),%xmm6
	movaps	-88(%rsi),%xmm7
	movaps	-72(%rsi),%xmm8
.size	bn_mul_mont_gather5,.-bn_mul_mont_gather5
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
.type	bn_mul4x_mont_gather5,\@function,6
bn_mul4x_mont_gather5:
$code.=<<___ if ($addx);
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes
##############################################################
# Ensure that the stack frame doesn't alias with $rptr+3*$num
# modulo 4096, which covers ret[num], am[num] and n[num]
# (see bn_exp.c). This is done to allow the memory disambiguation
# logic to do its magic. [Extra [num] is allocated in order
# to align with bn_power5's frame, which is cleansed after
# completing exponentiation. Extra 256 bytes is for the power mask
# calculated from the 7th argument, the index.]
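#
# A rough C model of the idea (simplified; the exact pointer
# arithmetic is in the lea/sub sequence below): if a t[] store and a
# ret/am/n load have addresses that are equal modulo 4096, the CPU's
# memory disambiguator may falsely order them, so the frame is slid
# until the distance modulo 4096 clears the whole window:
#
#	size_t frame = 2 * num * 8 + 320;	/* t[2*num] + mask + slack */
#	uintptr_t tp = sp - frame;
#	if ((((uintptr_t)rp - tp) & 4095) < frame)
#		tp -= 4096;			/* step past the 4K alias */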
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $rp
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*num*8+256)
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
.type	mul4x_internal,\@abi-omnipotent
	shl	\$5,$num		# $num was in bytes
	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument, index
	lea	128(%rdx,$num),%r13	# end of powers table (+size optimization)
	shr	\$5,$num		# restore $num
$STRIDE=2**5*8;			# 5 is "window size"
$N=$STRIDE/4;			# should match cache line size
	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
	lea	88-112(%rsp,$num),%r10	# place the mask after tp[num+1] (+ICache optimization)
	lea	128(%rdx),$bp		# size optimization
	pshufd	\$0,%xmm5,%xmm5		# broadcast index
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
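#
# In C terms (a hedged model of the pcmpeqd sequence below; the real
# code steps the comparands four 32-bit lanes at a time using the
# .Linc constants loaded above):
#
#	uint32_t mask[32];
#	int k;
#	for (k = 0; k < 32; k++)	/* all-ones lane where k==idx */
#		mask[k] = (k == idx) ? 0xffffffffU : 0;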
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
$code.=<<___;				# last iteration can be optimized
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	pand	`16*($i+0)-128`($bp),%xmm0	# while it's still in register
	pand	`16*($i+1)-128`($bp),%xmm1
	pand	`16*($i+2)-128`($bp),%xmm2
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
	pand	`16*($i+3)-128`($bp),%xmm3
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	movdqa	`16*($i+0)-128`($bp),%xmm4
	movdqa	`16*($i+1)-128`($bp),%xmm5
	movdqa	`16*($i+2)-128`($bp),%xmm2
	pand	`16*($i+0)+112`(%r10),%xmm4
	movdqa	`16*($i+3)-128`($bp),%xmm3
	pand	`16*($i+1)+112`(%r10),%xmm5
	pand	`16*($i+2)+112`(%r10),%xmm2
	pand	`16*($i+3)+112`(%r10),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	movq	%xmm0,$m0		# m0=bp[0]
	mov	%r13,16+8(%rsp)		# save end of b[num]
	mov	$rp, 56+8(%rsp)		# save $rp
	mov	($n0),$n0		# pull n0[0] value
	lea	($ap,$num),$ap		# end of a[num]
	mulq	$m0			# ap[0]*bp[0]
	imulq	$A[0],$m1		# "tp[0]"*n0
	add	%rax,$A[0]		# discarded
	mov	16($ap,$num),%rax
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	($ap,$num),%rax		# ap[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	lea	($np,$num),$np		# rewind $np
	lea	16+128($tp),%rdx	# where 256-byte mask is (+size optimization)
for($i=0;$i<$STRIDE/16;$i+=4) {
	movdqa	`16*($i+0)-128`($bp),%xmm0
	movdqa	`16*($i+1)-128`($bp),%xmm1
	movdqa	`16*($i+2)-128`($bp),%xmm2
	movdqa	`16*($i+3)-128`($bp),%xmm3
	pand	`16*($i+0)-128`(%rdx),%xmm0
	pand	`16*($i+1)-128`(%rdx),%xmm1
	pand	`16*($i+2)-128`(%rdx),%xmm2
	pand	`16*($i+3)-128`(%rdx),%xmm3
	pshufd	\$0x4e,%xmm4,%xmm0
	movq	%xmm0,$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	imulq	$A[0],$m1		# tp[0]*n0
	mov	$N[1],($tp)		# store upmost overflow bit
	lea	($tp,$num),$tp		# rewind $tp
	add	%rax,$A[0]		# "$N[0]", discarded
	mulq	$m0			# ap[j]*bp[i]
	add	8($tp),$A[1]		# +tp[1]
	mov	16($ap,$num),%rax
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	($ap,$num),%rax		# ap[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mov	$N[1],-16($tp)		# tp[j-1]
	lea	($np,$num),$np		# rewind $np
	add	($tp),$N[0]		# pull upmost overflow bit
	adc	\$0,$N[1]		# upmost overflow bit
	sub	$N[0],$m1		# compare top-most words
	adc	$j,$j			# $j is zero
	sub	$N[1],%rax		# %rax=-$N[1]
	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
	lea	($np),%rbp		# nptr in .sqr4x_sub
	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqr4x_sub_entry
my @ri=("%rax",$bp,$m0,$m1);
	lea	($tp,$num),$tp		# rewind $tp
	lea	($np,$N[1],8),$np
	mov	56+8(%rsp),$rp		# restore $rp
.size	mul4x_internal,.-mul4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8
my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
.type	bn_power5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10d	# 3*$num
##############################################################
# Ensure that the stack frame doesn't alias with $rptr+3*$num
# modulo 4096, which covers ret[num], am[num] and n[num]
# (see bn_exp.c). This is done to allow the memory disambiguation
# logic to do its magic. [Extra 256 bytes is for the power mask
# calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*num*8+256)
##############################################################
# +0	saved $num, used in reduction section
# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp
	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num, used in sqr8x
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	call	__bn_sqr8x_internal
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_power5,.-bn_power5
.globl	bn_sqr8x_internal
.hidden	bn_sqr8x_internal
.type	bn_sqr8x_internal,\@abi-omnipotent
__bn_sqr8x_internal:
##############################################################
# a) multiply-n-add everything but a[i]*a[i];
# b) shift result of a) by 1 to the left and accumulate
#    a[i]*a[i] products;
##############################################################
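#
# A hedged C model of that strategy (illustrative only: 32-bit words
# with explicit carries stand in for the 64-bit registers and flag
# juggling below; needs <stdint.h> and <string.h>):
#
#	static void sqr_model(uint32_t *t, const uint32_t *a, int n)
#	{
#		uint64_t acc, carry;
#		int i, j;
#		memset(t, 0, 2 * n * sizeof(*t));
#		for (i = 0; i < n; i++) {	/* a) everything but a[i]*a[i] */
#			carry = 0;
#			for (j = i + 1; j < n; j++) {
#				acc = (uint64_t)a[i] * a[j] + t[i + j] + carry;
#				t[i + j] = (uint32_t)acc;
#				carry = acc >> 32;
#			}
#			t[i + n] = (uint32_t)carry;
#		}
#		carry = 0;			/* b) shift left by one bit... */
#		for (i = 0; i < 2 * n; i++) {
#			uint32_t top = t[i] >> 31;
#			t[i] = (t[i] << 1) | (uint32_t)carry;
#			carry = top;
#		}
#		carry = 0;			/* ...and accumulate a[i]*a[i] */
#		for (i = 0; i < n; i++) {
#			uint64_t sq = (uint64_t)a[i] * a[i];
#			acc = (uint64_t)t[2 * i] + (uint32_t)sq + carry;
#			t[2 * i] = (uint32_t)acc;
#			acc = (uint64_t)t[2 * i + 1] + (sq >> 32) + (acc >> 32);
#			t[2 * i + 1] = (uint32_t)acc;
#			carry = acc >> 32;
#		}
#	}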
	lea	32(%r10),$i		# $i=-($num-32)
	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
	mov	$num,$j			# $j=$num
					# comments apply to $num==8 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$A0[0]		# a[1]*a[0]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$j)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	16($aptr,$j),$ai	# a[6]
	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
	mov	$A0[0],8($tptr,$j)	# t[5]
	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
	mov	24($aptr,$j),$ai	# a[7]
	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
	mov	$A0[1],16($tptr,$j)	# t[6]
	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[7]
	mov	$A0[0],-8($tptr,$j)	# t[7]
	mov	$A1[1],($tptr)		# t[8]
	mov	%rdx,8($tptr)		# t[9]
.Lsqr4x_outer:				# comments apply to $num==6 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	-24($tptr,$i),$A0[0]	# t[1]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	-8($tptr,$i),$A1[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$i)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	($tptr,$j),$A1[1]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	8($tptr,$j),$A1[0]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below
					# comments apply to $num==4 case
	mov	-32($aptr),$a0		# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr),%rax		# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr),$ai		# a[2]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
	mov	$A0[0],-24($tptr)	# t[1]
	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
	mov	-8($aptr),$ai		# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
	mov	$A0[1],-16($tptr)	# t[2]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr)	# t[3]
	mov	-16($aptr),%rax		# a[2]
	mov	$A1[1],($tptr)		# t[4]
	mov	%rdx,8($tptr)		# t[5]
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);
	sub	$num,$i			# $i=16-$num
	add	$A1[0],%rax		# t[5]
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]
	mov	-16($aptr,$i),%rax	# a[0]
	lea	48+8(%rsp),$tptr
	xor	$A0[0],$A0[0]		# t[0]
	mov	8($tptr),$A0[1]		# t[1]
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr)
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jnz	.Lsqr4x_shift_n_add
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	$S[2],-16($tptr)
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
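#
# A hedged C model of word-by-word Montgomery reduction (32-bit words
# for brevity; t[] holds the 2*n-word product plus one spare word, and
# n0 is -np[0]^-1 mod 2^32; the upmost carry provably dies out by
# t[2*n], so the ripple loop terminates):
#
#	static void redc_model(uint32_t *t, const uint32_t *np,
#	                       int n, uint32_t n0)
#	{
#		uint64_t acc, carry;
#		int i, j;
#		for (i = 0; i < n; i++) {
#			uint32_t m = t[i] * n0;	/* "t[0]"*n0 mod 2^32 */
#			carry = 0;
#			for (j = 0; j < n; j++) {
#				acc = (uint64_t)m * np[j] + t[i + j] + carry;
#				t[i + j] = (uint32_t)acc; /* j==0: zero, discarded */
#				carry = acc >> 32;
#			}
#			for (j = i + n; carry != 0; j++) { /* ripple upmost carry */
#				acc = (uint64_t)t[j] + carry;
#				t[j] = (uint32_t)acc;
#				carry = acc >> 32;
#			}
#		}
#		/* result is t[n..2n], conditionally reduced by np below */
#	}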
my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
	lea	($nptr,$num),%rcx	# end of n[]
	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
	lea	48+8(%rsp,$num),$tptr	# end of initial t[] window
	jmp	.L8x_reduction_loop
.L8x_reduction_loop:
	lea	($tptr,$num),$tptr	# start of current t[] window
	mov	%rax,(%rdx)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	imulq	32+8(%rsp),$m0		# n0*a[0]
	mov	8*0($nptr),%rax		# n[0]
	mov	8*1($nptr),%rax		# n[1]
	mov	$m0,48-8+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mov	32+8(%rsp),$carry	# pull n0, borrow $carry
	imulq	%r8,$carry		# modulo-scheduled
	mov	$carry,$m0		# n0*a[i]
	mov	8*0($nptr),%rax		# n[0]
	lea	8*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	sbb	$carry,$carry		# top carry
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	%r8,($tptr)		# save result
	lea	8($tptr),$tptr		# $tptr++
	mov	48-16+8(%rsp,%rcx,8),$m0	# pull n0*a[i]
	mov	8*0($nptr),%rax		# pull n[0]
	lea	8*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.L8x_tail_done		# break out of loop
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	8*0($nptr),%rax		# pull n[0]
	sbb	$carry,$carry		# top carry
	add	(%rdx),%r8		# can this overflow?
	adc	\$0,%r15		# can't overflow, because we
					# started with "overhung" part
	adc	\$0,%rax		# top-most carry
	mov	-8($nptr),%rcx		# np[num-1]
	movq	%xmm2,$nptr		# restore $nptr
	mov	%r8,8*0($tptr)		# store top 512 bits
	movq	%xmm3,$num		# $num is %r9, can't be moved upwards
	lea	8*8($tptr),$tptr
	cmp	%rdx,$tptr		# end of t[]?
	jb	.L8x_reduction_loop
##############################################################
# Post-condition, 4x unrolled
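#
# What the unrolled code below computes, modeled in C (a hedged
# sketch with 32-bit words; "top" stands for the upmost overflow bit
# produced by the reduction above):
#
#	uint32_t mask, borrow = 0;
#	uint64_t d;
#	int i;
#	for (i = 0; i < n; i++) {	/* r = t - np, word by word */
#		d = (uint64_t)t[i] - np[i] - borrow;
#		r[i] = (uint32_t)d;
#		borrow = (uint32_t)(d >> 32) & 1;
#	}
#	mask = 0 - (uint32_t)(top < borrow);	/* all-ones iff t < np */
#	for (i = 0; i < n; i++) {
#		r[i] = (r[i] & ~mask) | (t[i] & mask);
#		t[i] = 0;		/* zap temporary vector */
#	}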
my ($tptr,$nptr)=("%rbx","%rbp");
	#xor	%rsi,%rsi		# %rsi was $carry above
	sub	%r15,%rcx		# compare top-most words
	lea	(%rdi,$num),$tptr	# %rdi was $tptr above
	movq	%xmm1,$rptr		# restore $rptr
	movq	%xmm1,$aptr		# prepare for back-to-back call
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqr4x_sub_entry
	lea	8*4($nptr),$nptr
	neg	%r10			# mov %r10,%cf
	lea	8*4($tptr),$tptr
	sbb	%r10,%r10		# mov %cf,%r10
	lea	8*4($rptr),$rptr
	mov	$num,%r10		# prepare for back-to-back call
	neg	$num			# restore $num
.size	bn_sqr8x_internal,.-bn_sqr8x_internal
.globl	bn_from_montgomery
.type	bn_from_montgomery,\@abi-omnipotent
	testl	\$7,`($win64?"48(%rsp)":"%r9d")`
.size	bn_from_montgomery,.-bn_from_montgomery
.type	bn_from_mont8x,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes
##############################################################
# Ensure that the stack frame doesn't alias with $rptr+3*$num
# modulo 4096, which covers ret[num], am[num] and n[num]
# (see bn_exp.c). The stack is allocated to align with
# bn_power5's frame, and as bn_from_montgomery happens to be
# the last operation, we use the opportunity to cleanse it.
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
##############################################################
# +0	saved $num, used in reduction section
# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp
	movdqu	($aptr),%xmm1
	movdqu	16($aptr),%xmm2
	movdqu	32($aptr),%xmm3
	movdqa	%xmm0,(%rax,$num)
	movdqu	48($aptr),%xmm4
	movdqa	%xmm0,16(%rax,$num)
	.byte	0x48,0x8d,0xb6,0x40,0x00,0x00,0x00	# lea 64($aptr),$aptr
	movdqa	%xmm0,32(%rax,$num)
	movdqa	%xmm2,16(%rax)
	movdqa	%xmm0,48(%rax,$num)
	movdqa	%xmm3,32(%rax)
	movdqa	%xmm4,48(%rax)
	movq	%r10, %xmm3		# -num
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	lea	(%rax,$num),$rptr
	call	sqrx8x_reduction
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero
	call	sqr8x_reduction
	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero
	movdqa	%xmm0,16*0(%rax)
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	jnz	.Lfrom_mont_zero
.size	bn_from_mont8x,.-bn_from_mont8x
my $bp="%rdx";	# restore original value
.type	bn_mulx4x_mont_gather5,\@function,6
bn_mulx4x_mont_gather5:
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes
##############################################################
# Ensure that the stack frame doesn't alias with $rptr+3*$num
# modulo 4096, which covers ret[num], am[num] and n[num]
# (see bn_exp.c). This is done to allow the memory disambiguation
# logic to do its magic. [Extra [num] is allocated in order
# to align with bn_power5's frame, which is cleansed after
# completing exponentiation. Extra 256 bytes is for the power mask
# calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
	and	\$-64,%rsp		# ensure alignment
##############################################################
# +8	off-loaded &b[i]
	mov	$n0, 32(%rsp)		# save *n0
	mov	%rax,40(%rsp)		# save original %rsp
	call	mulx4x_internal
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
.type	mulx4x_internal,\@abi-omnipotent
	mov	$num,8(%rsp)		# save -$num (it was in bytes)
	neg	$num			# restore $num
	neg	%r10			# restore $num
	lea	128($bp,$num),%r13	# end of powers table (+size optimization)
	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument
	lea	.Linc(%rip),%rax
	mov	%r13,16+8(%rsp)		# end of b[num]
	mov	$num,24+8(%rsp)		# inner counter
	mov	$rp, 56+8(%rsp)		# save $rp
my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $STRIDE=2**5*8;		# 5 is "window size"
my $N=$STRIDE/4;		# should match cache line size
	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
	lea	128($bp),$bptr		# size optimization
	pshufd	\$0,%xmm5,%xmm5		# broadcast index
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
$code.=<<___;				# last iteration can be optimized
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	pand	`16*($i+0)-128`($bptr),%xmm0	# while it's still in register
	pand	`16*($i+1)-128`($bptr),%xmm1
	pand	`16*($i+2)-128`($bptr),%xmm2
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
	pand	`16*($i+3)-128`($bptr),%xmm3
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	movdqa	`16*($i+0)-128`($bptr),%xmm4
	movdqa	`16*($i+1)-128`($bptr),%xmm5
	movdqa	`16*($i+2)-128`($bptr),%xmm2
	pand	`16*($i+0)+112`(%r10),%xmm4
	movdqa	`16*($i+3)-128`($bptr),%xmm3
	pand	`16*($i+1)+112`(%r10),%xmm5
	pand	`16*($i+2)+112`(%r10),%xmm2
	pand	`16*($i+3)+112`(%r10),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	lea	$STRIDE($bptr),$bptr
	movq	%xmm0,%rdx		# bp[0]
	lea	64+8*4+8(%rsp),$tptr
	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r12	# a[1]*b[0]
	mulx	2*8($aptr),%rax,%r13	# ...
	mulx	3*8($aptr),%rax,%r14
	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
	mov	$bptr,8+8(%rsp)		# off-load &b[i]
	lea	4*8($aptr),$aptr
	adcx	$zero,%r14		# cf=0
	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*8($nptr),%rax,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-8*2($tptr)
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	mulx	2*8($aptr),%r12,%rax	# ...
	mulx	3*8($aptr),%r13,%r14
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r12,-3*8($tptr)
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	8(%rsp),$num		# load -num
	adc	$zero,%r15		# modulo-scheduled
	lea	($aptr,$num),$aptr	# rewind $aptr
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)
	lea	16-256($tptr),%r10	# where 256-byte mask is (+density control)
for($i=0;$i<$STRIDE/16;$i+=4) {
	movdqa	`16*($i+0)-128`($bptr),%xmm0
	movdqa	`16*($i+1)-128`($bptr),%xmm1
	movdqa	`16*($i+2)-128`($bptr),%xmm2
	pand	`16*($i+0)+256`(%r10),%xmm0
	movdqa	`16*($i+3)-128`($bptr),%xmm3
	pand	`16*($i+1)+256`(%r10),%xmm1
	pand	`16*($i+2)+256`(%r10),%xmm2
	pand	`16*($i+3)+256`(%r10),%xmm3
	pshufd	\$0x4e,%xmm4,%xmm0
	lea	$STRIDE($bptr),$bptr
	movq	%xmm0,%rdx		# m0=bp[i]
	mov	$zero,($tptr)		# save top-most carry
	lea	4*8($tptr,$num),$tptr	# rewind $tptr
	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	$zero,$zero		# cf=0, of=0
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi		# +t[0]
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	mulx	3*8($aptr),%rdx,%r14
	adox	-2*8($tptr),%r12
	lea	($nptr,$num),$nptr	# rewind $nptr
	lea	4*8($aptr),$aptr
	adox	-1*8($tptr),%r13
	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
	mov	$bptr,8+8(%rsp)		# off-load &b[i]
	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*8($nptr),%rax,%r11
	mulx	2*8($nptr),%rax,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	mov	%r12,-8*2($tptr)
	lea	4*8($nptr),$nptr
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	mulx	3*8($aptr),%r13,%r14
	adcx	2*8($tptr),%r12
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0
	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	lea	4*8($nptr),$nptr
	mov	%r12,-3*8($tptr)
	mov	%r13,-2*8($tptr)
	dec	$bptr			# of=0, pass cf
	mov	0+8(%rsp),$num		# load -num
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$bptr	# pull top-most carry to %cf
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	lea	($aptr,$num),$aptr	# rewind $aptr
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)
	mov	($nptr,$num),%r12
	lea	($nptr,$num),%rbp	# rewind $nptr
	lea	($tptr,$num),%rdi	# rewind $tptr
	sub	%r14,%r10		# compare top-most words
	sub	%r8,%rax		# %rax=-%r8
	mov	56+8(%rsp),%rdx		# restore rp
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqrx4x_sub_entry	# common post-condition
.size	mulx4x_internal,.-mulx4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8
my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
.type	bn_powerx5,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes
##############################################################
# Ensure that the stack frame doesn't alias with $rptr+3*$num
# modulo 4096, which covers ret[num], am[num] and n[num]
# (see bn_exp.c). This is done to allow the memory disambiguation
# logic to do its magic. [Extra 256 bytes is for the power mask
# calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rsp		# align with $aptr
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*$num*8+256)
##############################################################
# +0	saved $num, used in reduction section
# +8	&t[2*$num], used in reduction section
# +16	intermediate carry bit
# +24	top-most carry bit, used in reduction section
	movq	$rptr,%xmm1		# save $rptr
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num
	mov	%rax, 40(%rsp)		# save original %rsp
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	call	__bn_sqrx8x_internal
	mov	%r10,$num		# -num
	call	mulx4x_internal
	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_powerx5,.-bn_powerx5
.globl	bn_sqrx8x_internal
.hidden	bn_sqrx8x_internal
.type	bn_sqrx8x_internal,\@abi-omnipotent
__bn_sqrx8x_internal:
##################################################################
# a) multiply-n-add everything but a[i]*a[i];
# b) shift result of a) by 1 to the left and accumulate
#    a[i]*a[i] products;
##################################################################
# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
my ($zero,$carry)=("%rbp","%rcx");
	lea	48+8(%rsp),$tptr
	lea	($aptr,$num),$aaptr
	mov	$num,0+8(%rsp)		# save $num
	mov	$aaptr,8+8(%rsp)	# save end of $aptr
	jmp	.Lsqr8x_zero_start
	.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
	movdqa	%xmm0,0*8($tptr)
	movdqa	%xmm0,2*8($tptr)
	movdqa	%xmm0,4*8($tptr)
	movdqa	%xmm0,6*8($tptr)
.Lsqr8x_zero_start:			# aligned at 32
	movdqa	%xmm0,8*8($tptr)
	movdqa	%xmm0,10*8($tptr)
	movdqa	%xmm0,12*8($tptr)
	movdqa	%xmm0,14*8($tptr)
	lea	16*8($tptr),$tptr
	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
	#xor	%r9,%r9			# t[1], ex-$num, zero already
	lea	48+8(%rsp),$tptr
	xor	$zero,$zero		# cf=0, of=0
	jmp	.Lsqrx8x_outer_loop
.Lsqrx8x_outer_loop:
	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx 3*8($aptr),%r10,%rax # ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx 4*8($aptr),%r11,%rax
	mulx	5*8($aptr),%r12,%rax
	mulx	6*8($aptr),%r13,%rax
	mulx	7*8($aptr),%r14,%r15
	mov	1*8($aptr),%rdx		# a[1]
	mov	%r8,1*8($tptr)		# t[1]
	mov	%r9,2*8($tptr)		# t[2]
	sbb	$carry,$carry		# mov %cf,$carry
	xor	$zero,$zero		# cf=0, of=0
	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
	mulx	4*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx 5*8($aptr),%r11,%rax
	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx 6*8($aptr),%r12,%rbx
	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx 7*8($aptr),%r13,%r14
	mov	2*8($aptr),%rdx		# a[2]
	adox	$zero,%r14		# of=0
	adcx	$zero,%r14		# cf=0
	mov	%r8,3*8($tptr)		# t[3]
	mov	%r9,4*8($tptr)		# t[4]
	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
	mulx	5*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx 6*8($aptr),%r11,%rax
	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx 7*8($aptr),%r12,%r13
	mov	3*8($aptr),%rdx		# a[3]
	mov	%r8,5*8($tptr)		# t[5]
	mov	%r9,6*8($tptr)		# t[6]
	mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
	adox	$zero,%r13		# of=0
	adcx	$zero,%r13		# cf=0
	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
	mulx	6*8($aptr),%r10,%rax	# ...
	mulx	7*8($aptr),%r11,%r12
	mov	4*8($aptr),%rdx		# a[4]
	mov	5*8($aptr),%r14		# a[5]
	mov	6*8($aptr),%r15		# a[6]
	adox	$zero,%r12		# of=0
	adcx	$zero,%r12		# cf=0
	mov	%r8,7*8($tptr)		# t[7]
	mov	%r9,8*8($tptr)		# t[8]
	mulx	%r14,%r9,%rax		# a[5]*a[4]
	mov	7*8($aptr),%r8		# a[7]
	mulx	%r15,%r10,%rbx		# a[6]*a[4]
	mulx	%r8,%r11,%rax		# a[7]*a[4]
	mov	%r14,%rdx		# a[5]
	#adox	$zero,%rax		# of=0
	adcx	$zero,%rax		# cf=0
	mulx	%r15,%r14,%rbx		# a[6]*a[5]
	mulx	%r8,%r12,%r13		# a[7]*a[5]
	mov	%r15,%rdx		# a[6]
	lea	8*8($aptr),$aptr
	mulx	%r8,%r8,%r14		# a[7]*a[6]
	je	.Lsqrx8x_outer_break
	neg	$carry			# mov $carry,%cf
	adcx	9*8($tptr),%r9		# +=t[9]
	adcx	10*8($tptr),%r10	# ...
	adcx	11*8($tptr),%r11
	adc	12*8($tptr),%r12
	adc	13*8($tptr),%r13
	adc	14*8($tptr),%r14
	adc	15*8($tptr),%r15
	lea	2*64($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,$carry
	mov	-64($aptr),%rdx		# a[0]
	mov	%rax,16+8(%rsp)		# offload $carry
	mov	$tptr,24+8(%rsp)
	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
	xor	%eax,%eax		# cf=0, of=0
	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
	adcx	%rax,%rbx		# +=t[8]
	mulx	1*8($aaptr),%rax,%r9	# ...
	mulx	2*8($aaptr),%rax,%r10
	mulx	3*8($aaptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 4*8($aaptr),%rax,%r12
	mulx	5*8($aaptr),%rax,%r13
	mulx	6*8($aaptr),%rax,%r14
	mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx 7*8($aaptr),%rax,%r15
	mov	8($aptr,%rcx,8),%rdx	# a[i]
	adox	%rbx,%r15		# %rbx is 0, of=0
	adcx	%rbx,%r15		# cf=0
	lea	8*8($aaptr),$aaptr
	cmp	8+8(%rsp),$aaptr	# done?
	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,%rax
	xor	%ebx,%ebx		# cf=0, of=0
	mov	%rax,16+8(%rsp)		# offload carry
	sub	16+8(%rsp),%r8		# consume last carry
	mov	24+8(%rsp),$carry	# initial $tptr, borrow $carry
	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
	xor	%ebp,%ebp		# xor $zero,$zero
	cmp	$carry,$tptr		# cf=0, of=0
	je	.Lsqrx8x_outer_loop
	mov	2*8($carry),%r10
	mov	3*8($carry),%r11
	mov	4*8($carry),%r12
	mov	5*8($carry),%r13
	mov	6*8($carry),%r14
	mov	7*8($carry),%r15
	jmp	.Lsqrx8x_outer_loop
.Lsqrx8x_outer_break:
	mov	%r9,9*8($tptr)		# t[9]
	movq	%xmm3,%rcx		# -$num
	mov	%r10,10*8($tptr)	# ...
	mov	%r11,11*8($tptr)
	mov	%r12,12*8($tptr)
	mov	%r13,13*8($tptr)
	mov	%r14,14*8($tptr)
	lea	48+8(%rsp),$tptr
	mov	($aptr,$i),%rdx		# a[0]
	mov	8($tptr),$A0[1]		# t[1]
	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
	mov	0+8(%rsp),$num		# restore $num
	mov	16($tptr),$A1[0]	# t[2]	# prefetch
	mov	24($tptr),$A1[1]	# t[3]	# prefetch
	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned
.Lsqrx4x_shift_n_add:
	.byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov 8($aptr,$i),%rdx	# a[i+1]	# prefetch
	.byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov 32($tptr),$A0[0]	# t[2*i+4]	# prefetch
	mov	40($tptr),$A0[1]	# t[2*i+4+1]	# prefetch
	mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
	mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
	mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
	mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
	mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
	mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
	jrcxz	.Lsqrx4x_shift_n_add_break
	.byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov 0($aptr,$i),%rdx	# a[i+4]	# prefetch
	mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
	mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
	jmp	.Lsqrx4x_shift_n_add
.Lsqrx4x_shift_n_add_break:
	lea	64($tptr),$tptr		# end of t[] buffer
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
	xor	%eax,%eax		# initial top-most carry bit
	mov	32+8(%rsp),%rbx		# n0
	mov	48+8(%rsp),%rdx		# "%r8", 8*0($tptr)
	lea	-8*8($nptr,$num),%rcx	# end of n[]
	#lea	48+8(%rsp,$num,2),$tptr	# end of t[] buffer
	mov	%rcx, 0+8(%rsp)		# save end of n[]
	mov	$tptr,8+8(%rsp)		# save end of t[]
	lea	48+8(%rsp),$tptr	# initial t[] window
	jmp	.Lsqrx8x_reduction_loop
.Lsqrx8x_reduction_loop:
	imulq	%rbx,%rdx		# n0*a[i]
	mov	%rax,24+8(%rsp)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	xor	$carry,$carry		# cf=0, of=0
	mulx	8*0($nptr),%rax,%r8	# n[0]
	adcx	%rbx,%rax		# discarded
	mulx	8*1($nptr),%rbx,%r9	# n[1]
	mulx	8*2($nptr),%rbx,%r10
	mulx	8*3($nptr),%rbx,%r11
	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 8*4($nptr),%rbx,%r12
	mulx	32+8(%rsp),%rbx,%rdx	# %rdx discarded
	mov	%rax,64+48+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mulx	8*5($nptr),%rax,%r13
	mulx	8*6($nptr),%rax,%r14
	mulx	8*7($nptr),%rax,%r15
	adox	$carry,%r15		# $carry is 0
	adcx	$carry,%r15		# cf=0
	.byte	0x67,0x67,0x67
	mov	$carry,%rax		# xor %rax,%rax
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_no_tail
	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	8*8($nptr),$nptr
	adcx	8*2($tptr),%r10
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# top carry
	xor	$carry,$carry		# of=0, cf=0
	mulx	8*0($nptr),%rax,%r8
	mulx	8*1($nptr),%rax,%r9
	mulx	8*2($nptr),%rax,%r10
	mulx	8*3($nptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 8*4($nptr),%rax,%r12
	mulx	8*5($nptr),%rax,%r13
	mulx	8*6($nptr),%rax,%r14
	mulx	8*7($nptr),%rax,%r15
	mov	72+48+8(%rsp,%rcx,8),%rdx	# pull n0*a[i]
	mov	%rbx,($tptr,%rcx,8)	# save result
	adcx	$carry,%r15		# cf=0
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_tail_done	# break out of loop
	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	8*8($nptr),$nptr