3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # Companion to x86_64-mont.pl that optimizes cache-timing attack
13 # countermeasures. The subroutines are produced by replacing bp[i]
14 # references in their x86_64-mont.pl counterparts with cache-neutral
15 # references to the powers table computed in BN_mod_exp_mont_consttime.
16 # In addition, a subroutine that scatters elements of the powers table
17 # is implemented, so that scatter-/gathering can be tuned without
18 # bn_exp.c modifications.
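#
# As an illustrative C sketch of the gathering idea (an assumption
# about the technique, not the code generated below): every table
# slot is touched and the wanted one is selected with a mask, so the
# access pattern is independent of the secret index:
#
#	BN_ULONG acc = 0;
#	for (i = 0; i < 32; i++)	/* 32 = 2^(window size) */
#		acc |= table[i] & (0 - (BN_ULONG)(i == idx));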
22 # Add MULX/AD*X code paths and additional interfaces to optimize for
23 # branch prediction unit. For input lengths that are multiples of 8
24 # the np argument is not just the modulus value, but one interleaved
25 # with 0. This is to optimize post-condition...
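#
# [Illustrative assumption about the interleaved layout: bn_exp.c
# presumably prepares something like
#
#	for (i = 0; i < num; i++)
#		np2[2*i] = np[i], np2[2*i+1] = 0;
#
# see BN_mod_exp_mont_consttime for the authoritative code.]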
29 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
31 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
33 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
34 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
35 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
36 die "can't locate x86_64-xlate.pl";
38 open OUT,"| \"$^X\" $xlate $flavour $output";
41 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
42 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
46 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
47 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
51 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
52 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
56 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
57 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
61 # int bn_mul_mont_gather5(
62 $rp="%rdi"; # BN_ULONG *rp,
63 $ap="%rsi"; # const BN_ULONG *ap,
64 $bp="%rdx"; # const BN_ULONG *bp,
65 $np="%rcx"; # const BN_ULONG *np,
66 $n0="%r8"; # const BN_ULONG *n0,
67 $num="%r9"; # int num,
68 # int idx); # 0 to 2^5-1, "index" in $bp holding
69 # pre-computed powers of a', interlaced
70 # in such a manner that b[0] is $bp[idx],
71 # b[1] is $bp[2^5+idx], etc.
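#			# Illustratively, the gather below amounts to
#			#	b[j] = bp[(j << 5) + idx], j = 0..num-1,
#			# performed without an idx-dependent access
#			# pattern (see the mask code further down).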
83 .extern OPENSSL_ia32cap_P
85 .globl bn_mul_mont_gather5
86 .type bn_mul_mont_gather5,\@function,6
92 $code.=<<___ if ($addx);
93 mov OPENSSL_ia32cap_P+8(%rip),%r11d
102 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
113 lea -264(%rsp,%r11,8),%rsp # tp=alloca(8*(num+2)+256+8)
114 and \$-1024,%rsp # minimize TLB usage
116 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
118 lea 128($bp),%r12 # reassign $bp (+size optimization)
121 $STRIDE=2**5*8; # 5 is "window size"
122 $N=$STRIDE/4; # should match cache line size
124 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
125 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
126 lea 24-112(%rsp,$num,8),%r10 # place the mask after tp[num+3] (+ICache optimization)
129 pshufd \$0,%xmm5,%xmm5 # broadcast index
133 ########################################################################
134 # calculate mask by comparing 0..31 to index and save result to stack
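#
# Roughly (illustrative, not the literal code): the .Linc constants and
# the paddd steps below run the lane values through 0,1,2,...,31, and
# each pcmpeqd against the broadcast index leaves
#
#	mask[i] = (i == idx) ? 0xffffffff : 0;
#
# on the stack; the pand/por pass that follows then ANDs every table
# entry with its mask and ORs the results together.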
138 pcmpeqd %xmm5,%xmm0 # compare to 1,0
142 for($k=0;$k<$STRIDE/16-4;$k+=4) {
145 pcmpeqd %xmm5,%xmm1 # compare to 3,2
146 movdqa %xmm0,`16*($k+0)+112`(%r10)
150 pcmpeqd %xmm5,%xmm2 # compare to 5,4
151 movdqa %xmm1,`16*($k+1)+112`(%r10)
155 pcmpeqd %xmm5,%xmm3 # compare to 7,6
156 movdqa %xmm2,`16*($k+2)+112`(%r10)
161 movdqa %xmm3,`16*($k+3)+112`(%r10)
165 $code.=<<___; # last iteration can be optimized
168 movdqa %xmm0,`16*($k+0)+112`(%r10)
173 movdqa %xmm1,`16*($k+1)+112`(%r10)
176 movdqa %xmm2,`16*($k+2)+112`(%r10)
177 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
179 pand `16*($k+1)-128`($bp),%xmm1
180 pand `16*($k+2)-128`($bp),%xmm2
181 movdqa %xmm3,`16*($k+3)+112`(%r10)
182 pand `16*($k+3)-128`($bp),%xmm3
186 for($k=0;$k<$STRIDE/16-4;$k+=4) {
188 movdqa `16*($k+0)-128`($bp),%xmm4
189 movdqa `16*($k+1)-128`($bp),%xmm5
190 movdqa `16*($k+2)-128`($bp),%xmm2
191 pand `16*($k+0)+112`(%r10),%xmm4
192 movdqa `16*($k+3)-128`($bp),%xmm3
193 pand `16*($k+1)+112`(%r10),%xmm5
195 pand `16*($k+2)+112`(%r10),%xmm2
197 pand `16*($k+3)+112`(%r10),%xmm3
204 pshufd \$0x4e,%xmm0,%xmm1
207 movq %xmm0,$m0 # m0=bp[0]
209 mov ($n0),$n0 # pull n0[0] value
216 mulq $m0 # ap[0]*bp[0]
220 imulq $lo0,$m1 # "tp[0]"*n0
224 add %rax,$lo0 # discarded
237 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
240 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
244 mulq $m0 # ap[j]*bp[0]
253 jne .L1st # note that upon exit $j==$num, so
254 # they can be used interchangeably
258 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
260 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
267 mov $hi1,-8(%rsp,$num,8)
268 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
274 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
279 for($k=0;$k<$STRIDE/16;$k+=4) {
281 movdqa `16*($k+0)-128`($bp),%xmm0
282 movdqa `16*($k+1)-128`($bp),%xmm1
283 movdqa `16*($k+2)-128`($bp),%xmm2
284 movdqa `16*($k+3)-128`($bp),%xmm3
285 pand `16*($k+0)-128`(%rdx),%xmm0
286 pand `16*($k+1)-128`(%rdx),%xmm1
288 pand `16*($k+2)-128`(%rdx),%xmm2
290 pand `16*($k+3)-128`(%rdx),%xmm3
297 pshufd \$0x4e,%xmm4,%xmm0
301 mov ($ap),%rax # ap[0]
302 movq %xmm0,$m0 # m0=bp[i]
308 mulq $m0 # ap[0]*bp[i]
309 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
313 imulq $lo0,$m1 # tp[0]*n0
317 add %rax,$lo0 # discarded
320 mov 8(%rsp),$lo0 # tp[1]
331 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
334 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
338 mulq $m0 # ap[j]*bp[i]
342 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
349 jne .Linner # note that upon exit $j==$num, so
350 # they can be used interchangeably
353 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
354 mov (%rsp,$num,8),$lo0
356 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
362 add $lo0,$hi1 # pull upmost overflow bit
364 mov $hi1,-8(%rsp,$num,8)
365 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
371 xor $i,$i # i=0 and clear CF!
372 mov (%rsp),%rax # tp[0]
373 lea (%rsp),$ap # borrow ap for tp
377 .Lsub: sbb ($np,$i,8),%rax
378 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
379 mov 8($ap,$i,8),%rax # tp[i+1]
381 dec $j # doesn't affect CF!
384 sbb \$0,%rax # handle upmost overflow bit
391 or $np,$ap # ap=borrow?tp:rp
393 .Lcopy: # copy or in-place refresh
395 mov $i,(%rsp,$i,8) # zap temporary vector
396 mov %rax,($rp,$i,8) # rp[i]=tp[i]
401 mov 8(%rsp,$num,8),%rsi # restore %rsp
413 .size bn_mul_mont_gather5,.-bn_mul_mont_gather5
416 my @A=("%r10","%r11");
417 my @N=("%r13","%rdi");
419 .type bn_mul4x_mont_gather5,\@function,6
421 bn_mul4x_mont_gather5:
424 $code.=<<___ if ($addx);
426 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
440 shl \$3,${num}d # convert $num to bytes
441 lea ($num,$num,2),%r10 # 3*$num in bytes
444 ##############################################################
445 # Ensure that stack frame doesn't alias with $rptr+3*$num
446 # modulo 4096, which covers ret[num], am[num] and n[num]
447 # (see bn_exp.c). This is done to allow the memory disambiguation
448 # logic to do its magic. [An extra [num] is allocated in order
449 # to align with bn_power5's frame, which is cleansed after
450 # completing exponentiation. An extra 256 bytes is for the power mask
451 # calculated from the 7th argument, the index.]
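#
# In rough C terms (illustrative, not the exact instruction sequence):
#
#	off = (rptr - sp) & 4095;	/* distance modulo page size */
#	/* adjust sp by off so that stores into the new frame and
#	   loads from ret[num], am[num] and n[num] cannot land on
#	   conflicting page offsets */
#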
453 lea -320(%rsp,$num,2),%r11
458 sub %r11,%rsp # align with $rp
459 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
464 lea 4096-320(,$num,2),%r10
465 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
479 mov 40(%rsp),%rsi # restore %rsp
491 .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
493 .type mul4x_internal,\@abi-omnipotent
496 shl \$5,$num # $num was in bytes
497 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
499 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
500 shr \$5,$num # restore $num
503 $STRIDE=2**5*8; # 5 is "window size"
504 $N=$STRIDE/4; # should match cache line size
507 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
508 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
509 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
510 lea 128(%rdx),$bp # size optimization
512 pshufd \$0,%xmm5,%xmm5 # broadcast index
517 ########################################################################
518 # calculate mask by comparing 0..31 to index and save result to stack
522 pcmpeqd %xmm5,%xmm0 # compare to 1,0
526 for($i=0;$i<$STRIDE/16-4;$i+=4) {
529 pcmpeqd %xmm5,%xmm1 # compare to 3,2
530 movdqa %xmm0,`16*($i+0)+112`(%r10)
534 pcmpeqd %xmm5,%xmm2 # compare to 5,4
535 movdqa %xmm1,`16*($i+1)+112`(%r10)
539 pcmpeqd %xmm5,%xmm3 # compare to 7,6
540 movdqa %xmm2,`16*($i+2)+112`(%r10)
545 movdqa %xmm3,`16*($i+3)+112`(%r10)
549 $code.=<<___; # last iteration can be optimized
552 movdqa %xmm0,`16*($i+0)+112`(%r10)
557 movdqa %xmm1,`16*($i+1)+112`(%r10)
560 movdqa %xmm2,`16*($i+2)+112`(%r10)
561 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
563 pand `16*($i+1)-128`($bp),%xmm1
564 pand `16*($i+2)-128`($bp),%xmm2
565 movdqa %xmm3,`16*($i+3)+112`(%r10)
566 pand `16*($i+3)-128`($bp),%xmm3
570 for($i=0;$i<$STRIDE/16-4;$i+=4) {
572 movdqa `16*($i+0)-128`($bp),%xmm4
573 movdqa `16*($i+1)-128`($bp),%xmm5
574 movdqa `16*($i+2)-128`($bp),%xmm2
575 pand `16*($i+0)+112`(%r10),%xmm4
576 movdqa `16*($i+3)-128`($bp),%xmm3
577 pand `16*($i+1)+112`(%r10),%xmm5
579 pand `16*($i+2)+112`(%r10),%xmm2
581 pand `16*($i+3)+112`(%r10),%xmm3
588 pshufd \$0x4e,%xmm0,%xmm1
591 movq %xmm0,$m0 # m0=bp[0]
593 mov %r13,16+8(%rsp) # save end of b[num]
594 mov $rp, 56+8(%rsp) # save $rp
596 mov ($n0),$n0 # pull n0[0] value
598 lea ($ap,$num),$ap # end of a[num]
602 mulq $m0 # ap[0]*bp[0]
606 imulq $A[0],$m1 # "tp[0]"*n0
611 add %rax,$A[0] # discarded
624 mov 16($ap,$num),%rax
627 lea 4*8($num),$j # j=4
636 mulq $m0 # ap[j]*bp[0]
647 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
649 mov $N[0],-24($tp) # tp[j-1]
652 mulq $m0 # ap[j]*bp[0]
662 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
664 mov $N[1],-16($tp) # tp[j-1]
667 mulq $m0 # ap[j]*bp[0]
677 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
679 mov $N[0],-8($tp) # tp[j-1]
682 mulq $m0 # ap[j]*bp[0]
692 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
695 mov $N[1],($tp) # tp[j-1]
701 mulq $m0 # ap[j]*bp[0]
712 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
714 mov $N[0],-24($tp) # tp[j-1]
717 mulq $m0 # ap[j]*bp[0]
725 mov ($ap,$num),%rax # ap[0]
727 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
729 mov $N[1],-16($tp) # tp[j-1]
732 lea ($np,$num),$np # rewind $np
743 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
747 for($i=0;$i<$STRIDE/16;$i+=4) {
749 movdqa `16*($i+0)-128`($bp),%xmm0
750 movdqa `16*($i+1)-128`($bp),%xmm1
751 movdqa `16*($i+2)-128`($bp),%xmm2
752 movdqa `16*($i+3)-128`($bp),%xmm3
753 pand `16*($i+0)-128`(%rdx),%xmm0
754 pand `16*($i+1)-128`(%rdx),%xmm1
756 pand `16*($i+2)-128`(%rdx),%xmm2
758 pand `16*($i+3)-128`(%rdx),%xmm3
765 pshufd \$0x4e,%xmm4,%xmm0
768 movq %xmm0,$m0 # m0=bp[i]
772 mulq $m0 # ap[0]*bp[i]
773 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
777 imulq $A[0],$m1 # tp[0]*n0
779 mov $N[1],($tp) # store upmost overflow bit
781 lea ($tp,$num),$tp # rewind $tp
784 add %rax,$A[0] # "$N[0]", discarded
789 mulq $m0 # ap[j]*bp[i]
793 add 8($tp),$A[1] # +tp[1]
799 mov 16($ap,$num),%rax
801 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
802 lea 4*8($num),$j # j=4
810 mulq $m0 # ap[j]*bp[i]
814 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
825 mov $N[1],-32($tp) # tp[j-1]
828 mulq $m0 # ap[j]*bp[i]
842 mov $N[0],-24($tp) # tp[j-1]
845 mulq $m0 # ap[j]*bp[i]
849 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
859 mov $N[1],-16($tp) # tp[j-1]
862 mulq $m0 # ap[j]*bp[i]
877 mov $N[0],-8($tp) # tp[j-1]
883 mulq $m0 # ap[j]*bp[i]
887 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
898 mov $N[1],-32($tp) # tp[j-1]
901 mulq $m0 # ap[j]*bp[i]
912 mov ($ap,$num),%rax # ap[0]
916 mov $N[0],-24($tp) # tp[j-1]
919 mov $N[1],-16($tp) # tp[j-1]
920 lea ($np,$num),$np # rewind $np
925 add ($tp),$N[0] # pull upmost overflow bit
926 adc \$0,$N[1] # upmost overflow bit
935 sub $N[0],$m1 # compare top-most words
936 adc $j,$j # $j is zero
938 sub $N[1],%rax # %rax=-$N[1]
939 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
941 lea ($np),%rbp # nptr in .sqr4x_sub
944 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
945 dec %r12 # so that after 'not' we get -n[0]
950 jmp .Lsqr4x_sub_entry
953 my @ri=("%rax",$bp,$m0,$m1);
957 lea ($tp,$num),$tp # rewind $tp
959 lea ($np,$N[1],8),$np
960 mov 56+8(%rsp),$rp # restore $rp
990 .size mul4x_internal,.-mul4x_internal
994 ######################################################################
996 my $rptr="%rdi"; # BN_ULONG *rptr,
997 my $aptr="%rsi"; # const BN_ULONG *aptr,
998 my $bptr="%rdx"; # const void *table,
999 my $nptr="%rcx"; # const BN_ULONG *nptr,
1000 my $n0 ="%r8"; # const BN_ULONG *n0);
1001 my $num ="%r9"; # int num, has to be divisible by 8
1004 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1005 my @A0=("%r10","%r11");
1006 my @A1=("%r12","%r13");
1007 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1011 .type bn_power5,\@function,6
1015 $code.=<<___ if ($addx);
1016 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1018 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
1030 shl \$3,${num}d # convert $num to bytes
1031 lea ($num,$num,2),%r10d # 3*$num
1035 ##############################################################
1036 # Ensure that stack frame doesn't alias with $rptr+3*$num
1037 # modulo 4096, which covers ret[num], am[num] and n[num]
1038 # (see bn_exp.c). This is done to allow the memory disambiguation
1039 # logic to do its magic. [An extra 256 bytes is for the power mask
1040 # calculated from the 7th argument, the index.]
1042 lea -320(%rsp,$num,2),%r11
1047 sub %r11,%rsp # align with $aptr
1048 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1053 lea 4096-320(,$num,2),%r10
1054 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1064 ##############################################################
1067 # +0 saved $num, used in reduction section
1068 # +8 &t[2*$num], used in reduction section
1074 mov %rax, 40(%rsp) # save original %rsp
1076 movq $rptr,%xmm1 # save $rptr, used in sqr8x
1077 movq $nptr,%xmm2 # save $nptr
1078 movq %r10, %xmm3 # -$num, used in sqr8x
1081 call __bn_sqr8x_internal
1082 call __bn_post4x_internal
1083 call __bn_sqr8x_internal
1084 call __bn_post4x_internal
1085 call __bn_sqr8x_internal
1086 call __bn_post4x_internal
1087 call __bn_sqr8x_internal
1088 call __bn_post4x_internal
1089 call __bn_sqr8x_internal
1090 call __bn_post4x_internal
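# The five rounds above amount to (illustrative C, MontSqr being a
# hypothetical helper):
#	for (k = 0; k < 5; k++)
#		acc = MontSqr(acc);	/* acc = acc^(2^5) mod n */
# bn_power5 then multiplies by a power gathered from the table.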
1100 mov 40(%rsp),%rsi # restore %rsp
1111 .size bn_power5,.-bn_power5
1113 .globl bn_sqr8x_internal
1114 .hidden bn_sqr8x_internal
1115 .type bn_sqr8x_internal,\@abi-omnipotent
1118 __bn_sqr8x_internal:
1119 ##############################################################
1122 # a) multiply-n-add everything but a[i]*a[i];
1123 # b) shift result of a) by 1 to the left and accumulate
1124 # a[i]*a[i] products;
1126 ##############################################################
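#
# In formula form (illustrative), with B = 2^64:
#
#	a^2 = 2 * sum_{i<j} a[i]*a[j] * B^(i+j) + sum_i a[i]^2 * B^(2*i)
#
# step a) accumulates the cross products a[i]*a[j]; step b) doubles
# them with a one-bit left shift and folds in the diagonal a[i]*a[i].
#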
1192 lea 32(%r10),$i # $i=-($num-32)
1193 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1195 mov $num,$j # $j=$num
1197 # comments apply to $num==8 case
1198 mov -32($aptr,$i),$a0 # a[0]
1199 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1200 mov -24($aptr,$i),%rax # a[1]
1201 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1202 mov -16($aptr,$i),$ai # a[2]
1206 mov %rax,$A0[0] # a[1]*a[0]
1209 mov $A0[0],-24($tptr,$i) # t[1]
1215 mov $A0[1],-16($tptr,$i) # t[2]
1219 mov -8($aptr,$i),$ai # a[3]
1221 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1227 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1233 mov $A0[0],-8($tptr,$j) # t[3]
1238 mov ($aptr,$j),$ai # a[4]
1240 add %rax,$A1[1] # a[3]*a[1]+t[4]
1246 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1248 mov 8($aptr,$j),$ai # a[5]
1256 add %rax,$A1[0] # a[4]*a[3]+t[5]
1258 mov $A0[1],($tptr,$j) # t[4]
1263 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1265 mov 16($aptr,$j),$ai # a[6]
1272 add %rax,$A1[1] # a[5]*a[3]+t[6]
1274 mov $A0[0],8($tptr,$j) # t[5]
1279 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1281 mov 24($aptr,$j),$ai # a[7]
1289 add %rax,$A1[0] # a[6]*a[5]+t[7]
1291 mov $A0[1],16($tptr,$j) # t[6]
1297 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[7]
1303 mov $A0[0],-8($tptr,$j) # t[7]
1315 mov $A1[1],($tptr) # t[8]
1317 mov %rdx,8($tptr) # t[9]
1321 .Lsqr4x_outer: # comments apply to $num==6 case
1322 mov -32($aptr,$i),$a0 # a[0]
1323 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1324 mov -24($aptr,$i),%rax # a[1]
1325 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1326 mov -16($aptr,$i),$ai # a[2]
1330 mov -24($tptr,$i),$A0[0] # t[1]
1331 add %rax,$A0[0] # a[1]*a[0]+t[1]
1334 mov $A0[0],-24($tptr,$i) # t[1]
1341 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1344 mov $A0[1],-16($tptr,$i) # t[2]
1348 mov -8($aptr,$i),$ai # a[3]
1350 add %rax,$A1[0] # a[2]*a[1]+t[3]
1353 add -8($tptr,$i),$A1[0]
1358 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1364 mov $A0[0],-8($tptr,$i) # t[3]
1371 mov ($aptr,$j),$ai # a[4]
1373 add %rax,$A1[1] # a[3]*a[1]+t[4]
1377 add ($tptr,$j),$A1[1]
1382 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1384 mov 8($aptr,$j),$ai # a[5]
1391 add %rax,$A1[0] # a[4]*a[3]+t[5]
1392 mov $A0[1],($tptr,$j) # t[4]
1396 add 8($tptr,$j),$A1[0]
1401 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1407 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1419 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1421 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1426 # comments apply to $num==4 case
1427 mov -32($aptr),$a0 # a[0]
1428 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1429 mov -24($aptr),%rax # a[1]
1430 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1431 mov -16($aptr),$ai # a[2]
1435 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1443 mov $A0[0],-24($tptr) # t[1]
1446 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1447 mov -8($aptr),$ai # a[3]
1451 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1453 mov $A0[1],-16($tptr) # t[2]
1458 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1464 mov $A0[0],-8($tptr) # t[3]
1468 mov -16($aptr),%rax # a[2]
1473 mov $A1[1],($tptr) # t[4]
1475 mov %rdx,8($tptr) # t[5]
1480 my ($shift,$carry)=($a0,$a1);
1481 my @S=(@A1,$ai,$n0);
1485 sub $num,$i # $i=16-$num
1488 add $A1[0],%rax # t[5]
1490 mov %rax,8($tptr) # t[5]
1491 mov %rdx,16($tptr) # t[6]
1492 mov $carry,24($tptr) # t[7]
1494 mov -16($aptr,$i),%rax # a[0]
1495 lea 48+8(%rsp),$tptr
1496 xor $A0[0],$A0[0] # t[0]
1497 mov 8($tptr),$A0[1] # t[1]
1499 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1501 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1503 or $A0[0],$S[1] # | t[2*i]>>63
1504 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1505 mov $A0[1],$shift # shift=t[2*i+1]>>63
1506 mul %rax # a[i]*a[i]
1507 neg $carry # mov $carry,cf
1508 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1510 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1514 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1516 sbb $carry,$carry # mov cf,$carry
1518 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1520 or $A0[0],$S[3] # | t[2*i]>>63
1521 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1522 mov $A0[1],$shift # shift=t[2*i+1]>>63
1523 mul %rax # a[i]*a[i]
1524 neg $carry # mov $carry,cf
1525 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1527 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1532 sbb $carry,$carry # mov cf,$carry
1534 jmp .Lsqr4x_shift_n_add
1537 .Lsqr4x_shift_n_add:
1538 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1540 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1542 or $A0[0],$S[1] # | t[2*i]>>63
1543 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1544 mov $A0[1],$shift # shift=t[2*i+1]>>63
1545 mul %rax # a[i]*a[i]
1546 neg $carry # mov $carry,cf
1547 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1549 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1550 mov $S[0],-32($tptr)
1553 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1554 mov $S[1],-24($tptr)
1555 sbb $carry,$carry # mov cf,$carry
1557 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1559 or $A0[0],$S[3] # | t[2*i]>>63
1560 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1561 mov $A0[1],$shift # shift=t[2*i+1]>>63
1562 mul %rax # a[i]*a[i]
1563 neg $carry # mov $carry,cf
1564 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1566 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1567 mov $S[2],-16($tptr)
1570 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1572 sbb $carry,$carry # mov cf,$carry
1574 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1576 or $A0[0],$S[1] # | t[2*i]>>63
1577 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1578 mov $A0[1],$shift # shift=t[2*i+1]>>63
1579 mul %rax # a[i]*a[i]
1580 neg $carry # mov $carry,cf
1581 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1583 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1587 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1589 sbb $carry,$carry # mov cf,$carry
1591 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1593 or $A0[0],$S[3] # | t[2*i]>>63
1594 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1595 mov $A0[1],$shift # shift=t[2*i+1]>>63
1596 mul %rax # a[i]*a[i]
1597 neg $carry # mov $carry,cf
1598 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1600 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1604 sbb $carry,$carry # mov cf,$carry
1607 jnz .Lsqr4x_shift_n_add
1609 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1612 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1614 or $A0[0],$S[1] # | t[2*i]>>63
1615 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1616 mov $A0[1],$shift # shift=t[2*i+1]>>63
1617 mul %rax # a[i]*a[i]
1618 neg $carry # mov $carry,cf
1619 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1621 mov -8($aptr),%rax # a[i+1] # prefetch
1622 mov $S[0],-32($tptr)
1625 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1626 mov $S[1],-24($tptr)
1627 sbb $carry,$carry # mov cf,$carry
1629 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1631 or $A0[0],$S[3] # | t[2*i]>>63
1632 mul %rax # a[i]*a[i]
1633 neg $carry # mov $carry,cf
1636 mov $S[2],-16($tptr)
1640 ######################################################################
1641 # Montgomery reduction part, "word-by-word" algorithm.
1643 # This new path is inspired by multiple submissions from Intel, by
1644 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
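#
# An illustrative C sketch of the word-by-word reduction (mul_add is
# a hypothetical helper; the code below is an 8x-unrolled,
# modulo-scheduled rendition of this idea):
#
#	for (i = 0; i < num; i++) {
#		m = t[i] * n0;			/* mod 2^64 */
#		c = mul_add(t + i, n, m, num);	/* t += m*n << (64*i) */
#		t[i + num] += c;		/* propagate carry word */
#	}
#	/* result sits in t[num..2*num-1], pending final subtraction */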
1647 my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1651 __bn_sqr8x_reduction:
1653 lea ($nptr,$num),%rcx # end of n[]
1654 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1656 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1659 jmp .L8x_reduction_loop
1662 .L8x_reduction_loop:
1663 lea ($tptr,$num),$tptr # start of current t[] window
1673 mov %rax,(%rdx) # store top-most carry bit
1674 lea 8*8($tptr),$tptr
1678 imulq 32+8(%rsp),$m0 # n0*a[0]
1679 mov 8*0($nptr),%rax # n[0]
1686 mov 8*1($nptr),%rax # n[1]
1696 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1705 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1713 imulq %r8,$carry # modulo-scheduled
1743 mov $carry,$m0 # n0*a[i]
1745 mov 8*0($nptr),%rax # n[0]
1754 lea 8*8($nptr),$nptr
1756 mov 8+8(%rsp),%rdx # pull end of t[]
1757 cmp 0+8(%rsp),$nptr # end of n[]?
1769 sbb $carry,$carry # top carry
1771 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1781 mov %r8,($tptr) # save result
1790 lea 8($tptr),$tptr # $tptr++
1835 mov 48-16+8(%rsp,%rcx,8),$m0 # pull n0*a[i]
1839 mov 8*0($nptr),%rax # pull n[0]
1846 lea 8*8($nptr),$nptr
1847 mov 8+8(%rsp),%rdx # pull end of t[]
1848 cmp 0+8(%rsp),$nptr # end of n[]?
1849 jae .L8x_tail_done # break out of loop
1851 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1853 mov 8*0($nptr),%rax # pull n[0]
1862 sbb $carry,$carry # top carry
1869 add (%rdx),%r8 # can this overflow?
1876 adc \$0,%r15 # can't overflow, because we
1877 # started with "overhung" part
1891 adc \$0,%rax # top-most carry
1892 mov -8($nptr),%rcx # np[num-1]
1895 movq %xmm2,$nptr # restore $nptr
1897 mov %r8,8*0($tptr) # store top 512 bits
1899 movq %xmm3,$num # $num is %r9, can't be moved upwards
1906 lea 8*8($tptr),$tptr
1908 cmp %rdx,$tptr # end of t[]?
1909 jb .L8x_reduction_loop
1911 .size bn_sqr8x_internal,.-bn_sqr8x_internal
1914 ##############################################################
1915 # Post-condition, 4x unrolled
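#
# Conceptually (illustrative C, sub_words being a hypothetical
# helper): compute r = t - n and keep it only if no borrow occurred,
#
#	borrow = sub_words(r, t, n, num);
#	/* select between t and r with masks, no secret-dependent branch */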
1918 my ($tptr,$nptr)=("%rbx","%rbp");
1920 .type __bn_post4x_internal,\@abi-omnipotent
1922 __bn_post4x_internal:
1924 lea (%rdi,$num),$tptr # %rdi was $tptr above
1926 movq %xmm1,$rptr # restore $rptr
1928 movq %xmm1,$aptr # prepare for back-to-back call
1930 dec %r12 # so that after 'not' we get -n[0]
1935 jmp .Lsqr4x_sub_entry
1944 lea 8*4($nptr),$nptr
1954 neg %r10 # mov %r10,%cf
1960 lea 8*4($tptr),$tptr
1962 sbb %r10,%r10 # mov %cf,%r10
1965 lea 8*4($rptr),$rptr
1970 mov $num,%r10 # prepare for back-to-back call
1971 neg $num # restore $num
1973 .size __bn_post4x_internal,.-__bn_post4x_internal
1978 .globl bn_from_montgomery
1979 .type bn_from_montgomery,\@abi-omnipotent
1982 testl \$7,`($win64?"48(%rsp)":"%r9d")`
1986 .size bn_from_montgomery,.-bn_from_montgomery
1988 .type bn_from_mont8x,\@function,6
2000 shl \$3,${num}d # convert $num to bytes
2001 lea ($num,$num,2),%r10 # 3*$num in bytes
2005 ##############################################################
2006 # Ensure that stack frame doesn't alias with $rptr+3*$num
2007 # modulo 4096, which covers ret[num], am[num] and n[num]
2008 # (see bn_exp.c). The stack is allocated so as to align with
2009 # bn_power5's frame, and as bn_from_montgomery happens to be the
2010 # last operation, we use the opportunity to cleanse it.
2012 lea -320(%rsp,$num,2),%r11
2017 sub %r11,%rsp # align with $aptr
2018 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2023 lea 4096-320(,$num,2),%r10
2024 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2034 ##############################################################
2037 # +0 saved $num, used in reduction section
2038 # +8 &t[2*$num], used in reduction section
2044 mov %rax, 40(%rsp) # save original %rsp
2053 movdqu ($aptr),%xmm1
2054 movdqu 16($aptr),%xmm2
2055 movdqu 32($aptr),%xmm3
2056 movdqa %xmm0,(%rax,$num)
2057 movdqu 48($aptr),%xmm4
2058 movdqa %xmm0,16(%rax,$num)
2059 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2061 movdqa %xmm0,32(%rax,$num)
2062 movdqa %xmm2,16(%rax)
2063 movdqa %xmm0,48(%rax,$num)
2064 movdqa %xmm3,32(%rax)
2065 movdqa %xmm4,48(%rax)
2074 movq %r10, %xmm3 # -num
2076 $code.=<<___ if ($addx);
2077 mov OPENSSL_ia32cap_P+8(%rip),%r11d
2079 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
2082 lea (%rax,$num),$rptr
2083 call __bn_sqrx8x_reduction
2084 call __bn_postx4x_internal
2088 mov 40(%rsp),%rsi # restore %rsp
2089 jmp .Lfrom_mont_zero
2095 call __bn_sqr8x_reduction
2096 call __bn_post4x_internal
2100 mov 40(%rsp),%rsi # restore %rsp
2101 jmp .Lfrom_mont_zero
2105 movdqa %xmm0,16*0(%rax)
2106 movdqa %xmm0,16*1(%rax)
2107 movdqa %xmm0,16*2(%rax)
2108 movdqa %xmm0,16*3(%rax)
2111 jnz .Lfrom_mont_zero
2123 .size bn_from_mont8x,.-bn_from_mont8x
2129 my $bp="%rdx"; # restore original value
2132 .type bn_mulx4x_mont_gather5,\@function,6
2134 bn_mulx4x_mont_gather5:
2144 shl \$3,${num}d # convert $num to bytes
2145 lea ($num,$num,2),%r10 # 3*$num in bytes
2149 ##############################################################
2150 # Ensure that stack frame doesn't alias with $rptr+3*$num
2151 # modulo 4096, which covers ret[num], am[num] and n[num]
2152 # (see bn_exp.c). This is done to allow the memory disambiguation
2153 # logic to do its magic. [An extra [num] is allocated in order
2154 # to align with bn_power5's frame, which is cleansed after
2155 # completing exponentiation. An extra 256 bytes is for the power mask
2156 # calculated from the 7th argument, the index.]
2158 lea -320(%rsp,$num,2),%r11
2163 sub %r11,%rsp # align with $aptr
2164 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2168 lea 4096-320(,$num,2),%r10
2169 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2175 and \$-64,%rsp # ensure alignment
2176 ##############################################################
2179 # +8 off-loaded &b[i]
2188 mov $n0, 32(%rsp) # save *n0
2189 mov %rax,40(%rsp) # save original %rsp
2191 call mulx4x_internal
2193 mov 40(%rsp),%rsi # restore %rsp
2205 .size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2207 .type mulx4x_internal,\@abi-omnipotent
2210 mov $num,8(%rsp) # save -$num (it was in bytes)
2212 neg $num # restore $num
2214 neg %r10 # restore $num
2215 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
2217 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
2219 lea .Linc(%rip),%rax
2220 mov %r13,16+8(%rsp) # end of b[num]
2221 mov $num,24+8(%rsp) # inner counter
2222 mov $rp, 56+8(%rsp) # save $rp
2224 my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2225 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2227 my $STRIDE=2**5*8; # 5 is "window size"
2228 my $N=$STRIDE/4; # should match cache line size
2230 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2231 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2232 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization)
2233 lea 128($bp),$bptr # size optimization
2235 pshufd \$0,%xmm5,%xmm5 # broadcast index
2240 ########################################################################
2241 # calculate mask by comparing 0..31 to index and save result to stack
2246 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2249 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2252 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2253 movdqa %xmm0,`16*($i+0)+112`(%r10)
2257 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2258 movdqa %xmm1,`16*($i+1)+112`(%r10)
2262 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2263 movdqa %xmm2,`16*($i+2)+112`(%r10)
2268 movdqa %xmm3,`16*($i+3)+112`(%r10)
2272 $code.=<<___; # last iteration can be optimized
2276 movdqa %xmm0,`16*($i+0)+112`(%r10)
2280 movdqa %xmm1,`16*($i+1)+112`(%r10)
2283 movdqa %xmm2,`16*($i+2)+112`(%r10)
2285 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2286 pand `16*($i+1)-128`($bptr),%xmm1
2287 pand `16*($i+2)-128`($bptr),%xmm2
2288 movdqa %xmm3,`16*($i+3)+112`(%r10)
2289 pand `16*($i+3)-128`($bptr),%xmm3
2293 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2295 movdqa `16*($i+0)-128`($bptr),%xmm4
2296 movdqa `16*($i+1)-128`($bptr),%xmm5
2297 movdqa `16*($i+2)-128`($bptr),%xmm2
2298 pand `16*($i+0)+112`(%r10),%xmm4
2299 movdqa `16*($i+3)-128`($bptr),%xmm3
2300 pand `16*($i+1)+112`(%r10),%xmm5
2302 pand `16*($i+2)+112`(%r10),%xmm2
2304 pand `16*($i+3)+112`(%r10),%xmm3
2311 pshufd \$0x4e,%xmm0,%xmm1
2313 lea $STRIDE($bptr),$bptr
2314 movq %xmm0,%rdx # bp[0]
2315 lea 64+8*4+8(%rsp),$tptr
2318 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2319 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2321 mulx 2*8($aptr),%rax,%r13 # ...
2324 mulx 3*8($aptr),%rax,%r14
2327 imulq 32+8(%rsp),$mi # "t[0]"*n0
2328 xor $zero,$zero # cf=0, of=0
2331 mov $bptr,8+8(%rsp) # off-load &b[i]
2333 lea 4*8($aptr),$aptr
2335 adcx $zero,%r14 # cf=0
2337 mulx 0*8($nptr),%rax,%r10
2338 adcx %rax,%r15 # discarded
2340 mulx 1*8($nptr),%rax,%r11
2343 mulx 2*8($nptr),%rax,%r12
2344 mov 24+8(%rsp),$bptr # counter value
2345 mov %r10,-8*4($tptr)
2348 mulx 3*8($nptr),%rax,%r15
2350 mov %r11,-8*3($tptr)
2352 adox $zero,%r15 # of=0
2353 lea 4*8($nptr),$nptr
2354 mov %r12,-8*2($tptr)
2359 adcx $zero,%r15 # cf=0, modulo-scheduled
2360 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2362 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2364 mulx 2*8($aptr),%r12,%rax # ...
2366 mulx 3*8($aptr),%r13,%r14
2370 adcx $zero,%r14 # cf=0
2371 lea 4*8($aptr),$aptr
2372 lea 4*8($tptr),$tptr
2375 mulx 0*8($nptr),%rax,%r15
2378 mulx 1*8($nptr),%rax,%r15
2381 mulx 2*8($nptr),%rax,%r15
2382 mov %r10,-5*8($tptr)
2384 mov %r11,-4*8($tptr)
2386 mulx 3*8($nptr),%rax,%r15
2388 mov %r12,-3*8($tptr)
2391 lea 4*8($nptr),$nptr
2392 mov %r13,-2*8($tptr)
2394 dec $bptr # of=0, pass cf
2397 mov 8(%rsp),$num # load -num
2398 adc $zero,%r15 # modulo-scheduled
2399 lea ($aptr,$num),$aptr # rewind $aptr
2401 mov 8+8(%rsp),$bptr # re-load &b[i]
2402 adc $zero,$zero # top-most carry
2403 mov %r14,-1*8($tptr)
2408 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2413 for($i=0;$i<$STRIDE/16;$i+=4) {
2415 movdqa `16*($i+0)-128`($bptr),%xmm0
2416 movdqa `16*($i+1)-128`($bptr),%xmm1
2417 movdqa `16*($i+2)-128`($bptr),%xmm2
2418 pand `16*($i+0)+256`(%r10),%xmm0
2419 movdqa `16*($i+3)-128`($bptr),%xmm3
2420 pand `16*($i+1)+256`(%r10),%xmm1
2422 pand `16*($i+2)+256`(%r10),%xmm2
2424 pand `16*($i+3)+256`(%r10),%xmm3
2431 pshufd \$0x4e,%xmm4,%xmm0
2433 lea $STRIDE($bptr),$bptr
2434 movq %xmm0,%rdx # m0=bp[i]
2436 mov $zero,($tptr) # save top-most carry
2437 lea 4*8($tptr,$num),$tptr # rewind $tptr
2438 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2439 xor $zero,$zero # cf=0, of=0
2441 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2442 adox -4*8($tptr),$mi # +t[0]
2444 mulx 2*8($aptr),%r15,%r13 # ...
2445 adox -3*8($tptr),%r11
2447 mulx 3*8($aptr),%rdx,%r14
2448 adox -2*8($tptr),%r12
2450 lea ($nptr,$num),$nptr # rewind $nptr
2451 lea 4*8($aptr),$aptr
2452 adox -1*8($tptr),%r13
2457 imulq 32+8(%rsp),$mi # "t[0]"*n0
2460 xor $zero,$zero # cf=0, of=0
2461 mov $bptr,8+8(%rsp) # off-load &b[i]
2463 mulx 0*8($nptr),%rax,%r10
2464 adcx %rax,%r15 # discarded
2466 mulx 1*8($nptr),%rax,%r11
2469 mulx 2*8($nptr),%rax,%r12
2472 mulx 3*8($nptr),%rax,%r15
2474 mov 24+8(%rsp),$bptr # counter value
2475 mov %r10,-8*4($tptr)
2477 mov %r11,-8*3($tptr)
2478 adox $zero,%r15 # of=0
2479 mov %r12,-8*2($tptr)
2480 lea 4*8($nptr),$nptr
2485 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2486 adcx $zero,%r15 # cf=0, modulo-scheduled
2488 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2489 adcx 0*8($tptr),%r10
2491 mulx 2*8($aptr),%r12,%rax # ...
2492 adcx 1*8($tptr),%r11
2494 mulx 3*8($aptr),%r13,%r14
2496 adcx 2*8($tptr),%r12
2498 adcx 3*8($tptr),%r13
2499 adox $zero,%r14 # of=0
2500 lea 4*8($aptr),$aptr
2501 lea 4*8($tptr),$tptr
2502 adcx $zero,%r14 # cf=0
2505 mulx 0*8($nptr),%rax,%r15
2508 mulx 1*8($nptr),%rax,%r15
2511 mulx 2*8($nptr),%rax,%r15
2512 mov %r10,-5*8($tptr)
2515 mov %r11,-4*8($tptr)
2516 mulx 3*8($nptr),%rax,%r15
2518 lea 4*8($nptr),$nptr
2519 mov %r12,-3*8($tptr)
2522 mov %r13,-2*8($tptr)
2524 dec $bptr # of=0, pass cf
2527 mov 0+8(%rsp),$num # load -num
2528 adc $zero,%r15 # modulo-scheduled
2529 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2530 mov 8+8(%rsp),$bptr # re-load &b[i]
2533 lea ($aptr,$num),$aptr # rewind $aptr
2534 adc $zero,$zero # top-most carry
2535 mov %r14,-1*8($tptr)
2542 mov ($nptr,$num),%r12
2543 lea ($nptr,$num),%rbp # rewind $nptr
2545 lea ($tptr,$num),%rdi # rewind $tptr
2548 sub %r14,%r10 # compare top-most words
2552 sub %r8,%rax # %rax=-%r8
2553 mov 56+8(%rsp),%rdx # restore rp
2554 dec %r12 # so that after 'not' we get -n[0]
2559 jmp .Lsqrx4x_sub_entry # common post-condition
2560 .size mulx4x_internal,.-mulx4x_internal
2563 ######################################################################
2565 my $rptr="%rdi"; # BN_ULONG *rptr,
2566 my $aptr="%rsi"; # const BN_ULONG *aptr,
2567 my $bptr="%rdx"; # const void *table,
2568 my $nptr="%rcx"; # const BN_ULONG *nptr,
2569 my $n0 ="%r8"; # const BN_ULONG *n0);
2570 my $num ="%r9"; # int num, has to be divisible by 8
2573 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2574 my @A0=("%r10","%r11");
2575 my @A1=("%r12","%r13");
2576 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2579 .type bn_powerx5,\@function,6
2591 shl \$3,${num}d # convert $num to bytes
2592 lea ($num,$num,2),%r10 # 3*$num in bytes
2596 ##############################################################
2597 # Ensure that stack frame doesn't alias with $rptr+3*$num
2598 # modulo 4096, which covers ret[num], am[num] and n[num]
2599 # (see bn_exp.c). This is done to allow the memory disambiguation
2600 # logic to do its magic. [An extra 256 bytes is for the power mask
2601 # calculated from the 7th argument, the index.]
2603 lea -320(%rsp,$num,2),%r11
2608 sub %r11,%rsp # align with $aptr
2609 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2614 lea 4096-320(,$num,2),%r10
2615 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2625 ##############################################################
2628 # +0 saved $num, used in reduction section
2629 # +8 &t[2*$num], used in reduction section
2630 # +16 intermediate carry bit
2631 # +24 top-most carry bit, used in reduction section
2637 movq $rptr,%xmm1 # save $rptr
2638 movq $nptr,%xmm2 # save $nptr
2639 movq %r10, %xmm3 # -$num
2642 mov %rax, 40(%rsp) # save original %rsp
2645 call __bn_sqrx8x_internal
2646 call __bn_postx4x_internal
2647 call __bn_sqrx8x_internal
2648 call __bn_postx4x_internal
2649 call __bn_sqrx8x_internal
2650 call __bn_postx4x_internal
2651 call __bn_sqrx8x_internal
2652 call __bn_postx4x_internal
2653 call __bn_sqrx8x_internal
2654 call __bn_postx4x_internal
2656 mov %r10,$num # -num
2662 call mulx4x_internal
2664 mov 40(%rsp),%rsi # restore %rsp
2676 .size bn_powerx5,.-bn_powerx5
2678 .globl bn_sqrx8x_internal
2679 .hidden bn_sqrx8x_internal
2680 .type bn_sqrx8x_internal,\@abi-omnipotent
2683 __bn_sqrx8x_internal:
2684 ##################################################################
2687 # a) multiply-n-add everything but a[i]*a[i];
2688 # b) shift result of a) by 1 to the left and accumulate
2689 # a[i]*a[i] products;
2691 ##################################################################
2692 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2723 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2726 my ($zero,$carry)=("%rbp","%rcx");
2729 lea 48+8(%rsp),$tptr
2730 lea ($aptr,$num),$aaptr
2731 mov $num,0+8(%rsp) # save $num
2732 mov $aaptr,8+8(%rsp) # save end of $aptr
2733 jmp .Lsqr8x_zero_start
2736 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2739 movdqa %xmm0,0*8($tptr)
2740 movdqa %xmm0,2*8($tptr)
2741 movdqa %xmm0,4*8($tptr)
2742 movdqa %xmm0,6*8($tptr)
2743 .Lsqr8x_zero_start: # aligned at 32
2744 movdqa %xmm0,8*8($tptr)
2745 movdqa %xmm0,10*8($tptr)
2746 movdqa %xmm0,12*8($tptr)
2747 movdqa %xmm0,14*8($tptr)
2748 lea 16*8($tptr),$tptr
2752 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2753 #xor %r9,%r9 # t[1], ex-$num, zero already
2760 lea 48+8(%rsp),$tptr
2761 xor $zero,$zero # cf=0, of=0
2762 jmp .Lsqrx8x_outer_loop
2765 .Lsqrx8x_outer_loop:
2766 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2767 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2769 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2772 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2775 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2778 mulx 5*8($aptr),%r12,%rax
2781 mulx 6*8($aptr),%r13,%rax
2784 mulx 7*8($aptr),%r14,%r15
2785 mov 1*8($aptr),%rdx # a[1]
2789 mov %r8,1*8($tptr) # t[1]
2790 mov %r9,2*8($tptr) # t[2]
2791 sbb $carry,$carry # mov %cf,$carry
2792 xor $zero,$zero # cf=0, of=0
2795 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2796 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2799 mulx 4*8($aptr),%r10,%rbx # ...
2802 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2805 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2808 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2809 mov 2*8($aptr),%rdx # a[2]
2813 adox $zero,%r14 # of=0
2814 adcx $zero,%r14 # cf=0
2816 mov %r8,3*8($tptr) # t[3]
2817 mov %r9,4*8($tptr) # t[4]
2819 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2820 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2823 mulx 5*8($aptr),%r10,%rbx # ...
2826 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2829 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2831 mov 3*8($aptr),%rdx # a[3]
2835 mov %r8,5*8($tptr) # t[5]
2836 mov %r9,6*8($tptr) # t[6]
2837 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2838 adox $zero,%r13 # of=0
2839 adcx $zero,%r13 # cf=0
2841 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2844 mulx 6*8($aptr),%r10,%rax # ...
2847 mulx 7*8($aptr),%r11,%r12
2848 mov 4*8($aptr),%rdx # a[4]
2849 mov 5*8($aptr),%r14 # a[5]
2852 mov 6*8($aptr),%r15 # a[6]
2854 adox $zero,%r12 # of=0
2855 adcx $zero,%r12 # cf=0
2857 mov %r8,7*8($tptr) # t[7]
2858 mov %r9,8*8($tptr) # t[8]
2860 mulx %r14,%r9,%rax # a[5]*a[4]
2861 mov 7*8($aptr),%r8 # a[7]
2863 mulx %r15,%r10,%rbx # a[6]*a[4]
2866 mulx %r8,%r11,%rax # a[7]*a[4]
2867 mov %r14,%rdx # a[5]
2870 #adox $zero,%rax # of=0
2871 adcx $zero,%rax # cf=0
2873 mulx %r15,%r14,%rbx # a[6]*a[5]
2874 mulx %r8,%r12,%r13 # a[7]*a[5]
2875 mov %r15,%rdx # a[6]
2876 lea 8*8($aptr),$aptr
2883 mulx %r8,%r8,%r14 # a[7]*a[6]
2888 je .Lsqrx8x_outer_break
2890 neg $carry # mov $carry,%cf
2894 adcx 9*8($tptr),%r9 # +=t[9]
2895 adcx 10*8($tptr),%r10 # ...
2896 adcx 11*8($tptr),%r11
2897 adc 12*8($tptr),%r12
2898 adc 13*8($tptr),%r13
2899 adc 14*8($tptr),%r14
2900 adc 15*8($tptr),%r15
2902 lea 2*64($tptr),$tptr
2903 sbb %rax,%rax # mov %cf,$carry
2905 mov -64($aptr),%rdx # a[0]
2906 mov %rax,16+8(%rsp) # offload $carry
2907 mov $tptr,24+8(%rsp)
2909 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2910 xor %eax,%eax # cf=0, of=0
2916 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2917 adcx %rax,%rbx # +=t[8]
2920 mulx 1*8($aaptr),%rax,%r9 # ...
2924 mulx 2*8($aaptr),%rax,%r10
2928 mulx 3*8($aaptr),%rax,%r11
2932 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2936 mulx 5*8($aaptr),%rax,%r13
2940 mulx 6*8($aaptr),%rax,%r14
2941 mov %rbx,($tptr,%rcx,8) # store t[8+i]
2946 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
2947 mov 8($aptr,%rcx,8),%rdx # a[i]
2949 adox %rbx,%r15 # %rbx is 0, of=0
2950 adcx %rbx,%r15 # cf=0
2956 lea 8*8($aaptr),$aaptr
2958 cmp 8+8(%rsp),$aaptr # done?
2961 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
2972 lea 8*8($tptr),$tptr
2974 sbb %rax,%rax # mov %cf,%rax
2975 xor %ebx,%ebx # cf=0, of=0
2976 mov %rax,16+8(%rsp) # offload carry
2981 sub 16+8(%rsp),%r8 # consume last carry
2982 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
2983 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
2984 xor %ebp,%ebp # xor $zero,$zero
2986 cmp $carry,$tptr # cf=0, of=0
2987 je .Lsqrx8x_outer_loop
2992 mov 2*8($carry),%r10
2994 mov 3*8($carry),%r11
2996 mov 4*8($carry),%r12
2998 mov 5*8($carry),%r13
3000 mov 6*8($carry),%r14
3002 mov 7*8($carry),%r15
3004 jmp .Lsqrx8x_outer_loop
3007 .Lsqrx8x_outer_break:
3008 mov %r9,9*8($tptr) # t[9]
3009 movq %xmm3,%rcx # -$num
3010 mov %r10,10*8($tptr) # ...
3011 mov %r11,11*8($tptr)
3012 mov %r12,12*8($tptr)
3013 mov %r13,13*8($tptr)
3014 mov %r14,14*8($tptr)
3019 lea 48+8(%rsp),$tptr
3020 mov ($aptr,$i),%rdx # a[0]
3022 mov 8($tptr),$A0[1] # t[1]
3023 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3024 mov 0+8(%rsp),$num # restore $num
3026 mov 16($tptr),$A1[0] # t[2] # prefetch
3027 mov 24($tptr),$A1[1] # t[3] # prefetch
3028 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3031 .Lsqrx4x_shift_n_add:
3035 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3036 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3039 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3046 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3047 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3050 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3057 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3059 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3062 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3069 jrcxz .Lsqrx4x_shift_n_add_break
3070 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3073 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3074 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3079 jmp .Lsqrx4x_shift_n_add
3082 .Lsqrx4x_shift_n_add_break:
3086 lea 64($tptr),$tptr # end of t[] buffer
3089 ######################################################################
3090 # Montgomery reduction part, "word-by-word" algorithm.
3092 # This new path is inspired by multiple submissions from Intel, by
3093 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3096 my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3100 __bn_sqrx8x_reduction:
3101 xor %eax,%eax # initial top-most carry bit
3102 mov 32+8(%rsp),%rbx # n0
3103 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
3104 lea -8*8($nptr,$num),%rcx # end of n[]
3105 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3106 mov %rcx, 0+8(%rsp) # save end of n[]
3107 mov $tptr,8+8(%rsp) # save end of t[]
3109 lea 48+8(%rsp),$tptr # initial t[] window
3110 jmp .Lsqrx8x_reduction_loop
3113 .Lsqrx8x_reduction_loop:
3119 imulq %rbx,%rdx # n0*a[i]
3123 mov %rax,24+8(%rsp) # store top-most carry bit
3125 lea 8*8($tptr),$tptr
3126 xor $carry,$carry # cf=0,of=0
3133 mulx 8*0($nptr),%rax,%r8 # n[0]
3134 adcx %rbx,%rax # discarded
3137 mulx 8*1($nptr),%rbx,%r9 # n[1]
3141 mulx 8*2($nptr),%rbx,%r10
3145 mulx 8*3($nptr),%rbx,%r11
3149 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
3155 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3157 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3159 mulx 8*5($nptr),%rax,%r13
3163 mulx 8*6($nptr),%rax,%r14
3167 mulx 8*7($nptr),%rax,%r15
3170 adox $carry,%r15 # $carry is 0
3171 adcx $carry,%r15 # cf=0
3173 .byte 0x67,0x67,0x67
3177 mov $carry,%rax # xor %rax,%rax
3178 cmp 0+8(%rsp),$nptr # end of n[]?
3179 jae .Lsqrx8x_no_tail
3181 mov 48+8(%rsp),%rdx # pull n0*a[0]
3183 lea 8*8($nptr),$nptr
3186 adcx 8*2($tptr),%r10
3192 lea 8*8($tptr),$tptr
3193 sbb %rax,%rax # top carry
3195 xor $carry,$carry # of=0, cf=0
3202 mulx 8*0($nptr),%rax,%r8
3206 mulx 8*1($nptr),%rax,%r9
3210 mulx 8*2($nptr),%rax,%r10
3214 mulx 8*3($nptr),%rax,%r11
3218 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
3222 mulx 8*5($nptr),%rax,%r13
3226 mulx 8*6($nptr),%rax,%r14
3230 mulx 8*7($nptr),%rax,%r15
3231 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3234 mov %rbx,($tptr,%rcx,8) # save result
3236 adcx $carry,%r15 # cf=0
3241 cmp 0+8(%rsp),$nptr # end of n[]?
3242 jae .Lsqrx8x_tail_done # break out of loop
3244 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3245 mov 48+8(%rsp),%rdx # pull n0*a[0]
3246 lea 8*8($nptr),$nptr
3255 lea 8*8($tptr),$tptr
3257 sub \$8,%rcx # mov \$-8,%rcx
3259 xor $carry,$carry # of=0, cf=0
3265 add 24+8(%rsp),%r8 # can this overflow?
3272 adc \$0,%r15 # can't overflow, because we
3273 # started with "overhung" part
3275 mov $carry,%rax # xor %rax,%rax
3277 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3278 .Lsqrx8x_no_tail: # %cf is 0 if jumped here
3282 mov 8*7($nptr),$carry
3283 movq %xmm2,$nptr # restore $nptr
3290 adc %rax,%rax # top-most carry
3292 mov 32+8(%rsp),%rbx # n0
3293 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3295 mov %r8,8*0($tptr) # store top 512 bits
3296 lea 8*8($tptr),%r8 # borrow %r8
3305 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3306 cmp 8+8(%rsp),%r8 # end of t[]?
3307 jb .Lsqrx8x_reduction_loop
3309 .size bn_sqrx8x_internal,.-bn_sqrx8x_internal
3312 ##############################################################
3313 # Post-condition, 4x unrolled
3316 my ($rptr,$nptr)=("%rdx","%rbp");
3319 __bn_postx4x_internal:
3321 mov %rcx,%r10 # -$num
3322 mov %rcx,%r9 # -$num
3325 #lea 48+8(%rsp,%r9),$tptr
3326 movq %xmm1,$rptr # restore $rptr
3327 movq %xmm1,$aptr # prepare for back-to-back call
3328 dec %r12 # so that after 'not' we get -n[0]
3333 jmp .Lsqrx4x_sub_entry
3343 lea 8*4($nptr),$nptr
3348 neg %r8 # mov %r8,%cf
3354 lea 8*4($tptr),$tptr
3356 sbb %r8,%r8 # mov %cf,%r8
3359 lea 8*4($rptr),$rptr
3364 neg %r9 # restore $num
3367 .size __bn_postx4x_internal,.-__bn_postx4x_internal
3372 my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3373 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3380 .type bn_get_bits5,\@abi-omnipotent
3392 movzw (%r10,$num,2),%eax
3396 .size bn_get_bits5,.-bn_get_bits5
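#
# bn_scatter5 stores inp[] into the powers table in the layout that
# the gather code expects; illustratively (in C terms):
#
#	for (j = 0; j < num; j++)
#		tbl[(j << 5) + idx] = inp[j];
#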
3399 .type bn_scatter5,\@abi-omnipotent
3403 jz .Lscatter_epilogue
3404 lea ($tbl,$idx,8),$tbl
3414 .size bn_scatter5,.-bn_scatter5
3417 .type bn_gather5,\@abi-omnipotent
3420 .LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
3421 # I can't trust assembler to use specific encoding:-(
3422 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3423 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3424 lea .Linc(%rip),%rax
3425 and \$-16,%rsp # shouldn't be formally required
3428 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3429 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3430 lea 128($tbl),%r11 # size optimization
3431 lea 128(%rsp),%rax # size optimization
3433 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3437 ########################################################################
3438 # calculate mask by comparing 0..31 to $idx and save result to stack
3440 for($i=0;$i<$STRIDE/16;$i+=4) {
3443 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3445 $code.=<<___ if ($i);
3446 movdqa %xmm3,`16*($i-1)-128`(%rax)
3452 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3453 movdqa %xmm0,`16*($i+0)-128`(%rax)
3457 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3458 movdqa %xmm1,`16*($i+1)-128`(%rax)
3462 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3463 movdqa %xmm2,`16*($i+2)-128`(%rax)
3468 movdqa %xmm3,`16*($i-1)-128`(%rax)
3476 for($i=0;$i<$STRIDE/16;$i+=4) {
3478 movdqa `16*($i+0)-128`(%r11),%xmm0
3479 movdqa `16*($i+1)-128`(%r11),%xmm1
3480 movdqa `16*($i+2)-128`(%r11),%xmm2
3481 pand `16*($i+0)-128`(%rax),%xmm0
3482 movdqa `16*($i+3)-128`(%r11),%xmm3
3483 pand `16*($i+1)-128`(%rax),%xmm1
3485 pand `16*($i+2)-128`(%rax),%xmm2
3487 pand `16*($i+3)-128`(%rax),%xmm3
3494 lea $STRIDE(%r11),%r11
3495 pshufd \$0x4e,%xmm4,%xmm0
3497 movq %xmm0,($out) # m0=bp[0]
3504 .LSEH_end_bn_gather5:
3505 .size bn_gather5,.-bn_gather5
3513 .asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3516 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3517 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3525 .extern __imp_RtlVirtualUnwind
3526 .type mul_handler,\@abi-omnipotent
3540 mov 120($context),%rax # pull context->Rax
3541 mov 248($context),%rbx # pull context->Rip
3543 mov 8($disp),%rsi # disp->ImageBase
3544 mov 56($disp),%r11 # disp->HandlerData
3546 mov 0(%r11),%r10d # HandlerData[0]
3547 lea (%rsi,%r10),%r10 # end of prologue label
3548 cmp %r10,%rbx # context->Rip<end of prologue label
3549 jb .Lcommon_seh_tail
3551 mov 152($context),%rax # pull context->Rsp
3553 mov 4(%r11),%r10d # HandlerData[1]
3554 lea (%rsi,%r10),%r10 # epilogue label
3555 cmp %r10,%rbx # context->Rip>=epilogue label
3556 jae .Lcommon_seh_tail
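	# in pseudo-C (illustrative): unwind only while the frame is live,
	#	if (Rip < prologue_end || Rip >= epilogue)
	#		goto common_seh_tail;	/* nothing to restore */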
3558 lea .Lmul_epilogue(%rip),%r10
3562 mov 192($context),%r10 # pull $num
3563 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
3568 mov 40(%rax),%rax # pull saved stack pointer
3576 mov %rbx,144($context) # restore context->Rbx
3577 mov %rbp,160($context) # restore context->Rbp
3578 mov %r12,216($context) # restore context->R12
3579 mov %r13,224($context) # restore context->R13
3580 mov %r14,232($context) # restore context->R14
3581 mov %r15,240($context) # restore context->R15
3586 mov %rax,152($context) # restore context->Rsp
3587 mov %rsi,168($context) # restore context->Rsi
3588 mov %rdi,176($context) # restore context->Rdi
3590 mov 40($disp),%rdi # disp->ContextRecord
3591 mov $context,%rsi # context
3592 mov \$154,%ecx # sizeof(CONTEXT)
3593 .long 0xa548f3fc # cld; rep movsq
3596 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3597 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3598 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3599 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3600 mov 40(%rsi),%r10 # disp->ContextRecord
3601 lea 56(%rsi),%r11 # &disp->HandlerData
3602 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3603 mov %r10,32(%rsp) # arg5
3604 mov %r11,40(%rsp) # arg6
3605 mov %r12,48(%rsp) # arg7
3606 mov %rcx,56(%rsp) # arg8, (NULL)
3607 call *__imp_RtlVirtualUnwind(%rip)
3609 mov \$1,%eax # ExceptionContinueSearch
3621 .size mul_handler,.-mul_handler
3625 .rva .LSEH_begin_bn_mul_mont_gather5
3626 .rva .LSEH_end_bn_mul_mont_gather5
3627 .rva .LSEH_info_bn_mul_mont_gather5
3629 .rva .LSEH_begin_bn_mul4x_mont_gather5
3630 .rva .LSEH_end_bn_mul4x_mont_gather5
3631 .rva .LSEH_info_bn_mul4x_mont_gather5
3633 .rva .LSEH_begin_bn_power5
3634 .rva .LSEH_end_bn_power5
3635 .rva .LSEH_info_bn_power5
3637 .rva .LSEH_begin_bn_from_mont8x
3638 .rva .LSEH_end_bn_from_mont8x
3639 .rva .LSEH_info_bn_from_mont8x
3641 $code.=<<___ if ($addx);
3642 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3643 .rva .LSEH_end_bn_mulx4x_mont_gather5
3644 .rva .LSEH_info_bn_mulx4x_mont_gather5
3646 .rva .LSEH_begin_bn_powerx5
3647 .rva .LSEH_end_bn_powerx5
3648 .rva .LSEH_info_bn_powerx5
3651 .rva .LSEH_begin_bn_gather5
3652 .rva .LSEH_end_bn_gather5
3653 .rva .LSEH_info_bn_gather5
3657 .LSEH_info_bn_mul_mont_gather5:
3660 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3662 .LSEH_info_bn_mul4x_mont_gather5:
3665 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3667 .LSEH_info_bn_power5:
3670 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3672 .LSEH_info_bn_from_mont8x:
3675 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3677 $code.=<<___ if ($addx);
3679 .LSEH_info_bn_mulx4x_mont_gather5:
3682 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3684 .LSEH_info_bn_powerx5:
3687 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3691 .LSEH_info_bn_gather5:
3692 .byte 0x01,0x0b,0x03,0x0a
3693 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3694 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
3699 $code =~ s/\`([^\`]*)\`/eval($1)/gem;