3 ##############################################################################
5 # Copyright 2014 Intel Corporation #
7 # Licensed under the Apache License, Version 2.0 (the "License"); #
8 # you may not use this file except in compliance with the License. #
9 # You may obtain a copy of the License at #
11 # http://www.apache.org/licenses/LICENSE-2.0 #
13 # Unless required by applicable law or agreed to in writing, software #
14 # distributed under the License is distributed on an "AS IS" BASIS, #
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
16 # See the License for the specific language governing permissions and #
17 # limitations under the License. #
19 ##############################################################################
21 # Developers and authors: #
22 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
23 # (1) Intel Corporation, Israel Development Center #
24 # (2) University of Haifa #
26 # S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with 256 Bit Primes" #
29 ##############################################################################
31 # Further optimization by <appro@openssl.org>:
45 # Ranges denote minimum and maximum improvement coefficients depending
46 # on benchmark. Lower coefficients are for ECDSA sign, relatively
47 # fastest server-side operation.
51 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
53 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
55 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
56 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
57 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
58 die "can't locate x86_64-xlate.pl";
60 open OUT,"| \"$^X\" $xlate $flavour $output";
63 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
64 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
65 $avx = ($1>=2.19) + ($1>=2.22);
69 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
70 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
71 $avx = ($1>=2.09) + ($1>=2.10);
75 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
76 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
77 $avx = ($1>=10) + ($1>=11);
81 if (!$addx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
82 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
83 $avx = ($ver>=3.0) + ($ver>=3.01);
89 .extern OPENSSL_ia32cap_P
94 .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
96 # 2^512 mod P precomputed for NIST P256 polynomial
98 .quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
101 .long 1,1,1,1,1,1,1,1
103 .long 2,2,2,2,2,2,2,2
105 .long 3,3,3,3,3,3,3,3
107 .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
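# The constant just above is 1 in Montgomery representation, i.e.
# 2^256 mod p == 2^256 - p; together with .LRR = 2^512 mod p it lets
# ecp_nistz256_to_mont map an ordinary residue a to a*2^256 mod p with a
# single Montgomery multiplication.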
111 ################################################################################
112 # void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
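# For reference, a plain C sketch of the same math (not the code generated
# below; helper and variable names are illustrative only): double the 256-bit
# value, then do a branch-less conditional subtraction of p driven by the
# carry/borrow, exactly as the assembly does with sbb/cmov.
#
#   #include <stdint.h>
#
#   static const uint64_t P[4] = { 0xffffffffffffffffULL, 0x00000000ffffffffULL,
#                                  0x0000000000000000ULL, 0xffffffff00000001ULL };
#
#   void ref_mul_by_2(uint64_t res[4], const uint64_t a[4])
#   {
#       uint64_t s[4], t[4], carry = 0, borrow = 0;
#       for (int i = 0; i < 4; i++) {            /* s = 2*a, carry out of bit 255 */
#           uint64_t w = (a[i] << 1) | carry;
#           carry = a[i] >> 63;
#           s[i] = w;
#       }
#       for (int i = 0; i < 4; i++) {            /* t = s - p, borrow out */
#           uint64_t lo = s[i] - borrow;
#           uint64_t b1 = s[i] < borrow;
#           t[i] = lo - P[i];
#           borrow = b1 | (lo < P[i]);
#       }
#       /* keep s - p whenever carry:s >= p, i.e. carry set or no borrow */
#       uint64_t mask = 0 - (carry | (borrow ^ 1));
#       for (int i = 0; i < 4; i++)
#           res[i] = (t[i] & mask) | (s[i] & ~mask);
#   }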
114 my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
115 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
116 my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");
120 .globl ecp_nistz256_mul_by_2
121 .type ecp_nistz256_mul_by_2,\@function,2
123 ecp_nistz256_mul_by_2:
129 add $a0, $a0 # a0:a3+a0:a3
133 lea .Lpoly(%rip), $a_ptr
160 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
162 ################################################################################
163 # void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
164 .globl ecp_nistz256_div_by_2
165 .type ecp_nistz256_div_by_2,\@function,2
167 ecp_nistz256_div_by_2:
176 lea .Lpoly(%rip), $a_ptr
187 xor $a_ptr, $a_ptr # borrow $a_ptr
196 mov $a1, $t0 # a0:a3>>1
220 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
222 ################################################################################
223 # void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
224 .globl ecp_nistz256_mul_by_3
225 .type ecp_nistz256_mul_by_3,\@function,2
227 ecp_nistz256_mul_by_3:
234 add $a0, $a0 # a0:a3+a0:a3
246 sbb .Lpoly+8*1(%rip), $a1
249 sbb .Lpoly+8*3(%rip), $a3
258 add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
268 sbb .Lpoly+8*1(%rip), $a1
271 sbb .Lpoly+8*3(%rip), $a3
286 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
288 ################################################################################
289 # void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
290 .globl ecp_nistz256_add
291 .type ecp_nistz256_add,\@function,3
302 lea .Lpoly(%rip), $a_ptr
332 .size ecp_nistz256_add,.-ecp_nistz256_add
334 ################################################################################
335 # void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
336 .globl ecp_nistz256_sub
337 .type ecp_nistz256_sub,\@function,3
348 lea .Lpoly(%rip), $a_ptr
378 .size ecp_nistz256_sub,.-ecp_nistz256_sub
380 ################################################################################
381 # void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
382 .globl ecp_nistz256_neg
383 .type ecp_nistz256_neg,\@function,2
400 lea .Lpoly(%rip), $a_ptr
424 .size ecp_nistz256_neg,.-ecp_nistz256_neg
428 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
429 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
430 my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
431 my ($poly1,$poly3)=($acc6,$acc7);
434 ################################################################################
435 # void ecp_nistz256_to_mont(
438 .globl ecp_nistz256_to_mont
439 .type ecp_nistz256_to_mont,\@function,2
441 ecp_nistz256_to_mont:
443 $code.=<<___ if ($addx);
445 and OPENSSL_ia32cap_P+8(%rip), %ecx
448 lea .LRR(%rip), $b_org
450 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
452 ################################################################################
453 # void ecp_nistz256_mul_mont(
458 .globl ecp_nistz256_mul_mont
459 .type ecp_nistz256_mul_mont,\@function,3
461 ecp_nistz256_mul_mont:
463 $code.=<<___ if ($addx);
465 and OPENSSL_ia32cap_P+8(%rip), %ecx
476 $code.=<<___ if ($addx);
482 mov 8*0($b_org), %rax
483 mov 8*0($a_ptr), $acc1
484 mov 8*1($a_ptr), $acc2
485 mov 8*2($a_ptr), $acc3
486 mov 8*3($a_ptr), $acc4
488 call __ecp_nistz256_mul_montq
490 $code.=<<___ if ($addx);
496 mov 8*0($b_org), %rdx
497 mov 8*0($a_ptr), $acc1
498 mov 8*1($a_ptr), $acc2
499 mov 8*2($a_ptr), $acc3
500 mov 8*3($a_ptr), $acc4
501 lea -128($a_ptr), $a_ptr # control u-op density
503 call __ecp_nistz256_mul_montx
514 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
516 .type __ecp_nistz256_mul_montq,\@abi-omnipotent
518 __ecp_nistz256_mul_montq:
519 ########################################################################
523 mov .Lpoly+8*1(%rip),$poly1
529 mov .Lpoly+8*3(%rip),$poly3
548 ########################################################################
549 # First reduction step
550 # Basically now we want to multiply acc[0] by p256,
551 # and add the result to the acc.
552 # Due to the special form of p256 we can take a shortcut:
554 #   acc[0] * p256[0..1] = acc[0] * (2^96 - 1) = acc[0] * 2^96 - acc[0],
555 # so after adding acc[0] back in we are left with just acc[0] * 2^96.
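#
# For reference, a straightforward C rendering of one such reduction step,
# without the shift trick used below (the helper name and the widened 9-limb
# accumulator are illustrative only, a sketch rather than the generated code):
#
#   /* add acc[i]*p at limb position i; acc[i] becomes zero because
#    * -p^(-1) mod 2^64 == 1 for this prime, so the Montgomery multiplier
#    * is simply acc[i] itself */
#   static void p256_reduce_step(uint64_t acc[9], int i)
#   {
#       static const uint64_t P[4] = { 0xffffffffffffffffULL, 0x00000000ffffffffULL,
#                                      0x0000000000000000ULL, 0xffffffff00000001ULL };
#       uint64_t m = acc[i];
#       unsigned __int128 c = 0;
#       for (int j = 0; j < 4; j++) {
#           c += (unsigned __int128)m * P[j] + acc[i + j];
#           acc[i + j] = (uint64_t)c;
#           c >>= 64;
#       }
#       for (int j = i + 4; j < 9 && c != 0; j++) {   /* propagate the carry */
#           c += acc[j];
#           acc[j] = (uint64_t)c;
#           c >>= 64;
#       }
#   }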
561 add $acc0, $acc1 # +=acc[0]<<96
564 mov 8*1($b_ptr), %rax
569 ########################################################################
602 ########################################################################
603 # Second reduction step
611 mov 8*2($b_ptr), %rax
616 ########################################################################
649 ########################################################################
650 # Third reduction step
658 mov 8*3($b_ptr), %rax
663 ########################################################################
696 ########################################################################
697 # Final reduction step
710 ########################################################################
711 # Branch-less conditional subtraction of P
712 sub \$-1, $acc4 # .Lpoly[0]
714 sbb $poly1, $acc5 # .Lpoly[1]
715 sbb \$0, $acc0 # .Lpoly[2]
717 sbb $poly3, $acc1 # .Lpoly[3]
722 mov $acc4, 8*0($r_ptr)
724 mov $acc5, 8*1($r_ptr)
726 mov $acc0, 8*2($r_ptr)
727 mov $acc1, 8*3($r_ptr)
730 .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
732 ################################################################################
733 # void ecp_nistz256_sqr_mont(
737 # we optimize the square according to S.Gueron and V.Krasnov,
738 # "Speeding up Big-Number Squaring"
739 .globl ecp_nistz256_sqr_mont
740 .type ecp_nistz256_sqr_mont,\@function,2
742 ecp_nistz256_sqr_mont:
744 $code.=<<___ if ($addx);
746 and OPENSSL_ia32cap_P+8(%rip), %ecx
756 $code.=<<___ if ($addx);
761 mov 8*0($a_ptr), %rax
762 mov 8*1($a_ptr), $acc6
763 mov 8*2($a_ptr), $acc7
764 mov 8*3($a_ptr), $acc0
766 call __ecp_nistz256_sqr_montq
768 $code.=<<___ if ($addx);
773 mov 8*0($a_ptr), %rdx
774 mov 8*1($a_ptr), $acc6
775 mov 8*2($a_ptr), $acc7
776 mov 8*3($a_ptr), $acc0
777 lea -128($a_ptr), $a_ptr # control u-op density
779 call __ecp_nistz256_sqr_montx
790 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
792 .type __ecp_nistz256_sqr_montq,\@abi-omnipotent
794 __ecp_nistz256_sqr_montq:
796 mulq $acc6 # a[1]*a[0]
801 mulq $acc5 # a[0]*a[2]
807 mulq $acc5 # a[0]*a[3]
813 #################################
814 mulq $acc6 # a[1]*a[2]
820 mulq $acc6 # a[1]*a[3]
828 #################################
829 mulq $acc7 # a[2]*a[3]
832 mov 8*0($a_ptr), %rax
836 add $acc1, $acc1 # acc1:6<<1
846 mov 8*1($a_ptr), %rax
852 mov 8*2($a_ptr), %rax
859 mov 8*3($a_ptr), %rax
869 mov .Lpoly+8*1(%rip), $a_ptr
870 mov .Lpoly+8*3(%rip), $t1
872 ##########################################
879 add $acc0, $acc1 # +=acc[0]<<96
885 ##########################################
898 ##########################################
911 ###########################################
924 ############################################
925 # Add the rest of the acc
934 sub \$-1, $acc4 # .Lpoly[0]
936 sbb $a_ptr, $acc5 # .Lpoly[1]
937 sbb \$0, $acc6 # .Lpoly[2]
939 sbb $t1, $acc7 # .Lpoly[3]
944 mov $acc4, 8*0($r_ptr)
946 mov $acc5, 8*1($r_ptr)
948 mov $acc6, 8*2($r_ptr)
949 mov $acc7, 8*3($r_ptr)
952 .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
957 .type __ecp_nistz256_mul_montx,\@abi-omnipotent
959 __ecp_nistz256_mul_montx:
960 ########################################################################
962 mulx $acc1, $acc0, $acc1
963 mulx $acc2, $t0, $acc2
965 xor $acc5, $acc5 # cf=0
966 mulx $acc3, $t1, $acc3
967 mov .Lpoly+8*3(%rip), $poly3
969 mulx $acc4, $t0, $acc4
972 shlx $poly1,$acc0,$t1
974 shrx $poly1,$acc0,$t0
977 ########################################################################
978 # First reduction step
982 mulx $poly3, $t0, $t1
983 mov 8*1($b_ptr), %rdx
987 xor $acc0, $acc0 # $acc0=0,cf=0,of=0
989 ########################################################################
991 mulx 8*0+128($a_ptr), $t0, $t1
995 mulx 8*1+128($a_ptr), $t0, $t1
999 mulx 8*2+128($a_ptr), $t0, $t1
1003 mulx 8*3+128($a_ptr), $t0, $t1
1006 shlx $poly1, $acc1, $t0
1008 shrx $poly1, $acc1, $t1
1014 ########################################################################
1015 # Second reduction step
1019 mulx $poly3, $t0, $t1
1020 mov 8*2($b_ptr), %rdx
1024 xor $acc1 ,$acc1 # $acc1=0,cf=0,of=0
1026 ########################################################################
1028 mulx 8*0+128($a_ptr), $t0, $t1
1032 mulx 8*1+128($a_ptr), $t0, $t1
1036 mulx 8*2+128($a_ptr), $t0, $t1
1040 mulx 8*3+128($a_ptr), $t0, $t1
1043 shlx $poly1, $acc2, $t0
1045 shrx $poly1, $acc2, $t1
1051 ########################################################################
1052 # Third reduction step
1056 mulx $poly3, $t0, $t1
1057 mov 8*3($b_ptr), %rdx
1061 xor $acc2, $acc2 # $acc2=0,cf=0,of=0
1063 ########################################################################
1065 mulx 8*0+128($a_ptr), $t0, $t1
1069 mulx 8*1+128($a_ptr), $t0, $t1
1073 mulx 8*2+128($a_ptr), $t0, $t1
1077 mulx 8*3+128($a_ptr), $t0, $t1
1080 shlx $poly1, $acc3, $t0
1082 shrx $poly1, $acc3, $t1
1088 ########################################################################
1089 # Fourth reduction step
1093 mulx $poly3, $t0, $t1
1095 mov .Lpoly+8*1(%rip), $poly1
1101 ########################################################################
1102 # Branch-less conditional subtraction of P
1105 sbb \$-1, $acc4 # .Lpoly[0]
1106 sbb $poly1, $acc5 # .Lpoly[1]
1107 sbb \$0, $acc0 # .Lpoly[2]
1109 sbb $poly3, $acc1 # .Lpoly[3]
1114 mov $acc4, 8*0($r_ptr)
1116 mov $acc5, 8*1($r_ptr)
1118 mov $acc0, 8*2($r_ptr)
1119 mov $acc1, 8*3($r_ptr)
1122 .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
1124 .type __ecp_nistz256_sqr_montx,\@abi-omnipotent
1126 __ecp_nistz256_sqr_montx:
1127 mulx $acc6, $acc1, $acc2 # a[0]*a[1]
1128 mulx $acc7, $t0, $acc3 # a[0]*a[2]
1131 mulx $acc0, $t1, $acc4 # a[0]*a[3]
1135 xor $acc5, $acc5 # $acc5=0,cf=0,of=0
1137 #################################
1138 mulx $acc7, $t0, $t1 # a[1]*a[2]
1142 mulx $acc0, $t0, $t1 # a[1]*a[3]
1148 #################################
1149 mulx $acc0, $t0, $acc6 # a[2]*a[3]
1150 mov 8*0+128($a_ptr), %rdx
1151 xor $acc7, $acc7 # $acc7=0,cf=0,of=0
1152 adcx $acc1, $acc1 # acc1:6<<1
1155 adox $acc7, $acc6 # of=0
1157 mulx %rdx, $acc0, $t1
1158 mov 8*1+128($a_ptr), %rdx
1163 mov 8*2+128($a_ptr), %rdx
1169 mov 8*3+128($a_ptr), %rdx
1179 shlx $a_ptr, $acc0, $t0
1181 shrx $a_ptr, $acc0, $t4
1182 mov .Lpoly+8*3(%rip), $t1
1188 mulx $t1, $t0, $acc0
1191 shlx $a_ptr, $acc1, $t0
1193 shrx $a_ptr, $acc1, $t4
1199 mulx $t1, $t0, $acc1
1202 shlx $a_ptr, $acc2, $t0
1204 shrx $a_ptr, $acc2, $t4
1210 mulx $t1, $t0, $acc2
1213 shlx $a_ptr, $acc3, $t0
1215 shrx $a_ptr, $acc3, $t4
1221 mulx $t1, $t0, $acc3
1226 adc $acc0, $acc4 # accumulate upper half
1227 mov .Lpoly+8*1(%rip), $a_ptr
1235 xor %eax, %eax # cf=0
1236 sbb \$-1, $acc4 # .Lpoly[0]
1238 sbb $a_ptr, $acc5 # .Lpoly[1]
1239 sbb \$0, $acc6 # .Lpoly[2]
1241 sbb $t1, $acc7 # .Lpoly[3]
1246 mov $acc4, 8*0($r_ptr)
1248 mov $acc5, 8*1($r_ptr)
1250 mov $acc6, 8*2($r_ptr)
1251 mov $acc7, 8*3($r_ptr)
1254 .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
1259 my ($r_ptr,$in_ptr)=("%rdi","%rsi");
1260 my ($acc0,$acc1,$acc2,$acc3)=map("%r$_",(8..11));
1261 my ($t0,$t1,$t2)=("%rcx","%r12","%r13");
1264 ################################################################################
1265 # void ecp_nistz256_from_mont(
1268 # This one performs Montgomery multiplication by 1, so we only need the reduction
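# (i.e. res = a * 2^-256 mod p: the body below is just the four word-by-word
# reduction steps from __ecp_nistz256_mul_montq followed by the final
# branch-less conditional subtraction of p)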
1270 .globl ecp_nistz256_from_mont
1271 .type ecp_nistz256_from_mont,\@function,2
1273 ecp_nistz256_from_mont:
1277 mov 8*0($in_ptr), %rax
1278 mov .Lpoly+8*3(%rip), $t2
1279 mov 8*1($in_ptr), $acc1
1280 mov 8*2($in_ptr), $acc2
1281 mov 8*3($in_ptr), $acc3
1283 mov .Lpoly+8*1(%rip), $t1
1285 #########################################
1297 #########################################
1310 ##########################################
1323 ###########################################
1337 ###########################################
1338 # Branch-less conditional subtraction
1348 cmovnz $in_ptr, $acc1
1349 mov $acc0, 8*0($r_ptr)
1351 mov $acc1, 8*1($r_ptr)
1353 mov $acc2, 8*2($r_ptr)
1354 mov $acc3, 8*3($r_ptr)
1359 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
1363 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1364 my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
1365 my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
1366 my ($M1,$T1a,$T1b,$TMP1,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
1369 ################################################################################
1370 # void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
1371 .globl ecp_nistz256_scatter_w5
1372 .type ecp_nistz256_scatter_w5,\@abi-omnipotent
1374 ecp_nistz256_scatter_w5:
1375 lea -3($index,$index,2), $index
1376 movdqa 0x00($in_t), %xmm0
1378 movdqa 0x10($in_t), %xmm1
1379 movdqa 0x20($in_t), %xmm2
1380 movdqa 0x30($in_t), %xmm3
1381 movdqa 0x40($in_t), %xmm4
1382 movdqa 0x50($in_t), %xmm5
1383 movdqa %xmm0, 0x00($val,$index)
1384 movdqa %xmm1, 0x10($val,$index)
1385 movdqa %xmm2, 0x20($val,$index)
1386 movdqa %xmm3, 0x30($val,$index)
1387 movdqa %xmm4, 0x40($val,$index)
1388 movdqa %xmm5, 0x50($val,$index)
1391 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1393 ################################################################################
1394 # void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
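# Constant-time selection: every entry of the window-5 table (16 entries of
# 96 bytes, a full Jacobian point each) is read and masked, so the memory
# access pattern does not depend on the secret index. A plain C sketch of the
# same selection (illustrative names; index 0 matches nothing and yields an
# all-zero output):
#
#   void ref_gather_w5(uint64_t val[12], const uint64_t in_t[16*12], int index)
#   {
#       for (int k = 0; k < 12; k++) val[k] = 0;
#       for (int i = 0; i < 16; i++) {
#           uint64_t mask = 0 - (uint64_t)((i + 1) == index); /* all ones on match */
#           for (int k = 0; k < 12; k++)
#               val[k] |= in_t[i*12 + k] & mask;
#       }
#   }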
1395 .globl ecp_nistz256_gather_w5
1396 .type ecp_nistz256_gather_w5,\@abi-omnipotent
1398 ecp_nistz256_gather_w5:
1400 $code.=<<___ if ($avx>1);
1401 mov OPENSSL_ia32cap_P+8(%rip), %eax
1403 jnz .Lavx2_gather_w5
1405 $code.=<<___ if ($win64);
1406 lea -0x88(%rsp), %rax
1407 .LSEH_begin_ecp_nistz256_gather_w5:
1408 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1409 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1410 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1411 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1412 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1413 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1414 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1415 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1416 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1417 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1418 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1421 movdqa .LOne(%rip), $ONE
1432 pshufd \$0, $INDEX, $INDEX
1435 .Lselect_loop_sse_w5:
1439 pcmpeqd $INDEX, $TMP0
1441 movdqa 16*0($in_t), $T0a
1442 movdqa 16*1($in_t), $T0b
1443 movdqa 16*2($in_t), $T0c
1444 movdqa 16*3($in_t), $T0d
1445 movdqa 16*4($in_t), $T0e
1446 movdqa 16*5($in_t), $T0f
1447 lea 16*6($in_t), $in_t
1463 jnz .Lselect_loop_sse_w5
1465 movdqu $Ra, 16*0($val)
1466 movdqu $Rb, 16*1($val)
1467 movdqu $Rc, 16*2($val)
1468 movdqu $Rd, 16*3($val)
1469 movdqu $Re, 16*4($val)
1470 movdqu $Rf, 16*5($val)
1472 $code.=<<___ if ($win64);
1473 movaps (%rsp), %xmm6
1474 movaps 0x10(%rsp), %xmm7
1475 movaps 0x20(%rsp), %xmm8
1476 movaps 0x30(%rsp), %xmm9
1477 movaps 0x40(%rsp), %xmm10
1478 movaps 0x50(%rsp), %xmm11
1479 movaps 0x60(%rsp), %xmm12
1480 movaps 0x70(%rsp), %xmm13
1481 movaps 0x80(%rsp), %xmm14
1482 movaps 0x90(%rsp), %xmm15
1483 lea 0xa8(%rsp), %rsp
1484 .LSEH_end_ecp_nistz256_gather_w5:
1488 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1490 ################################################################################
1491 # void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
1492 .globl ecp_nistz256_scatter_w7
1493 .type ecp_nistz256_scatter_w7,\@abi-omnipotent
1495 ecp_nistz256_scatter_w7:
1496 movdqu 0x00($in_t), %xmm0
1498 movdqu 0x10($in_t), %xmm1
1499 movdqu 0x20($in_t), %xmm2
1500 movdqu 0x30($in_t), %xmm3
1501 movdqa %xmm0, 0x00($val,$index)
1502 movdqa %xmm1, 0x10($val,$index)
1503 movdqa %xmm2, 0x20($val,$index)
1504 movdqa %xmm3, 0x30($val,$index)
1507 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1509 ################################################################################
1510 # void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
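# Same constant-time masking pattern as ecp_nistz256_gather_w5 above, but over
# the 64-entry tables of affine points (x|y, 64 bytes per entry) such as
# ecp_nistz256_precomputed; index 0 again selects nothing, which stands for
# the point at infinity.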
1511 .globl ecp_nistz256_gather_w7
1512 .type ecp_nistz256_gather_w7,\@abi-omnipotent
1514 ecp_nistz256_gather_w7:
1516 $code.=<<___ if ($avx>1);
1517 mov OPENSSL_ia32cap_P+8(%rip), %eax
1519 jnz .Lavx2_gather_w7
1521 $code.=<<___ if ($win64);
1522 lea -0x88(%rsp), %rax
1523 .LSEH_begin_ecp_nistz256_gather_w7:
1524 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1525 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1526 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1527 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1528 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1529 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1530 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1531 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1532 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1533 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1534 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1537 movdqa .LOne(%rip), $M0
1546 pshufd \$0, $INDEX, $INDEX
1549 .Lselect_loop_sse_w7:
1552 movdqa 16*0($in_t), $T0a
1553 movdqa 16*1($in_t), $T0b
1554 pcmpeqd $INDEX, $TMP0
1555 movdqa 16*2($in_t), $T0c
1556 movdqa 16*3($in_t), $T0d
1557 lea 16*4($in_t), $in_t
1566 prefetcht0 255($in_t)
1570 jnz .Lselect_loop_sse_w7
1572 movdqu $Ra, 16*0($val)
1573 movdqu $Rb, 16*1($val)
1574 movdqu $Rc, 16*2($val)
1575 movdqu $Rd, 16*3($val)
1577 $code.=<<___ if ($win64);
1578 movaps (%rsp), %xmm6
1579 movaps 0x10(%rsp), %xmm7
1580 movaps 0x20(%rsp), %xmm8
1581 movaps 0x30(%rsp), %xmm9
1582 movaps 0x40(%rsp), %xmm10
1583 movaps 0x50(%rsp), %xmm11
1584 movaps 0x60(%rsp), %xmm12
1585 movaps 0x70(%rsp), %xmm13
1586 movaps 0x80(%rsp), %xmm14
1587 movaps 0x90(%rsp), %xmm15
1588 lea 0xa8(%rsp), %rsp
1589 .LSEH_end_ecp_nistz256_gather_w7:
1593 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1597 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1598 my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
1599 my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
1600 my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));
1603 ################################################################################
1604 # void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
1605 .type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
1607 ecp_nistz256_avx2_gather_w5:
1611 $code.=<<___ if ($win64);
1612 lea -0x88(%rsp), %rax
1613 .LSEH_begin_ecp_nistz256_avx2_gather_w5:
1614 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1615 .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
1616 .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
1617 .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 0(%rax)
1618 .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
1619 .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
1620 .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
1621 .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
1622 .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
1623 .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
1624 .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
1627 vmovdqa .LTwo(%rip), $TWO
1633 vmovdqa .LOne(%rip), $M0
1634 vmovdqa .LTwo(%rip), $M1
1637 vpermd $INDEX, $Ra, $INDEX
1640 .Lselect_loop_avx2_w5:
1642 vmovdqa 32*0($in_t), $T0a
1643 vmovdqa 32*1($in_t), $T0b
1644 vmovdqa 32*2($in_t), $T0c
1646 vmovdqa 32*3($in_t), $T1a
1647 vmovdqa 32*4($in_t), $T1b
1648 vmovdqa 32*5($in_t), $T1c
1650 vpcmpeqd $INDEX, $M0, $TMP0
1651 vpcmpeqd $INDEX, $M1, $TMP1
1653 vpaddd $TWO, $M0, $M0
1654 vpaddd $TWO, $M1, $M1
1655 lea 32*6($in_t), $in_t
1657 vpand $TMP0, $T0a, $T0a
1658 vpand $TMP0, $T0b, $T0b
1659 vpand $TMP0, $T0c, $T0c
1660 vpand $TMP1, $T1a, $T1a
1661 vpand $TMP1, $T1b, $T1b
1662 vpand $TMP1, $T1c, $T1c
1664 vpxor $T0a, $Ra, $Ra
1665 vpxor $T0b, $Rb, $Rb
1666 vpxor $T0c, $Rc, $Rc
1667 vpxor $T1a, $Ra, $Ra
1668 vpxor $T1b, $Rb, $Rb
1669 vpxor $T1c, $Rc, $Rc
1672 jnz .Lselect_loop_avx2_w5
1674 vmovdqu $Ra, 32*0($val)
1675 vmovdqu $Rb, 32*1($val)
1676 vmovdqu $Rc, 32*2($val)
1679 $code.=<<___ if ($win64);
1680 movaps (%rsp), %xmm6
1681 movaps 0x10(%rsp), %xmm7
1682 movaps 0x20(%rsp), %xmm8
1683 movaps 0x30(%rsp), %xmm9
1684 movaps 0x40(%rsp), %xmm10
1685 movaps 0x50(%rsp), %xmm11
1686 movaps 0x60(%rsp), %xmm12
1687 movaps 0x70(%rsp), %xmm13
1688 movaps 0x80(%rsp), %xmm14
1689 movaps 0x90(%rsp), %xmm15
1690 lea 0xa8(%rsp), %rsp
1691 .LSEH_end_ecp_nistz256_avx2_gather_w5:
1695 .size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
1699 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1700 my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
1701 my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
1702 my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
1703 my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));
1707 ################################################################################
1708 # void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
1709 .globl ecp_nistz256_avx2_gather_w7
1710 .type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
1712 ecp_nistz256_avx2_gather_w7:
1716 $code.=<<___ if ($win64);
1717 lea -0x88(%rsp), %rax
1718 .LSEH_begin_ecp_nistz256_avx2_gather_w7:
1719 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1720 .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
1721 .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
1722 .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 0(%rax)
1723 .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
1724 .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
1725 .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
1726 .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
1727 .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
1728 .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
1729 .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
1732 vmovdqa .LThree(%rip), $THREE
1737 vmovdqa .LOne(%rip), $M0
1738 vmovdqa .LTwo(%rip), $M1
1739 vmovdqa .LThree(%rip), $M2
1742 vpermd $INDEX, $Ra, $INDEX
1743 # Skip index = 0, because it is implicitly the point at infinity
1746 .Lselect_loop_avx2_w7:
1748 vmovdqa 32*0($in_t), $T0a
1749 vmovdqa 32*1($in_t), $T0b
1751 vmovdqa 32*2($in_t), $T1a
1752 vmovdqa 32*3($in_t), $T1b
1754 vmovdqa 32*4($in_t), $T2a
1755 vmovdqa 32*5($in_t), $T2b
1757 vpcmpeqd $INDEX, $M0, $TMP0
1758 vpcmpeqd $INDEX, $M1, $TMP1
1759 vpcmpeqd $INDEX, $M2, $TMP2
1761 vpaddd $THREE, $M0, $M0
1762 vpaddd $THREE, $M1, $M1
1763 vpaddd $THREE, $M2, $M2
1764 lea 32*6($in_t), $in_t
1766 vpand $TMP0, $T0a, $T0a
1767 vpand $TMP0, $T0b, $T0b
1768 vpand $TMP1, $T1a, $T1a
1769 vpand $TMP1, $T1b, $T1b
1770 vpand $TMP2, $T2a, $T2a
1771 vpand $TMP2, $T2b, $T2b
1773 vpxor $T0a, $Ra, $Ra
1774 vpxor $T0b, $Rb, $Rb
1775 vpxor $T1a, $Ra, $Ra
1776 vpxor $T1b, $Rb, $Rb
1777 vpxor $T2a, $Ra, $Ra
1778 vpxor $T2b, $Rb, $Rb
1781 jnz .Lselect_loop_avx2_w7
1784 vmovdqa 32*0($in_t), $T0a
1785 vmovdqa 32*1($in_t), $T0b
1787 vpcmpeqd $INDEX, $M0, $TMP0
1789 vpand $TMP0, $T0a, $T0a
1790 vpand $TMP0, $T0b, $T0b
1792 vpxor $T0a, $Ra, $Ra
1793 vpxor $T0b, $Rb, $Rb
1795 vmovdqu $Ra, 32*0($val)
1796 vmovdqu $Rb, 32*1($val)
1799 $code.=<<___ if ($win64);
1800 movaps (%rsp), %xmm6
1801 movaps 0x10(%rsp), %xmm7
1802 movaps 0x20(%rsp), %xmm8
1803 movaps 0x30(%rsp), %xmm9
1804 movaps 0x40(%rsp), %xmm10
1805 movaps 0x50(%rsp), %xmm11
1806 movaps 0x60(%rsp), %xmm12
1807 movaps 0x70(%rsp), %xmm13
1808 movaps 0x80(%rsp), %xmm14
1809 movaps 0x90(%rsp), %xmm15
1810 lea 0xa8(%rsp), %rsp
1811 .LSEH_end_ecp_nistz256_avx2_gather_w7:
1815 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1819 .globl ecp_nistz256_avx2_gather_w7
1820 .type ecp_nistz256_avx2_gather_w7,\@function,3
1822 ecp_nistz256_avx2_gather_w7:
1823 .byte 0x0f,0x0b # ud2
1825 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1829 ########################################################################
1830 # This block implements the higher-level point_double, point_add and
1831 # point_add_affine operations. The key to performance here is to let the
1832 # out-of-order execution logic overlap computations from the next step
1833 # with tail processing from the current step. By using a tailored calling
1834 # sequence we minimize inter-step overhead and give the processor a better
1835 # shot at overlapping operations...
1837 # You will notice that the input data is copied to the stack. The trouble is
1838 # that there are no registers to spare for holding the original pointers, and
1839 # reloading the pointers would create undesired dependencies on the
1840 # effective-address calculation paths. In other words, the copying is done
1841 # deliberately, to favour the out-of-order execution logic.
1842 # <appro@openssl.org>
1844 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
1845 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
1846 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
1847 my ($poly1,$poly3)=($acc6,$acc7);
1849 sub load_for_mul () {
1850 my ($a,$b,$src0) = @_;
1851 my $bias = $src0 eq "%rax" ? 0 : -128;
1857 lea $bias+$a, $a_ptr
1862 sub load_for_sqr () {
1864 my $bias = $src0 eq "%rax" ? 0 : -128;
1868 lea $bias+$a, $a_ptr
1874 ########################################################################
1875 # operate in 4-5-0-1 "name space" that matches multiplication output
1877 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
1880 .type __ecp_nistz256_add_toq,\@abi-omnipotent
1882 __ecp_nistz256_add_toq:
1883 add 8*0($b_ptr), $a0
1884 adc 8*1($b_ptr), $a1
1886 adc 8*2($b_ptr), $a2
1887 adc 8*3($b_ptr), $a3
1901 mov $a0, 8*0($r_ptr)
1903 mov $a1, 8*1($r_ptr)
1905 mov $a2, 8*2($r_ptr)
1906 mov $a3, 8*3($r_ptr)
1909 .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
1911 .type __ecp_nistz256_sub_fromq,\@abi-omnipotent
1913 __ecp_nistz256_sub_fromq:
1914 sub 8*0($b_ptr), $a0
1915 sbb 8*1($b_ptr), $a1
1917 sbb 8*2($b_ptr), $a2
1918 sbb 8*3($b_ptr), $a3
1932 mov $a0, 8*0($r_ptr)
1934 mov $a1, 8*1($r_ptr)
1936 mov $a2, 8*2($r_ptr)
1937 mov $a3, 8*3($r_ptr)
1940 .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
1942 .type __ecp_nistz256_subq,\@abi-omnipotent
1944 __ecp_nistz256_subq:
1967 .size __ecp_nistz256_subq,.-__ecp_nistz256_subq
1969 .type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
1971 __ecp_nistz256_mul_by_2q:
1972 add $a0, $a0 # a0:a3+a0:a3
1990 mov $a0, 8*0($r_ptr)
1992 mov $a1, 8*1($r_ptr)
1994 mov $a2, 8*2($r_ptr)
1995 mov $a3, 8*3($r_ptr)
1998 .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
2003 my ($src0,$sfx,$bias);
2004 my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
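# The stack temporaries above hold the intermediates of the standard Jacobian
# doubling formulas for a = -3 (see the per-call comments below); roughly:
#
#   S  = 4*X1*Y1^2
#   M  = 3*(X1 + Z1^2)*(X1 - Z1^2)
#   X3 = M^2 - 2*S
#   Y3 = M*(S - X3) - 8*Y1^4
#   Z3 = 2*Y1*Z1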
2012 .globl ecp_nistz256_point_double
2013 .type ecp_nistz256_point_double,\@function,2
2015 ecp_nistz256_point_double:
2017 $code.=<<___ if ($addx);
2019 and OPENSSL_ia32cap_P+8(%rip), %ecx
2029 .type ecp_nistz256_point_doublex,\@function,2
2031 ecp_nistz256_point_doublex:
2044 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
2045 mov $a_ptr, $b_ptr # backup copy
2046 movdqu 0x10($a_ptr), %xmm1
2047 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
2048 mov 0x20+8*1($a_ptr), $acc5
2049 mov 0x20+8*2($a_ptr), $acc0
2050 mov 0x20+8*3($a_ptr), $acc1
2051 mov .Lpoly+8*1(%rip), $poly1
2052 mov .Lpoly+8*3(%rip), $poly3
2053 movdqa %xmm0, $in_x(%rsp)
2054 movdqa %xmm1, $in_x+0x10(%rsp)
2055 lea 0x20($r_ptr), $acc2
2056 lea 0x40($r_ptr), $acc3
2061 lea $S(%rsp), $r_ptr
2062 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(S, in_y);
2064 mov 0x40+8*0($a_ptr), $src0
2065 mov 0x40+8*1($a_ptr), $acc6
2066 mov 0x40+8*2($a_ptr), $acc7
2067 mov 0x40+8*3($a_ptr), $acc0
2068 lea 0x40-$bias($a_ptr), $a_ptr
2069 lea $Zsqr(%rsp), $r_ptr
2070 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Zsqr, in_z);
2072 `&load_for_sqr("$S(%rsp)", "$src0")`
2073 lea $S(%rsp), $r_ptr
2074 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(S, S);
2076 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
2077 mov 0x40+8*0($b_ptr), $acc1
2078 mov 0x40+8*1($b_ptr), $acc2
2079 mov 0x40+8*2($b_ptr), $acc3
2080 mov 0x40+8*3($b_ptr), $acc4
2081 lea 0x40-$bias($b_ptr), $a_ptr
2082 lea 0x20($b_ptr), $b_ptr
2084 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, in_z, in_y);
2085 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(res_z, res_z);
2087 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2088 mov $in_x+8*1(%rsp), $acc5
2089 lea $Zsqr(%rsp), $b_ptr
2090 mov $in_x+8*2(%rsp), $acc0
2091 mov $in_x+8*3(%rsp), $acc1
2092 lea $M(%rsp), $r_ptr
2093 call __ecp_nistz256_add_to$x # p256_add(M, in_x, Zsqr);
2095 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2096 mov $in_x+8*1(%rsp), $acc5
2097 lea $Zsqr(%rsp), $b_ptr
2098 mov $in_x+8*2(%rsp), $acc0
2099 mov $in_x+8*3(%rsp), $acc1
2100 lea $Zsqr(%rsp), $r_ptr
2101 call __ecp_nistz256_sub_from$x # p256_sub(Zsqr, in_x, Zsqr);
2103 `&load_for_sqr("$S(%rsp)", "$src0")`
2105 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
2108 ######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
2109 # operate in 4-5-6-7 "name space" that matches squaring output
2111 my ($poly1,$poly3)=($a_ptr,$t1);
2112 my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);
2125 xor $a_ptr, $a_ptr # borrow $a_ptr
2134 mov $a1, $t0 # a0:a3>>1
2145 mov $a0, 8*0($r_ptr)
2147 mov $a1, 8*1($r_ptr)
2151 mov $a2, 8*2($r_ptr)
2152 mov $a3, 8*3($r_ptr)
2156 `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
2157 lea $M(%rsp), $r_ptr
2158 call __ecp_nistz256_mul_mont$x # p256_mul_mont(M, M, Zsqr);
2160 lea $tmp0(%rsp), $r_ptr
2161 call __ecp_nistz256_mul_by_2$x
2163 lea $M(%rsp), $b_ptr
2164 lea $M(%rsp), $r_ptr
2165 call __ecp_nistz256_add_to$x # p256_mul_by_3(M, M);
2167 `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
2168 lea $S(%rsp), $r_ptr
2169 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, in_x);
2171 lea $tmp0(%rsp), $r_ptr
2172 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, S);
2174 `&load_for_sqr("$M(%rsp)", "$src0")`
2176 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_x, M);
2178 lea $tmp0(%rsp), $b_ptr
2179 mov $acc6, $acc0 # harmonize sqr output and sub input
2183 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, tmp0);
2185 mov $S+8*0(%rsp), $t0
2186 mov $S+8*1(%rsp), $t1
2187 mov $S+8*2(%rsp), $t2
2188 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
2189 lea $S(%rsp), $r_ptr
2190 call __ecp_nistz256_sub$x # p256_sub(S, S, res_x);
2193 lea $M(%rsp), $b_ptr
2194 mov $acc4, $acc6 # harmonize sub output and mul input
2196 mov $acc4, $S+8*0(%rsp) # have to save:-(
2198 mov $acc5, $S+8*1(%rsp)
2200 mov $acc0, $S+8*2(%rsp)
2201 lea $S-$bias(%rsp), $a_ptr
2203 mov $acc1, $S+8*3(%rsp)
2205 lea $S(%rsp), $r_ptr
2206 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, M);
2210 call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
2220 .size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
2227 my ($src0,$sfx,$bias);
2228 my ($H,$Hsqr,$R,$Rsqr,$Hcub,
2230 $res_x,$res_y,$res_z,
2231 $in1_x,$in1_y,$in1_z,
2232 $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
2233 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
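# The calls below follow the usual Jacobian point addition (see the per-call
# comments), with special handling when an input is the point at infinity or
# when the two inputs coincide; roughly:
#
#   U1 = X1*Z2^2,  U2 = X2*Z1^2,  H = U2 - U1
#   S1 = Y1*Z2^3,  S2 = Y2*Z1^3,  R = S2 - S1
#   X3 = R^2 - H^3 - 2*U1*H^2
#   Y3 = R*(U1*H^2 - X3) - S1*H^3
#   Z3 = H*Z1*Z2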
2241 .globl ecp_nistz256_point_add
2242 .type ecp_nistz256_point_add,\@function,3
2244 ecp_nistz256_point_add:
2246 $code.=<<___ if ($addx);
2248 and OPENSSL_ia32cap_P+8(%rip), %ecx
2258 .type ecp_nistz256_point_addx,\@function,3
2260 ecp_nistz256_point_addx:
2273 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2274 movdqu 0x10($a_ptr), %xmm1
2275 movdqu 0x20($a_ptr), %xmm2
2276 movdqu 0x30($a_ptr), %xmm3
2277 movdqu 0x40($a_ptr), %xmm4
2278 movdqu 0x50($a_ptr), %xmm5
2279 mov $a_ptr, $b_ptr # reassign
2280 mov $b_org, $a_ptr # reassign
2281 movdqa %xmm0, $in1_x(%rsp)
2282 movdqa %xmm1, $in1_x+0x10(%rsp)
2284 movdqa %xmm2, $in1_y(%rsp)
2285 movdqa %xmm3, $in1_y+0x10(%rsp)
2287 movdqa %xmm4, $in1_z(%rsp)
2288 movdqa %xmm5, $in1_z+0x10(%rsp)
2291 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$b_ptr
2292 pshufd \$0xb1, %xmm3, %xmm5
2293 movdqu 0x10($a_ptr), %xmm1
2294 movdqu 0x20($a_ptr), %xmm2
2296 movdqu 0x30($a_ptr), %xmm3
2297 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
2298 mov 0x40+8*1($a_ptr), $acc6
2299 mov 0x40+8*2($a_ptr), $acc7
2300 mov 0x40+8*3($a_ptr), $acc0
2301 movdqa %xmm0, $in2_x(%rsp)
2302 pshufd \$0x1e, %xmm5, %xmm4
2303 movdqa %xmm1, $in2_x+0x10(%rsp)
2305 movq $r_ptr, %xmm0 # save $r_ptr
2306 movdqa %xmm2, $in2_y(%rsp)
2307 movdqa %xmm3, $in2_y+0x10(%rsp)
2313 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2314 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
2315 mov $acc6, $in2_z+8*1(%rsp)
2316 mov $acc7, $in2_z+8*2(%rsp)
2317 mov $acc0, $in2_z+8*3(%rsp)
2318 lea $Z2sqr(%rsp), $r_ptr # Z2^2
2319 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z2sqr, in2_z);
2321 pcmpeqd %xmm4, %xmm5
2322 pshufd \$0xb1, %xmm3, %xmm4
2324 pshufd \$0, %xmm5, %xmm5 # in1infty
2325 pshufd \$0x1e, %xmm4, %xmm3
2328 pcmpeqd %xmm3, %xmm4
2329 pshufd \$0, %xmm4, %xmm4 # in2infty
2330 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
2331 mov 0x40+8*1($b_ptr), $acc6
2332 mov 0x40+8*2($b_ptr), $acc7
2333 mov 0x40+8*3($b_ptr), $acc0
2335 lea 0x40-$bias($b_ptr), $a_ptr
2336 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2337 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2339 `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
2340 lea $S1(%rsp), $r_ptr # S1 = Z2^3
2341 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, Z2sqr, in2_z);
2343 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2344 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2345 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2347 `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
2348 lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
2349 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, S1, in1_y);
2351 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2352 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2353 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2355 lea $S1(%rsp), $b_ptr
2356 lea $R(%rsp), $r_ptr # R = S2 - S1
2357 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, S1);
2359 or $acc5, $acc4 # see if result is zero
2363 por %xmm5, %xmm2 # in1infty || in2infty
2366 `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2367 lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
2368 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U1, in1_x, Z2sqr);
2370 `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
2371 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2372 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in2_x, Z1sqr);
2374 lea $U1(%rsp), $b_ptr
2375 lea $H(%rsp), $r_ptr # H = U2 - U1
2376 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, U1);
2378 or $acc5, $acc4 # see if result is zero
2382 .byte 0x3e # predict taken
2383 jnz .Ladd_proceed$x # is_equal(U1,U2)?
2387 jnz .Ladd_proceed$x # (in1infty || in2infty)?
2389 jz .Ladd_proceed$x # is_equal(S1,S2)?
2391 movq %xmm0, $r_ptr # restore $r_ptr
2393 movdqu %xmm0, 0x00($r_ptr)
2394 movdqu %xmm0, 0x10($r_ptr)
2395 movdqu %xmm0, 0x20($r_ptr)
2396 movdqu %xmm0, 0x30($r_ptr)
2397 movdqu %xmm0, 0x40($r_ptr)
2398 movdqu %xmm0, 0x50($r_ptr)
2403 `&load_for_sqr("$R(%rsp)", "$src0")`
2404 lea $Rsqr(%rsp), $r_ptr # R^2
2405 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2407 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2408 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2409 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2411 `&load_for_sqr("$H(%rsp)", "$src0")`
2412 lea $Hsqr(%rsp), $r_ptr # H^2
2413 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2415 `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
2416 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2417 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, res_z, in2_z);
2419 `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
2420 lea $Hcub(%rsp), $r_ptr # H^3
2421 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2423 `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
2424 lea $U2(%rsp), $r_ptr # U1*H^2
2425 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, U1, Hsqr);
2428 #######################################################################
2429 # operate in 4-5-0-1 "name space" that matches multiplication output
2431 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2432 my ($poly1, $poly3)=($acc6,$acc7);
2435 #lea $U2(%rsp), $a_ptr
2436 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2437 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2439 add $acc0, $acc0 # a0:a3+a0:a3
2440 lea $Rsqr(%rsp), $a_ptr
2457 mov 8*0($a_ptr), $t0
2459 mov 8*1($a_ptr), $t1
2461 mov 8*2($a_ptr), $t2
2463 mov 8*3($a_ptr), $t3
2465 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2467 lea $Hcub(%rsp), $b_ptr
2468 lea $res_x(%rsp), $r_ptr
2469 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2471 mov $U2+8*0(%rsp), $t0
2472 mov $U2+8*1(%rsp), $t1
2473 mov $U2+8*2(%rsp), $t2
2474 mov $U2+8*3(%rsp), $t3
2475 lea $res_y(%rsp), $r_ptr
2477 call __ecp_nistz256_sub$x # p256_sub(res_y, U2, res_x);
2479 mov $acc0, 8*0($r_ptr) # save the result, as
2480 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2481 mov $acc2, 8*2($r_ptr)
2482 mov $acc3, 8*3($r_ptr)
2486 `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
2487 lea $S2(%rsp), $r_ptr
2488 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S1, Hcub);
2490 `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
2491 lea $res_y(%rsp), $r_ptr
2492 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_y, R, res_y);
2494 lea $S2(%rsp), $b_ptr
2495 lea $res_y(%rsp), $r_ptr
2496 call __ecp_nistz256_sub_from$x # p256_sub(res_y, res_y, S2);
2498 movq %xmm0, $r_ptr # restore $r_ptr
2500 movdqa %xmm5, %xmm0 # copy_conditional(res_z, in2_z, in1infty);
2502 pandn $res_z(%rsp), %xmm0
2504 pandn $res_z+0x10(%rsp), %xmm1
2506 pand $in2_z(%rsp), %xmm2
2507 pand $in2_z+0x10(%rsp), %xmm3
2511 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2517 pand $in1_z(%rsp), %xmm2
2518 pand $in1_z+0x10(%rsp), %xmm3
2521 movdqu %xmm2, 0x40($r_ptr)
2522 movdqu %xmm3, 0x50($r_ptr)
2524 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2526 pandn $res_x(%rsp), %xmm0
2528 pandn $res_x+0x10(%rsp), %xmm1
2530 pand $in2_x(%rsp), %xmm2
2531 pand $in2_x+0x10(%rsp), %xmm3
2535 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2541 pand $in1_x(%rsp), %xmm2
2542 pand $in1_x+0x10(%rsp), %xmm3
2545 movdqu %xmm2, 0x00($r_ptr)
2546 movdqu %xmm3, 0x10($r_ptr)
2548 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2550 pandn $res_y(%rsp), %xmm0
2552 pandn $res_y+0x10(%rsp), %xmm1
2554 pand $in2_y(%rsp), %xmm2
2555 pand $in2_y+0x10(%rsp), %xmm3
2559 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2565 pand $in1_y(%rsp), %xmm2
2566 pand $in1_y+0x10(%rsp), %xmm3
2569 movdqu %xmm2, 0x20($r_ptr)
2570 movdqu %xmm3, 0x30($r_ptr)
2581 .size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
2586 sub gen_add_affine () {
2588 my ($src0,$sfx,$bias);
2589 my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
2590 $res_x,$res_y,$res_z,
2591 $in1_x,$in1_y,$in1_z,
2592 $in2_x,$in2_y)=map(32*$_,(0..14));
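# Same as the full addition above, specialised for an affine second input
# (Z2 == 1), so the Z2^2 and Z2^3 multiplications disappear; roughly:
#
#   U2 = X2*Z1^2,  H = U2 - X1
#   S2 = Y2*Z1^3,  R = S2 - Y1
#   X3 = R^2 - H^3 - 2*X1*H^2
#   Y3 = R*(X1*H^2 - X3) - Y1*H^3
#   Z3 = H*Z1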
2601 .globl ecp_nistz256_point_add_affine
2602 .type ecp_nistz256_point_add_affine,\@function,3
2604 ecp_nistz256_point_add_affine:
2606 $code.=<<___ if ($addx);
2608 and OPENSSL_ia32cap_P+8(%rip), %ecx
2610 je .Lpoint_add_affinex
2618 .type ecp_nistz256_point_add_affinex,\@function,3
2620 ecp_nistz256_point_add_affinex:
2621 .Lpoint_add_affinex:
2633 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2634 mov $b_org, $b_ptr # reassign
2635 movdqu 0x10($a_ptr), %xmm1
2636 movdqu 0x20($a_ptr), %xmm2
2637 movdqu 0x30($a_ptr), %xmm3
2638 movdqu 0x40($a_ptr), %xmm4
2639 movdqu 0x50($a_ptr), %xmm5
2640 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
2641 mov 0x40+8*1($a_ptr), $acc6
2642 mov 0x40+8*2($a_ptr), $acc7
2643 mov 0x40+8*3($a_ptr), $acc0
2644 movdqa %xmm0, $in1_x(%rsp)
2645 movdqa %xmm1, $in1_x+0x10(%rsp)
2647 movdqa %xmm2, $in1_y(%rsp)
2648 movdqa %xmm3, $in1_y+0x10(%rsp)
2650 movdqa %xmm4, $in1_z(%rsp)
2651 movdqa %xmm5, $in1_z+0x10(%rsp)
2654 movdqu 0x00($b_ptr), %xmm0 # copy *(P256_POINT_AFFINE *)$b_ptr
2655 pshufd \$0xb1, %xmm3, %xmm5
2656 movdqu 0x10($b_ptr), %xmm1
2657 movdqu 0x20($b_ptr), %xmm2
2659 movdqu 0x30($b_ptr), %xmm3
2660 movdqa %xmm0, $in2_x(%rsp)
2661 pshufd \$0x1e, %xmm5, %xmm4
2662 movdqa %xmm1, $in2_x+0x10(%rsp)
2664 movq $r_ptr, %xmm0 # save $r_ptr
2665 movdqa %xmm2, $in2_y(%rsp)
2666 movdqa %xmm3, $in2_y+0x10(%rsp)
2672 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2673 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2674 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2676 pcmpeqd %xmm4, %xmm5
2677 pshufd \$0xb1, %xmm3, %xmm4
2678 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
2679 #lea 0x00($b_ptr), $b_ptr
2680 mov $acc4, $acc1 # harmonize sqr output and mul input
2682 pshufd \$0, %xmm5, %xmm5 # in1infty
2683 pshufd \$0x1e, %xmm4, %xmm3
2688 pcmpeqd %xmm3, %xmm4
2689 pshufd \$0, %xmm4, %xmm4 # in2infty
2691 lea $Z1sqr-$bias(%rsp), $a_ptr
2693 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2694 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, Z1sqr, in2_x);
2696 lea $in1_x(%rsp), $b_ptr
2697 lea $H(%rsp), $r_ptr # H = U2 - U1
2698 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, in1_x);
2700 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2701 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2702 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2704 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2705 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2706 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2708 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2709 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2710 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2712 lea $in1_y(%rsp), $b_ptr
2713 lea $R(%rsp), $r_ptr # R = S2 - S1
2714 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, in1_y);
2716 `&load_for_sqr("$H(%rsp)", "$src0")`
2717 lea $Hsqr(%rsp), $r_ptr # H^2
2718 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2720 `&load_for_sqr("$R(%rsp)", "$src0")`
2721 lea $Rsqr(%rsp), $r_ptr # R^2
2722 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2724 `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
2725 lea $Hcub(%rsp), $r_ptr # H^3
2726 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2728 `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2729 lea $U2(%rsp), $r_ptr # U1*H^2
2730 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in1_x, Hsqr);
2733 #######################################################################
2734 # operate in 4-5-0-1 "name space" that matches multiplication output
2736 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2737 my ($poly1, $poly3)=($acc6,$acc7);
2740 #lea $U2(%rsp), $a_ptr
2741 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2742 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2744 add $acc0, $acc0 # a0:a3+a0:a3
2745 lea $Rsqr(%rsp), $a_ptr
2762 mov 8*0($a_ptr), $t0
2764 mov 8*1($a_ptr), $t1
2766 mov 8*2($a_ptr), $t2
2768 mov 8*3($a_ptr), $t3
2770 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2772 lea $Hcub(%rsp), $b_ptr
2773 lea $res_x(%rsp), $r_ptr
2774 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2776 mov $U2+8*0(%rsp), $t0
2777 mov $U2+8*1(%rsp), $t1
2778 mov $U2+8*2(%rsp), $t2
2779 mov $U2+8*3(%rsp), $t3
2780 lea $H(%rsp), $r_ptr
2782 call __ecp_nistz256_sub$x # p256_sub(H, U2, res_x);
2784 mov $acc0, 8*0($r_ptr) # save the result, as
2785 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2786 mov $acc2, 8*2($r_ptr)
2787 mov $acc3, 8*3($r_ptr)
2791 `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
2792 lea $S2(%rsp), $r_ptr
2793 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Hcub, in1_y);
2795 `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
2796 lea $H(%rsp), $r_ptr
2797 call __ecp_nistz256_mul_mont$x # p256_mul_mont(H, H, R);
2799 lea $S2(%rsp), $b_ptr
2800 lea $res_y(%rsp), $r_ptr
2801 call __ecp_nistz256_sub_from$x # p256_sub(res_y, H, S2);
2803 movq %xmm0, $r_ptr # restore $r_ptr
2805 movdqa %xmm5, %xmm0 # copy_conditional(res_z, ONE, in1infty);
2807 pandn $res_z(%rsp), %xmm0
2809 pandn $res_z+0x10(%rsp), %xmm1
2811 pand .LONE_mont(%rip), %xmm2
2812 pand .LONE_mont+0x10(%rip), %xmm3
2816 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2822 pand $in1_z(%rsp), %xmm2
2823 pand $in1_z+0x10(%rsp), %xmm3
2826 movdqu %xmm2, 0x40($r_ptr)
2827 movdqu %xmm3, 0x50($r_ptr)
2829 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2831 pandn $res_x(%rsp), %xmm0
2833 pandn $res_x+0x10(%rsp), %xmm1
2835 pand $in2_x(%rsp), %xmm2
2836 pand $in2_x+0x10(%rsp), %xmm3
2840 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2846 pand $in1_x(%rsp), %xmm2
2847 pand $in1_x+0x10(%rsp), %xmm3
2850 movdqu %xmm2, 0x00($r_ptr)
2851 movdqu %xmm3, 0x10($r_ptr)
2853 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2855 pandn $res_y(%rsp), %xmm0
2857 pandn $res_y+0x10(%rsp), %xmm1
2859 pand $in2_y(%rsp), %xmm2
2860 pand $in2_y+0x10(%rsp), %xmm3
2864 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2870 pand $in1_y(%rsp), %xmm2
2871 pand $in1_y+0x10(%rsp), %xmm3
2874 movdqu %xmm2, 0x20($r_ptr)
2875 movdqu %xmm3, 0x30($r_ptr)
2885 .size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
2888 &gen_add_affine("q");
2890 ########################################################################
2894 ########################################################################
2895 # operate in 4-5-0-1 "name space" that matches multiplication output
2897 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2900 .type __ecp_nistz256_add_tox,\@abi-omnipotent
2902 __ecp_nistz256_add_tox:
2904 adc 8*0($b_ptr), $a0
2905 adc 8*1($b_ptr), $a1
2907 adc 8*2($b_ptr), $a2
2908 adc 8*3($b_ptr), $a3
2923 mov $a0, 8*0($r_ptr)
2925 mov $a1, 8*1($r_ptr)
2927 mov $a2, 8*2($r_ptr)
2928 mov $a3, 8*3($r_ptr)
2931 .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
2933 .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
2935 __ecp_nistz256_sub_fromx:
2937 sbb 8*0($b_ptr), $a0
2938 sbb 8*1($b_ptr), $a1
2940 sbb 8*2($b_ptr), $a2
2941 sbb 8*3($b_ptr), $a3
2956 mov $a0, 8*0($r_ptr)
2958 mov $a1, 8*1($r_ptr)
2960 mov $a2, 8*2($r_ptr)
2961 mov $a3, 8*3($r_ptr)
2964 .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
2966 .type __ecp_nistz256_subx,\@abi-omnipotent
2968 __ecp_nistz256_subx:
2993 .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
2995 .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
2997 __ecp_nistz256_mul_by_2x:
2999 adc $a0, $a0 # a0:a3+a0:a3
3018 mov $a0, 8*0($r_ptr)
3020 mov $a1, 8*1($r_ptr)
3022 mov $a2, 8*2($r_ptr)
3023 mov $a3, 8*3($r_ptr)
3026 .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
3031 &gen_add_affine("x");
3035 ########################################################################
3036 # Convert ecp_nistz256_table.c to the layout expected by ecp_nistz256_gather_w7
3038 open TABLE,"<ecp_nistz256_table.c" or
3039 open TABLE,"<${dir}../ecp_nistz256_table.c" or
3040 die "failed to open ecp_nistz256_table.c:",$!;
3045 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
3049 die "insane number of elements" if ($#arr != 64*16*37-1);
3053 .globl ecp_nistz256_precomputed
3054 .type ecp_nistz256_precomputed,\@object
3056 ecp_nistz256_precomputed:
3058 while (@line=splice(@arr,0,16)) {
3059 print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
3062 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
3065 $code =~ s/\`([^\`]*)\`/eval $1/gem;