2 # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 ##############################################################################
12 # Copyright 2014 Intel Corporation #
14 # Licensed under the Apache License, Version 2.0 (the "License"); #
15 # you may not use this file except in compliance with the License. #
16 # You may obtain a copy of the License at #
18 # http://www.apache.org/licenses/LICENSE-2.0 #
20 # Unless required by applicable law or agreed to in writing, software #
21 # distributed under the License is distributed on an "AS IS" BASIS, #
22 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
23 # See the License for the specific language governing permissions and #
24 # limitations under the License. #
26 ##############################################################################
28 # Developers and authors: #
29 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
30 # (1) Intel Corporation, Israel Development Center #
31 # (2) University of Haifa #
33 # S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with 256 Bit Primes" #
36 ##############################################################################
38 # Further optimization by <appro@openssl.org>:
40 #                     this/original   with/without -DECP_NISTZ256_ASM(*)
41 # Opteron             +12-49%         +110-150%
42 # Bulldozer           +14-45%         +175-210%
44 # Westmere            +12-34%         +80-87%
45 # Sandy Bridge        +9-35%          +110-120%
46 # Ivy Bridge          +9-35%          +110-125%
47 # Haswell             +8-37%          +140-160%
48 # Broadwell           +18-58%         +145-210%
49 # Atom                +15-50%         +130-180%
50 # VIA Nano            +43-160%        +300-480%
52 # (*) "without -DECP_NISTZ256_ASM" refers to build with
53 # "enable-ec_nistp_64_gcc_128";
55 # Ranges denote minimum and maximum improvement coefficients depending
56 # on the benchmark. Lower coefficients are for ECDSA sign, the relatively
57 # fastest server-side operation. Keep in mind that +100% means a 2x improvement.
61 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
63 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
65 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
66 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
67 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
68 die "can't locate x86_64-xlate.pl";
70 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
73 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
74 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
75 $avx = ($1>=2.19) + ($1>=2.22);
79 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
80 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
81 $avx = ($1>=2.09) + ($1>=2.10);
85 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
86 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
87 $avx = ($1>=10) + ($1>=11);
91 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
92 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
93 $avx = ($ver>=3.0) + ($ver>=3.01);
99 .extern OPENSSL_ia32cap_P
104 .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
106 # 2^512 mod P, precomputed for the NIST P-256 prime (used to enter the Montgomery domain)
108 .quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
111 .long 1,1,1,1,1,1,1,1
113 .long 2,2,2,2,2,2,2,2
115 .long 3,3,3,3,3,3,3,3
117 .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
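#
# For reference, the constants above can be cross-checked with a few lines of
# Python (an illustrative sketch only, not part of the generated code; the
# .quad values are stored least-significant word first):
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1              # .Lpoly
#   assert p == 0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff
#   assert pow(2, 512, p) == \
#       0x00000004fffffffdfffffffffffffffefffffffbffffffff0000000000000003  # .LRR
#   assert 2**256 % p == \
#       0x00000000fffffffeffffffffffffffffffffffff000000000000000000000001  # .LONE_mont
#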
121 ################################################################################
122 # void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
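#
# Functionally this is modular doubling: res = 2*a mod p. A minimal reference
# model in Python (illustrative sketch only, ad-hoc names; it assumes the input
# is already fully reduced, a < p, so a single conditional subtraction of p is
# enough - which the code below performs branch-free with sbb/cmov):
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def mul_by_2(a):
#       t = 2 * a                       # a < p  =>  t < 2*p
#       return t - p if t >= p else t
#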
124 my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
125 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
126 my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");
130 .globl ecp_nistz256_mul_by_2
131 .type ecp_nistz256_mul_by_2,\@function,2
133 ecp_nistz256_mul_by_2:
141 add $a0, $a0 # a0:a3+a0:a3
145 lea .Lpoly(%rip), $a_ptr
174 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
176 ################################################################################
177 # void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
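#
# Halving mod p: if the value is odd, adding p first makes it even without
# changing its residue class, then an exact shift right by one bit gives
# a/2 mod p. A reference model in Python (illustrative sketch only, ad-hoc
# names; the code below keeps the 257th bit of a+p in a carry register):
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def div_by_2(a):                    # assumes 0 <= a < p
#       t = a + p if a & 1 else a       # p is odd, so a+p is even
#       return t >> 1                   # exact division by 2, result < p
#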
178 .globl ecp_nistz256_div_by_2
179 .type ecp_nistz256_div_by_2,\@function,2
181 ecp_nistz256_div_by_2:
191 lea .Lpoly(%rip), $a_ptr
202 xor $a_ptr, $a_ptr # borrow $a_ptr
211 mov $a1, $t0 # a0:a3>>1
237 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
239 ################################################################################
240 # void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
241 .globl ecp_nistz256_mul_by_3
242 .type ecp_nistz256_mul_by_3,\@function,2
244 ecp_nistz256_mul_by_3:
252 add $a0, $a0 # a0:a3+a0:a3
264 sbb .Lpoly+8*1(%rip), $a1
267 sbb .Lpoly+8*3(%rip), $a3
276 add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
286 sbb .Lpoly+8*1(%rip), $a1
289 sbb .Lpoly+8*3(%rip), $a3
306 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
308 ################################################################################
309 # void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
310 .globl ecp_nistz256_add
311 .type ecp_nistz256_add,\@function,3
323 lea .Lpoly(%rip), $a_ptr
355 .size ecp_nistz256_add,.-ecp_nistz256_add
357 ################################################################################
358 # void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
359 .globl ecp_nistz256_sub
360 .type ecp_nistz256_sub,\@function,3
372 lea .Lpoly(%rip), $a_ptr
404 .size ecp_nistz256_sub,.-ecp_nistz256_sub
406 ################################################################################
407 # void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
408 .globl ecp_nistz256_neg
409 .type ecp_nistz256_neg,\@function,2
427 lea .Lpoly(%rip), $a_ptr
453 .size ecp_nistz256_neg,.-ecp_nistz256_neg
457 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
458 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
459 my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
460 my ($poly1,$poly3)=($acc6,$acc7);
463 ################################################################################
464 # void ecp_nistz256_to_mont(
467 .globl ecp_nistz256_to_mont
468 .type ecp_nistz256_to_mont,\@function,2
470 ecp_nistz256_to_mont:
472 $code.=<<___ if ($addx);
474 and OPENSSL_ia32cap_P+8(%rip), %ecx
477 lea .LRR(%rip), $b_org
479 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
481 ################################################################################
482 # void ecp_nistz256_mul_mont(
487 .globl ecp_nistz256_mul_mont
488 .type ecp_nistz256_mul_mont,\@function,3
490 ecp_nistz256_mul_mont:
492 $code.=<<___ if ($addx);
494 and OPENSSL_ia32cap_P+8(%rip), %ecx
506 $code.=<<___ if ($addx);
512 mov 8*0($b_org), %rax
513 mov 8*0($a_ptr), $acc1
514 mov 8*1($a_ptr), $acc2
515 mov 8*2($a_ptr), $acc3
516 mov 8*3($a_ptr), $acc4
518 call __ecp_nistz256_mul_montq
520 $code.=<<___ if ($addx);
526 mov 8*0($b_org), %rdx
527 mov 8*0($a_ptr), $acc1
528 mov 8*1($a_ptr), $acc2
529 mov 8*2($a_ptr), $acc3
530 mov 8*3($a_ptr), $acc4
531 lea -128($a_ptr), $a_ptr # control u-op density
533 call __ecp_nistz256_mul_montx
546 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
548 .type __ecp_nistz256_mul_montq,\@abi-omnipotent
550 __ecp_nistz256_mul_montq:
551 ########################################################################
555 mov .Lpoly+8*1(%rip),$poly1
561 mov .Lpoly+8*3(%rip),$poly3
580 ########################################################################
581 # First reduction step
582 # Basically now we want to multiply acc[0] by p256,
583 # and add the result to the acc.
584 # Due to the special form of p256 we do some optimizations
586 # acc[0] x p256[0..1] = acc[0] x 2^96 - acc[0]
587 # then we add acc[0] and get acc[0] x 2^96
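#
# A reference model of one reduction step in Python (illustrative sketch only,
# ad-hoc names): because p[0] = 2^64-1, the Montgomery constant n0' is 1, so
# the word to cancel is acc[0] itself, and acc[0]*p collapses into the shifts
# described above.
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def reduce_step(acc):
#       m = acc % 2**64          # m = acc[0]*n0' with n0' = 1
#       acc += m * p             # clears the low limb: m*p = -m (mod 2^64)
#       return acc >> 64         # exact shift; result = acc * 2^-64 (mod p)
#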
593 add $acc0, $acc1 # +=acc[0]<<96
596 mov 8*1($b_ptr), %rax
601 ########################################################################
634 ########################################################################
635 # Second reduction step
643 mov 8*2($b_ptr), %rax
648 ########################################################################
681 ########################################################################
682 # Third reduction step
690 mov 8*3($b_ptr), %rax
695 ########################################################################
728 ########################################################################
729 # Final reduction step
742 ########################################################################
743 # Branch-less conditional subtraction of P
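#
# The idea, as a reference model in Python (illustrative sketch only, ad-hoc
# names): always compute t - p, then select the original value or the
# difference based on the borrow, using cmov instead of a branch:
#
#   def cond_sub_p(t, p):            # assumes 0 <= t < 2*p
#       d = t - p
#       return t if d < 0 else d     # "d < 0" is the borrow out of the sbb chain
#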
744 sub \$-1, $acc4 # .Lpoly[0]
746 sbb $poly1, $acc5 # .Lpoly[1]
747 sbb \$0, $acc0 # .Lpoly[2]
749 sbb $poly3, $acc1 # .Lpoly[3]
754 mov $acc4, 8*0($r_ptr)
756 mov $acc5, 8*1($r_ptr)
758 mov $acc0, 8*2($r_ptr)
759 mov $acc1, 8*3($r_ptr)
762 .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
764 ################################################################################
765 # void ecp_nistz256_sqr_mont(
769 # we optimize the square according to S.Gueron and V.Krasnov,
770 # "Speeding up Big-Number Squaring"
771 .globl ecp_nistz256_sqr_mont
772 .type ecp_nistz256_sqr_mont,\@function,2
774 ecp_nistz256_sqr_mont:
776 $code.=<<___ if ($addx);
778 and OPENSSL_ia32cap_P+8(%rip), %ecx
789 $code.=<<___ if ($addx);
794 mov 8*0($a_ptr), %rax
795 mov 8*1($a_ptr), $acc6
796 mov 8*2($a_ptr), $acc7
797 mov 8*3($a_ptr), $acc0
799 call __ecp_nistz256_sqr_montq
801 $code.=<<___ if ($addx);
806 mov 8*0($a_ptr), %rdx
807 mov 8*1($a_ptr), $acc6
808 mov 8*2($a_ptr), $acc7
809 mov 8*3($a_ptr), $acc0
810 lea -128($a_ptr), $a_ptr # control u-op density
812 call __ecp_nistz256_sqr_montx
825 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
827 .type __ecp_nistz256_sqr_montq,\@abi-omnipotent
829 __ecp_nistz256_sqr_montq:
831 mulq $acc6 # a[1]*a[0]
836 mulq $acc5 # a[0]*a[2]
842 mulq $acc5 # a[0]*a[3]
848 #################################
849 mulq $acc6 # a[1]*a[2]
855 mulq $acc6 # a[1]*a[3]
863 #################################
864 mulq $acc7 # a[2]*a[3]
867 mov 8*0($a_ptr), %rax
871 add $acc1, $acc1 # acc1:6<<1
881 mov 8*1($a_ptr), %rax
887 mov 8*2($a_ptr), %rax
894 mov 8*3($a_ptr), %rax
904 mov .Lpoly+8*1(%rip), $a_ptr
905 mov .Lpoly+8*3(%rip), $t1
907 ##########################################
914 add $acc0, $acc1 # +=acc[0]<<96
920 ##########################################
933 ##########################################
946 ###########################################
959 ############################################
960 # Add the rest of the acc
969 sub \$-1, $acc4 # .Lpoly[0]
971 sbb $a_ptr, $acc5 # .Lpoly[1]
972 sbb \$0, $acc6 # .Lpoly[2]
974 sbb $t1, $acc7 # .Lpoly[3]
979 mov $acc4, 8*0($r_ptr)
981 mov $acc5, 8*1($r_ptr)
983 mov $acc6, 8*2($r_ptr)
984 mov $acc7, 8*3($r_ptr)
987 .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
992 .type __ecp_nistz256_mul_montx,\@abi-omnipotent
994 __ecp_nistz256_mul_montx:
995 ########################################################################
997 mulx $acc1, $acc0, $acc1
998 mulx $acc2, $t0, $acc2
1000 xor $acc5, $acc5 # cf=0
1001 mulx $acc3, $t1, $acc3
1002 mov .Lpoly+8*3(%rip), $poly3
1004 mulx $acc4, $t0, $acc4
1007 shlx $poly1,$acc0,$t1
1009 shrx $poly1,$acc0,$t0
1012 ########################################################################
1013 # First reduction step
1017 mulx $poly3, $t0, $t1
1018 mov 8*1($b_ptr), %rdx
1022 xor $acc0, $acc0 # $acc0=0,cf=0,of=0
1024 ########################################################################
1026 mulx 8*0+128($a_ptr), $t0, $t1
1030 mulx 8*1+128($a_ptr), $t0, $t1
1034 mulx 8*2+128($a_ptr), $t0, $t1
1038 mulx 8*3+128($a_ptr), $t0, $t1
1041 shlx $poly1, $acc1, $t0
1043 shrx $poly1, $acc1, $t1
1049 ########################################################################
1050 # Second reduction step
1054 mulx $poly3, $t0, $t1
1055 mov 8*2($b_ptr), %rdx
1059 xor $acc1 ,$acc1 # $acc1=0,cf=0,of=0
1061 ########################################################################
1063 mulx 8*0+128($a_ptr), $t0, $t1
1067 mulx 8*1+128($a_ptr), $t0, $t1
1071 mulx 8*2+128($a_ptr), $t0, $t1
1075 mulx 8*3+128($a_ptr), $t0, $t1
1078 shlx $poly1, $acc2, $t0
1080 shrx $poly1, $acc2, $t1
1086 ########################################################################
1087 # Third reduction step
1091 mulx $poly3, $t0, $t1
1092 mov 8*3($b_ptr), %rdx
1096 xor $acc2, $acc2 # $acc2=0,cf=0,of=0
1098 ########################################################################
1100 mulx 8*0+128($a_ptr), $t0, $t1
1104 mulx 8*1+128($a_ptr), $t0, $t1
1108 mulx 8*2+128($a_ptr), $t0, $t1
1112 mulx 8*3+128($a_ptr), $t0, $t1
1115 shlx $poly1, $acc3, $t0
1117 shrx $poly1, $acc3, $t1
1123 ########################################################################
1124 # Fourth reduction step
1128 mulx $poly3, $t0, $t1
1130 mov .Lpoly+8*1(%rip), $poly1
1136 ########################################################################
1137 # Branch-less conditional subtraction of P
1140 sbb \$-1, $acc4 # .Lpoly[0]
1141 sbb $poly1, $acc5 # .Lpoly[1]
1142 sbb \$0, $acc0 # .Lpoly[2]
1144 sbb $poly3, $acc1 # .Lpoly[3]
1149 mov $acc4, 8*0($r_ptr)
1151 mov $acc5, 8*1($r_ptr)
1153 mov $acc0, 8*2($r_ptr)
1154 mov $acc1, 8*3($r_ptr)
1157 .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
1159 .type __ecp_nistz256_sqr_montx,\@abi-omnipotent
1161 __ecp_nistz256_sqr_montx:
1162 mulx $acc6, $acc1, $acc2 # a[0]*a[1]
1163 mulx $acc7, $t0, $acc3 # a[0]*a[2]
1166 mulx $acc0, $t1, $acc4 # a[0]*a[3]
1170 xor $acc5, $acc5 # $acc5=0,cf=0,of=0
1172 #################################
1173 mulx $acc7, $t0, $t1 # a[1]*a[2]
1177 mulx $acc0, $t0, $t1 # a[1]*a[3]
1183 #################################
1184 mulx $acc0, $t0, $acc6 # a[2]*a[3]
1185 mov 8*0+128($a_ptr), %rdx
1186 xor $acc7, $acc7 # $acc7=0,cf=0,of=0
1187 adcx $acc1, $acc1 # acc1:6<<1
1190 adox $acc7, $acc6 # of=0
1192 mulx %rdx, $acc0, $t1
1193 mov 8*1+128($a_ptr), %rdx
1198 mov 8*2+128($a_ptr), %rdx
1204 mov 8*3+128($a_ptr), %rdx
1214 shlx $a_ptr, $acc0, $t0
1216 shrx $a_ptr, $acc0, $t4
1217 mov .Lpoly+8*3(%rip), $t1
1223 mulx $t1, $t0, $acc0
1226 shlx $a_ptr, $acc1, $t0
1228 shrx $a_ptr, $acc1, $t4
1234 mulx $t1, $t0, $acc1
1237 shlx $a_ptr, $acc2, $t0
1239 shrx $a_ptr, $acc2, $t4
1245 mulx $t1, $t0, $acc2
1248 shlx $a_ptr, $acc3, $t0
1250 shrx $a_ptr, $acc3, $t4
1256 mulx $t1, $t0, $acc3
1261 adc $acc0, $acc4 # accumulate upper half
1262 mov .Lpoly+8*1(%rip), $a_ptr
1270 xor %eax, %eax # cf=0
1271 sbb \$-1, $acc4 # .Lpoly[0]
1273 sbb $a_ptr, $acc5 # .Lpoly[1]
1274 sbb \$0, $acc6 # .Lpoly[2]
1276 sbb $t1, $acc7 # .Lpoly[3]
1281 mov $acc4, 8*0($r_ptr)
1283 mov $acc5, 8*1($r_ptr)
1285 mov $acc6, 8*2($r_ptr)
1286 mov $acc7, 8*3($r_ptr)
1289 .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
1294 my ($r_ptr,$in_ptr)=("%rdi","%rsi");
1295 my ($acc0,$acc1,$acc2,$acc3)=map("%r$_",(8..11));
1296 my ($t0,$t1,$t2)=("%rcx","%r12","%r13");
1299 ################################################################################
1300 # void ecp_nistz256_from_mont(
1303 # This one performs Montgomery multiplication by 1, so we only need the reduction
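#
# A reference model in Python (illustrative sketch only, ad-hoc names):
# converting out of the Montgomery domain is mont_mul(a, 1) = a * R^-1 mod p
# with R = 2^256, i.e. four word-reduction steps plus one final conditional
# subtraction:
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def from_mont(a):                   # a < p, in Montgomery form
#       for _ in range(4):
#           m = a % 2**64               # n0' = 1, see the reduction comments above
#           a = (a + m * p) >> 64       # exact shift
#       return a - p if a >= p else a
#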
1305 .globl ecp_nistz256_from_mont
1306 .type ecp_nistz256_from_mont,\@function,2
1308 ecp_nistz256_from_mont:
1313 mov 8*0($in_ptr), %rax
1314 mov .Lpoly+8*3(%rip), $t2
1315 mov 8*1($in_ptr), $acc1
1316 mov 8*2($in_ptr), $acc2
1317 mov 8*3($in_ptr), $acc3
1319 mov .Lpoly+8*1(%rip), $t1
1321 #########################################
1333 #########################################
1346 ##########################################
1359 ###########################################
1373 ###########################################
1374 # Branch-less conditional subtraction
1384 cmovnz $in_ptr, $acc1
1385 mov $acc0, 8*0($r_ptr)
1387 mov $acc1, 8*1($r_ptr)
1389 mov $acc2, 8*2($r_ptr)
1390 mov $acc3, 8*3($r_ptr)
1397 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
1401 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1402 my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
1403 my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
1404 my ($M1,$T2a,$T2b,$TMP2,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
1407 ################################################################################
1408 # void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
1409 .globl ecp_nistz256_scatter_w5
1410 .type ecp_nistz256_scatter_w5,\@abi-omnipotent
1412 ecp_nistz256_scatter_w5:
1413 lea -3($index,$index,2), $index
1414 movdqa 0x00($in_t), %xmm0
1416 movdqa 0x10($in_t), %xmm1
1417 movdqa 0x20($in_t), %xmm2
1418 movdqa 0x30($in_t), %xmm3
1419 movdqa 0x40($in_t), %xmm4
1420 movdqa 0x50($in_t), %xmm5
1421 movdqa %xmm0, 0x00($val,$index)
1422 movdqa %xmm1, 0x10($val,$index)
1423 movdqa %xmm2, 0x20($val,$index)
1424 movdqa %xmm3, 0x30($val,$index)
1425 movdqa %xmm4, 0x40($val,$index)
1426 movdqa %xmm5, 0x50($val,$index)
1429 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1431 ################################################################################
1432 # void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
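#
# The gather routines exist for constant-time table lookups: every entry is
# read and masked, so the memory access pattern is independent of the secret
# index. A reference model in Python (illustrative sketch only, ad-hoc names;
# the code below does the same with pcmpeqd/pand/pxor on 128-bit lanes, with
# the running match counter seeded from .LOne):
#
#   def gather(table, index):           # index is secret, 1-based
#       out = 0
#       for i, entry in enumerate(table, start=1):
#           mask = -(i == index)        # -1 (all ones) if i == index, else 0
#           out |= entry & mask         # every entry is touched exactly once
#       return out
#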
1433 .globl ecp_nistz256_gather_w5
1434 .type ecp_nistz256_gather_w5,\@abi-omnipotent
1436 ecp_nistz256_gather_w5:
1438 $code.=<<___ if ($avx>1);
1439 mov OPENSSL_ia32cap_P+8(%rip), %eax
1441 jnz .Lavx2_gather_w5
1443 $code.=<<___ if ($win64);
1444 lea -0x88(%rsp), %rax
1445 .LSEH_begin_ecp_nistz256_gather_w5:
1446 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1447 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1448 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1449 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1450 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1451 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1452 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1453 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1454 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1455 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1456 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1459 movdqa .LOne(%rip), $ONE
1470 pshufd \$0, $INDEX, $INDEX
1473 .Lselect_loop_sse_w5:
1477 pcmpeqd $INDEX, $TMP0
1479 movdqa 16*0($in_t), $T0a
1480 movdqa 16*1($in_t), $T0b
1481 movdqa 16*2($in_t), $T0c
1482 movdqa 16*3($in_t), $T0d
1483 movdqa 16*4($in_t), $T0e
1484 movdqa 16*5($in_t), $T0f
1485 lea 16*6($in_t), $in_t
1501 jnz .Lselect_loop_sse_w5
1503 movdqu $Ra, 16*0($val)
1504 movdqu $Rb, 16*1($val)
1505 movdqu $Rc, 16*2($val)
1506 movdqu $Rd, 16*3($val)
1507 movdqu $Re, 16*4($val)
1508 movdqu $Rf, 16*5($val)
1510 $code.=<<___ if ($win64);
1511 movaps (%rsp), %xmm6
1512 movaps 0x10(%rsp), %xmm7
1513 movaps 0x20(%rsp), %xmm8
1514 movaps 0x30(%rsp), %xmm9
1515 movaps 0x40(%rsp), %xmm10
1516 movaps 0x50(%rsp), %xmm11
1517 movaps 0x60(%rsp), %xmm12
1518 movaps 0x70(%rsp), %xmm13
1519 movaps 0x80(%rsp), %xmm14
1520 movaps 0x90(%rsp), %xmm15
1521 lea 0xa8(%rsp), %rsp
1525 .LSEH_end_ecp_nistz256_gather_w5:
1526 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1528 ################################################################################
1529 # void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
1530 .globl ecp_nistz256_scatter_w7
1531 .type ecp_nistz256_scatter_w7,\@abi-omnipotent
1533 ecp_nistz256_scatter_w7:
1534 movdqu 0x00($in_t), %xmm0
1536 movdqu 0x10($in_t), %xmm1
1537 movdqu 0x20($in_t), %xmm2
1538 movdqu 0x30($in_t), %xmm3
1539 movdqa %xmm0, 0x00($val,$index)
1540 movdqa %xmm1, 0x10($val,$index)
1541 movdqa %xmm2, 0x20($val,$index)
1542 movdqa %xmm3, 0x30($val,$index)
1545 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1547 ################################################################################
1548 # void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
1549 .globl ecp_nistz256_gather_w7
1550 .type ecp_nistz256_gather_w7,\@abi-omnipotent
1552 ecp_nistz256_gather_w7:
1554 $code.=<<___ if ($avx>1);
1555 mov OPENSSL_ia32cap_P+8(%rip), %eax
1557 jnz .Lavx2_gather_w7
1559 $code.=<<___ if ($win64);
1560 lea -0x88(%rsp), %rax
1561 .LSEH_begin_ecp_nistz256_gather_w7:
1562 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1563 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1564 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1565 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1566 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1567 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1568 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1569 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1570 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1571 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1572 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1575 movdqa .LOne(%rip), $M0
1584 pshufd \$0, $INDEX, $INDEX
1587 .Lselect_loop_sse_w7:
1590 movdqa 16*0($in_t), $T0a
1591 movdqa 16*1($in_t), $T0b
1592 pcmpeqd $INDEX, $TMP0
1593 movdqa 16*2($in_t), $T0c
1594 movdqa 16*3($in_t), $T0d
1595 lea 16*4($in_t), $in_t
1604 prefetcht0 255($in_t)
1608 jnz .Lselect_loop_sse_w7
1610 movdqu $Ra, 16*0($val)
1611 movdqu $Rb, 16*1($val)
1612 movdqu $Rc, 16*2($val)
1613 movdqu $Rd, 16*3($val)
1615 $code.=<<___ if ($win64);
1616 movaps (%rsp), %xmm6
1617 movaps 0x10(%rsp), %xmm7
1618 movaps 0x20(%rsp), %xmm8
1619 movaps 0x30(%rsp), %xmm9
1620 movaps 0x40(%rsp), %xmm10
1621 movaps 0x50(%rsp), %xmm11
1622 movaps 0x60(%rsp), %xmm12
1623 movaps 0x70(%rsp), %xmm13
1624 movaps 0x80(%rsp), %xmm14
1625 movaps 0x90(%rsp), %xmm15
1626 lea 0xa8(%rsp), %rsp
1630 .LSEH_end_ecp_nistz256_gather_w7:
1631 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1635 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1636 my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
1637 my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
1638 my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));
1641 ################################################################################
1642 # void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
1643 .type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
1645 ecp_nistz256_avx2_gather_w5:
1649 $code.=<<___ if ($win64);
1650 lea -0x88(%rsp), %rax
1652 .LSEH_begin_ecp_nistz256_avx2_gather_w5:
1653 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
1654 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
1655 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
1656 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 0(%rax)
1657 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
1658 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
1659 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
1660 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
1661 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
1662 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
1663 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
1666 vmovdqa .LTwo(%rip), $TWO
1672 vmovdqa .LOne(%rip), $M0
1673 vmovdqa .LTwo(%rip), $M1
1676 vpermd $INDEX, $Ra, $INDEX
1679 .Lselect_loop_avx2_w5:
1681 vmovdqa 32*0($in_t), $T0a
1682 vmovdqa 32*1($in_t), $T0b
1683 vmovdqa 32*2($in_t), $T0c
1685 vmovdqa 32*3($in_t), $T1a
1686 vmovdqa 32*4($in_t), $T1b
1687 vmovdqa 32*5($in_t), $T1c
1689 vpcmpeqd $INDEX, $M0, $TMP0
1690 vpcmpeqd $INDEX, $M1, $TMP1
1692 vpaddd $TWO, $M0, $M0
1693 vpaddd $TWO, $M1, $M1
1694 lea 32*6($in_t), $in_t
1696 vpand $TMP0, $T0a, $T0a
1697 vpand $TMP0, $T0b, $T0b
1698 vpand $TMP0, $T0c, $T0c
1699 vpand $TMP1, $T1a, $T1a
1700 vpand $TMP1, $T1b, $T1b
1701 vpand $TMP1, $T1c, $T1c
1703 vpxor $T0a, $Ra, $Ra
1704 vpxor $T0b, $Rb, $Rb
1705 vpxor $T0c, $Rc, $Rc
1706 vpxor $T1a, $Ra, $Ra
1707 vpxor $T1b, $Rb, $Rb
1708 vpxor $T1c, $Rc, $Rc
1711 jnz .Lselect_loop_avx2_w5
1713 vmovdqu $Ra, 32*0($val)
1714 vmovdqu $Rb, 32*1($val)
1715 vmovdqu $Rc, 32*2($val)
1718 $code.=<<___ if ($win64);
1719 movaps (%rsp), %xmm6
1720 movaps 0x10(%rsp), %xmm7
1721 movaps 0x20(%rsp), %xmm8
1722 movaps 0x30(%rsp), %xmm9
1723 movaps 0x40(%rsp), %xmm10
1724 movaps 0x50(%rsp), %xmm11
1725 movaps 0x60(%rsp), %xmm12
1726 movaps 0x70(%rsp), %xmm13
1727 movaps 0x80(%rsp), %xmm14
1728 movaps 0x90(%rsp), %xmm15
1733 .LSEH_end_ecp_nistz256_avx2_gather_w5:
1734 .size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
1738 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1739 my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
1740 my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
1741 my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
1742 my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));
1746 ################################################################################
1747 # void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
1748 .globl ecp_nistz256_avx2_gather_w7
1749 .type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
1751 ecp_nistz256_avx2_gather_w7:
1755 $code.=<<___ if ($win64);
1757 lea -0x88(%rsp), %rax
1758 .LSEH_begin_ecp_nistz256_avx2_gather_w7:
1759 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
1760 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
1761 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
1762 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 0(%rax)
1763 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
1764 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
1765 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
1766 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
1767 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
1768 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
1769 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
1772 vmovdqa .LThree(%rip), $THREE
1777 vmovdqa .LOne(%rip), $M0
1778 vmovdqa .LTwo(%rip), $M1
1779 vmovdqa .LThree(%rip), $M2
1782 vpermd $INDEX, $Ra, $INDEX
1783 # Skip index = 0, because it is implicitly the point at infinity
1786 .Lselect_loop_avx2_w7:
1788 vmovdqa 32*0($in_t), $T0a
1789 vmovdqa 32*1($in_t), $T0b
1791 vmovdqa 32*2($in_t), $T1a
1792 vmovdqa 32*3($in_t), $T1b
1794 vmovdqa 32*4($in_t), $T2a
1795 vmovdqa 32*5($in_t), $T2b
1797 vpcmpeqd $INDEX, $M0, $TMP0
1798 vpcmpeqd $INDEX, $M1, $TMP1
1799 vpcmpeqd $INDEX, $M2, $TMP2
1801 vpaddd $THREE, $M0, $M0
1802 vpaddd $THREE, $M1, $M1
1803 vpaddd $THREE, $M2, $M2
1804 lea 32*6($in_t), $in_t
1806 vpand $TMP0, $T0a, $T0a
1807 vpand $TMP0, $T0b, $T0b
1808 vpand $TMP1, $T1a, $T1a
1809 vpand $TMP1, $T1b, $T1b
1810 vpand $TMP2, $T2a, $T2a
1811 vpand $TMP2, $T2b, $T2b
1813 vpxor $T0a, $Ra, $Ra
1814 vpxor $T0b, $Rb, $Rb
1815 vpxor $T1a, $Ra, $Ra
1816 vpxor $T1b, $Rb, $Rb
1817 vpxor $T2a, $Ra, $Ra
1818 vpxor $T2b, $Rb, $Rb
1821 jnz .Lselect_loop_avx2_w7
1824 vmovdqa 32*0($in_t), $T0a
1825 vmovdqa 32*1($in_t), $T0b
1827 vpcmpeqd $INDEX, $M0, $TMP0
1829 vpand $TMP0, $T0a, $T0a
1830 vpand $TMP0, $T0b, $T0b
1832 vpxor $T0a, $Ra, $Ra
1833 vpxor $T0b, $Rb, $Rb
1835 vmovdqu $Ra, 32*0($val)
1836 vmovdqu $Rb, 32*1($val)
1839 $code.=<<___ if ($win64);
1840 movaps (%rsp), %xmm6
1841 movaps 0x10(%rsp), %xmm7
1842 movaps 0x20(%rsp), %xmm8
1843 movaps 0x30(%rsp), %xmm9
1844 movaps 0x40(%rsp), %xmm10
1845 movaps 0x50(%rsp), %xmm11
1846 movaps 0x60(%rsp), %xmm12
1847 movaps 0x70(%rsp), %xmm13
1848 movaps 0x80(%rsp), %xmm14
1849 movaps 0x90(%rsp), %xmm15
1854 .LSEH_end_ecp_nistz256_avx2_gather_w7:
1855 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1859 .globl ecp_nistz256_avx2_gather_w7
1860 .type ecp_nistz256_avx2_gather_w7,\@function,3
1862 ecp_nistz256_avx2_gather_w7:
1863 .byte 0x0f,0x0b # ud2
1865 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1869 ########################################################################
1870 # This block implements the higher-level point_double, point_add and
1871 # point_add_affine routines. The key to performance here is to let the
1872 # out-of-order execution logic overlap computations from the next step
1873 # with tail processing from the current step. By using a tailored calling
1874 # sequence we minimize inter-step overhead and give the processor a better
1875 # shot at overlapping operations...
1877 # You will notice that the input data is copied to the stack. The trouble
1878 # is that there are no registers to spare for holding the original pointers,
1879 # and reloading those pointers would create undesired dependencies on the
1880 # effective-address calculation paths. In other words, it's all done to
1881 # favour out-of-order execution logic.
1882 # <appro@openssl.org>
1884 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
1885 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
1886 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
1887 my ($poly1,$poly3)=($acc6,$acc7);
1889 sub load_for_mul () {
1890 my ($a,$b,$src0) = @_;
1891 my $bias = $src0 eq "%rax" ? 0 : -128;
1897 lea $bias+$a, $a_ptr
1902 sub load_for_sqr () {
1904 my $bias = $src0 eq "%rax" ? 0 : -128;
1908 lea $bias+$a, $a_ptr
1914 ########################################################################
1915 # operate in 4-5-0-1 "name space" that matches multiplication output
1917 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
1920 .type __ecp_nistz256_add_toq,\@abi-omnipotent
1922 __ecp_nistz256_add_toq:
1924 add 8*0($b_ptr), $a0
1925 adc 8*1($b_ptr), $a1
1927 adc 8*2($b_ptr), $a2
1928 adc 8*3($b_ptr), $a3
1942 mov $a0, 8*0($r_ptr)
1944 mov $a1, 8*1($r_ptr)
1946 mov $a2, 8*2($r_ptr)
1947 mov $a3, 8*3($r_ptr)
1950 .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
1952 .type __ecp_nistz256_sub_fromq,\@abi-omnipotent
1954 __ecp_nistz256_sub_fromq:
1955 sub 8*0($b_ptr), $a0
1956 sbb 8*1($b_ptr), $a1
1958 sbb 8*2($b_ptr), $a2
1959 sbb 8*3($b_ptr), $a3
1973 mov $a0, 8*0($r_ptr)
1975 mov $a1, 8*1($r_ptr)
1977 mov $a2, 8*2($r_ptr)
1978 mov $a3, 8*3($r_ptr)
1981 .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
1983 .type __ecp_nistz256_subq,\@abi-omnipotent
1985 __ecp_nistz256_subq:
2008 .size __ecp_nistz256_subq,.-__ecp_nistz256_subq
2010 .type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
2012 __ecp_nistz256_mul_by_2q:
2014 add $a0, $a0 # a0:a3+a0:a3
2032 mov $a0, 8*0($r_ptr)
2034 mov $a1, 8*1($r_ptr)
2036 mov $a2, 8*2($r_ptr)
2037 mov $a3, 8*3($r_ptr)
2040 .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
2045 my ($src0,$sfx,$bias);
2046 my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
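#
# For orientation, the sequence of p256_* calls below implements the usual
# Jacobian doubling formulas for a = -3 curves. A reference model in Python
# over plain integers (illustrative sketch only, ad-hoc names; the real code
# keeps everything in the Montgomery domain):
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def point_double(X, Y, Z):
#       S     = 2 * Y % p
#       Zsqr  = Z * Z % p
#       S     = S * S % p                     # 4*Y^2
#       res_z = 2 * Y * Z % p
#       M     = (X + Zsqr) * (X - Zsqr) % p   # X^2 - Z^4
#       M     = 3 * M % p
#       res_y = S * S * ((p + 1) // 2) % p    # 8*Y^4 (16*Y^4 halved mod p)
#       S     = S * X % p                     # 4*X*Y^2
#       res_x = (M * M - 2 * S) % p
#       res_y = (M * (S - res_x) - res_y) % p
#       return res_x, res_y, res_z
#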
2054 .globl ecp_nistz256_point_double
2055 .type ecp_nistz256_point_double,\@function,2
2057 ecp_nistz256_point_double:
2059 $code.=<<___ if ($addx);
2061 and OPENSSL_ia32cap_P+8(%rip), %ecx
2071 .type ecp_nistz256_point_doublex,\@function,2
2073 ecp_nistz256_point_doublex:
2085 .Lpoint_double${x}_body:
2087 .Lpoint_double_shortcut$x:
2088 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
2089 mov $a_ptr, $b_ptr # backup copy
2090 movdqu 0x10($a_ptr), %xmm1
2091 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
2092 mov 0x20+8*1($a_ptr), $acc5
2093 mov 0x20+8*2($a_ptr), $acc0
2094 mov 0x20+8*3($a_ptr), $acc1
2095 mov .Lpoly+8*1(%rip), $poly1
2096 mov .Lpoly+8*3(%rip), $poly3
2097 movdqa %xmm0, $in_x(%rsp)
2098 movdqa %xmm1, $in_x+0x10(%rsp)
2099 lea 0x20($r_ptr), $acc2
2100 lea 0x40($r_ptr), $acc3
2105 lea $S(%rsp), $r_ptr
2106 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(S, in_y);
2108 mov 0x40+8*0($a_ptr), $src0
2109 mov 0x40+8*1($a_ptr), $acc6
2110 mov 0x40+8*2($a_ptr), $acc7
2111 mov 0x40+8*3($a_ptr), $acc0
2112 lea 0x40-$bias($a_ptr), $a_ptr
2113 lea $Zsqr(%rsp), $r_ptr
2114 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Zsqr, in_z);
2116 `&load_for_sqr("$S(%rsp)", "$src0")`
2117 lea $S(%rsp), $r_ptr
2118 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(S, S);
2120 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
2121 mov 0x40+8*0($b_ptr), $acc1
2122 mov 0x40+8*1($b_ptr), $acc2
2123 mov 0x40+8*2($b_ptr), $acc3
2124 mov 0x40+8*3($b_ptr), $acc4
2125 lea 0x40-$bias($b_ptr), $a_ptr
2126 lea 0x20($b_ptr), $b_ptr
2128 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, in_z, in_y);
2129 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(res_z, res_z);
2131 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2132 mov $in_x+8*1(%rsp), $acc5
2133 lea $Zsqr(%rsp), $b_ptr
2134 mov $in_x+8*2(%rsp), $acc0
2135 mov $in_x+8*3(%rsp), $acc1
2136 lea $M(%rsp), $r_ptr
2137 call __ecp_nistz256_add_to$x # p256_add(M, in_x, Zsqr);
2139 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2140 mov $in_x+8*1(%rsp), $acc5
2141 lea $Zsqr(%rsp), $b_ptr
2142 mov $in_x+8*2(%rsp), $acc0
2143 mov $in_x+8*3(%rsp), $acc1
2144 lea $Zsqr(%rsp), $r_ptr
2145 call __ecp_nistz256_sub_from$x # p256_sub(Zsqr, in_x, Zsqr);
2147 `&load_for_sqr("$S(%rsp)", "$src0")`
2149 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
2152 ######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
2153 # operate in 4-5-6-7 "name space" that matches squaring output
2155 my ($poly1,$poly3)=($a_ptr,$t1);
2156 my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);
2169 xor $a_ptr, $a_ptr # borrow $a_ptr
2178 mov $a1, $t0 # a0:a3>>1
2189 mov $a0, 8*0($r_ptr)
2191 mov $a1, 8*1($r_ptr)
2195 mov $a2, 8*2($r_ptr)
2196 mov $a3, 8*3($r_ptr)
2200 `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
2201 lea $M(%rsp), $r_ptr
2202 call __ecp_nistz256_mul_mont$x # p256_mul_mont(M, M, Zsqr);
2204 lea $tmp0(%rsp), $r_ptr
2205 call __ecp_nistz256_mul_by_2$x
2207 lea $M(%rsp), $b_ptr
2208 lea $M(%rsp), $r_ptr
2209 call __ecp_nistz256_add_to$x # p256_mul_by_3(M, M);
2211 `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
2212 lea $S(%rsp), $r_ptr
2213 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, in_x);
2215 lea $tmp0(%rsp), $r_ptr
2216 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, S);
2218 `&load_for_sqr("$M(%rsp)", "$src0")`
2220 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_x, M);
2222 lea $tmp0(%rsp), $b_ptr
2223 mov $acc6, $acc0 # harmonize sqr output and sub input
2227 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, tmp0);
2229 mov $S+8*0(%rsp), $t0
2230 mov $S+8*1(%rsp), $t1
2231 mov $S+8*2(%rsp), $t2
2232 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
2233 lea $S(%rsp), $r_ptr
2234 call __ecp_nistz256_sub$x # p256_sub(S, S, res_x);
2237 lea $M(%rsp), $b_ptr
2238 mov $acc4, $acc6 # harmonize sub output and mul input
2240 mov $acc4, $S+8*0(%rsp) # have to save:-(
2242 mov $acc5, $S+8*1(%rsp)
2244 mov $acc0, $S+8*2(%rsp)
2245 lea $S-$bias(%rsp), $a_ptr
2247 mov $acc1, $S+8*3(%rsp)
2249 lea $S(%rsp), $r_ptr
2250 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, M);
2254 call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
2256 lea 32*5+56(%rsp), %rsi
2264 .Lpoint_double${x}_epilogue:
2266 .size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
2273 my ($src0,$sfx,$bias);
2274 my ($H,$Hsqr,$R,$Rsqr,$Hcub,
2276 $res_x,$res_y,$res_z,
2277 $in1_x,$in1_y,$in1_z,
2278 $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
2279 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
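#
# For orientation, the generic (no-doubling, no-infinity) path below implements
# the usual Jacobian addition formulas. A reference model in Python over plain
# integers (illustrative sketch only, ad-hoc names; the real code works in the
# Montgomery domain and additionally detects the doubling and infinity cases,
# selecting the proper result with masks):
#
#   p = 2**256 - 2**224 + 2**192 + 2**96 - 1
#   def point_add(X1, Y1, Z1, X2, Y2, Z2):
#       Z2sqr = Z2 * Z2 % p
#       Z1sqr = Z1 * Z1 % p
#       S1 = Y1 * Z2 * Z2sqr % p              # Y1*Z2^3
#       S2 = Y2 * Z1 * Z1sqr % p              # Y2*Z1^3
#       R  = (S2 - S1) % p
#       U1 = X1 * Z2sqr % p                   # X1*Z2^2
#       U2 = X2 * Z1sqr % p                   # X2*Z1^2
#       H  = (U2 - U1) % p
#       res_z = H * Z1 * Z2 % p
#       Hsqr  = H * H % p
#       Hcub  = Hsqr * H % p
#       U2    = U1 * Hsqr % p                 # U1*H^2
#       res_x = (R * R - Hcub - 2 * U2) % p
#       res_y = (R * (U2 - res_x) - S1 * Hcub) % p
#       return res_x, res_y, res_z
#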
2287 .globl ecp_nistz256_point_add
2288 .type ecp_nistz256_point_add,\@function,3
2290 ecp_nistz256_point_add:
2292 $code.=<<___ if ($addx);
2294 and OPENSSL_ia32cap_P+8(%rip), %ecx
2304 .type ecp_nistz256_point_addx,\@function,3
2306 ecp_nistz256_point_addx:
2318 .Lpoint_add${x}_body:
2320 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2321 movdqu 0x10($a_ptr), %xmm1
2322 movdqu 0x20($a_ptr), %xmm2
2323 movdqu 0x30($a_ptr), %xmm3
2324 movdqu 0x40($a_ptr), %xmm4
2325 movdqu 0x50($a_ptr), %xmm5
2326 mov $a_ptr, $b_ptr # reassign
2327 mov $b_org, $a_ptr # reassign
2328 movdqa %xmm0, $in1_x(%rsp)
2329 movdqa %xmm1, $in1_x+0x10(%rsp)
2330 movdqa %xmm2, $in1_y(%rsp)
2331 movdqa %xmm3, $in1_y+0x10(%rsp)
2332 movdqa %xmm4, $in1_z(%rsp)
2333 movdqa %xmm5, $in1_z+0x10(%rsp)
2336 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$b_ptr
2337 pshufd \$0xb1, %xmm5, %xmm3
2338 movdqu 0x10($a_ptr), %xmm1
2339 movdqu 0x20($a_ptr), %xmm2
2341 movdqu 0x30($a_ptr), %xmm3
2342 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
2343 mov 0x40+8*1($a_ptr), $acc6
2344 mov 0x40+8*2($a_ptr), $acc7
2345 mov 0x40+8*3($a_ptr), $acc0
2346 movdqa %xmm0, $in2_x(%rsp)
2347 pshufd \$0x1e, %xmm5, %xmm4
2348 movdqa %xmm1, $in2_x+0x10(%rsp)
2349 movdqu 0x40($a_ptr),%xmm0 # in2_z again
2350 movdqu 0x50($a_ptr),%xmm1
2351 movdqa %xmm2, $in2_y(%rsp)
2352 movdqa %xmm3, $in2_y+0x10(%rsp)
2356 movq $r_ptr, %xmm0 # save $r_ptr
2358 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2359 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
2360 mov $acc6, $in2_z+8*1(%rsp)
2361 mov $acc7, $in2_z+8*2(%rsp)
2362 mov $acc0, $in2_z+8*3(%rsp)
2363 lea $Z2sqr(%rsp), $r_ptr # Z2^2
2364 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z2sqr, in2_z);
2366 pcmpeqd %xmm4, %xmm5
2367 pshufd \$0xb1, %xmm1, %xmm4
2369 pshufd \$0, %xmm5, %xmm5 # in1infty
2370 pshufd \$0x1e, %xmm4, %xmm3
2373 pcmpeqd %xmm3, %xmm4
2374 pshufd \$0, %xmm4, %xmm4 # in2infty
2375 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
2376 mov 0x40+8*1($b_ptr), $acc6
2377 mov 0x40+8*2($b_ptr), $acc7
2378 mov 0x40+8*3($b_ptr), $acc0
2381 lea 0x40-$bias($b_ptr), $a_ptr
2382 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2383 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2385 `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
2386 lea $S1(%rsp), $r_ptr # S1 = Z2^3
2387 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, Z2sqr, in2_z);
2389 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2390 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2391 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2393 `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
2394 lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
2395 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, S1, in1_y);
2397 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2398 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2399 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2401 lea $S1(%rsp), $b_ptr
2402 lea $R(%rsp), $r_ptr # R = S2 - S1
2403 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, S1);
2405 or $acc5, $acc4 # see if result is zero
2409 por %xmm5, %xmm2 # in1infty || in2infty
2412 `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2413 lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
2414 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U1, in1_x, Z2sqr);
2416 `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
2417 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2418 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in2_x, Z1sqr);
2420 lea $U1(%rsp), $b_ptr
2421 lea $H(%rsp), $r_ptr # H = U2 - U1
2422 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, U1);
2424 or $acc5, $acc4 # see if result is zero
2428 .byte 0x3e # predict taken
2429 jnz .Ladd_proceed$x # is_equal(U1,U2)?
2433 jnz .Ladd_proceed$x # (in1infty || in2infty)?
2435 jz .Ladd_double$x # is_equal(S1,S2)?
2437 movq %xmm0, $r_ptr # restore $r_ptr
2439 movdqu %xmm0, 0x00($r_ptr)
2440 movdqu %xmm0, 0x10($r_ptr)
2441 movdqu %xmm0, 0x20($r_ptr)
2442 movdqu %xmm0, 0x30($r_ptr)
2443 movdqu %xmm0, 0x40($r_ptr)
2444 movdqu %xmm0, 0x50($r_ptr)
2449 movq %xmm1, $a_ptr # restore $a_ptr
2450 movq %xmm0, $r_ptr # restore $r_ptr
2451 add \$`32*(18-5)`, %rsp # difference in frame sizes
2452 jmp .Lpoint_double_shortcut$x
2456 `&load_for_sqr("$R(%rsp)", "$src0")`
2457 lea $Rsqr(%rsp), $r_ptr # R^2
2458 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2460 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2461 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2462 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2464 `&load_for_sqr("$H(%rsp)", "$src0")`
2465 lea $Hsqr(%rsp), $r_ptr # H^2
2466 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2468 `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
2469 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2470 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, res_z, in2_z);
2472 `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
2473 lea $Hcub(%rsp), $r_ptr # H^3
2474 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2476 `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
2477 lea $U2(%rsp), $r_ptr # U1*H^2
2478 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, U1, Hsqr);
2481 #######################################################################
2482 # operate in 4-5-0-1 "name space" that matches multiplication output
2484 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2485 my ($poly1, $poly3)=($acc6,$acc7);
2488 #lea $U2(%rsp), $a_ptr
2489 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2490 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2493 add $acc0, $acc0 # a0:a3+a0:a3
2494 lea $Rsqr(%rsp), $a_ptr
2511 mov 8*0($a_ptr), $t0
2513 mov 8*1($a_ptr), $t1
2515 mov 8*2($a_ptr), $t2
2517 mov 8*3($a_ptr), $t3
2519 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2521 lea $Hcub(%rsp), $b_ptr
2522 lea $res_x(%rsp), $r_ptr
2523 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2525 mov $U2+8*0(%rsp), $t0
2526 mov $U2+8*1(%rsp), $t1
2527 mov $U2+8*2(%rsp), $t2
2528 mov $U2+8*3(%rsp), $t3
2529 lea $res_y(%rsp), $r_ptr
2531 call __ecp_nistz256_sub$x # p256_sub(res_y, U2, res_x);
2533 mov $acc0, 8*0($r_ptr) # save the result, as
2534 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't store it
2535 mov $acc2, 8*2($r_ptr)
2536 mov $acc3, 8*3($r_ptr)
2540 `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
2541 lea $S2(%rsp), $r_ptr
2542 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S1, Hcub);
2544 `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
2545 lea $res_y(%rsp), $r_ptr
2546 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_y, R, res_y);
2548 lea $S2(%rsp), $b_ptr
2549 lea $res_y(%rsp), $r_ptr
2550 call __ecp_nistz256_sub_from$x # p256_sub(res_y, res_y, S2);
2552 movq %xmm0, $r_ptr # restore $r_ptr
2554 movdqa %xmm5, %xmm0 # copy_conditional(res_z, in2_z, in1infty);
2556 pandn $res_z(%rsp), %xmm0
2558 pandn $res_z+0x10(%rsp), %xmm1
2560 pand $in2_z(%rsp), %xmm2
2561 pand $in2_z+0x10(%rsp), %xmm3
2565 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2571 pand $in1_z(%rsp), %xmm2
2572 pand $in1_z+0x10(%rsp), %xmm3
2575 movdqu %xmm2, 0x40($r_ptr)
2576 movdqu %xmm3, 0x50($r_ptr)
2578 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2580 pandn $res_x(%rsp), %xmm0
2582 pandn $res_x+0x10(%rsp), %xmm1
2584 pand $in2_x(%rsp), %xmm2
2585 pand $in2_x+0x10(%rsp), %xmm3
2589 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2595 pand $in1_x(%rsp), %xmm2
2596 pand $in1_x+0x10(%rsp), %xmm3
2599 movdqu %xmm2, 0x00($r_ptr)
2600 movdqu %xmm3, 0x10($r_ptr)
2602 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2604 pandn $res_y(%rsp), %xmm0
2606 pandn $res_y+0x10(%rsp), %xmm1
2608 pand $in2_y(%rsp), %xmm2
2609 pand $in2_y+0x10(%rsp), %xmm3
2613 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2619 pand $in1_y(%rsp), %xmm2
2620 pand $in1_y+0x10(%rsp), %xmm3
2623 movdqu %xmm2, 0x20($r_ptr)
2624 movdqu %xmm3, 0x30($r_ptr)
2627 lea 32*18+56(%rsp), %rsi
2635 .Lpoint_add${x}_epilogue:
2637 .size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
2642 sub gen_add_affine () {
2644 my ($src0,$sfx,$bias);
2645 my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
2646 $res_x,$res_y,$res_z,
2647 $in1_x,$in1_y,$in1_z,
2648 $in2_x,$in2_y)=map(32*$_,(0..14));
2657 .globl ecp_nistz256_point_add_affine
2658 .type ecp_nistz256_point_add_affine,\@function,3
2660 ecp_nistz256_point_add_affine:
2662 $code.=<<___ if ($addx);
2664 and OPENSSL_ia32cap_P+8(%rip), %ecx
2666 je .Lpoint_add_affinex
2674 .type ecp_nistz256_point_add_affinex,\@function,3
2676 ecp_nistz256_point_add_affinex:
2677 .Lpoint_add_affinex:
2688 .Ladd_affine${x}_body:
2690 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2691 mov $b_org, $b_ptr # reassign
2692 movdqu 0x10($a_ptr), %xmm1
2693 movdqu 0x20($a_ptr), %xmm2
2694 movdqu 0x30($a_ptr), %xmm3
2695 movdqu 0x40($a_ptr), %xmm4
2696 movdqu 0x50($a_ptr), %xmm5
2697 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
2698 mov 0x40+8*1($a_ptr), $acc6
2699 mov 0x40+8*2($a_ptr), $acc7
2700 mov 0x40+8*3($a_ptr), $acc0
2701 movdqa %xmm0, $in1_x(%rsp)
2702 movdqa %xmm1, $in1_x+0x10(%rsp)
2703 movdqa %xmm2, $in1_y(%rsp)
2704 movdqa %xmm3, $in1_y+0x10(%rsp)
2705 movdqa %xmm4, $in1_z(%rsp)
2706 movdqa %xmm5, $in1_z+0x10(%rsp)
2709 movdqu 0x00($b_ptr), %xmm0 # copy *(P256_POINT_AFFINE *)$b_ptr
2710 pshufd \$0xb1, %xmm5, %xmm3
2711 movdqu 0x10($b_ptr), %xmm1
2712 movdqu 0x20($b_ptr), %xmm2
2714 movdqu 0x30($b_ptr), %xmm3
2715 movdqa %xmm0, $in2_x(%rsp)
2716 pshufd \$0x1e, %xmm5, %xmm4
2717 movdqa %xmm1, $in2_x+0x10(%rsp)
2719 movq $r_ptr, %xmm0 # save $r_ptr
2720 movdqa %xmm2, $in2_y(%rsp)
2721 movdqa %xmm3, $in2_y+0x10(%rsp)
2727 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2728 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2729 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2731 pcmpeqd %xmm4, %xmm5
2732 pshufd \$0xb1, %xmm3, %xmm4
2733 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
2734 #lea 0x00($b_ptr), $b_ptr
2735 mov $acc4, $acc1 # harmonize sqr output and mul input
2737 pshufd \$0, %xmm5, %xmm5 # in1infty
2738 pshufd \$0x1e, %xmm4, %xmm3
2743 pcmpeqd %xmm3, %xmm4
2744 pshufd \$0, %xmm4, %xmm4 # in2infty
2746 lea $Z1sqr-$bias(%rsp), $a_ptr
2748 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2749 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, Z1sqr, in2_x);
2751 lea $in1_x(%rsp), $b_ptr
2752 lea $H(%rsp), $r_ptr # H = U2 - U1
2753 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, in1_x);
2755 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2756 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2757 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2759 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2760 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2761 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2763 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2764 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2765 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2767 lea $in1_y(%rsp), $b_ptr
2768 lea $R(%rsp), $r_ptr # R = S2 - S1
2769 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, in1_y);
2771 `&load_for_sqr("$H(%rsp)", "$src0")`
2772 lea $Hsqr(%rsp), $r_ptr # H^2
2773 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2775 `&load_for_sqr("$R(%rsp)", "$src0")`
2776 lea $Rsqr(%rsp), $r_ptr # R^2
2777 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2779 `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
2780 lea $Hcub(%rsp), $r_ptr # H^3
2781 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2783 `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2784 lea $U2(%rsp), $r_ptr # U1*H^2
2785 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in1_x, Hsqr);
2788 #######################################################################
2789 # operate in 4-5-0-1 "name space" that matches multiplication output
2791 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2792 my ($poly1, $poly3)=($acc6,$acc7);
2795 #lea $U2(%rsp), $a_ptr
2796 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2797 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2800 add $acc0, $acc0 # a0:a3+a0:a3
2801 lea $Rsqr(%rsp), $a_ptr
2818 mov 8*0($a_ptr), $t0
2820 mov 8*1($a_ptr), $t1
2822 mov 8*2($a_ptr), $t2
2824 mov 8*3($a_ptr), $t3
2826 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2828 lea $Hcub(%rsp), $b_ptr
2829 lea $res_x(%rsp), $r_ptr
2830 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2832 mov $U2+8*0(%rsp), $t0
2833 mov $U2+8*1(%rsp), $t1
2834 mov $U2+8*2(%rsp), $t2
2835 mov $U2+8*3(%rsp), $t3
2836 lea $H(%rsp), $r_ptr
2838 call __ecp_nistz256_sub$x # p256_sub(H, U2, res_x);
2840 mov $acc0, 8*0($r_ptr) # save the result, as
2841 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't store it
2842 mov $acc2, 8*2($r_ptr)
2843 mov $acc3, 8*3($r_ptr)
2847 `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
2848 lea $S2(%rsp), $r_ptr
2849 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Hcub, in1_y);
2851 `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
2852 lea $H(%rsp), $r_ptr
2853 call __ecp_nistz256_mul_mont$x # p256_mul_mont(H, H, R);
2855 lea $S2(%rsp), $b_ptr
2856 lea $res_y(%rsp), $r_ptr
2857 call __ecp_nistz256_sub_from$x # p256_sub(res_y, H, S2);
2859 movq %xmm0, $r_ptr # restore $r_ptr
2861 movdqa %xmm5, %xmm0 # copy_conditional(res_z, ONE, in1infty);
2863 pandn $res_z(%rsp), %xmm0
2865 pandn $res_z+0x10(%rsp), %xmm1
2867 pand .LONE_mont(%rip), %xmm2
2868 pand .LONE_mont+0x10(%rip), %xmm3
2872 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2878 pand $in1_z(%rsp), %xmm2
2879 pand $in1_z+0x10(%rsp), %xmm3
2882 movdqu %xmm2, 0x40($r_ptr)
2883 movdqu %xmm3, 0x50($r_ptr)
2885 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2887 pandn $res_x(%rsp), %xmm0
2889 pandn $res_x+0x10(%rsp), %xmm1
2891 pand $in2_x(%rsp), %xmm2
2892 pand $in2_x+0x10(%rsp), %xmm3
2896 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2902 pand $in1_x(%rsp), %xmm2
2903 pand $in1_x+0x10(%rsp), %xmm3
2906 movdqu %xmm2, 0x00($r_ptr)
2907 movdqu %xmm3, 0x10($r_ptr)
2909 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2911 pandn $res_y(%rsp), %xmm0
2913 pandn $res_y+0x10(%rsp), %xmm1
2915 pand $in2_y(%rsp), %xmm2
2916 pand $in2_y+0x10(%rsp), %xmm3
2920 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2926 pand $in1_y(%rsp), %xmm2
2927 pand $in1_y+0x10(%rsp), %xmm3
2930 movdqu %xmm2, 0x20($r_ptr)
2931 movdqu %xmm3, 0x30($r_ptr)
2933 lea 32*15+56(%rsp), %rsi
2941 .Ladd_affine${x}_epilogue:
2943 .size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
2946 &gen_add_affine("q");
2948 ########################################################################
2952 ########################################################################
2953 # operate in 4-5-0-1 "name space" that matches multiplication output
2955 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2958 .type __ecp_nistz256_add_tox,\@abi-omnipotent
2960 __ecp_nistz256_add_tox:
2962 adc 8*0($b_ptr), $a0
2963 adc 8*1($b_ptr), $a1
2965 adc 8*2($b_ptr), $a2
2966 adc 8*3($b_ptr), $a3
2981 mov $a0, 8*0($r_ptr)
2983 mov $a1, 8*1($r_ptr)
2985 mov $a2, 8*2($r_ptr)
2986 mov $a3, 8*3($r_ptr)
2989 .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
2991 .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
2993 __ecp_nistz256_sub_fromx:
2995 sbb 8*0($b_ptr), $a0
2996 sbb 8*1($b_ptr), $a1
2998 sbb 8*2($b_ptr), $a2
2999 sbb 8*3($b_ptr), $a3
3014 mov $a0, 8*0($r_ptr)
3016 mov $a1, 8*1($r_ptr)
3018 mov $a2, 8*2($r_ptr)
3019 mov $a3, 8*3($r_ptr)
3022 .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
3024 .type __ecp_nistz256_subx,\@abi-omnipotent
3026 __ecp_nistz256_subx:
3051 .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
3053 .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
3055 __ecp_nistz256_mul_by_2x:
3057 adc $a0, $a0 # a0:a3+a0:a3
3076 mov $a0, 8*0($r_ptr)
3078 mov $a1, 8*1($r_ptr)
3080 mov $a2, 8*2($r_ptr)
3081 mov $a3, 8*3($r_ptr)
3084 .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
3089 &gen_add_affine("x");
3093 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3094 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3102 .extern __imp_RtlVirtualUnwind
3104 .type short_handler,\@abi-omnipotent
3118 mov 120($context),%rax # pull context->Rax
3119 mov 248($context),%rbx # pull context->Rip
3121 mov 8($disp),%rsi # disp->ImageBase
3122 mov 56($disp),%r11 # disp->HandlerData
3124 mov 0(%r11),%r10d # HandlerData[0]
3125 lea (%rsi,%r10),%r10 # end of prologue label
3126 cmp %r10,%rbx # context->Rip<end of prologue label
3127 jb .Lcommon_seh_tail
3129 mov 152($context),%rax # pull context->Rsp
3131 mov 4(%r11),%r10d # HandlerData[1]
3132 lea (%rsi,%r10),%r10 # epilogue label
3133 cmp %r10,%rbx # context->Rip>=epilogue label
3134 jae .Lcommon_seh_tail
3140 mov %r12,216($context) # restore context->R12
3141 mov %r13,224($context) # restore context->R13
3143 jmp .Lcommon_seh_tail
3144 .size short_handler,.-short_handler
3146 .type full_handler,\@abi-omnipotent
3160 mov 120($context),%rax # pull context->Rax
3161 mov 248($context),%rbx # pull context->Rip
3163 mov 8($disp),%rsi # disp->ImageBase
3164 mov 56($disp),%r11 # disp->HandlerData
3166 mov 0(%r11),%r10d # HandlerData[0]
3167 lea (%rsi,%r10),%r10 # end of prologue label
3168 cmp %r10,%rbx # context->Rip<end of prologue label
3169 jb .Lcommon_seh_tail
3171 mov 152($context),%rax # pull context->Rsp
3173 mov 4(%r11),%r10d # HandlerData[1]
3174 lea (%rsi,%r10),%r10 # epilogue label
3175 cmp %r10,%rbx # context->Rip>=epilogue label
3176 jae .Lcommon_seh_tail
3178 mov 8(%r11),%r10d # HandlerData[2]
3179 lea (%rax,%r10),%rax
3187 mov %rbx,144($context) # restore context->Rbx
3188 mov %rbp,160($context) # restore context->Rbp
3189 mov %r12,216($context) # restore context->R12
3190 mov %r13,224($context) # restore context->R13
3191 mov %r14,232($context) # restore context->R14
3192 mov %r15,240($context) # restore context->R15
3197 mov %rax,152($context) # restore context->Rsp
3198 mov %rsi,168($context) # restore context->Rsi
3199 mov %rdi,176($context) # restore context->Rdi
3201 mov 40($disp),%rdi # disp->ContextRecord
3202 mov $context,%rsi # context
3203 mov \$154,%ecx # sizeof(CONTEXT) in quadwords
3204 .long 0xa548f3fc # cld; rep movsq
3207 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3208 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3209 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3210 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3211 mov 40(%rsi),%r10 # disp->ContextRecord
3212 lea 56(%rsi),%r11 # &disp->HandlerData
3213 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3214 mov %r10,32(%rsp) # arg5
3215 mov %r11,40(%rsp) # arg6
3216 mov %r12,48(%rsp) # arg7
3217 mov %rcx,56(%rsp) # arg8, (NULL)
3218 call *__imp_RtlVirtualUnwind(%rip)
3220 mov \$1,%eax # ExceptionContinueSearch
3232 .size full_handler,.-full_handler
3236 .rva .LSEH_begin_ecp_nistz256_mul_by_2
3237 .rva .LSEH_end_ecp_nistz256_mul_by_2
3238 .rva .LSEH_info_ecp_nistz256_mul_by_2
3240 .rva .LSEH_begin_ecp_nistz256_div_by_2
3241 .rva .LSEH_end_ecp_nistz256_div_by_2
3242 .rva .LSEH_info_ecp_nistz256_div_by_2
3244 .rva .LSEH_begin_ecp_nistz256_mul_by_3
3245 .rva .LSEH_end_ecp_nistz256_mul_by_3
3246 .rva .LSEH_info_ecp_nistz256_mul_by_3
3248 .rva .LSEH_begin_ecp_nistz256_add
3249 .rva .LSEH_end_ecp_nistz256_add
3250 .rva .LSEH_info_ecp_nistz256_add
3252 .rva .LSEH_begin_ecp_nistz256_sub
3253 .rva .LSEH_end_ecp_nistz256_sub
3254 .rva .LSEH_info_ecp_nistz256_sub
3256 .rva .LSEH_begin_ecp_nistz256_neg
3257 .rva .LSEH_end_ecp_nistz256_neg
3258 .rva .LSEH_info_ecp_nistz256_neg
3260 .rva .LSEH_begin_ecp_nistz256_to_mont
3261 .rva .LSEH_end_ecp_nistz256_to_mont
3262 .rva .LSEH_info_ecp_nistz256_to_mont
3264 .rva .LSEH_begin_ecp_nistz256_mul_mont
3265 .rva .LSEH_end_ecp_nistz256_mul_mont
3266 .rva .LSEH_info_ecp_nistz256_mul_mont
3268 .rva .LSEH_begin_ecp_nistz256_sqr_mont
3269 .rva .LSEH_end_ecp_nistz256_sqr_mont
3270 .rva .LSEH_info_ecp_nistz256_sqr_mont
3272 .rva .LSEH_begin_ecp_nistz256_from_mont
3273 .rva .LSEH_end_ecp_nistz256_from_mont
3274 .rva .LSEH_info_ecp_nistz256_from_mont
3276 .rva .LSEH_begin_ecp_nistz256_gather_w5
3277 .rva .LSEH_end_ecp_nistz256_gather_w5
3278 .rva .LSEH_info_ecp_nistz256_gather_wX
3280 .rva .LSEH_begin_ecp_nistz256_gather_w7
3281 .rva .LSEH_end_ecp_nistz256_gather_w7
3282 .rva .LSEH_info_ecp_nistz256_gather_wX
3284 $code.=<<___ if ($avx>1);
3285 .rva .LSEH_begin_ecp_nistz256_avx2_gather_w5
3286 .rva .LSEH_end_ecp_nistz256_avx2_gather_w5
3287 .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
3289 .rva .LSEH_begin_ecp_nistz256_avx2_gather_w7
3290 .rva .LSEH_end_ecp_nistz256_avx2_gather_w7
3291 .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
3294 .rva .LSEH_begin_ecp_nistz256_point_double
3295 .rva .LSEH_end_ecp_nistz256_point_double
3296 .rva .LSEH_info_ecp_nistz256_point_double
3298 .rva .LSEH_begin_ecp_nistz256_point_add
3299 .rva .LSEH_end_ecp_nistz256_point_add
3300 .rva .LSEH_info_ecp_nistz256_point_add
3302 .rva .LSEH_begin_ecp_nistz256_point_add_affine
3303 .rva .LSEH_end_ecp_nistz256_point_add_affine
3304 .rva .LSEH_info_ecp_nistz256_point_add_affine
3306 $code.=<<___ if ($addx);
3307 .rva .LSEH_begin_ecp_nistz256_point_doublex
3308 .rva .LSEH_end_ecp_nistz256_point_doublex
3309 .rva .LSEH_info_ecp_nistz256_point_doublex
3311 .rva .LSEH_begin_ecp_nistz256_point_addx
3312 .rva .LSEH_end_ecp_nistz256_point_addx
3313 .rva .LSEH_info_ecp_nistz256_point_addx
3315 .rva .LSEH_begin_ecp_nistz256_point_add_affinex
3316 .rva .LSEH_end_ecp_nistz256_point_add_affinex
3317 .rva .LSEH_info_ecp_nistz256_point_add_affinex
3323 .LSEH_info_ecp_nistz256_mul_by_2:
3326 .rva .Lmul_by_2_body,.Lmul_by_2_epilogue # HandlerData[]
3327 .LSEH_info_ecp_nistz256_div_by_2:
3330 .rva .Ldiv_by_2_body,.Ldiv_by_2_epilogue # HandlerData[]
3331 .LSEH_info_ecp_nistz256_mul_by_3:
3334 .rva .Lmul_by_3_body,.Lmul_by_3_epilogue # HandlerData[]
3335 .LSEH_info_ecp_nistz256_add:
3338 .rva .Ladd_body,.Ladd_epilogue # HandlerData[]
3339 .LSEH_info_ecp_nistz256_sub:
3342 .rva .Lsub_body,.Lsub_epilogue # HandlerData[]
3343 .LSEH_info_ecp_nistz256_neg:
3346 .rva .Lneg_body,.Lneg_epilogue # HandlerData[]
3347 .LSEH_info_ecp_nistz256_to_mont:
3350 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3352 .LSEH_info_ecp_nistz256_mul_mont:
3355 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3357 .LSEH_info_ecp_nistz256_sqr_mont:
3360 .rva .Lsqr_body,.Lsqr_epilogue # HandlerData[]
3362 .LSEH_info_ecp_nistz256_from_mont:
3365 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3366 .LSEH_info_ecp_nistz256_gather_wX:
3367 .byte 0x01,0x33,0x16,0x00
3368 .byte 0x33,0xf8,0x09,0x00 #movaps 0x90(rsp),xmm15
3369 .byte 0x2e,0xe8,0x08,0x00 #movaps 0x80(rsp),xmm14
3370 .byte 0x29,0xd8,0x07,0x00 #movaps 0x70(rsp),xmm13
3371 .byte 0x24,0xc8,0x06,0x00 #movaps 0x60(rsp),xmm12
3372 .byte 0x1f,0xb8,0x05,0x00 #movaps 0x50(rsp),xmm11
3373 .byte 0x1a,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10
3374 .byte 0x15,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9
3375 .byte 0x10,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8
3376 .byte 0x0c,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
3377 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6
3378 .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8
3381 $code.=<<___ if ($avx>1);
3382 .LSEH_info_ecp_nistz256_avx2_gather_wX:
3383 .byte 0x01,0x36,0x17,0x0b
3384 .byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
3385 .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
3386 .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
3387 .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
3388 .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
3389 .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
3390 .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
3391 .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
3392 .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
3393 .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
3394 .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
3395 .byte 0x00,0xb3,0x00,0x00 # set_frame r11
3399 .LSEH_info_ecp_nistz256_point_double:
3402 .rva .Lpoint_doubleq_body,.Lpoint_doubleq_epilogue # HandlerData[]
3404 .LSEH_info_ecp_nistz256_point_add:
3407 .rva .Lpoint_addq_body,.Lpoint_addq_epilogue # HandlerData[]
3409 .LSEH_info_ecp_nistz256_point_add_affine:
3412 .rva .Ladd_affineq_body,.Ladd_affineq_epilogue # HandlerData[]
3415 $code.=<<___ if ($addx);
3417 .LSEH_info_ecp_nistz256_point_doublex:
3420 .rva .Lpoint_doublex_body,.Lpoint_doublex_epilogue # HandlerData[]
3422 .LSEH_info_ecp_nistz256_point_addx:
3425 .rva .Lpoint_addx_body,.Lpoint_addx_epilogue # HandlerData[]
3427 .LSEH_info_ecp_nistz256_point_add_affinex:
3430 .rva .Ladd_affinex_body,.Ladd_affinex_epilogue # HandlerData[]
3435 ########################################################################
3436 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
3438 open TABLE,"<ecp_nistz256_table.c" or
3439 open TABLE,"<${dir}../ecp_nistz256_table.c" or
3440 die "failed to open ecp_nistz256_table.c:",$!;
3445 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
3449 die "insane number of elements" if ($#arr != 64*16*37-1);
3453 .globl ecp_nistz256_precomputed
3454 .type ecp_nistz256_precomputed,\@object
3456 ecp_nistz256_precomputed:
3458 while (@line=splice(@arr,0,16)) {
3459 print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
3462 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
3465 $code =~ s/\`([^\`]*)\`/eval $1/gem;