2 # Copyright 2014-2020 The OpenSSL Project Authors. All Rights Reserved.
3 # Copyright (c) 2014, Intel Corporation. All Rights Reserved.
4 # Copyright (c) 2015 CloudFlare, Inc.
6 # Licensed under the Apache License 2.0 (the "License"). You may not use
7 # this file except in compliance with the License. You can obtain a copy
8 # in the file LICENSE in the source distribution or at
9 # https://www.openssl.org/source/license.html
11 # Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1, 3)
12 # (1) Intel Corporation, Israel Development Center, Haifa, Israel
13 # (2) University of Haifa, Israel
14 # (3) CloudFlare, Inc.
17 # S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with 256 Bit Primes"
20 # Further optimization by <appro@openssl.org>:
22 # this/original with/without -DECP_NISTZ256_ASM(*)
23 # Opteron +15-49% +150-195%
24 # Bulldozer +18-45% +175-240%
25 # P4 +24-46% +100-150%
26 # Westmere +18-34% +87-160%
27 # Sandy Bridge +14-35% +120-185%
28 # Ivy Bridge +11-35% +125-180%
29 # Haswell +10-37% +160-200%
30 # Broadwell +24-58% +210-270%
31 # Atom +20-50% +180-240%
32 # VIA Nano +50-160% +480-480%
34 # (*) "without -DECP_NISTZ256_ASM" refers to build with
35 # "enable-ec_nistp_64_gcc_128";
37 # Ranges denote minimum and maximum improvement coefficients depending
38 # on benchmark. In the "this/original" column the lower coefficient is for
39 # ECDSA sign, while in the "with/without" column the lower one is for ECDH
40 # key agreement and the higher one for ECDSA sign, the relatively fastest
41 # server-side operation. Keep in mind that +100% means 2x improvement.
43 # $output is the last argument if it looks like a file (it has an extension)
44 # $flavour is the first argument if it doesn't look like a file
45 $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
46 $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
48 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
50 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
51 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
52 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
53 die "can't locate x86_64-xlate.pl";
55 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
56 or die "can't call $xlate: $!";
59 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
60 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
61 $avx = ($1>=2.19) + ($1>=2.22);
65 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
66 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
67 $avx = ($1>=2.09) + ($1>=2.10);
71 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
72 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
73 $avx = ($1>=10) + ($1>=11);
77 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+)\.([0-9]+)/) {
78 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
79 $avx = ($ver>=3.0) + ($ver>=3.01);
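# Note: $avx ends up as 0/1/2 (no AVX / AVX / AVX2) and $addx gates the ADX
# code paths further below; both are derived from the assembler or compiler
# version because the generated code must actually assemble on the build
# machine.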
85 .extern OPENSSL_ia32cap_P
88 .section .rodata align=4096
91 .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
93 # 2^512 mod P precomputed for NIST P256 polynomial
95 .quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
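# .LRR above is RR = 2^512 mod p, i.e. the Montgomery conversion constant:
# mont_mul(a, RR) = a*RR*2^-256 = a*2^256 mod p, which is how
# ecp_nistz256_to_mont below converts a value into Montgomery form.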
100 .long 2,2,2,2,2,2,2,2
102 .long 3,3,3,3,3,3,3,3
104 .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
106 # Constants for computations modulo ord(p256)
108 .quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
110 .quad 0xccd1c8aaee00bc4f
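# .LordK is the Montgomery constant n0' = -ord(p256)^-1 mod 2^64: in each
# reduction step the low accumulator limb is multiplied by it to obtain the
# factor t such that acc + t*ord is divisible by 2^64 ("guaranteed to be
# zero" in the comments below).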
115 ################################################################################
116 # void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
118 my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
119 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
120 my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");
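# All of the small field helpers below follow the same pattern for arithmetic
# modulo the NIST P-256 prime p = 2^256 - 2^224 + 2^192 + 2^96 - 1 (.Lpoly):
# compute the plain 256-bit result while tracking the carry/borrow, then do a
# branch-free conditional addition or subtraction of p (selecting between the
# reduced and unreduced value) so that the output is fully reduced and the
# execution time does not depend on the data.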
124 .globl ecp_nistz256_mul_by_2
125 .type ecp_nistz256_mul_by_2,\@function,2
127 ecp_nistz256_mul_by_2:
138 add $a0, $a0 # a0:a3+a0:a3
142 lea .Lpoly(%rip), $a_ptr
171 .cfi_adjust_cfa_offset -16
175 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
177 ################################################################################
178 # void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
179 .globl ecp_nistz256_div_by_2
180 .type ecp_nistz256_div_by_2,\@function,2
182 ecp_nistz256_div_by_2:
195 lea .Lpoly(%rip), $a_ptr
206 xor $a_ptr, $a_ptr # borrow $a_ptr
215 mov $a1, $t0 # a0:a3>>1
241 .cfi_adjust_cfa_offset -16
245 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
247 ################################################################################
248 # void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
249 .globl ecp_nistz256_mul_by_3
250 .type ecp_nistz256_mul_by_3,\@function,2
252 ecp_nistz256_mul_by_3:
263 add $a0, $a0 # a0:a3+a0:a3
275 sbb .Lpoly+8*1(%rip), $a1
278 sbb .Lpoly+8*3(%rip), $a3
287 add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
297 sbb .Lpoly+8*1(%rip), $a1
300 sbb .Lpoly+8*3(%rip), $a3
317 .cfi_adjust_cfa_offset -16
321 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
323 ################################################################################
324 # void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
325 .globl ecp_nistz256_add
326 .type ecp_nistz256_add,\@function,3
341 lea .Lpoly(%rip), $a_ptr
373 .cfi_adjust_cfa_offset -16
377 .size ecp_nistz256_add,.-ecp_nistz256_add
379 ################################################################################
380 # void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
381 .globl ecp_nistz256_sub
382 .type ecp_nistz256_sub,\@function,3
397 lea .Lpoly(%rip), $a_ptr
429 .cfi_adjust_cfa_offset -16
433 .size ecp_nistz256_sub,.-ecp_nistz256_sub
435 ################################################################################
436 # void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
437 .globl ecp_nistz256_neg
438 .type ecp_nistz256_neg,\@function,2
459 lea .Lpoly(%rip), $a_ptr
485 .cfi_adjust_cfa_offset -16
489 .size ecp_nistz256_neg,.-ecp_nistz256_neg
493 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
494 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
495 my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
496 my ($poly1,$poly3)=($acc6,$acc7);
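# The ecp_nistz256_ord_* routines below do Montgomery arithmetic modulo the
# group order ord(p256) = n (.Lord) rather than modulo the field prime:
# ord_mul_mont computes a*b*2^-256 mod n and ord_sqr_mont a^2*2^-256 mod n
# (repeatedly), which is what the constant-time scalar inversion used for
# ECDSA needs.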
499 ################################################################################
500 # void ecp_nistz256_ord_mul_mont(
505 .globl ecp_nistz256_ord_mul_mont
506 .type ecp_nistz256_ord_mul_mont,\@function,3
508 ecp_nistz256_ord_mul_mont:
511 $code.=<<___ if ($addx);
513 and OPENSSL_ia32cap_P+8(%rip), %ecx
515 je .Lecp_nistz256_ord_mul_montx
532 mov 8*0($b_org), %rax
534 lea .Lord(%rip), %r14
535 mov .LordK(%rip), %r15
537 ################################# * b[0]
565 ################################# First reduction step
568 add %rax, $acc5 # guaranteed to be zero
574 sbb \$0, $acc0 # can't borrow
583 adc \$0, $acc0 # can't overflow
588 mov 8*1($b_ptr), %rax
589 sbb %rdx, $t1 # can't borrow
595 ################################# * b[1]
631 ################################# Second reduction step
634 add %rax, $t0 # guaranteed to be zero
639 sbb \$0, $acc1 # can't borrow
648 adc \$0, $acc1 # can't overflow
653 mov 8*2($b_ptr), %rax
654 sbb %rdx, $t1 # can't borrow
660 ################################## * b[2]
696 ################################# Third reduction step
699 add %rax, $t0 # guaranteed to be zero
704 sbb \$0, $acc2 # can't borrow
713 adc \$0, $acc2 # can't overflow
718 mov 8*3($b_ptr), %rax
719 sbb %rdx, $t1 # can't borrow
725 ################################# * b[3]
761 ################################# Last reduction step
764 add %rax, $t0 # guaranteed to be zero
769 sbb \$0, $acc3 # can't borrow
778 adc \$0, $acc3 # can't overflow
783 sbb %rdx, $t1 # can't borrow
789 ################################# Subtract ord
805 mov $acc4, 8*0($r_ptr)
806 mov $acc5, 8*1($r_ptr)
807 mov $acc0, 8*2($r_ptr)
808 mov $acc1, 8*3($r_ptr)
823 .cfi_adjust_cfa_offset -48
827 .size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
829 ################################################################################
830 # void ecp_nistz256_ord_sqr_mont(
835 .globl ecp_nistz256_ord_sqr_mont
836 .type ecp_nistz256_ord_sqr_mont,\@function,3
838 ecp_nistz256_ord_sqr_mont:
841 $code.=<<___ if ($addx);
843 and OPENSSL_ia32cap_P+8(%rip), %ecx
845 je .Lecp_nistz256_ord_sqr_montx
862 mov 8*0($a_ptr), $acc0
863 mov 8*1($a_ptr), %rax
864 mov 8*2($a_ptr), $acc6
865 mov 8*3($a_ptr), $acc7
866 lea .Lord(%rip), $a_ptr # pointer to modulus
872 ################################# a[1:] * a[0]
873 mov %rax, $t1 # put aside a[1]
874 mul $acc0 # a[1] * a[0]
876 movq $t1, %xmm1 # offload a[1]
880 mul $acc0 # a[2] * a[0]
883 movq $acc6, %xmm2 # offload a[2]
887 mul $acc0 # a[3] * a[0]
890 movq $acc7, %xmm3 # offload a[3]
894 ################################# a[3] * a[2]
895 mul $acc6 # a[3] * a[2]
900 ################################# a[2:] * a[1]
901 mul $t1 # a[2] * a[1]
907 mul $t1 # a[3] * a[1]
913 adc \$0, $acc6 # can't overflow
915 ################################# *2
926 ################################# Missing products
927 mul %rax # a[0] * a[0]
932 mul %rax # a[1] * a[1]
939 mul %rax # a[2] * a[2]
947 imulq 8*4($a_ptr), $acc0 # *= .LordK
949 mul %rax # a[3] * a[3]
952 mov 8*0($a_ptr), %rax # modulus[0]
953 adc %rdx, $acc7 # can't overflow
955 ################################# First reduction step
958 add %rax, $t0 # guaranteed to be zero
959 mov 8*1($a_ptr), %rax # modulus[1]
963 sbb \$0, $t1 # can't borrow
972 adc \$0, $t1 # can't overflow
975 imulq 8*4($a_ptr), $acc1 # *= .LordK
980 mov 8*0($a_ptr), %rax
981 sbb %rdx, $acc0 # can't borrow
984 adc \$0, $acc0 # can't overflow
986 ################################# Second reduction step
989 add %rax, $t0 # guaranteed to be zero
990 mov 8*1($a_ptr), %rax
994 sbb \$0, $t1 # can't borrow
1003 adc \$0, $t1 # can't overflow
1006 imulq 8*4($a_ptr), $acc2 # *= .LordK
1011 mov 8*0($a_ptr), %rax
1012 sbb %rdx, $acc1 # can't borrow
1015 adc \$0, $acc1 # can't overflow
1017 ################################# Third reduction step
1020 add %rax, $t0 # guaranteed to be zero
1021 mov 8*1($a_ptr), %rax
1025 sbb \$0, $t1 # can't borrow
1034 adc \$0, $t1 # can't overflow
1037 imulq 8*4($a_ptr), $acc3 # *= .LordK
1042 mov 8*0($a_ptr), %rax
1043 sbb %rdx, $acc2 # can't borrow
1046 adc \$0, $acc2 # can't overflow
1048 ################################# Last reduction step
1051 add %rax, $t0 # guaranteed to be zero
1052 mov 8*1($a_ptr), %rax
1056 sbb \$0, $t1 # can't borrow
1065 adc \$0, $t1 # can't overflow
1070 sbb %rdx, $acc3 # can't borrow
1073 adc \$0, $acc3 # can't overflow
1075 ################################# Add bits [511:256] of the sqr result
1085 ################################# Compare to modulus
1086 sub 8*0($a_ptr), $acc0
1088 sbb 8*1($a_ptr), $acc1
1089 sbb 8*2($a_ptr), $acc2
1091 sbb 8*3($a_ptr), $acc3
1102 mov $acc0, 8*0($r_ptr)
1103 mov %rax, 8*1($r_ptr)
1105 mov $acc6, 8*2($r_ptr)
1107 mov $acc7, 8*3($r_ptr)
1123 .cfi_adjust_cfa_offset -48
1127 .size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
1130 $code.=<<___ if ($addx);
1131 ################################################################################
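# These *montx flavours are selected at run time when ADX is available (see
# the $addx checks above): mulx provides flag-less 64x64->128-bit
# multiplication, and adcx/adox maintain two independent carry chains (CF and
# OF), so partial products of consecutive limbs can be accumulated without
# serializing on a single carry flag.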
1132 .type ecp_nistz256_ord_mul_montx,\@function,3
1134 ecp_nistz256_ord_mul_montx:
1136 .Lecp_nistz256_ord_mul_montx:
1152 mov 8*0($b_org), %rdx
1153 mov 8*0($a_ptr), $acc1
1154 mov 8*1($a_ptr), $acc2
1155 mov 8*2($a_ptr), $acc3
1156 mov 8*3($a_ptr), $acc4
1157 lea -128($a_ptr), $a_ptr # control u-op density
1158 lea .Lord-128(%rip), %r14
1159 mov .LordK(%rip), %r15
1161 ################################# Multiply by b[0]
1162 mulx $acc1, $acc0, $acc1
1163 mulx $acc2, $t0, $acc2
1164 mulx $acc3, $t1, $acc3
1166 mulx $acc4, $t0, $acc4
1168 mulx %r15, %rdx, %rax
1173 ################################# reduction
1174 xor $acc5, $acc5 # $acc5=0, cf=0, of=0
1175 mulx 8*0+128(%r14), $t0, $t1
1176 adcx $t0, $acc0 # guaranteed to be zero
1179 mulx 8*1+128(%r14), $t0, $t1
1183 mulx 8*2+128(%r14), $t0, $t1
1187 mulx 8*3+128(%r14), $t0, $t1
1188 mov 8*1($b_ptr), %rdx
1193 adc \$0, $acc5 # cf=0, of=0
1195 ################################# Multiply by b[1]
1196 mulx 8*0+128($a_ptr), $t0, $t1
1200 mulx 8*1+128($a_ptr), $t0, $t1
1204 mulx 8*2+128($a_ptr), $t0, $t1
1208 mulx 8*3+128($a_ptr), $t0, $t1
1210 mulx %r15, %rdx, %rax
1216 adc \$0, $acc0 # cf=0, of=0
1218 ################################# reduction
1219 mulx 8*0+128(%r14), $t0, $t1
1220 adcx $t0, $acc1 # guaranteed to be zero
1223 mulx 8*1+128(%r14), $t0, $t1
1227 mulx 8*2+128(%r14), $t0, $t1
1231 mulx 8*3+128(%r14), $t0, $t1
1232 mov 8*2($b_ptr), %rdx
1237 adc \$0, $acc0 # cf=0, of=0
1239 ################################# Multiply by b[2]
1240 mulx 8*0+128($a_ptr), $t0, $t1
1244 mulx 8*1+128($a_ptr), $t0, $t1
1248 mulx 8*2+128($a_ptr), $t0, $t1
1252 mulx 8*3+128($a_ptr), $t0, $t1
1254 mulx %r15, %rdx, %rax
1260 adc \$0, $acc1 # cf=0, of=0
1262 ################################# reduction
1263 mulx 8*0+128(%r14), $t0, $t1
1264 adcx $t0, $acc2 # guaranteed to be zero
1267 mulx 8*1+128(%r14), $t0, $t1
1271 mulx 8*2+128(%r14), $t0, $t1
1275 mulx 8*3+128(%r14), $t0, $t1
1276 mov 8*3($b_ptr), %rdx
1281 adc \$0, $acc1 # cf=0, of=0
1283 ################################# Multiply by b[3]
1284 mulx 8*0+128($a_ptr), $t0, $t1
1288 mulx 8*1+128($a_ptr), $t0, $t1
1292 mulx 8*2+128($a_ptr), $t0, $t1
1296 mulx 8*3+128($a_ptr), $t0, $t1
1298 mulx %r15, %rdx, %rax
1304 adc \$0, $acc2 # cf=0, of=0
1306 ################################# reduction
1307 mulx 8*0+128(%r14), $t0, $t1
1308 adcx $t0, $acc3 # guaranteed to be zero
1311 mulx 8*1+128(%r14), $t0, $t1
1315 mulx 8*2+128(%r14), $t0, $t1
1319 mulx 8*3+128(%r14), $t0, $t1
1329 #################################
1330 # Branch-less conditional subtraction of P
1332 sub 8*0(%r14), $acc4
1333 sbb 8*1(%r14), $acc5
1334 sbb 8*2(%r14), $acc0
1336 sbb 8*3(%r14), $acc1
1344 mov $acc4, 8*0($r_ptr)
1345 mov $acc5, 8*1($r_ptr)
1346 mov $acc0, 8*2($r_ptr)
1347 mov $acc1, 8*3($r_ptr)
1362 .cfi_adjust_cfa_offset -48
1363 .Lord_mulx_epilogue:
1366 .size ecp_nistz256_ord_mul_montx,.-ecp_nistz256_ord_mul_montx
1368 .type ecp_nistz256_ord_sqr_montx,\@function,3
1370 ecp_nistz256_ord_sqr_montx:
1372 .Lecp_nistz256_ord_sqr_montx:
1388 mov 8*0($a_ptr), %rdx
1389 mov 8*1($a_ptr), $acc6
1390 mov 8*2($a_ptr), $acc7
1391 mov 8*3($a_ptr), $acc0
1392 lea .Lord(%rip), $a_ptr
1397 mulx $acc6, $acc1, $acc2 # a[0]*a[1]
1398 mulx $acc7, $t0, $acc3 # a[0]*a[2]
1399 mov %rdx, %rax # offload a[0]
1400 movq $acc6, %xmm1 # offload a[1]
1401 mulx $acc0, $t1, $acc4 # a[0]*a[3]
1404 movq $acc7, %xmm2 # offload a[2]
1407 xor $acc5, $acc5 # $acc5=0,cf=0,of=0
1408 #################################
1409 mulx $acc7, $t0, $t1 # a[1]*a[2]
1413 mulx $acc0, $t0, $t1 # a[1]*a[3]
1418 #################################
1419 mulx $acc0, $t0, $acc6 # a[2]*a[3]
1421 movq $acc0, %xmm3 # offload a[3]
1422 xor $acc7, $acc7 # $acc7=0,cf=0,of=0
1423 adcx $acc1, $acc1 # acc1:6<<1
1426 adox $acc7, $acc6 # of=0
1428 ################################# a[i]*a[i]
1429 mulx %rdx, $acc0, $t1
1450 ################################# reduction
1452 mulx 8*4($a_ptr), %rdx, $t0
1454 xor %rax, %rax # cf=0, of=0
1455 mulx 8*0($a_ptr), $t0, $t1
1456 adcx $t0, $acc0 # guaranteed to be zero
1458 mulx 8*1($a_ptr), $t0, $t1
1461 mulx 8*2($a_ptr), $t0, $t1
1464 mulx 8*3($a_ptr), $t0, $t1
1466 adox $t1, $acc0 # of=0
1467 adcx %rax, $acc0 # cf=0
1469 #################################
1471 mulx 8*4($a_ptr), %rdx, $t0
1473 mulx 8*0($a_ptr), $t0, $t1
1474 adox $t0, $acc1 # guaranteed to be zero
1476 mulx 8*1($a_ptr), $t0, $t1
1479 mulx 8*2($a_ptr), $t0, $t1
1482 mulx 8*3($a_ptr), $t0, $t1
1484 adcx $t1, $acc1 # cf=0
1485 adox %rax, $acc1 # of=0
1487 #################################
1489 mulx 8*4($a_ptr), %rdx, $t0
1491 mulx 8*0($a_ptr), $t0, $t1
1492 adcx $t0, $acc2 # guaranteed to be zero
1494 mulx 8*1($a_ptr), $t0, $t1
1497 mulx 8*2($a_ptr), $t0, $t1
1500 mulx 8*3($a_ptr), $t0, $t1
1502 adox $t1, $acc2 # of=0
1503 adcx %rax, $acc2 # cf=0
1505 #################################
1507 mulx 8*4($a_ptr), %rdx, $t0
1509 mulx 8*0($a_ptr), $t0, $t1
1510 adox $t0, $acc3 # guaranteed to be zero
1512 mulx 8*1($a_ptr), $t0, $t1
1515 mulx 8*2($a_ptr), $t0, $t1
1518 mulx 8*3($a_ptr), $t0, $t1
1523 ################################# accumulate upper half
1524 add $acc0, $acc4 # add $acc4, $acc0
1532 ################################# compare to modulus
1533 sub 8*0($a_ptr), $acc4
1535 sbb 8*1($a_ptr), $acc1
1536 sbb 8*2($a_ptr), $acc2
1538 sbb 8*3($a_ptr), $acc3
1549 mov %rdx, 8*0($r_ptr)
1550 mov $acc6, 8*1($r_ptr)
1552 mov $acc7, 8*2($r_ptr)
1554 mov $acc0, 8*3($r_ptr)
1570 .cfi_adjust_cfa_offset -48
1571 .Lord_sqrx_epilogue:
1574 .size ecp_nistz256_ord_sqr_montx,.-ecp_nistz256_ord_sqr_montx
1578 ################################################################################
1579 # void ecp_nistz256_to_mont(
1582 .globl ecp_nistz256_to_mont
1583 .type ecp_nistz256_to_mont,\@function,2
1585 ecp_nistz256_to_mont:
1588 $code.=<<___ if ($addx);
1590 and OPENSSL_ia32cap_P+8(%rip), %ecx
1593 lea .LRR(%rip), $b_org
1596 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
1598 ################################################################################
1599 # void ecp_nistz256_mul_mont(
1604 .globl ecp_nistz256_mul_mont
1605 .type ecp_nistz256_mul_mont,\@function,3
1607 ecp_nistz256_mul_mont:
1610 $code.=<<___ if ($addx);
1612 and OPENSSL_ia32cap_P+8(%rip), %ecx
1630 $code.=<<___ if ($addx);
1636 mov 8*0($b_org), %rax
1637 mov 8*0($a_ptr), $acc1
1638 mov 8*1($a_ptr), $acc2
1639 mov 8*2($a_ptr), $acc3
1640 mov 8*3($a_ptr), $acc4
1642 call __ecp_nistz256_mul_montq
1644 $code.=<<___ if ($addx);
1650 mov 8*0($b_org), %rdx
1651 mov 8*0($a_ptr), $acc1
1652 mov 8*1($a_ptr), $acc2
1653 mov 8*2($a_ptr), $acc3
1654 mov 8*3($a_ptr), $acc4
1655 lea -128($a_ptr), $a_ptr # control u-op density
1657 call __ecp_nistz256_mul_montx
1674 .cfi_adjust_cfa_offset -48
1678 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
1680 .type __ecp_nistz256_mul_montq,\@abi-omnipotent
1682 __ecp_nistz256_mul_montq:
1684 ########################################################################
1685 # Multiply a by b[0]
1688 mov .Lpoly+8*1(%rip),$poly1
1694 mov .Lpoly+8*3(%rip),$poly3
1713 ########################################################################
1714 # First reduction step
1715 # Basically now we want to multiply acc[0] by p256,
1716 # and add the result to the acc.
1717 # Due to the special form of p256 we do some optimizations
1719 # acc[0] x p256[0..1] = acc[0] x 2^96 - acc[0]
1720 # then we add acc[0] and get acc[0] x 2^96
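	# Spelled out, since p256 = 2^256 - 2^224 + 2^192 + 2^96 - 1:
	#   acc[0]*p256 = acc[0]<<256 - acc[0]<<224 + acc[0]<<192 + acc[0]<<96 - acc[0]
	# so folding acc[0]*p256 into the accumulator takes only a few shifts,
	# additions and subtractions instead of full 64x64-bit multiplications.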
1726 add $acc0, $acc1 # +=acc[0]<<96
1729 mov 8*1($b_ptr), %rax
1734 ########################################################################
1767 ########################################################################
1768 # Second reduction step
1776 mov 8*2($b_ptr), %rax
1781 ########################################################################
1814 ########################################################################
1815 # Third reduction step
1823 mov 8*3($b_ptr), %rax
1828 ########################################################################
1861 ########################################################################
1862 # Final reduction step
1875 ########################################################################
1876 # Branch-less conditional subtraction of P
1877 sub \$-1, $acc4 # .Lpoly[0]
1879 sbb $poly1, $acc5 # .Lpoly[1]
1880 sbb \$0, $acc0 # .Lpoly[2]
1882 sbb $poly3, $acc1 # .Lpoly[3]
1887 mov $acc4, 8*0($r_ptr)
1889 mov $acc5, 8*1($r_ptr)
1891 mov $acc0, 8*2($r_ptr)
1892 mov $acc1, 8*3($r_ptr)
1896 .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
1898 ################################################################################
1899 # void ecp_nistz256_sqr_mont(
1903 # we optimize the square according to S.Gueron and V.Krasnov,
1904 # "Speeding up Big-Number Squaring"
1905 .globl ecp_nistz256_sqr_mont
1906 .type ecp_nistz256_sqr_mont,\@function,2
1908 ecp_nistz256_sqr_mont:
1911 $code.=<<___ if ($addx);
1913 and OPENSSL_ia32cap_P+8(%rip), %ecx
1930 $code.=<<___ if ($addx);
1935 mov 8*0($a_ptr), %rax
1936 mov 8*1($a_ptr), $acc6
1937 mov 8*2($a_ptr), $acc7
1938 mov 8*3($a_ptr), $acc0
1940 call __ecp_nistz256_sqr_montq
1942 $code.=<<___ if ($addx);
1947 mov 8*0($a_ptr), %rdx
1948 mov 8*1($a_ptr), $acc6
1949 mov 8*2($a_ptr), $acc7
1950 mov 8*3($a_ptr), $acc0
1951 lea -128($a_ptr), $a_ptr # control u-op density
1953 call __ecp_nistz256_sqr_montx
1970 .cfi_adjust_cfa_offset -48
1974 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
1976 .type __ecp_nistz256_sqr_montq,\@abi-omnipotent
1978 __ecp_nistz256_sqr_montq:
1981 mulq $acc6 # a[1]*a[0]
1986 mulq $acc5 # a[0]*a[2]
1992 mulq $acc5 # a[0]*a[3]
1998 #################################
1999 mulq $acc6 # a[1]*a[2]
2005 mulq $acc6 # a[1]*a[3]
2013 #################################
2014 mulq $acc7 # a[2]*a[3]
2017 mov 8*0($a_ptr), %rax
2021 add $acc1, $acc1 # acc1:6<<1
2031 mov 8*1($a_ptr), %rax
2037 mov 8*2($a_ptr), %rax
2044 mov 8*3($a_ptr), %rax
2054 mov .Lpoly+8*1(%rip), $a_ptr
2055 mov .Lpoly+8*3(%rip), $t1
2057 ##########################################
2064 add $acc0, $acc1 # +=acc[0]<<96
2070 ##########################################
2083 ##########################################
2096 ###########################################
2109 ############################################
2110 # Add the rest of the acc
2119 sub \$-1, $acc4 # .Lpoly[0]
2121 sbb $a_ptr, $acc5 # .Lpoly[1]
2122 sbb \$0, $acc6 # .Lpoly[2]
2124 sbb $t1, $acc7 # .Lpoly[3]
2129 mov $acc4, 8*0($r_ptr)
2131 mov $acc5, 8*1($r_ptr)
2133 mov $acc6, 8*2($r_ptr)
2134 mov $acc7, 8*3($r_ptr)
2138 .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
2143 .type __ecp_nistz256_mul_montx,\@abi-omnipotent
2145 __ecp_nistz256_mul_montx:
2147 ########################################################################
2149 mulx $acc1, $acc0, $acc1
2150 mulx $acc2, $t0, $acc2
2152 xor $acc5, $acc5 # cf=0
2153 mulx $acc3, $t1, $acc3
2154 mov .Lpoly+8*3(%rip), $poly3
2156 mulx $acc4, $t0, $acc4
2159 shlx $poly1,$acc0,$t1
2161 shrx $poly1,$acc0,$t0
2164 ########################################################################
2165 # First reduction step
2169 mulx $poly3, $t0, $t1
2170 mov 8*1($b_ptr), %rdx
2174 xor $acc0, $acc0 # $acc0=0,cf=0,of=0
2176 ########################################################################
2178 mulx 8*0+128($a_ptr), $t0, $t1
2182 mulx 8*1+128($a_ptr), $t0, $t1
2186 mulx 8*2+128($a_ptr), $t0, $t1
2190 mulx 8*3+128($a_ptr), $t0, $t1
2193 shlx $poly1, $acc1, $t0
2195 shrx $poly1, $acc1, $t1
2201 ########################################################################
2202 # Second reduction step
2206 mulx $poly3, $t0, $t1
2207 mov 8*2($b_ptr), %rdx
2211 xor $acc1 ,$acc1 # $acc1=0,cf=0,of=0
2213 ########################################################################
2215 mulx 8*0+128($a_ptr), $t0, $t1
2219 mulx 8*1+128($a_ptr), $t0, $t1
2223 mulx 8*2+128($a_ptr), $t0, $t1
2227 mulx 8*3+128($a_ptr), $t0, $t1
2230 shlx $poly1, $acc2, $t0
2232 shrx $poly1, $acc2, $t1
2238 ########################################################################
2239 # Third reduction step
2243 mulx $poly3, $t0, $t1
2244 mov 8*3($b_ptr), %rdx
2248 xor $acc2, $acc2 # $acc2=0,cf=0,of=0
2250 ########################################################################
2252 mulx 8*0+128($a_ptr), $t0, $t1
2256 mulx 8*1+128($a_ptr), $t0, $t1
2260 mulx 8*2+128($a_ptr), $t0, $t1
2264 mulx 8*3+128($a_ptr), $t0, $t1
2267 shlx $poly1, $acc3, $t0
2269 shrx $poly1, $acc3, $t1
2275 ########################################################################
2276 # Fourth reduction step
2280 mulx $poly3, $t0, $t1
2282 mov .Lpoly+8*1(%rip), $poly1
2288 ########################################################################
2289 # Branch-less conditional subtraction of P
2292 sbb \$-1, $acc4 # .Lpoly[0]
2293 sbb $poly1, $acc5 # .Lpoly[1]
2294 sbb \$0, $acc0 # .Lpoly[2]
2296 sbb $poly3, $acc1 # .Lpoly[3]
2301 mov $acc4, 8*0($r_ptr)
2303 mov $acc5, 8*1($r_ptr)
2305 mov $acc0, 8*2($r_ptr)
2306 mov $acc1, 8*3($r_ptr)
2310 .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
2312 .type __ecp_nistz256_sqr_montx,\@abi-omnipotent
2314 __ecp_nistz256_sqr_montx:
2316 mulx $acc6, $acc1, $acc2 # a[0]*a[1]
2317 mulx $acc7, $t0, $acc3 # a[0]*a[2]
2320 mulx $acc0, $t1, $acc4 # a[0]*a[3]
2324 xor $acc5, $acc5 # $acc5=0,cf=0,of=0
2326 #################################
2327 mulx $acc7, $t0, $t1 # a[1]*a[2]
2331 mulx $acc0, $t0, $t1 # a[1]*a[3]
2337 #################################
2338 mulx $acc0, $t0, $acc6 # a[2]*a[3]
2339 mov 8*0+128($a_ptr), %rdx
2340 xor $acc7, $acc7 # $acc7=0,cf=0,of=0
2341 adcx $acc1, $acc1 # acc1:6<<1
2344 adox $acc7, $acc6 # of=0
2346 mulx %rdx, $acc0, $t1
2347 mov 8*1+128($a_ptr), %rdx
2352 mov 8*2+128($a_ptr), %rdx
2358 mov 8*3+128($a_ptr), %rdx
2366 mov .Lpoly+8*3(%rip), %rdx
2368 shlx $a_ptr, $acc0, $t0
2370 shrx $a_ptr, $acc0, $t4
2377 mulx $acc0, $t0, $acc0
2379 shlx $a_ptr, $acc1, $t0
2381 shrx $a_ptr, $acc1, $t4
2387 mulx $acc1, $t0, $acc1
2389 shlx $a_ptr, $acc2, $t0
2391 shrx $a_ptr, $acc2, $t4
2397 mulx $acc2, $t0, $acc2
2399 shlx $a_ptr, $acc3, $t0
2401 shrx $a_ptr, $acc3, $t4
2407 mulx $acc3, $t0, $acc3
2412 add $acc0, $acc4 # accumulate upper half
2413 mov .Lpoly+8*1(%rip), $a_ptr
2421 sub \$-1, $acc4 # .Lpoly[0]
2423 sbb $a_ptr, $acc5 # .Lpoly[1]
2424 sbb \$0, $acc6 # .Lpoly[2]
2426 sbb $t1, $acc7 # .Lpoly[3]
2431 mov $acc4, 8*0($r_ptr)
2433 mov $acc5, 8*1($r_ptr)
2435 mov $acc6, 8*2($r_ptr)
2436 mov $acc7, 8*3($r_ptr)
2440 .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
2445 my ($r_ptr,$in_ptr)=("%rdi","%rsi");
2446 my ($acc0,$acc1,$acc2,$acc3)=map("%r$_",(8..11));
2447 my ($t0,$t1,$t2)=("%rcx","%r12","%r13");
2450 ################################################################################
2451 # void ecp_nistz256_from_mont(
2454 # This one performs Montgomery multiplication by 1, so we only need the reduction
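# i.e. it computes a*2^-256 mod p = mont_mul(a, 1), converting a value out of
# Montgomery form; with b = 1 the multiplication stage disappears and only the
# four reduction steps (plus the final conditional subtraction) remain.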
2456 .globl ecp_nistz256_from_mont
2457 .type ecp_nistz256_from_mont,\@function,2
2459 ecp_nistz256_from_mont:
2467 mov 8*0($in_ptr), %rax
2468 mov .Lpoly+8*3(%rip), $t2
2469 mov 8*1($in_ptr), $acc1
2470 mov 8*2($in_ptr), $acc2
2471 mov 8*3($in_ptr), $acc3
2473 mov .Lpoly+8*1(%rip), $t1
2475 #########################################
2487 #########################################
2500 ##########################################
2513 ###########################################
2527 ###########################################
2528 # Branch-less conditional subtraction
2538 cmovnz $in_ptr, $acc1
2539 mov $acc0, 8*0($r_ptr)
2541 mov $acc1, 8*1($r_ptr)
2543 mov $acc2, 8*2($r_ptr)
2544 mov $acc3, 8*3($r_ptr)
2551 .cfi_adjust_cfa_offset -16
2555 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
2559 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
2560 my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
2561 my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
2562 my ($M1,$T2a,$T2b,$TMP2,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
2565 ################################################################################
2566 # void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
2567 .globl ecp_nistz256_scatter_w5
2568 .type ecp_nistz256_scatter_w5,\@abi-omnipotent
2570 ecp_nistz256_scatter_w5:
2572 lea -3($index,$index,2), $index
2573 movdqa 0x00($in_t), %xmm0
2575 movdqa 0x10($in_t), %xmm1
2576 movdqa 0x20($in_t), %xmm2
2577 movdqa 0x30($in_t), %xmm3
2578 movdqa 0x40($in_t), %xmm4
2579 movdqa 0x50($in_t), %xmm5
2580 movdqa %xmm0, 0x00($val,$index)
2581 movdqa %xmm1, 0x10($val,$index)
2582 movdqa %xmm2, 0x20($val,$index)
2583 movdqa %xmm3, 0x30($val,$index)
2584 movdqa %xmm4, 0x40($val,$index)
2585 movdqa %xmm5, 0x50($val,$index)
2589 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
2591 ################################################################################
2592 # void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
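# The gather routines are constant-time table lookups: rather than indexing
# the table directly, they walk over every entry, compare a running counter
# against the requested index with pcmpeqd, AND each entry with the resulting
# all-ones/all-zeroes mask and accumulate it into the result, so the memory
# access pattern is independent of the (secret) index.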
2593 .globl ecp_nistz256_gather_w5
2594 .type ecp_nistz256_gather_w5,\@abi-omnipotent
2596 ecp_nistz256_gather_w5:
2599 $code.=<<___ if ($avx>1);
2600 mov OPENSSL_ia32cap_P+8(%rip), %eax
2602 jnz .Lavx2_gather_w5
2604 $code.=<<___ if ($win64);
2605 lea -0x88(%rsp), %rax
2606 .LSEH_begin_ecp_nistz256_gather_w5:
2607 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
2608 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
2609 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
2610 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
2611 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
2612 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
2613 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
2614 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
2615 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
2616 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
2617 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
2620 movdqa .LOne(%rip), $ONE
2631 pshufd \$0, $INDEX, $INDEX
2634 .Lselect_loop_sse_w5:
2638 pcmpeqd $INDEX, $TMP0
2640 movdqa 16*0($in_t), $T0a
2641 movdqa 16*1($in_t), $T0b
2642 movdqa 16*2($in_t), $T0c
2643 movdqa 16*3($in_t), $T0d
2644 movdqa 16*4($in_t), $T0e
2645 movdqa 16*5($in_t), $T0f
2646 lea 16*6($in_t), $in_t
2662 jnz .Lselect_loop_sse_w5
2664 movdqu $Ra, 16*0($val)
2665 movdqu $Rb, 16*1($val)
2666 movdqu $Rc, 16*2($val)
2667 movdqu $Rd, 16*3($val)
2668 movdqu $Re, 16*4($val)
2669 movdqu $Rf, 16*5($val)
2671 $code.=<<___ if ($win64);
2672 movaps (%rsp), %xmm6
2673 movaps 0x10(%rsp), %xmm7
2674 movaps 0x20(%rsp), %xmm8
2675 movaps 0x30(%rsp), %xmm9
2676 movaps 0x40(%rsp), %xmm10
2677 movaps 0x50(%rsp), %xmm11
2678 movaps 0x60(%rsp), %xmm12
2679 movaps 0x70(%rsp), %xmm13
2680 movaps 0x80(%rsp), %xmm14
2681 movaps 0x90(%rsp), %xmm15
2682 lea 0xa8(%rsp), %rsp
2687 .LSEH_end_ecp_nistz256_gather_w5:
2688 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
2690 ################################################################################
2691 # void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
2692 .globl ecp_nistz256_scatter_w7
2693 .type ecp_nistz256_scatter_w7,\@abi-omnipotent
2695 ecp_nistz256_scatter_w7:
2697 movdqu 0x00($in_t), %xmm0
2699 movdqu 0x10($in_t), %xmm1
2700 movdqu 0x20($in_t), %xmm2
2701 movdqu 0x30($in_t), %xmm3
2702 movdqa %xmm0, 0x00($val,$index)
2703 movdqa %xmm1, 0x10($val,$index)
2704 movdqa %xmm2, 0x20($val,$index)
2705 movdqa %xmm3, 0x30($val,$index)
2709 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
2711 ################################################################################
2712 # void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
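# The _w7 variants handle 64-byte entries (affine x,y pairs, as used for the
# precomputed multiples of the generator), whereas the _w5 variants above
# handle 96-byte Jacobian (x,y,z) entries; hence 4 instead of 6 xmm
# loads/stores per table element.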
2713 .globl ecp_nistz256_gather_w7
2714 .type ecp_nistz256_gather_w7,\@abi-omnipotent
2716 ecp_nistz256_gather_w7:
2719 $code.=<<___ if ($avx>1);
2720 mov OPENSSL_ia32cap_P+8(%rip), %eax
2722 jnz .Lavx2_gather_w7
2724 $code.=<<___ if ($win64);
2725 lea -0x88(%rsp), %rax
2726 .LSEH_begin_ecp_nistz256_gather_w7:
2727 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
2728 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
2729 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
2730 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
2731 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
2732 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
2733 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
2734 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
2735 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
2736 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
2737 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
2740 movdqa .LOne(%rip), $M0
2749 pshufd \$0, $INDEX, $INDEX
2752 .Lselect_loop_sse_w7:
2755 movdqa 16*0($in_t), $T0a
2756 movdqa 16*1($in_t), $T0b
2757 pcmpeqd $INDEX, $TMP0
2758 movdqa 16*2($in_t), $T0c
2759 movdqa 16*3($in_t), $T0d
2760 lea 16*4($in_t), $in_t
2769 prefetcht0 255($in_t)
2773 jnz .Lselect_loop_sse_w7
2775 movdqu $Ra, 16*0($val)
2776 movdqu $Rb, 16*1($val)
2777 movdqu $Rc, 16*2($val)
2778 movdqu $Rd, 16*3($val)
2780 $code.=<<___ if ($win64);
2781 movaps (%rsp), %xmm6
2782 movaps 0x10(%rsp), %xmm7
2783 movaps 0x20(%rsp), %xmm8
2784 movaps 0x30(%rsp), %xmm9
2785 movaps 0x40(%rsp), %xmm10
2786 movaps 0x50(%rsp), %xmm11
2787 movaps 0x60(%rsp), %xmm12
2788 movaps 0x70(%rsp), %xmm13
2789 movaps 0x80(%rsp), %xmm14
2790 movaps 0x90(%rsp), %xmm15
2791 lea 0xa8(%rsp), %rsp
2796 .LSEH_end_ecp_nistz256_gather_w7:
2797 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
2801 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
2802 my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
2803 my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
2804 my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));
2807 ################################################################################
2808 # void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
2809 .type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
2811 ecp_nistz256_avx2_gather_w5:
2816 $code.=<<___ if ($win64);
2817 lea -0x88(%rsp), %rax
2819 .LSEH_begin_ecp_nistz256_avx2_gather_w5:
2820 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
2821 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
2822 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
2823 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 0(%rax)
2824 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
2825 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
2826 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
2827 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
2828 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
2829 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
2830 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
2833 vmovdqa .LTwo(%rip), $TWO
2839 vmovdqa .LOne(%rip), $M0
2840 vmovdqa .LTwo(%rip), $M1
2843 vpermd $INDEX, $Ra, $INDEX
2846 .Lselect_loop_avx2_w5:
2848 vmovdqa 32*0($in_t), $T0a
2849 vmovdqa 32*1($in_t), $T0b
2850 vmovdqa 32*2($in_t), $T0c
2852 vmovdqa 32*3($in_t), $T1a
2853 vmovdqa 32*4($in_t), $T1b
2854 vmovdqa 32*5($in_t), $T1c
2856 vpcmpeqd $INDEX, $M0, $TMP0
2857 vpcmpeqd $INDEX, $M1, $TMP1
2859 vpaddd $TWO, $M0, $M0
2860 vpaddd $TWO, $M1, $M1
2861 lea 32*6($in_t), $in_t
2863 vpand $TMP0, $T0a, $T0a
2864 vpand $TMP0, $T0b, $T0b
2865 vpand $TMP0, $T0c, $T0c
2866 vpand $TMP1, $T1a, $T1a
2867 vpand $TMP1, $T1b, $T1b
2868 vpand $TMP1, $T1c, $T1c
2870 vpxor $T0a, $Ra, $Ra
2871 vpxor $T0b, $Rb, $Rb
2872 vpxor $T0c, $Rc, $Rc
2873 vpxor $T1a, $Ra, $Ra
2874 vpxor $T1b, $Rb, $Rb
2875 vpxor $T1c, $Rc, $Rc
2878 jnz .Lselect_loop_avx2_w5
2880 vmovdqu $Ra, 32*0($val)
2881 vmovdqu $Rb, 32*1($val)
2882 vmovdqu $Rc, 32*2($val)
2885 $code.=<<___ if ($win64);
2886 movaps (%rsp), %xmm6
2887 movaps 0x10(%rsp), %xmm7
2888 movaps 0x20(%rsp), %xmm8
2889 movaps 0x30(%rsp), %xmm9
2890 movaps 0x40(%rsp), %xmm10
2891 movaps 0x50(%rsp), %xmm11
2892 movaps 0x60(%rsp), %xmm12
2893 movaps 0x70(%rsp), %xmm13
2894 movaps 0x80(%rsp), %xmm14
2895 movaps 0x90(%rsp), %xmm15
2901 .LSEH_end_ecp_nistz256_avx2_gather_w5:
2902 .size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
2906 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
2907 my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
2908 my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
2909 my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
2910 my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));
2914 ################################################################################
2915 # void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
2916 .globl ecp_nistz256_avx2_gather_w7
2917 .type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
2919 ecp_nistz256_avx2_gather_w7:
2924 $code.=<<___ if ($win64);
2926 lea -0x88(%rsp), %rax
2927 .LSEH_begin_ecp_nistz256_avx2_gather_w7:
2928 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
2929 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
2930 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
2931 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 0(%rax)
2932 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
2933 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
2934 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
2935 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
2936 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
2937 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
2938 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
2941 vmovdqa .LThree(%rip), $THREE
2946 vmovdqa .LOne(%rip), $M0
2947 vmovdqa .LTwo(%rip), $M1
2948 vmovdqa .LThree(%rip), $M2
2951 vpermd $INDEX, $Ra, $INDEX
2952 # Skip index = 0, because it is implicitly the point at infinity
2955 .Lselect_loop_avx2_w7:
2957 vmovdqa 32*0($in_t), $T0a
2958 vmovdqa 32*1($in_t), $T0b
2960 vmovdqa 32*2($in_t), $T1a
2961 vmovdqa 32*3($in_t), $T1b
2963 vmovdqa 32*4($in_t), $T2a
2964 vmovdqa 32*5($in_t), $T2b
2966 vpcmpeqd $INDEX, $M0, $TMP0
2967 vpcmpeqd $INDEX, $M1, $TMP1
2968 vpcmpeqd $INDEX, $M2, $TMP2
2970 vpaddd $THREE, $M0, $M0
2971 vpaddd $THREE, $M1, $M1
2972 vpaddd $THREE, $M2, $M2
2973 lea 32*6($in_t), $in_t
2975 vpand $TMP0, $T0a, $T0a
2976 vpand $TMP0, $T0b, $T0b
2977 vpand $TMP1, $T1a, $T1a
2978 vpand $TMP1, $T1b, $T1b
2979 vpand $TMP2, $T2a, $T2a
2980 vpand $TMP2, $T2b, $T2b
2982 vpxor $T0a, $Ra, $Ra
2983 vpxor $T0b, $Rb, $Rb
2984 vpxor $T1a, $Ra, $Ra
2985 vpxor $T1b, $Rb, $Rb
2986 vpxor $T2a, $Ra, $Ra
2987 vpxor $T2b, $Rb, $Rb
2990 jnz .Lselect_loop_avx2_w7
2993 vmovdqa 32*0($in_t), $T0a
2994 vmovdqa 32*1($in_t), $T0b
2996 vpcmpeqd $INDEX, $M0, $TMP0
2998 vpand $TMP0, $T0a, $T0a
2999 vpand $TMP0, $T0b, $T0b
3001 vpxor $T0a, $Ra, $Ra
3002 vpxor $T0b, $Rb, $Rb
3004 vmovdqu $Ra, 32*0($val)
3005 vmovdqu $Rb, 32*1($val)
3008 $code.=<<___ if ($win64);
3009 movaps (%rsp), %xmm6
3010 movaps 0x10(%rsp), %xmm7
3011 movaps 0x20(%rsp), %xmm8
3012 movaps 0x30(%rsp), %xmm9
3013 movaps 0x40(%rsp), %xmm10
3014 movaps 0x50(%rsp), %xmm11
3015 movaps 0x60(%rsp), %xmm12
3016 movaps 0x70(%rsp), %xmm13
3017 movaps 0x80(%rsp), %xmm14
3018 movaps 0x90(%rsp), %xmm15
3024 .LSEH_end_ecp_nistz256_avx2_gather_w7:
3025 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
3029 .globl ecp_nistz256_avx2_gather_w7
3030 .type ecp_nistz256_avx2_gather_w7,\@function,3
3032 ecp_nistz256_avx2_gather_w7:
3034 .byte 0x0f,0x0b # ud2
3037 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
3041 ########################################################################
3042 # This block implements higher level point_double, point_add and
3043 # point_add_affine. The key to performance in this case is to allow
3044 # out-of-order execution logic to overlap computations from the next step
3045 # with tail processing from the current step. By using a tailored calling
3046 # sequence we minimize inter-step overhead and give the processor a better
3047 # shot at overlapping operations...
3049 # You will notice that input data is copied to the stack. The trouble is
3050 # that there are no registers to spare for holding the original pointers,
3051 # and reloading them would create undesired dependencies on the effective
3052 # address calculation paths. In other words, it is done this way to favour
3053 # out-of-order execution logic.
3054 # <appro@openssl.org>
3056 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
3057 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
3058 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
3059 my ($poly1,$poly3)=($acc6,$acc7);
3061 sub load_for_mul () {
3062 my ($a,$b,$src0) = @_;
3063 my $bias = $src0 eq "%rax" ? 0 : -128;
3069 lea $bias+$a, $a_ptr
3074 sub load_for_sqr () {
3076 my $bias = $src0 eq "%rax" ? 0 : -128;
3080 lea $bias+$a, $a_ptr
3086 ########################################################################
3087 # operate in 4-5-0-1 "name space" that matches multiplication output
3089 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
3092 .type __ecp_nistz256_add_toq,\@abi-omnipotent
3094 __ecp_nistz256_add_toq:
3097 add 8*0($b_ptr), $a0
3098 adc 8*1($b_ptr), $a1
3100 adc 8*2($b_ptr), $a2
3101 adc 8*3($b_ptr), $a3
3115 mov $a0, 8*0($r_ptr)
3117 mov $a1, 8*1($r_ptr)
3119 mov $a2, 8*2($r_ptr)
3120 mov $a3, 8*3($r_ptr)
3124 .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
3126 .type __ecp_nistz256_sub_fromq,\@abi-omnipotent
3128 __ecp_nistz256_sub_fromq:
3130 sub 8*0($b_ptr), $a0
3131 sbb 8*1($b_ptr), $a1
3133 sbb 8*2($b_ptr), $a2
3134 sbb 8*3($b_ptr), $a3
3148 mov $a0, 8*0($r_ptr)
3150 mov $a1, 8*1($r_ptr)
3152 mov $a2, 8*2($r_ptr)
3153 mov $a3, 8*3($r_ptr)
3157 .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
3159 .type __ecp_nistz256_subq,\@abi-omnipotent
3161 __ecp_nistz256_subq:
3186 .size __ecp_nistz256_subq,.-__ecp_nistz256_subq
3188 .type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
3190 __ecp_nistz256_mul_by_2q:
3193 add $a0, $a0 # a0:a3+a0:a3
3211 mov $a0, 8*0($r_ptr)
3213 mov $a1, 8*1($r_ptr)
3215 mov $a2, 8*2($r_ptr)
3216 mov $a3, 8*3($r_ptr)
3220 .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
3225 my ($src0,$sfx,$bias);
3226 my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
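# S, M, Zsqr, in_x and tmp0 are five 256-bit temporaries laid out back to
# back on the stack frame (offsets 0, 32, 64, 96, 128).
#
# In pseudo-code the call sequence below evaluates the usual Jacobian
# doubling formulas for a = -3 (cf. the per-call comments):
#   S  = 4*X1*Y1^2
#   M  = 3*(X1 + Z1^2)*(X1 - Z1^2)
#   X3 = M^2 - 2*S
#   Y3 = M*(S - X3) - 8*Y1^4
#   Z3 = 2*Y1*Z1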
3234 .globl ecp_nistz256_point_double
3235 .type ecp_nistz256_point_double,\@function,2
3237 ecp_nistz256_point_double:
3240 $code.=<<___ if ($addx);
3242 and OPENSSL_ia32cap_P+8(%rip), %ecx
3252 .type ecp_nistz256_point_doublex,\@function,2
3254 ecp_nistz256_point_doublex:
3273 .cfi_adjust_cfa_offset 32*5+8
3274 .Lpoint_double${x}_body:
3276 .Lpoint_double_shortcut$x:
3277 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
3278 mov $a_ptr, $b_ptr # backup copy
3279 movdqu 0x10($a_ptr), %xmm1
3280 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
3281 mov 0x20+8*1($a_ptr), $acc5
3282 mov 0x20+8*2($a_ptr), $acc0
3283 mov 0x20+8*3($a_ptr), $acc1
3284 mov .Lpoly+8*1(%rip), $poly1
3285 mov .Lpoly+8*3(%rip), $poly3
3286 movdqa %xmm0, $in_x(%rsp)
3287 movdqa %xmm1, $in_x+0x10(%rsp)
3288 lea 0x20($r_ptr), $acc2
3289 lea 0x40($r_ptr), $acc3
3294 lea $S(%rsp), $r_ptr
3295 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(S, in_y);
3297 mov 0x40+8*0($a_ptr), $src0
3298 mov 0x40+8*1($a_ptr), $acc6
3299 mov 0x40+8*2($a_ptr), $acc7
3300 mov 0x40+8*3($a_ptr), $acc0
3301 lea 0x40-$bias($a_ptr), $a_ptr
3302 lea $Zsqr(%rsp), $r_ptr
3303 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Zsqr, in_z);
3305 `&load_for_sqr("$S(%rsp)", "$src0")`
3306 lea $S(%rsp), $r_ptr
3307 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(S, S);
3309 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
3310 mov 0x40+8*0($b_ptr), $acc1
3311 mov 0x40+8*1($b_ptr), $acc2
3312 mov 0x40+8*2($b_ptr), $acc3
3313 mov 0x40+8*3($b_ptr), $acc4
3314 lea 0x40-$bias($b_ptr), $a_ptr
3315 lea 0x20($b_ptr), $b_ptr
3317 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, in_z, in_y);
3318 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(res_z, res_z);
3320 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3321 mov $in_x+8*1(%rsp), $acc5
3322 lea $Zsqr(%rsp), $b_ptr
3323 mov $in_x+8*2(%rsp), $acc0
3324 mov $in_x+8*3(%rsp), $acc1
3325 lea $M(%rsp), $r_ptr
3326 call __ecp_nistz256_add_to$x # p256_add(M, in_x, Zsqr);
3328 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3329 mov $in_x+8*1(%rsp), $acc5
3330 lea $Zsqr(%rsp), $b_ptr
3331 mov $in_x+8*2(%rsp), $acc0
3332 mov $in_x+8*3(%rsp), $acc1
3333 lea $Zsqr(%rsp), $r_ptr
3334 call __ecp_nistz256_sub_from$x # p256_sub(Zsqr, in_x, Zsqr);
3336 `&load_for_sqr("$S(%rsp)", "$src0")`
3338 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
3341 ######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
3342 # operate in 4-5-6-7 "name space" that matches squaring output
3344 my ($poly1,$poly3)=($a_ptr,$t1);
3345 my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);
3358 xor $a_ptr, $a_ptr # borrow $a_ptr
3367 mov $a1, $t0 # a0:a3>>1
3378 mov $a0, 8*0($r_ptr)
3380 mov $a1, 8*1($r_ptr)
3384 mov $a2, 8*2($r_ptr)
3385 mov $a3, 8*3($r_ptr)
3389 `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
3390 lea $M(%rsp), $r_ptr
3391 call __ecp_nistz256_mul_mont$x # p256_mul_mont(M, M, Zsqr);
3393 lea $tmp0(%rsp), $r_ptr
3394 call __ecp_nistz256_mul_by_2$x
3396 lea $M(%rsp), $b_ptr
3397 lea $M(%rsp), $r_ptr
3398 call __ecp_nistz256_add_to$x # p256_mul_by_3(M, M);
3400 `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
3401 lea $S(%rsp), $r_ptr
3402 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, in_x);
3404 lea $tmp0(%rsp), $r_ptr
3405 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, S);
3407 `&load_for_sqr("$M(%rsp)", "$src0")`
3409 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_x, M);
3411 lea $tmp0(%rsp), $b_ptr
3412 mov $acc6, $acc0 # harmonize sqr output and sub input
3416 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, tmp0);
3418 mov $S+8*0(%rsp), $t0
3419 mov $S+8*1(%rsp), $t1
3420 mov $S+8*2(%rsp), $t2
3421 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
3422 lea $S(%rsp), $r_ptr
3423 call __ecp_nistz256_sub$x # p256_sub(S, S, res_x);
3426 lea $M(%rsp), $b_ptr
3427 mov $acc4, $acc6 # harmonize sub output and mul input
3429 mov $acc4, $S+8*0(%rsp) # have to save:-(
3431 mov $acc5, $S+8*1(%rsp)
3433 mov $acc0, $S+8*2(%rsp)
3434 lea $S-$bias(%rsp), $a_ptr
3436 mov $acc1, $S+8*3(%rsp)
3438 lea $S(%rsp), $r_ptr
3439 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, M);
3443 call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
3445 lea 32*5+56(%rsp), %rsi
3460 .cfi_def_cfa_register %rsp
3461 .Lpoint_double${x}_epilogue:
3464 .size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
3471 my ($src0,$sfx,$bias);
3472 my ($H,$Hsqr,$R,$Rsqr,$Hcub,
3474 $res_x,$res_y,$res_z,
3475 $in1_x,$in1_y,$in1_z,
3476 $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
3477 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
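# In pseudo-code the call sequence below evaluates the standard Jacobian
# addition formulas (cf. the per-call comments):
#   U1 = X1*Z2^2,  U2 = X2*Z1^2,  S1 = Y1*Z2^3,  S2 = Y2*Z1^3
#   H  = U2 - U1,  R  = S2 - S1
#   X3 = R^2 - H^3 - 2*U1*H^2
#   Y3 = R*(U1*H^2 - X3) - S1*H^3
#   Z3 = H*Z1*Z2
# with the doubling routine taken as a shortcut when the inputs are equal
# and neither is the point at infinity.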
3485 .globl ecp_nistz256_point_add
3486 .type ecp_nistz256_point_add,\@function,3
3488 ecp_nistz256_point_add:
3491 $code.=<<___ if ($addx);
3493 and OPENSSL_ia32cap_P+8(%rip), %ecx
3503 .type ecp_nistz256_point_addx,\@function,3
3505 ecp_nistz256_point_addx:
3524 .cfi_adjust_cfa_offset 32*18+8
3525 .Lpoint_add${x}_body:
3527 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
3528 movdqu 0x10($a_ptr), %xmm1
3529 movdqu 0x20($a_ptr), %xmm2
3530 movdqu 0x30($a_ptr), %xmm3
3531 movdqu 0x40($a_ptr), %xmm4
3532 movdqu 0x50($a_ptr), %xmm5
3533 mov $a_ptr, $b_ptr # reassign
3534 mov $b_org, $a_ptr # reassign
3535 movdqa %xmm0, $in1_x(%rsp)
3536 movdqa %xmm1, $in1_x+0x10(%rsp)
3537 movdqa %xmm2, $in1_y(%rsp)
3538 movdqa %xmm3, $in1_y+0x10(%rsp)
3539 movdqa %xmm4, $in1_z(%rsp)
3540 movdqa %xmm5, $in1_z+0x10(%rsp)
3543 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$b_ptr
3544 pshufd \$0xb1, %xmm5, %xmm3
3545 movdqu 0x10($a_ptr), %xmm1
3546 movdqu 0x20($a_ptr), %xmm2
3548 movdqu 0x30($a_ptr), %xmm3
3549 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
3550 mov 0x40+8*1($a_ptr), $acc6
3551 mov 0x40+8*2($a_ptr), $acc7
3552 mov 0x40+8*3($a_ptr), $acc0
3553 movdqa %xmm0, $in2_x(%rsp)
3554 pshufd \$0x1e, %xmm5, %xmm4
3555 movdqa %xmm1, $in2_x+0x10(%rsp)
3556 movdqu 0x40($a_ptr),%xmm0 # in2_z again
3557 movdqu 0x50($a_ptr),%xmm1
3558 movdqa %xmm2, $in2_y(%rsp)
3559 movdqa %xmm3, $in2_y+0x10(%rsp)
3563 movq $r_ptr, %xmm0 # save $r_ptr
3565 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
3566 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
3567 mov $acc6, $in2_z+8*1(%rsp)
3568 mov $acc7, $in2_z+8*2(%rsp)
3569 mov $acc0, $in2_z+8*3(%rsp)
3570 lea $Z2sqr(%rsp), $r_ptr # Z2^2
3571 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z2sqr, in2_z);
3573 pcmpeqd %xmm4, %xmm5
3574 pshufd \$0xb1, %xmm1, %xmm4
3576 pshufd \$0, %xmm5, %xmm5 # in1infty
3577 pshufd \$0x1e, %xmm4, %xmm3
3580 pcmpeqd %xmm3, %xmm4
3581 pshufd \$0, %xmm4, %xmm4 # in2infty
3582 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
3583 mov 0x40+8*1($b_ptr), $acc6
3584 mov 0x40+8*2($b_ptr), $acc7
3585 mov 0x40+8*3($b_ptr), $acc0
3588 lea 0x40-$bias($b_ptr), $a_ptr
3589 lea $Z1sqr(%rsp), $r_ptr # Z1^2
3590 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
3592 `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
3593 lea $S1(%rsp), $r_ptr # S1 = Z2^3
3594 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, Z2sqr, in2_z);
3596 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
3597 lea $S2(%rsp), $r_ptr # S2 = Z1^3
3598 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
3600 `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
3601 lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
3602 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, S1, in1_y);
3604 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
3605 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
3606 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
3608 lea $S1(%rsp), $b_ptr
3609 lea $R(%rsp), $r_ptr # R = S2 - S1
3610 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, S1);
3612 or $acc5, $acc4 # see if result is zero
3616 por %xmm5, %xmm2 # in1infty || in2infty
3619 `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
3620 lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
3621 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U1, in1_x, Z2sqr);
3623 `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
3624 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
3625 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in2_x, Z1sqr);
3627 lea $U1(%rsp), $b_ptr
3628 lea $H(%rsp), $r_ptr # H = U2 - U1
3629 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, U1);
3631 or $acc5, $acc4 # see if result is zero
3633 or $acc1, $acc4 # !is_equal(U1, U2)
3635 movq %xmm2, $acc0 # in1infty | in2infty
3636 movq %xmm3, $acc1 # !is_equal(S1, S2)
3641 # if (!is_equal(U1, U2) | in1infty | in2infty | !is_equal(S1, S2))
3642 .byte 0x3e # predict taken
3646 movq %xmm1, $a_ptr # restore $a_ptr
3647 movq %xmm0, $r_ptr # restore $r_ptr
3648 add \$`32*(18-5)`, %rsp # difference in frame sizes
3649 .cfi_adjust_cfa_offset `-32*(18-5)`
3650 jmp .Lpoint_double_shortcut$x
3651 .cfi_adjust_cfa_offset `32*(18-5)`
3655 `&load_for_sqr("$R(%rsp)", "$src0")`
3656 lea $Rsqr(%rsp), $r_ptr # R^2
3657 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
3659 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
3660 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3661 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
3663 `&load_for_sqr("$H(%rsp)", "$src0")`
3664 lea $Hsqr(%rsp), $r_ptr # H^2
3665 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
3667 `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
3668 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3669 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, res_z, in2_z);
3671 `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
3672 lea $Hcub(%rsp), $r_ptr # H^3
3673 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
3675 `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
3676 lea $U2(%rsp), $r_ptr # U1*H^2
3677 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, U1, Hsqr);
3680 #######################################################################
3681 # operate in 4-5-0-1 "name space" that matches multiplication output
3683 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
3684 my ($poly1, $poly3)=($acc6,$acc7);
3687 #lea $U2(%rsp), $a_ptr
3688 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
3689 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
3692 add $acc0, $acc0 # a0:a3+a0:a3
3693 lea $Rsqr(%rsp), $a_ptr
3710 mov 8*0($a_ptr), $t0
3712 mov 8*1($a_ptr), $t1
3714 mov 8*2($a_ptr), $t2
3716 mov 8*3($a_ptr), $t3
3718 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
3720 lea $Hcub(%rsp), $b_ptr
3721 lea $res_x(%rsp), $r_ptr
3722 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
3724 mov $U2+8*0(%rsp), $t0
3725 mov $U2+8*1(%rsp), $t1
3726 mov $U2+8*2(%rsp), $t2
3727 mov $U2+8*3(%rsp), $t3
3728 lea $res_y(%rsp), $r_ptr
3730 call __ecp_nistz256_sub$x # p256_sub(res_y, U2, res_x);
3732 mov $acc0, 8*0($r_ptr) # save the result, as
3733 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
3734 mov $acc2, 8*2($r_ptr)
3735 mov $acc3, 8*3($r_ptr)
3739 `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
3740 lea $S2(%rsp), $r_ptr
3741 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S1, Hcub);
3743 `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
3744 lea $res_y(%rsp), $r_ptr
3745 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_y, R, res_y);
3747 lea $S2(%rsp), $b_ptr
3748 lea $res_y(%rsp), $r_ptr
3749 call __ecp_nistz256_sub_from$x # p256_sub(res_y, res_y, S2);
3751 movq %xmm0, $r_ptr # restore $r_ptr
3753 movdqa %xmm5, %xmm0 # copy_conditional(res_z, in2_z, in1infty);
3755 pandn $res_z(%rsp), %xmm0
3757 pandn $res_z+0x10(%rsp), %xmm1
3759 pand $in2_z(%rsp), %xmm2
3760 pand $in2_z+0x10(%rsp), %xmm3
3764 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
3770 pand $in1_z(%rsp), %xmm2
3771 pand $in1_z+0x10(%rsp), %xmm3
3774 movdqu %xmm2, 0x40($r_ptr)
3775 movdqu %xmm3, 0x50($r_ptr)
3777 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
3779 pandn $res_x(%rsp), %xmm0
3781 pandn $res_x+0x10(%rsp), %xmm1
3783 pand $in2_x(%rsp), %xmm2
3784 pand $in2_x+0x10(%rsp), %xmm3
3788 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
3794 pand $in1_x(%rsp), %xmm2
3795 pand $in1_x+0x10(%rsp), %xmm3
3798 movdqu %xmm2, 0x00($r_ptr)
3799 movdqu %xmm3, 0x10($r_ptr)
3801 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
3803 pandn $res_y(%rsp), %xmm0
3805 pandn $res_y+0x10(%rsp), %xmm1
3807 pand $in2_y(%rsp), %xmm2
3808 pand $in2_y+0x10(%rsp), %xmm3
3812 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
3818 pand $in1_y(%rsp), %xmm2
3819 pand $in1_y+0x10(%rsp), %xmm3
3822 movdqu %xmm2, 0x20($r_ptr)
3823 movdqu %xmm3, 0x30($r_ptr)
3826 lea 32*18+56(%rsp), %rsi
3841 .cfi_def_cfa_register %rsp
3842 .Lpoint_add${x}_epilogue:
3845 .size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
3850 sub gen_add_affine () {
3852 my ($src0,$sfx,$bias);
3853 my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
3854 $res_x,$res_y,$res_z,
3855 $in1_x,$in1_y,$in1_z,
3856 $in2_x,$in2_y)=map(32*$_,(0..14));
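# Mixed addition: the second input is an affine point (Z2 = 1), so the
# general addition above simplifies to
#   U2 = X2*Z1^2,  S2 = Y2*Z1^3,  H = U2 - X1,  R = S2 - Y1
#   X3 = R^2 - H^3 - 2*X1*H^2
#   Y3 = R*(X1*H^2 - X3) - Y1*H^3
#   Z3 = H*Z1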
3865 .globl ecp_nistz256_point_add_affine
3866 .type ecp_nistz256_point_add_affine,\@function,3
3868 ecp_nistz256_point_add_affine:
3871 $code.=<<___ if ($addx);
3873 and OPENSSL_ia32cap_P+8(%rip), %ecx
3875 je .Lpoint_add_affinex
3883 .type ecp_nistz256_point_add_affinex,\@function,3
3885 ecp_nistz256_point_add_affinex:
3887 .Lpoint_add_affinex:
3904 .cfi_adjust_cfa_offset 32*15+8
3905 .Ladd_affine${x}_body:
3907 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
3908 mov $b_org, $b_ptr # reassign
3909 movdqu 0x10($a_ptr), %xmm1
3910 movdqu 0x20($a_ptr), %xmm2
3911 movdqu 0x30($a_ptr), %xmm3
3912 movdqu 0x40($a_ptr), %xmm4
3913 movdqu 0x50($a_ptr), %xmm5
3914 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
3915 mov 0x40+8*1($a_ptr), $acc6
3916 mov 0x40+8*2($a_ptr), $acc7
3917 mov 0x40+8*3($a_ptr), $acc0
3918 movdqa %xmm0, $in1_x(%rsp)
3919 movdqa %xmm1, $in1_x+0x10(%rsp)
3920 movdqa %xmm2, $in1_y(%rsp)
3921 movdqa %xmm3, $in1_y+0x10(%rsp)
3922 movdqa %xmm4, $in1_z(%rsp)
3923 movdqa %xmm5, $in1_z+0x10(%rsp)
3926 movdqu 0x00($b_ptr), %xmm0 # copy *(P256_POINT_AFFINE *)$b_ptr
3927 pshufd \$0xb1, %xmm5, %xmm3
3928 movdqu 0x10($b_ptr), %xmm1
3929 movdqu 0x20($b_ptr), %xmm2
3931 movdqu 0x30($b_ptr), %xmm3
3932 movdqa %xmm0, $in2_x(%rsp)
3933 pshufd \$0x1e, %xmm5, %xmm4
3934 movdqa %xmm1, $in2_x+0x10(%rsp)
3936 movq $r_ptr, %xmm0 # save $r_ptr
3937 movdqa %xmm2, $in2_y(%rsp)
3938 movdqa %xmm3, $in2_y+0x10(%rsp)
3944 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
3945 lea $Z1sqr(%rsp), $r_ptr # Z1^2
3946 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
3948 pcmpeqd %xmm4, %xmm5
3949 pshufd \$0xb1, %xmm3, %xmm4
3950 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
3951 #lea 0x00($b_ptr), $b_ptr
3952 mov $acc4, $acc1 # harmonize sqr output and mul input
3954 pshufd \$0, %xmm5, %xmm5 # in1infty
3955 pshufd \$0x1e, %xmm4, %xmm3
3960 pcmpeqd %xmm3, %xmm4
3961 pshufd \$0, %xmm4, %xmm4 # in2infty
3963 lea $Z1sqr-$bias(%rsp), $a_ptr
3965 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
3966 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, Z1sqr, in2_x);
3968 lea $in1_x(%rsp), $b_ptr
3969 lea $H(%rsp), $r_ptr # H = U2 - U1
3970 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, in1_x);
3972 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
3973 lea $S2(%rsp), $r_ptr # S2 = Z1^3
3974 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
3976 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
3977 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3978 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
3980 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
3981 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
3982 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
3984 lea $in1_y(%rsp), $b_ptr
3985 lea $R(%rsp), $r_ptr # R = S2 - S1
3986 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, in1_y);
3988 `&load_for_sqr("$H(%rsp)", "$src0")`
3989 lea $Hsqr(%rsp), $r_ptr # H^2
3990 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
3992 `&load_for_sqr("$R(%rsp)", "$src0")`
3993 lea $Rsqr(%rsp), $r_ptr # R^2
3994 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
3996 `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
3997 lea $Hcub(%rsp), $r_ptr # H^3
3998 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
4000 `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
4001 lea $U2(%rsp), $r_ptr # U1*H^2
4002 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in1_x, Hsqr);
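#
# Reference sketch (comments only) of the mixed addition being evaluated; it
# merely restates the p256_* comments on the calls above and below in formula
# form, with X1/Y1/Z1 = in1, X2/Y2 = in2, X3/Y3/Z3 = res, all in the
# Montgomery domain:
#
#	Z1sqr = Z1^2;	U2 = X2*Z1sqr;	H = U2 - X1;	Z3 = H*Z1;
#	S2 = Y2*Z1^3;	R = S2 - Y1;
#	X3 = R^2 - H^3 - 2*X1*H^2;
#	Y3 = R*(X1*H^2 - X3) - Y1*H^3;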
4005 #######################################################################
4006 # operate in 4-5-0-1 "name space" that matches multiplication output
4008 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
4009 my ($poly1, $poly3)=($acc6,$acc7);
4012 #lea $U2(%rsp), $a_ptr
4013 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
4014 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
4017 add $acc0, $acc0 # a0:a3+a0:a3
4018 lea $Rsqr(%rsp), $a_ptr
4035 mov 8*0($a_ptr), $t0
4037 mov 8*1($a_ptr), $t1
4039 mov 8*2($a_ptr), $t2
4041 mov 8*3($a_ptr), $t3
4043 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
4045 lea $Hcub(%rsp), $b_ptr
4046 lea $res_x(%rsp), $r_ptr
4047 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
4049 mov $U2+8*0(%rsp), $t0
4050 mov $U2+8*1(%rsp), $t1
4051 mov $U2+8*2(%rsp), $t2
4052 mov $U2+8*3(%rsp), $t3
4053 lea $H(%rsp), $r_ptr
4055 call __ecp_nistz256_sub$x # p256_sub(H, U2, res_x);
4057 mov $acc0, 8*0($r_ptr) # save the result, as
4058 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't write it back
4059 mov $acc2, 8*2($r_ptr)
4060 mov $acc3, 8*3($r_ptr)
4064 `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
4065 lea $S2(%rsp), $r_ptr
4066 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Hcub, in1_y);
4068 `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
4069 lea $H(%rsp), $r_ptr
4070 call __ecp_nistz256_mul_mont$x # p256_mul_mont(H, H, R);
4072 lea $S2(%rsp), $b_ptr
4073 lea $res_y(%rsp), $r_ptr
4074 call __ecp_nistz256_sub_from$x # p256_sub(res_y, H, S2);
4076 movq %xmm0, $r_ptr # restore $r_ptr
4078 movdqa %xmm5, %xmm0 # copy_conditional(res_z, ONE, in1infty);
4080 pandn $res_z(%rsp), %xmm0
4082 pandn $res_z+0x10(%rsp), %xmm1
4084 pand .LONE_mont(%rip), %xmm2
4085 pand .LONE_mont+0x10(%rip), %xmm3
4089 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
4095 pand $in1_z(%rsp), %xmm2
4096 pand $in1_z+0x10(%rsp), %xmm3
4099 movdqu %xmm2, 0x40($r_ptr)
4100 movdqu %xmm3, 0x50($r_ptr)
4102 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
4104 pandn $res_x(%rsp), %xmm0
4106 pandn $res_x+0x10(%rsp), %xmm1
4108 pand $in2_x(%rsp), %xmm2
4109 pand $in2_x+0x10(%rsp), %xmm3
4113 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
4119 pand $in1_x(%rsp), %xmm2
4120 pand $in1_x+0x10(%rsp), %xmm3
4123 movdqu %xmm2, 0x00($r_ptr)
4124 movdqu %xmm3, 0x10($r_ptr)
4126 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
4128 pandn $res_y(%rsp), %xmm0
4130 pandn $res_y+0x10(%rsp), %xmm1
4132 pand $in2_y(%rsp), %xmm2
4133 pand $in2_y+0x10(%rsp), %xmm3
4137 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
4143 pand $in1_y(%rsp), %xmm2
4144 pand $in1_y+0x10(%rsp), %xmm3
4147 movdqu %xmm2, 0x20($r_ptr)
4148 movdqu %xmm3, 0x30($r_ptr)
4150 lea 32*15+56(%rsp), %rsi
4165 .cfi_def_cfa_register %rsp
4166 .Ladd_affine${x}_epilogue:
4169 .size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
4172 &gen_add_affine("q");
4174 ########################################################################
4178 ########################################################################
4179 # operate in 4-5-0-1 "name space" that matches multiplication output
4181 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
4184 .type __ecp_nistz256_add_tox,\@abi-omnipotent
4186 __ecp_nistz256_add_tox:
4189 adc 8*0($b_ptr), $a0
4190 adc 8*1($b_ptr), $a1
4192 adc 8*2($b_ptr), $a2
4193 adc 8*3($b_ptr), $a3
4208 mov $a0, 8*0($r_ptr)
4210 mov $a1, 8*1($r_ptr)
4212 mov $a2, 8*2($r_ptr)
4213 mov $a3, 8*3($r_ptr)
4217 .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
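#
# Rough C model (comments only) of the addition above: the adc chain computes
# a + b, a second chain computes the same value minus the P-256 prime, and the
# final conditional moves keep whichever is the correct residue.  Names
# (p256_add, P) and the use of unsigned __int128 are illustrative assumptions,
# not the reference implementation:
#
#	#include <stdint.h>
#	static const uint64_t P[4] = { 0xffffffffffffffffULL, 0x00000000ffffffffULL,
#	                               0x0000000000000000ULL, 0xffffffff00000001ULL };
#
#	/* r = (a + b) mod P, assuming a, b < P; constant time */
#	static void p256_add(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
#	{
#		unsigned __int128 acc = 0;
#		uint64_t t[4], u[4], carry, borrow = 0, mask;
#		for (int i = 0; i < 4; i++) {		/* t = a + b */
#			acc += (unsigned __int128)a[i] + b[i];
#			t[i] = (uint64_t)acc;  acc >>= 64;
#		}
#		carry = (uint64_t)acc;
#		for (int i = 0; i < 4; i++) {		/* u = t - P */
#			unsigned __int128 d = (unsigned __int128)t[i] - P[i] - borrow;
#			u[i] = (uint64_t)d;  borrow = (uint64_t)(d >> 64) & 1;
#		}
#		mask = 0 - (carry | (borrow ^ 1));	/* a + b >= P ? */
#		for (int i = 0; i < 4; i++)
#			r[i] = (u[i] & mask) | (t[i] & ~mask);
#	}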
4219 .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
4221 __ecp_nistz256_sub_fromx:
4224 sbb 8*0($b_ptr), $a0
4225 sbb 8*1($b_ptr), $a1
4227 sbb 8*2($b_ptr), $a2
4228 sbb 8*3($b_ptr), $a3
4243 mov $a0, 8*0($r_ptr)
4245 mov $a1, 8*1($r_ptr)
4247 mov $a2, 8*2($r_ptr)
4248 mov $a3, 8*3($r_ptr)
4252 .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
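#
# The matching C sketch for the subtraction above (comments only, same
# illustrative P as in the p256_add sketch): subtract limb-wise, then add P
# back when the sbb chain borrowed:
#
#	/* r = (a - b) mod P, assuming a, b < P; constant time */
#	static void p256_sub(uint64_t r[4], const uint64_t a[4], const uint64_t b[4])
#	{
#		uint64_t t[4], borrow = 0, carry = 0, mask;
#		for (int i = 0; i < 4; i++) {		/* t = a - b */
#			unsigned __int128 d = (unsigned __int128)a[i] - b[i] - borrow;
#			t[i] = (uint64_t)d;  borrow = (uint64_t)(d >> 64) & 1;
#		}
#		mask = 0 - borrow;			/* a < b ? then add P back */
#		for (int i = 0; i < 4; i++) {		/* r = t + (P & mask) */
#			unsigned __int128 s = (unsigned __int128)t[i] + (P[i] & mask) + carry;
#			r[i] = (uint64_t)s;  carry = (uint64_t)(s >> 64);
#		}
#	}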
4254 .type __ecp_nistz256_subx,\@abi-omnipotent
4256 __ecp_nistz256_subx:
4283 .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
4285 .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
4287 __ecp_nistz256_mul_by_2x:
4290 adc $a0, $a0 # a0:a3+a0:a3
4309 mov $a0, 8*0($r_ptr)
4311 mov $a1, 8*1($r_ptr)
4313 mov $a2, 8*2($r_ptr)
4314 mov $a3, 8*3($r_ptr)
4318 .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
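# __ecp_nistz256_mul_by_2x follows the same reduction pattern with the operand
# added to itself, i.e. r = 2*a mod P.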
4323 &gen_add_affine("x");
4327 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
4328 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
4336 .extern __imp_RtlVirtualUnwind
4338 .type short_handler,\@abi-omnipotent
4352 mov 120($context),%rax # pull context->Rax
4353 mov 248($context),%rbx # pull context->Rip
4355 mov 8($disp),%rsi # disp->ImageBase
4356 mov 56($disp),%r11 # disp->HandlerData
4358 mov 0(%r11),%r10d # HandlerData[0]
4359 lea (%rsi,%r10),%r10 # end of prologue label
4360 cmp %r10,%rbx # context->Rip<end of prologue label
4361 jb .Lcommon_seh_tail
4363 mov 152($context),%rax # pull context->Rsp
4365 mov 4(%r11),%r10d # HandlerData[1]
4366 lea (%rsi,%r10),%r10 # epilogue label
4367 cmp %r10,%rbx # context->Rip>=epilogue label
4368 jae .Lcommon_seh_tail
4374 mov %r12,216($context) # restore context->R12
4375 mov %r13,224($context) # restore context->R13
4377 jmp .Lcommon_seh_tail
4378 .size short_handler,.-short_handler
4380 .type full_handler,\@abi-omnipotent
4394 mov 120($context),%rax # pull context->Rax
4395 mov 248($context),%rbx # pull context->Rip
4397 mov 8($disp),%rsi # disp->ImageBase
4398 mov 56($disp),%r11 # disp->HandlerData
4400 mov 0(%r11),%r10d # HandlerData[0]
4401 lea (%rsi,%r10),%r10 # end of prologue label
4402 cmp %r10,%rbx # context->Rip<end of prologue label
4403 jb .Lcommon_seh_tail
4405 mov 152($context),%rax # pull context->Rsp
4407 mov 4(%r11),%r10d # HandlerData[1]
4408 lea (%rsi,%r10),%r10 # epilogue label
4409 cmp %r10,%rbx # context->Rip>=epilogue label
4410 jae .Lcommon_seh_tail
4412 mov 8(%r11),%r10d # HandlerData[2]
4413 lea (%rax,%r10),%rax
4421 mov %rbx,144($context) # restore context->Rbx
4422 mov %rbp,160($context) # restore context->Rbp
4423 mov %r12,216($context) # restore context->R12
4424 mov %r13,224($context) # restore context->R13
4425 mov %r14,232($context) # restore context->R14
4426 mov %r15,240($context) # restore context->R15
4431 mov %rax,152($context) # restore context->Rsp
4432 mov %rsi,168($context) # restore context->Rsi
4433 mov %rdi,176($context) # restore context->Rdi
4435 mov 40($disp),%rdi # disp->ContextRecord
4436 mov $context,%rsi # context
4437 mov \$154,%ecx # sizeof(CONTEXT)
4438 .long 0xa548f3fc # cld; rep movsq
4441 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
4442 mov 8(%rsi),%rdx # arg2, disp->ImageBase
4443 mov 0(%rsi),%r8 # arg3, disp->ControlPc
4444 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
4445 mov 40(%rsi),%r10 # disp->ContextRecord
4446 lea 56(%rsi),%r11 # &disp->HandlerData
4447 lea 24(%rsi),%r12 # &disp->EstablisherFrame
4448 mov %r10,32(%rsp) # arg5
4449 mov %r11,40(%rsp) # arg6
4450 mov %r12,48(%rsp) # arg7
4451 mov %rcx,56(%rsp) # arg8, (NULL)
4452 call *__imp_RtlVirtualUnwind(%rip)
4454 mov \$1,%eax # ExceptionContinueSearch
4466 .size full_handler,.-full_handler
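#
# C sketch (comments only) of short_handler/full_handler; it restates what the
# register comments already say, using the Windows x64 CONTEXT and
# DISPATCHER_CONTEXT structures whose byte offsets appear in the code.  The
# restore branch is summarized rather than spelled out:
#
#	EXCEPTION_DISPOSITION handler(EXCEPTION_RECORD *rec, ULONG64 frame,
#	                              CONTEXT *ctx, DISPATCHER_CONTEXT *disp)
#	{
#		const DWORD *data = (const DWORD *)disp->HandlerData;
#		ULONG64 body = disp->ImageBase + data[0];  /* end of prologue */
#		ULONG64 tail = disp->ImageBase + data[1];  /* epilogue label  */
#
#		if (ctx->Rip >= body && ctx->Rip < tail) {
#			/* interrupted inside the body: recover the callee-saved
#			 * registers from the stack frame (full_handler locates it
#			 * via data[2], the frame size) and fix up ctx->Rsp */
#		}
#		/* common tail: copy the context record, RtlVirtualUnwind(), then */
#		return ExceptionContinueSearch;
#	}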
4470 .rva .LSEH_begin_ecp_nistz256_mul_by_2
4471 .rva .LSEH_end_ecp_nistz256_mul_by_2
4472 .rva .LSEH_info_ecp_nistz256_mul_by_2
4474 .rva .LSEH_begin_ecp_nistz256_div_by_2
4475 .rva .LSEH_end_ecp_nistz256_div_by_2
4476 .rva .LSEH_info_ecp_nistz256_div_by_2
4478 .rva .LSEH_begin_ecp_nistz256_mul_by_3
4479 .rva .LSEH_end_ecp_nistz256_mul_by_3
4480 .rva .LSEH_info_ecp_nistz256_mul_by_3
4482 .rva .LSEH_begin_ecp_nistz256_add
4483 .rva .LSEH_end_ecp_nistz256_add
4484 .rva .LSEH_info_ecp_nistz256_add
4486 .rva .LSEH_begin_ecp_nistz256_sub
4487 .rva .LSEH_end_ecp_nistz256_sub
4488 .rva .LSEH_info_ecp_nistz256_sub
4490 .rva .LSEH_begin_ecp_nistz256_neg
4491 .rva .LSEH_end_ecp_nistz256_neg
4492 .rva .LSEH_info_ecp_nistz256_neg
4494 .rva .LSEH_begin_ecp_nistz256_ord_mul_mont
4495 .rva .LSEH_end_ecp_nistz256_ord_mul_mont
4496 .rva .LSEH_info_ecp_nistz256_ord_mul_mont
4498 .rva .LSEH_begin_ecp_nistz256_ord_sqr_mont
4499 .rva .LSEH_end_ecp_nistz256_ord_sqr_mont
4500 .rva .LSEH_info_ecp_nistz256_ord_sqr_mont
4502 $code.=<<___ if ($addx);
4503 .rva .LSEH_begin_ecp_nistz256_ord_mul_montx
4504 .rva .LSEH_end_ecp_nistz256_ord_mul_montx
4505 .rva .LSEH_info_ecp_nistz256_ord_mul_montx
4507 .rva .LSEH_begin_ecp_nistz256_ord_sqr_montx
4508 .rva .LSEH_end_ecp_nistz256_ord_sqr_montx
4509 .rva .LSEH_info_ecp_nistz256_ord_sqr_montx
4512 .rva .LSEH_begin_ecp_nistz256_to_mont
4513 .rva .LSEH_end_ecp_nistz256_to_mont
4514 .rva .LSEH_info_ecp_nistz256_to_mont
4516 .rva .LSEH_begin_ecp_nistz256_mul_mont
4517 .rva .LSEH_end_ecp_nistz256_mul_mont
4518 .rva .LSEH_info_ecp_nistz256_mul_mont
4520 .rva .LSEH_begin_ecp_nistz256_sqr_mont
4521 .rva .LSEH_end_ecp_nistz256_sqr_mont
4522 .rva .LSEH_info_ecp_nistz256_sqr_mont
4524 .rva .LSEH_begin_ecp_nistz256_from_mont
4525 .rva .LSEH_end_ecp_nistz256_from_mont
4526 .rva .LSEH_info_ecp_nistz256_from_mont
4528 .rva .LSEH_begin_ecp_nistz256_gather_w5
4529 .rva .LSEH_end_ecp_nistz256_gather_w5
4530 .rva .LSEH_info_ecp_nistz256_gather_wX
4532 .rva .LSEH_begin_ecp_nistz256_gather_w7
4533 .rva .LSEH_end_ecp_nistz256_gather_w7
4534 .rva .LSEH_info_ecp_nistz256_gather_wX
4536 $code.=<<___ if ($avx>1);
4537 .rva .LSEH_begin_ecp_nistz256_avx2_gather_w5
4538 .rva .LSEH_end_ecp_nistz256_avx2_gather_w5
4539 .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
4541 .rva .LSEH_begin_ecp_nistz256_avx2_gather_w7
4542 .rva .LSEH_end_ecp_nistz256_avx2_gather_w7
4543 .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
4546 .rva .LSEH_begin_ecp_nistz256_point_double
4547 .rva .LSEH_end_ecp_nistz256_point_double
4548 .rva .LSEH_info_ecp_nistz256_point_double
4550 .rva .LSEH_begin_ecp_nistz256_point_add
4551 .rva .LSEH_end_ecp_nistz256_point_add
4552 .rva .LSEH_info_ecp_nistz256_point_add
4554 .rva .LSEH_begin_ecp_nistz256_point_add_affine
4555 .rva .LSEH_end_ecp_nistz256_point_add_affine
4556 .rva .LSEH_info_ecp_nistz256_point_add_affine
4558 $code.=<<___ if ($addx);
4559 .rva .LSEH_begin_ecp_nistz256_point_doublex
4560 .rva .LSEH_end_ecp_nistz256_point_doublex
4561 .rva .LSEH_info_ecp_nistz256_point_doublex
4563 .rva .LSEH_begin_ecp_nistz256_point_addx
4564 .rva .LSEH_end_ecp_nistz256_point_addx
4565 .rva .LSEH_info_ecp_nistz256_point_addx
4567 .rva .LSEH_begin_ecp_nistz256_point_add_affinex
4568 .rva .LSEH_end_ecp_nistz256_point_add_affinex
4569 .rva .LSEH_info_ecp_nistz256_point_add_affinex
4575 .LSEH_info_ecp_nistz256_mul_by_2:
4578 .rva .Lmul_by_2_body,.Lmul_by_2_epilogue # HandlerData[]
4579 .LSEH_info_ecp_nistz256_div_by_2:
4582 .rva .Ldiv_by_2_body,.Ldiv_by_2_epilogue # HandlerData[]
4583 .LSEH_info_ecp_nistz256_mul_by_3:
4586 .rva .Lmul_by_3_body,.Lmul_by_3_epilogue # HandlerData[]
4587 .LSEH_info_ecp_nistz256_add:
4590 .rva .Ladd_body,.Ladd_epilogue # HandlerData[]
4591 .LSEH_info_ecp_nistz256_sub:
4594 .rva .Lsub_body,.Lsub_epilogue # HandlerData[]
4595 .LSEH_info_ecp_nistz256_neg:
4598 .rva .Lneg_body,.Lneg_epilogue # HandlerData[]
4599 .LSEH_info_ecp_nistz256_ord_mul_mont:
4602 .rva .Lord_mul_body,.Lord_mul_epilogue # HandlerData[]
4604 .LSEH_info_ecp_nistz256_ord_sqr_mont:
4607 .rva .Lord_sqr_body,.Lord_sqr_epilogue # HandlerData[]
4610 $code.=<<___ if ($addx);
4611 .LSEH_info_ecp_nistz256_ord_mul_montx:
4614 .rva .Lord_mulx_body,.Lord_mulx_epilogue # HandlerData[]
4616 .LSEH_info_ecp_nistz256_ord_sqr_montx:
4619 .rva .Lord_sqrx_body,.Lord_sqrx_epilogue # HandlerData[]
4623 .LSEH_info_ecp_nistz256_to_mont:
4626 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
4628 .LSEH_info_ecp_nistz256_mul_mont:
4631 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
4633 .LSEH_info_ecp_nistz256_sqr_mont:
4636 .rva .Lsqr_body,.Lsqr_epilogue # HandlerData[]
4638 .LSEH_info_ecp_nistz256_from_mont:
4641 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
4642 .LSEH_info_ecp_nistz256_gather_wX:
4643 .byte 0x01,0x33,0x16,0x00
4644 .byte 0x33,0xf8,0x09,0x00 #movaps 0x90(rsp),xmm15
4645 .byte 0x2e,0xe8,0x08,0x00 #movaps 0x80(rsp),xmm14
4646 .byte 0x29,0xd8,0x07,0x00 #movaps 0x70(rsp),xmm13
4647 .byte 0x24,0xc8,0x06,0x00 #movaps 0x60(rsp),xmm12
4648 .byte 0x1f,0xb8,0x05,0x00 #movaps 0x50(rsp),xmm11
4649 .byte 0x1a,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10
4650 .byte 0x15,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9
4651 .byte 0x10,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8
4652 .byte 0x0c,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
4653 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6
4654 .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8
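#
# The table above follows the Windows x64 UNWIND_INFO layout: four header
# bytes (version/flags, prologue size, unwind-code count, frame register),
# then two-byte UNWIND_CODE slots whose first byte is the offset within the
# prologue and whose second byte packs the operation in the low nibble and its
# info in the high nibble (8 = UWOP_SAVE_XMM128 with info = XMM register
# number, 1 = UWOP_ALLOC_LARGE), each followed here by a 16-bit scaled operand
# (e.g. 0x0009 -> 9*16 = 0x90 for xmm15, 0x0015 -> 21*8 = 0xa8 for the sub).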
4657 $code.=<<___ if ($avx>1);
4658 .LSEH_info_ecp_nistz256_avx2_gather_wX:
4659 .byte 0x01,0x36,0x17,0x0b
4660 .byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
4661 .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
4662 .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
4663 .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
4664 .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
4665 .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
4666 .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
4667 .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
4668 .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
4669 .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
4670 .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
4671 .byte 0x00,0xb3,0x00,0x00 # set_frame r11
4675 .LSEH_info_ecp_nistz256_point_double:
4678 .rva .Lpoint_doubleq_body,.Lpoint_doubleq_epilogue # HandlerData[]
4680 .LSEH_info_ecp_nistz256_point_add:
4683 .rva .Lpoint_addq_body,.Lpoint_addq_epilogue # HandlerData[]
4685 .LSEH_info_ecp_nistz256_point_add_affine:
4688 .rva .Ladd_affineq_body,.Ladd_affineq_epilogue # HandlerData[]
4691 $code.=<<___ if ($addx);
4693 .LSEH_info_ecp_nistz256_point_doublex:
4696 .rva .Lpoint_doublex_body,.Lpoint_doublex_epilogue # HandlerData[]
4698 .LSEH_info_ecp_nistz256_point_addx:
4701 .rva .Lpoint_addx_body,.Lpoint_addx_epilogue # HandlerData[]
4703 .LSEH_info_ecp_nistz256_point_add_affinex:
4706 .rva .Ladd_affinex_body,.Ladd_affinex_epilogue # HandlerData[]
4711 ########################################################################
4712 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
4714 open TABLE,"<ecp_nistz256_table.c" or
4715 open TABLE,"<${dir}../ecp_nistz256_table.c" or
4716 die "failed to open ecp_nistz256_table.c:",$!;
4721 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
4725 die "insane number of elements" if ($#arr != 64*16*37-1);
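# 37 sub-tables (one per 7-bit window of the scalar) of 64 points, each point
# stored as two 256-bit affine coordinates; every TOBN() contributes two
# 32-bit words, giving 64*16 words per sub-table.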
4728 .section .rodata align=4096
4729 .globl ecp_nistz256_precomputed
4730 .type ecp_nistz256_precomputed,\@object
4732 ecp_nistz256_precomputed:
4734 while (@line=splice(@arr,0,16)) {
4735 print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
4738 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
4741 $code =~ s/\`([^\`]*)\`/eval $1/gem;
4743 close STDOUT or die "error closing STDOUT: $!";