##############################################################################
# Copyright 2014 Intel Corporation                                           #
# Licensed under the Apache License, Version 2.0 (the "License");            #
# you may not use this file except in compliance with the License.           #
# You may obtain a copy of the License at                                    #
#    http://www.apache.org/licenses/LICENSE-2.0                              #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
##############################################################################
# Developers and authors:                                                    #
# Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
# (1) Intel Corporation, Israel Development Center                           #
# (2) University of Haifa                                                    #
# S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography      #
# with 256 Bit Primes"                                                       #
##############################################################################
# Further optimization by <appro@openssl.org>:
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark.
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);

if (!$addx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
	$avx = ($ver>=3.0) + ($ver>=3.01);
.extern	OPENSSL_ia32cap_P

.Lpoly:
.quad	0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001

# 2^512 mod P precomputed for NIST P256 polynomial
.LRR:
.quad	0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd

.LTwo:
.long	2,2,2,2,2,2,2,2
.LThree:
.long	3,3,3,3,3,3,3,3
.LONE_mont:
.quad	0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
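
################################################################################
# Editorial note (hedged): values enter the Montgomery domain by multiplying
# with .LRR = 2^512 mod p, since mont_mul(a, RR) = a*2^512*2^-256 = a*2^256
# mod p, and leave it again by a Montgomery multiplication with 1.  In C:
#
#	/* illustrative only; ecp_nistz256_mul_mont is the real primitive */
#	ecp_nistz256_mul_mont(res, in, RR);	/* to_mont:   in*2^256  mod p */
#	ecp_nistz256_mul_mont(res, in, one);	/* from_mont: in*2^-256 mod p */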
################################################################################
# void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
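#
# Editorial sketch (hedged, illustrative C rather than the generated code):
# double modulo p with a branch-free final conditional subtraction; the
# add_words/sub_words/cmov helpers are assumed stand-ins for the add/sbb/cmov
# sequences below.
#
#	void mul_by_2(uint64_t r[4], const uint64_t a[4])
#	{
#		uint64_t t[4];
#		int carry  = add_words(t, a, a);  /* t = 2*a, carry = bit 256 */
#		int borrow = sub_words(r, t, p);  /* r = t - p                */
#		cmov(r, t, borrow & ~carry);      /* keep t when t < p        */
#	}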
my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");

.globl	ecp_nistz256_mul_by_2
.type	ecp_nistz256_mul_by_2,\@function,2
ecp_nistz256_mul_by_2:
	add	$a0, $a0		# a0:a3+a0:a3
	lea	.Lpoly(%rip), $a_ptr
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
################################################################################
# void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
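#
# Editorial sketch (hedged): halving modulo the odd prime p -- if a is even,
# shift right; if odd, compute (a+p)/2, keeping the carry out of the addition
# as the 257th bit.  The helpers are assumed stand-ins for the add/cmovz/shrd
# sequences in the body.
#
#	void div_by_2(uint64_t r[4], const uint64_t a[4])
#	{
#		uint64_t t[4], odd = a[0] & 1;
#		uint64_t carry = add_words(t, a, p);	/* t = a + p          */
#		select(t, t, a, odd);			/* t = odd ? a+p : a  */
#		rshift1(r, t, carry & odd);		/* r = (carry:t) >> 1 */
#	}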
.globl	ecp_nistz256_div_by_2
.type	ecp_nistz256_div_by_2,\@function,2
ecp_nistz256_div_by_2:
	lea	.Lpoly(%rip), $a_ptr
	xor	$a_ptr, $a_ptr		# borrow $a_ptr
	mov	$a1, $t0		# a0:a3>>1
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
################################################################################
# void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
.globl	ecp_nistz256_mul_by_3
.type	ecp_nistz256_mul_by_3,\@function,2
ecp_nistz256_mul_by_3:
	add	$a0, $a0		# a0:a3+a0:a3
	sbb	.Lpoly+8*1(%rip), $a1
	sbb	.Lpoly+8*3(%rip), $a3
	add	8*0($a_ptr), $a0	# a0:a3+=a_ptr[0:3]
	sbb	.Lpoly+8*1(%rip), $a1
	sbb	.Lpoly+8*3(%rip), $a3
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
################################################################################
# void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
.globl	ecp_nistz256_add
.type	ecp_nistz256_add,\@function,3
	lea	.Lpoly(%rip), $a_ptr
.size	ecp_nistz256_add,.-ecp_nistz256_add

################################################################################
# void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
.globl	ecp_nistz256_sub
.type	ecp_nistz256_sub,\@function,3
	lea	.Lpoly(%rip), $a_ptr
.size	ecp_nistz256_sub,.-ecp_nistz256_sub

################################################################################
# void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
.globl	ecp_nistz256_neg
.type	ecp_nistz256_neg,\@function,2
	lea	.Lpoly(%rip), $a_ptr
.size	ecp_nistz256_neg,.-ecp_nistz256_neg
my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
my ($poly1,$poly3)=($acc6,$acc7);
################################################################################
# void ecp_nistz256_to_mont(
#	uint64_t res[4],
#	uint64_t in[4]);
.globl	ecp_nistz256_to_mont
.type	ecp_nistz256_to_mont,\@function,2
ecp_nistz256_to_mont:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
	lea	.LRR(%rip), $b_org
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
################################################################################
# void ecp_nistz256_mul_mont(
#	uint64_t res[4],
#	uint64_t a[4],
#	uint64_t b[4]);
.globl	ecp_nistz256_mul_mont
.type	ecp_nistz256_mul_mont,\@function,3
ecp_nistz256_mul_mont:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
$code.=<<___	if ($addx);
	mov	8*0($b_org), %rax
	mov	8*0($a_ptr), $acc1
	mov	8*1($a_ptr), $acc2
	mov	8*2($a_ptr), $acc3
	mov	8*3($a_ptr), $acc4

	call	__ecp_nistz256_mul_montq
$code.=<<___	if ($addx);
	mov	8*0($b_org), %rdx
	mov	8*0($a_ptr), $acc1
	mov	8*1($a_ptr), $acc2
	mov	8*2($a_ptr), $acc3
	mov	8*3($a_ptr), $acc4
	lea	-128($a_ptr), $a_ptr	# control u-op density

	call	__ecp_nistz256_mul_montx
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
.type	__ecp_nistz256_mul_montq,\@abi-omnipotent
__ecp_nistz256_mul_montq:
	########################################################################
	mov	.Lpoly+8*1(%rip),$poly1
	mov	.Lpoly+8*3(%rip),$poly3
	########################################################################
	# First reduction step
	# Basically now we want to multiply acc[0] by p256,
	# and add the result to the acc.
	# Due to the special form of p256 we do some optimizations
	#
	# acc[0] x p256[0] = acc[0] x 2^64 - acc[0]
	# then we add acc[0] and get acc[0] x 2^64
	#
	# acc[0] x p256[2] = 0
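	#
	# Editorial note (hedged): since p256[0] = 2^64-1, we have
	# -p256^{-1} = 1 (mod 2^64), so the Montgomery quotient for this
	# step is acc[0] itself and no multiplication by a precomputed n0
	# is needed.  One step computes, over the wide accumulator,
	#
	#	m   = acc[0]
	#	acc = (acc + m*p256) >> 64	# low limb becomes zero
	#
	# where m*p256 is assembled from shifts only, using the identities
	# above plus m x p256[3] = (m<<64) - (m<<32) + m.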
	mov	8*1($b_ptr), %rax
	########################################################################

	########################################################################
	# Second reduction step
	mov	8*2($b_ptr), %rax
	########################################################################

	########################################################################
	# Third reduction step
	mov	8*3($b_ptr), %rax
	########################################################################

	########################################################################
	# Final reduction step
	#adc	\$0, $t0		# doesn't overflow
	########################################################################
	# Branch-less conditional subtraction of P
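	# Editorial sketch (hedged, ignoring the carry limb): compute
	# t = acc - p, then keep acc or t based only on the borrow flag,
	# with cmov instead of a data-dependent branch:
	#
	#	borrow = sub_words(t, acc, p);	/* t = acc - p            */
	#	cmov(acc, t, !borrow);		/* acc = borrow ? acc : t */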
	sub	\$-1, $acc4		# .Lpoly[0]
	sbb	$poly1, $acc5		# .Lpoly[1]
	sbb	\$0, $acc0		# .Lpoly[2]
	sbb	$poly3, $acc1		# .Lpoly[3]

	mov	$acc4, 8*0($r_ptr)
	mov	$acc5, 8*1($r_ptr)
	mov	$acc0, 8*2($r_ptr)
	mov	$acc1, 8*3($r_ptr)
.size	__ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
################################################################################
# void ecp_nistz256_sqr_mont(
#	uint64_t res[4],
#	uint64_t a[4]);
# we optimize the square according to S.Gueron and V.Krasnov,
# "Speeding up Big-Number Squaring"
.globl	ecp_nistz256_sqr_mont
.type	ecp_nistz256_sqr_mont,\@function,2
ecp_nistz256_sqr_mont:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
$code.=<<___	if ($addx);
	mov	8*0($a_ptr), %rax
	mov	8*1($a_ptr), $acc6
	mov	8*2($a_ptr), $acc7
	mov	8*3($a_ptr), $acc0

	call	__ecp_nistz256_sqr_montq
$code.=<<___	if ($addx);
	mov	8*0($a_ptr), %rdx
	mov	8*1($a_ptr), $acc6
	mov	8*2($a_ptr), $acc7
	mov	8*3($a_ptr), $acc0
	lea	-128($a_ptr), $a_ptr	# control u-op density

	call	__ecp_nistz256_sqr_montx
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
.type	__ecp_nistz256_sqr_montq,\@abi-omnipotent
__ecp_nistz256_sqr_montq:
	mulq	$acc6			# a[1]*a[0]
	mulq	$acc5			# a[0]*a[2]
	mulq	$acc5			# a[0]*a[3]

	#################################
	mulq	$acc6			# a[1]*a[2]
	mulq	$acc6			# a[1]*a[3]

	#################################
	mulq	$acc7			# a[2]*a[3]
	mov	8*0($a_ptr), %rax
	add	$acc1, $acc1		# acc1:6<<1
	mov	8*1($a_ptr), %rax
	mov	8*2($a_ptr), %rax
	mov	8*3($a_ptr), %rax

	mov	.Lpoly+8*1(%rip), $a_ptr
	mov	.Lpoly+8*3(%rip), $t1

	##########################################
	adc	%rdx, $acc2		# doesn't overflow
	##########################################
	adc	%rdx, $acc3		# doesn't overflow
	##########################################
	adc	%rdx, $acc4		# doesn't overflow
	###########################################
	adc	%rdx, $acc0		# doesn't overflow

	############################################
	# Add the rest of the acc
	sub	\$-1, $acc4		# .Lpoly[0]
	sbb	$a_ptr, $acc5		# .Lpoly[1]
	sbb	\$0, $acc6		# .Lpoly[2]
	sbb	$t1, $acc7		# .Lpoly[3]

	mov	$acc4, 8*0($r_ptr)
	mov	$acc5, 8*1($r_ptr)
	mov	$acc6, 8*2($r_ptr)
	mov	$acc7, 8*3($r_ptr)
.size	__ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
.type	__ecp_nistz256_mul_montx,\@abi-omnipotent
__ecp_nistz256_mul_montx:
	########################################################################
	mulx	$acc1, $acc0, $acc1
	mulx	$acc2, $t0, $acc2
	xor	$acc5, $acc5		# cf=0
	mulx	$acc3, $t1, $acc3
	mov	.Lpoly+8*3(%rip), $poly3
	mulx	$acc4, $t0, $acc4
	shlx	$poly1, $acc0, $t1
	shrx	$poly1, $acc0, $t0

	########################################################################
	# First reduction step
	xor	$acc0, $acc0		# $acc0=0,cf=0,of=0
	mulx	$poly3, $t0, $t1
	mov	8*1($b_ptr), %rdx
	adcx	$acc0, $acc5		# cf=0
	adox	$acc0, $acc5		# of=0

	########################################################################
	mulx	8*0+128($a_ptr), $t0, $t1
	mulx	8*1+128($a_ptr), $t0, $t1
	mulx	8*2+128($a_ptr), $t0, $t1
	mulx	8*3+128($a_ptr), $t0, $t1
	shlx	$poly1, $acc1, $t0
	shrx	$poly1, $acc1, $t1

	########################################################################
	# Second reduction step
	xor	$acc1, $acc1		# $acc1=0,cf=0,of=0
	mulx	$poly3, $t0, $t1
	mov	8*2($b_ptr), %rdx
	adcx	$acc1, $acc0		# cf=0
	adox	$acc1, $acc0		# of=0

	########################################################################
	mulx	8*0+128($a_ptr), $t0, $t1
	mulx	8*1+128($a_ptr), $t0, $t1
	mulx	8*2+128($a_ptr), $t0, $t1
	mulx	8*3+128($a_ptr), $t0, $t1
	shlx	$poly1, $acc2, $t0
	shrx	$poly1, $acc2, $t1

	########################################################################
	# Third reduction step
	xor	$acc2, $acc2		# $acc2=0,cf=0,of=0
	mulx	$poly3, $t0, $t1
	mov	8*3($b_ptr), %rdx
	adcx	$acc2, $acc1		# cf=0
	adox	$acc2, $acc1		# of=0

	########################################################################
	mulx	8*0+128($a_ptr), $t0, $t1
	mulx	8*1+128($a_ptr), $t0, $t1
	mulx	8*2+128($a_ptr), $t0, $t1
	mulx	8*3+128($a_ptr), $t0, $t1
	shlx	$poly1, $acc3, $t0
	shrx	$poly1, $acc3, $t1

	########################################################################
	# Fourth reduction step
	xor	$acc3, $acc3		# $acc3=0,cf=0,of=0
	mulx	$poly3, $t0, $t1
	mov	.Lpoly+8*1(%rip), $poly1

	########################################################################
	# Branch-less conditional subtraction of P
	sbb	\$-1, $acc4		# .Lpoly[0]
	sbb	$poly1, $acc5		# .Lpoly[1]
	sbb	\$0, $acc0		# .Lpoly[2]
	sbb	$poly3, $acc1		# .Lpoly[3]

	mov	$acc4, 8*0($r_ptr)
	mov	$acc5, 8*1($r_ptr)
	mov	$acc0, 8*2($r_ptr)
	mov	$acc1, 8*3($r_ptr)
.size	__ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
.type	__ecp_nistz256_sqr_montx,\@abi-omnipotent
__ecp_nistz256_sqr_montx:
	mulx	$acc6, $acc1, $acc2	# a[0]*a[1]
	mulx	$acc7, $t0, $acc3	# a[0]*a[2]
	mulx	$acc0, $t1, $acc4	# a[0]*a[3]
	xor	$acc5, $acc5		# $acc5=0,cf=0,of=0

	#################################
	mulx	$acc7, $t0, $t1		# a[1]*a[2]
	mulx	$acc0, $t0, $t1		# a[1]*a[3]

	#################################
	mulx	$acc0, $t0, $acc6	# a[2]*a[3]
	mov	8*0+128($a_ptr), %rdx
	xor	$acc7, $acc7		# $acc7=0,cf=0,of=0
	adcx	$acc1, $acc1		# acc1:6<<1
	adox	$acc7, $acc6		# of=0

	mulx	%rdx, $acc0, $t1
	mov	8*1+128($a_ptr), %rdx
	mov	8*2+128($a_ptr), %rdx
	mov	8*3+128($a_ptr), %rdx

	shlx	$a_ptr, $acc0, $t0
	shrx	$a_ptr, $acc0, $t4
	mov	.Lpoly+8*3(%rip), $t1
	shlx	$a_ptr, $acc1, $t0
	shrx	$a_ptr, $acc1, $t4
	shlx	$a_ptr, $acc2, $t0
	shrx	$a_ptr, $acc2, $t4
	shlx	$a_ptr, $acc3, $t0
	shrx	$a_ptr, $acc3, $t4

	adc	$acc0, $acc4		# accumulate upper half
	mov	.Lpoly+8*1(%rip), $a_ptr
	xor	%eax, %eax		# cf=0
	sbb	\$-1, $acc4		# .Lpoly[0]
	sbb	$a_ptr, $acc5		# .Lpoly[1]
	sbb	\$0, $acc6		# .Lpoly[2]
	sbb	$t1, $acc7		# .Lpoly[3]

	mov	$acc4, 8*0($r_ptr)
	mov	$acc5, 8*1($r_ptr)
	mov	$acc6, 8*2($r_ptr)
	mov	$acc7, 8*3($r_ptr)
.size	__ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
my ($r_ptr,$in_ptr)=("%rdi","%rsi");
my ($acc0,$acc1,$acc2,$acc3,$acc4)=map("%r$_",(8..12));
my ($t0,$t1)=("%rcx","%rsi");
################################################################################
# void ecp_nistz256_from_mont(
#	uint64_t res[4],
#	uint64_t in[4]);
# This one performs Montgomery multiplication by 1, so we only need the
# reduction
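#
# Editorial note (hedged): with b = 1 the multiplication phase is trivial,
# so the body below is just the four reduction steps plus the final
# conditional subtraction; functionally it matches
#
#	static const uint64_t one[4] = { 1, 0, 0, 0 };
#	ecp_nistz256_mul_mont(res, in, one);	/* equivalent, but slower */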
.globl	ecp_nistz256_from_mont
.type	ecp_nistz256_from_mont,\@function,2
ecp_nistz256_from_mont:
	mov	8*0($in_ptr), %rax
	mov	8*1($in_ptr), $acc1
	mov	8*2($in_ptr), $acc2
	mov	8*3($in_ptr), $acc3
	lea	.Lpoly(%rip), $in_ptr

	#########################################
	#########################################
	##########################################
	###########################################
	mov	0*8($in_ptr), %rax
	mov	1*8($in_ptr), %rdx
	mov	2*8($in_ptr), $t0
	mov	3*8($in_ptr), $t1

	mov	$acc4, 8*0($r_ptr)
	mov	$acc0, 8*1($r_ptr)
	mov	$acc1, 8*2($r_ptr)
	mov	$acc2, 8*3($r_ptr)
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
my ($M1,$T1a,$T1b,$TMP1,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
################################################################################
# void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
.globl	ecp_nistz256_scatter_w5
.type	ecp_nistz256_scatter_w5,\@abi-omnipotent
ecp_nistz256_scatter_w5:
	lea	-3($index,$index,2), $index
	movdqa	0x00($in_t), %xmm0
	movdqa	0x10($in_t), %xmm1
	movdqa	0x20($in_t), %xmm2
	movdqa	0x30($in_t), %xmm3
	movdqa	0x40($in_t), %xmm4
	movdqa	0x50($in_t), %xmm5
	movdqa	%xmm0, 0x00($val,$index)
	movdqa	%xmm1, 0x10($val,$index)
	movdqa	%xmm2, 0x20($val,$index)
	movdqa	%xmm3, 0x30($val,$index)
	movdqa	%xmm4, 0x40($val,$index)
	movdqa	%xmm5, 0x50($val,$index)
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
################################################################################
# void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
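#
# Editorial sketch (hedged): the gather routines do a constant-time table
# lookup -- every entry is touched and the wanted one is accumulated under
# an equality mask, so the memory access pattern is independent of the
# secret index.  Per 32-bit lane:
#
#	uint32_t r = 0;
#	for (i = 1; i <= N; i++) {
#		uint32_t mask = (i == index) ? ~0u : 0;	/* pcmpeqd           */
#		r |= table[i-1] & mask;			/* pand + accumulate */
#	}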
.globl	ecp_nistz256_gather_w5
.type	ecp_nistz256_gather_w5,\@abi-omnipotent
ecp_nistz256_gather_w5:
$code.=<<___	if ($avx>1);
	mov	OPENSSL_ia32cap_P+8(%rip), %eax
	jnz	.Lavx2_gather_w5
$code.=<<___	if ($win64);
	lea	-0x88(%rsp), %rax
.LSEH_begin_ecp_nistz256_gather_w5:
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax), %rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6, -0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7, -0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8, 0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9, 0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10, 0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11, 0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12, 0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13, 0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14, 0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15, 0x70(%rax)
	movdqa	.LOne(%rip), $ONE
	pshufd	\$0, $INDEX, $INDEX

.Lselect_loop_sse_w5:
	pcmpeqd	$INDEX, $TMP0
	movdqa	16*0($in_t), $T0a
	movdqa	16*1($in_t), $T0b
	movdqa	16*2($in_t), $T0c
	movdqa	16*3($in_t), $T0d
	movdqa	16*4($in_t), $T0e
	movdqa	16*5($in_t), $T0f
	lea	16*6($in_t), $in_t
	jnz	.Lselect_loop_sse_w5

	movdqu	$Ra, 16*0($val)
	movdqu	$Rb, 16*1($val)
	movdqu	$Rc, 16*2($val)
	movdqu	$Rd, 16*3($val)
	movdqu	$Re, 16*4($val)
	movdqu	$Rf, 16*5($val)
$code.=<<___	if ($win64);
	movaps	(%rsp), %xmm6
	movaps	0x10(%rsp), %xmm7
	movaps	0x20(%rsp), %xmm8
	movaps	0x30(%rsp), %xmm9
	movaps	0x40(%rsp), %xmm10
	movaps	0x50(%rsp), %xmm11
	movaps	0x60(%rsp), %xmm12
	movaps	0x70(%rsp), %xmm13
	movaps	0x80(%rsp), %xmm14
	movaps	0x90(%rsp), %xmm15
	lea	0xa8(%rsp), %rsp
.LSEH_end_ecp_nistz256_gather_w5:
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
################################################################################
# void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
.globl	ecp_nistz256_scatter_w7
.type	ecp_nistz256_scatter_w7,\@abi-omnipotent
ecp_nistz256_scatter_w7:
	movdqu	0x00($in_t), %xmm0
	movdqu	0x10($in_t), %xmm1
	movdqu	0x20($in_t), %xmm2
	movdqu	0x30($in_t), %xmm3
	movdqa	%xmm0, 0x00($val,$index)
	movdqa	%xmm1, 0x10($val,$index)
	movdqa	%xmm2, 0x20($val,$index)
	movdqa	%xmm3, 0x30($val,$index)
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
################################################################################
# void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
.globl	ecp_nistz256_gather_w7
.type	ecp_nistz256_gather_w7,\@abi-omnipotent
ecp_nistz256_gather_w7:
$code.=<<___	if ($avx>1);
	mov	OPENSSL_ia32cap_P+8(%rip), %eax
	jnz	.Lavx2_gather_w7
$code.=<<___	if ($win64);
	lea	-0x88(%rsp), %rax
.LSEH_begin_ecp_nistz256_gather_w7:
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax), %rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6, -0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7, -0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8, 0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9, 0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10, 0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11, 0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12, 0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13, 0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14, 0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15, 0x70(%rax)
	movdqa	.LOne(%rip), $M0
	pshufd	\$0, $INDEX, $INDEX

.Lselect_loop_sse_w7:
	movdqa	16*0($in_t), $T0a
	movdqa	16*1($in_t), $T0b
	pcmpeqd	$INDEX, $TMP0
	movdqa	16*2($in_t), $T0c
	movdqa	16*3($in_t), $T0d
	lea	16*4($in_t), $in_t
	prefetcht0	255($in_t)
	jnz	.Lselect_loop_sse_w7

	movdqu	$Ra, 16*0($val)
	movdqu	$Rb, 16*1($val)
	movdqu	$Rc, 16*2($val)
	movdqu	$Rd, 16*3($val)
$code.=<<___	if ($win64);
	movaps	(%rsp), %xmm6
	movaps	0x10(%rsp), %xmm7
	movaps	0x20(%rsp), %xmm8
	movaps	0x30(%rsp), %xmm9
	movaps	0x40(%rsp), %xmm10
	movaps	0x50(%rsp), %xmm11
	movaps	0x60(%rsp), %xmm12
	movaps	0x70(%rsp), %xmm13
	movaps	0x80(%rsp), %xmm14
	movaps	0x90(%rsp), %xmm15
	lea	0xa8(%rsp), %rsp
.LSEH_end_ecp_nistz256_gather_w7:
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));

################################################################################
# void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
.type	ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
ecp_nistz256_avx2_gather_w5:
$code.=<<___	if ($win64);
	lea	-0x88(%rsp), %rax
.LSEH_begin_ecp_nistz256_avx2_gather_w5:
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax), %rsp
	.byte	0xc5,0xf8,0x29,0x70,0xe0	#vmovaps %xmm6, -0x20(%rax)
	.byte	0xc5,0xf8,0x29,0x78,0xf0	#vmovaps %xmm7, -0x10(%rax)
	.byte	0xc5,0x78,0x29,0x40,0x00	#vmovaps %xmm8, 0(%rax)
	.byte	0xc5,0x78,0x29,0x48,0x10	#vmovaps %xmm9, 0x10(%rax)
	.byte	0xc5,0x78,0x29,0x50,0x20	#vmovaps %xmm10, 0x20(%rax)
	.byte	0xc5,0x78,0x29,0x58,0x30	#vmovaps %xmm11, 0x30(%rax)
	.byte	0xc5,0x78,0x29,0x60,0x40	#vmovaps %xmm12, 0x40(%rax)
	.byte	0xc5,0x78,0x29,0x68,0x50	#vmovaps %xmm13, 0x50(%rax)
	.byte	0xc5,0x78,0x29,0x70,0x60	#vmovaps %xmm14, 0x60(%rax)
	.byte	0xc5,0x78,0x29,0x78,0x70	#vmovaps %xmm15, 0x70(%rax)
	vmovdqa	.LTwo(%rip), $TWO

	vmovdqa	.LOne(%rip), $M0
	vmovdqa	.LTwo(%rip), $M1
	vpermd	$INDEX, $Ra, $INDEX

.Lselect_loop_avx2_w5:
	vmovdqa	32*0($in_t), $T0a
	vmovdqa	32*1($in_t), $T0b
	vmovdqa	32*2($in_t), $T0c
	vmovdqa	32*3($in_t), $T1a
	vmovdqa	32*4($in_t), $T1b
	vmovdqa	32*5($in_t), $T1c
	vpcmpeqd	$INDEX, $M0, $TMP0
	vpcmpeqd	$INDEX, $M1, $TMP1
	vpaddd	$TWO, $M0, $M0
	vpaddd	$TWO, $M1, $M1
	lea	32*6($in_t), $in_t

	vpand	$TMP0, $T0a, $T0a
	vpand	$TMP0, $T0b, $T0b
	vpand	$TMP0, $T0c, $T0c
	vpand	$TMP1, $T1a, $T1a
	vpand	$TMP1, $T1b, $T1b
	vpand	$TMP1, $T1c, $T1c

	vpxor	$T0a, $Ra, $Ra
	vpxor	$T0b, $Rb, $Rb
	vpxor	$T0c, $Rc, $Rc
	vpxor	$T1a, $Ra, $Ra
	vpxor	$T1b, $Rb, $Rb
	vpxor	$T1c, $Rc, $Rc
	jnz	.Lselect_loop_avx2_w5

	vmovdqu	$Ra, 32*0($val)
	vmovdqu	$Rb, 32*1($val)
	vmovdqu	$Rc, 32*2($val)
$code.=<<___	if ($win64);
	movaps	(%rsp), %xmm6
	movaps	0x10(%rsp), %xmm7
	movaps	0x20(%rsp), %xmm8
	movaps	0x30(%rsp), %xmm9
	movaps	0x40(%rsp), %xmm10
	movaps	0x50(%rsp), %xmm11
	movaps	0x60(%rsp), %xmm12
	movaps	0x70(%rsp), %xmm13
	movaps	0x80(%rsp), %xmm14
	movaps	0x90(%rsp), %xmm15
	lea	0xa8(%rsp), %rsp
.LSEH_end_ecp_nistz256_avx2_gather_w5:
.size	ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));

################################################################################
# void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
.globl	ecp_nistz256_avx2_gather_w7
.type	ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
ecp_nistz256_avx2_gather_w7:
$code.=<<___	if ($win64);
	lea	-0x88(%rsp), %rax
.LSEH_begin_ecp_nistz256_avx2_gather_w7:
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax), %rsp
	.byte	0xc5,0xf8,0x29,0x70,0xe0	#vmovaps %xmm6, -0x20(%rax)
	.byte	0xc5,0xf8,0x29,0x78,0xf0	#vmovaps %xmm7, -0x10(%rax)
	.byte	0xc5,0x78,0x29,0x40,0x00	#vmovaps %xmm8, 0(%rax)
	.byte	0xc5,0x78,0x29,0x48,0x10	#vmovaps %xmm9, 0x10(%rax)
	.byte	0xc5,0x78,0x29,0x50,0x20	#vmovaps %xmm10, 0x20(%rax)
	.byte	0xc5,0x78,0x29,0x58,0x30	#vmovaps %xmm11, 0x30(%rax)
	.byte	0xc5,0x78,0x29,0x60,0x40	#vmovaps %xmm12, 0x40(%rax)
	.byte	0xc5,0x78,0x29,0x68,0x50	#vmovaps %xmm13, 0x50(%rax)
	.byte	0xc5,0x78,0x29,0x70,0x60	#vmovaps %xmm14, 0x60(%rax)
	.byte	0xc5,0x78,0x29,0x78,0x70	#vmovaps %xmm15, 0x70(%rax)
	vmovdqa	.LThree(%rip), $THREE

	vmovdqa	.LOne(%rip), $M0
	vmovdqa	.LTwo(%rip), $M1
	vmovdqa	.LThree(%rip), $M2
	vpermd	$INDEX, $Ra, $INDEX
	# Skip index = 0, because it is implicitly the point at infinity

.Lselect_loop_avx2_w7:
	vmovdqa	32*0($in_t), $T0a
	vmovdqa	32*1($in_t), $T0b
	vmovdqa	32*2($in_t), $T1a
	vmovdqa	32*3($in_t), $T1b
	vmovdqa	32*4($in_t), $T2a
	vmovdqa	32*5($in_t), $T2b
	vpcmpeqd	$INDEX, $M0, $TMP0
	vpcmpeqd	$INDEX, $M1, $TMP1
	vpcmpeqd	$INDEX, $M2, $TMP2
	vpaddd	$THREE, $M0, $M0
	vpaddd	$THREE, $M1, $M1
	vpaddd	$THREE, $M2, $M2
	lea	32*6($in_t), $in_t

	vpand	$TMP0, $T0a, $T0a
	vpand	$TMP0, $T0b, $T0b
	vpand	$TMP1, $T1a, $T1a
	vpand	$TMP1, $T1b, $T1b
	vpand	$TMP2, $T2a, $T2a
	vpand	$TMP2, $T2b, $T2b

	vpxor	$T0a, $Ra, $Ra
	vpxor	$T0b, $Rb, $Rb
	vpxor	$T1a, $Ra, $Ra
	vpxor	$T1b, $Rb, $Rb
	vpxor	$T2a, $Ra, $Ra
	vpxor	$T2b, $Rb, $Rb
	jnz	.Lselect_loop_avx2_w7

	vmovdqa	32*0($in_t), $T0a
	vmovdqa	32*1($in_t), $T0b
	vpcmpeqd	$INDEX, $M0, $TMP0
	vpand	$TMP0, $T0a, $T0a
	vpand	$TMP0, $T0b, $T0b
	vpxor	$T0a, $Ra, $Ra
	vpxor	$T0b, $Rb, $Rb

	vmovdqu	$Ra, 32*0($val)
	vmovdqu	$Rb, 32*1($val)
$code.=<<___	if ($win64);
	movaps	(%rsp), %xmm6
	movaps	0x10(%rsp), %xmm7
	movaps	0x20(%rsp), %xmm8
	movaps	0x30(%rsp), %xmm9
	movaps	0x40(%rsp), %xmm10
	movaps	0x50(%rsp), %xmm11
	movaps	0x60(%rsp), %xmm12
	movaps	0x70(%rsp), %xmm13
	movaps	0x80(%rsp), %xmm14
	movaps	0x90(%rsp), %xmm15
	lea	0xa8(%rsp), %rsp
.LSEH_end_ecp_nistz256_avx2_gather_w7:
.size	ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
.globl	ecp_nistz256_avx2_gather_w7
.type	ecp_nistz256_avx2_gather_w7,\@function,3
ecp_nistz256_avx2_gather_w7:
	.byte	0x0f,0x0b		# ud2
.size	ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
########################################################################
# This block implements higher level point_double, point_add and
# point_add_affine. The key to performance in this case is to allow
# out-of-order execution logic to overlap computations from next step
# with tail processing from current step. By using tailored calling
# sequence we minimize inter-step overhead to give processor better
# shot at overlapping operations...
#
# You will notice that input data is copied to stack. Trouble is that
# there are no registers to spare for holding original pointers and
# reloading them would create undesired dependencies on effective-
# address calculation paths. In other words it's done this way to
# favour out-of-order execution logic.
#						<appro@openssl.org>
my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
my ($poly1,$poly3)=($acc6,$acc7);

sub load_for_mul () {
my ($a,$b,$src0) = @_;
my $bias = $src0 eq "%rax" ? 0 : -128;
	lea	$bias+$a, $a_ptr

sub load_for_sqr () {
my $bias = $src0 eq "%rax" ? 0 : -128;
	lea	$bias+$a, $a_ptr

########################################################################
# operate in 4-5-0-1 "name space" that matches multiplication output
my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
.type	__ecp_nistz256_add_toq,\@abi-omnipotent
__ecp_nistz256_add_toq:
	add	8*0($b_ptr), $a0
	adc	8*1($b_ptr), $a1
	adc	8*2($b_ptr), $a2
	adc	8*3($b_ptr), $a3

	mov	$a0, 8*0($r_ptr)
	mov	$a1, 8*1($r_ptr)
	mov	$a2, 8*2($r_ptr)
	mov	$a3, 8*3($r_ptr)
.size	__ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq

.type	__ecp_nistz256_sub_fromq,\@abi-omnipotent
__ecp_nistz256_sub_fromq:
	sub	8*0($b_ptr), $a0
	sbb	8*1($b_ptr), $a1
	sbb	8*2($b_ptr), $a2
	sbb	8*3($b_ptr), $a3

	mov	$a0, 8*0($r_ptr)
	mov	$a1, 8*1($r_ptr)
	mov	$a2, 8*2($r_ptr)
	mov	$a3, 8*3($r_ptr)
.size	__ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq

.type	__ecp_nistz256_subq,\@abi-omnipotent
__ecp_nistz256_subq:
.size	__ecp_nistz256_subq,.-__ecp_nistz256_subq

.type	__ecp_nistz256_mul_by_2q,\@abi-omnipotent
__ecp_nistz256_mul_by_2q:
	add	$a0, $a0		# a0:a3+a0:a3

	mov	$a0, 8*0($r_ptr)
	mov	$a1, 8*1($r_ptr)
	mov	$a2, 8*2($r_ptr)
	mov	$a3, 8*3($r_ptr)
.size	__ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
my ($src0,$sfx,$bias);
my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
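
########################################################################
# Editorial summary (hedged): the schedule below realizes the standard
# Jacobian doubling formulas, with S, M, Zsqr, tmp0 as 32-byte stack
# slots:
#	S     = 4*X1*Y1^2
#	M     = 3*(X1 - Z1^2)*(X1 + Z1^2)
#	res_x = M^2 - 2*S
#	res_y = M*(S - res_x) - 8*Y1^4
#	res_z = 2*Y1*Z1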
.globl	ecp_nistz256_point_double
.type	ecp_nistz256_point_double,\@function,2
ecp_nistz256_point_double:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
.type	ecp_nistz256_point_doublex,\@function,2
ecp_nistz256_point_doublex:
	movdqu	0x00($a_ptr), %xmm0	# copy	*(P256_POINT *)$a_ptr.x
	mov	$a_ptr, $b_ptr		# backup copy
	movdqu	0x10($a_ptr), %xmm1
	mov	0x20+8*0($a_ptr), $acc4	# load in_y in "5-4-0-1" order
	mov	0x20+8*1($a_ptr), $acc5
	mov	0x20+8*2($a_ptr), $acc0
	mov	0x20+8*3($a_ptr), $acc1
	mov	.Lpoly+8*1(%rip), $poly1
	mov	.Lpoly+8*3(%rip), $poly3
	movdqa	%xmm0, $in_x(%rsp)
	movdqa	%xmm1, $in_x+0x10(%rsp)
	lea	0x20($r_ptr), $acc2
	lea	0x40($r_ptr), $acc3

	lea	$S(%rsp), $r_ptr
	call	__ecp_nistz256_mul_by_2$x	# p256_mul_by_2(S, in_y);

	mov	0x40+8*0($a_ptr), $src0
	mov	0x40+8*1($a_ptr), $acc6
	mov	0x40+8*2($a_ptr), $acc7
	mov	0x40+8*3($a_ptr), $acc0
	lea	0x40-$bias($a_ptr), $a_ptr
	lea	$Zsqr(%rsp), $r_ptr
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Zsqr, in_z);

	`&load_for_sqr("$S(%rsp)", "$src0")`
	lea	$S(%rsp), $r_ptr
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(S, S);

	mov	0x20($b_ptr), $src0	# $b_ptr is still valid
	mov	0x40+8*0($b_ptr), $acc1
	mov	0x40+8*1($b_ptr), $acc2
	mov	0x40+8*2($b_ptr), $acc3
	mov	0x40+8*3($b_ptr), $acc4
	lea	0x40-$bias($b_ptr), $a_ptr
	lea	0x20($b_ptr), $b_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(res_z, in_z, in_y);
	call	__ecp_nistz256_mul_by_2$x	# p256_mul_by_2(res_z, res_z);

	mov	$in_x+8*0(%rsp), $acc4	# "5-4-0-1" order
	mov	$in_x+8*1(%rsp), $acc5
	lea	$Zsqr(%rsp), $b_ptr
	mov	$in_x+8*2(%rsp), $acc0
	mov	$in_x+8*3(%rsp), $acc1
	lea	$M(%rsp), $r_ptr
	call	__ecp_nistz256_add_to$x		# p256_add(M, in_x, Zsqr);

	mov	$in_x+8*0(%rsp), $acc4	# "5-4-0-1" order
	mov	$in_x+8*1(%rsp), $acc5
	lea	$Zsqr(%rsp), $b_ptr
	mov	$in_x+8*2(%rsp), $acc0
	mov	$in_x+8*3(%rsp), $acc1
	lea	$Zsqr(%rsp), $r_ptr
	call	__ecp_nistz256_sub_from$x	# p256_sub(Zsqr, in_x, Zsqr);

	`&load_for_sqr("$S(%rsp)", "$src0")`
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(res_y, S);

	######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
	# operate in 4-5-6-7 "name space" that matches squaring output
my ($poly1,$poly3)=($a_ptr,$t1);
my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);

	xor	$a_ptr, $a_ptr		# borrow $a_ptr
	mov	$a1, $t0		# a0:a3>>1
	mov	$a0, 8*0($r_ptr)
	mov	$a1, 8*1($r_ptr)
	mov	$a2, 8*2($r_ptr)
	mov	$a3, 8*3($r_ptr)

	`&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
	lea	$M(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(M, M, Zsqr);

	lea	$tmp0(%rsp), $r_ptr
	call	__ecp_nistz256_mul_by_2$x

	lea	$M(%rsp), $b_ptr
	lea	$M(%rsp), $r_ptr
	call	__ecp_nistz256_add_to$x		# p256_mul_by_3(M, M);

	`&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
	lea	$S(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S, S, in_x);

	lea	$tmp0(%rsp), $r_ptr
	call	__ecp_nistz256_mul_by_2$x	# p256_mul_by_2(tmp0, S);

	`&load_for_sqr("$M(%rsp)", "$src0")`
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(res_x, M);

	lea	$tmp0(%rsp), $b_ptr
	mov	$acc6, $acc0		# harmonize sqr output and sub input
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_x, res_x, tmp0);

	mov	$S+8*0(%rsp), $t0
	mov	$S+8*1(%rsp), $t1
	mov	$S+8*2(%rsp), $t2
	mov	$S+8*3(%rsp), $acc2	# "4-5-0-1" order
	lea	$S(%rsp), $r_ptr
	call	__ecp_nistz256_sub$x		# p256_sub(S, S, res_x);

	lea	$M(%rsp), $b_ptr
	mov	$acc4, $acc6		# harmonize sub output and mul input
	mov	$acc4, $S+8*0(%rsp)	# have to save:-(
	mov	$acc5, $S+8*1(%rsp)
	mov	$acc0, $S+8*2(%rsp)
	lea	$S-$bias(%rsp), $a_ptr
	mov	$acc1, $S+8*3(%rsp)
	lea	$S(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S, S, M);
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_y, S, res_y);
.size	ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
my ($src0,$sfx,$bias);
my ($H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2,
    $res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
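
########################################################################
# Editorial summary (hedged): standard Jacobian point addition, with the
# stack slots named after the textbook variables:
#	Z1sqr = Z1^2, Z2sqr = Z2^2
#	U1 = X1*Z2sqr, U2 = X2*Z1sqr, H = U2 - U1
#	S1 = Y1*Z2*Z2sqr, S2 = Y2*Z1*Z1sqr, R = S2 - S1
#	res_x = R^2 - H^3 - 2*U1*H^2
#	res_y = R*(U1*H^2 - res_x) - S1*H^3
#	res_z = H*Z1*Z2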
.globl	ecp_nistz256_point_add
.type	ecp_nistz256_point_add,\@function,3
ecp_nistz256_point_add:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
.type	ecp_nistz256_point_addx,\@function,3
ecp_nistz256_point_addx:
	movdqu	0x00($a_ptr), %xmm0	# copy	*(P256_POINT *)$a_ptr
	movdqu	0x10($a_ptr), %xmm1
	movdqu	0x20($a_ptr), %xmm2
	movdqu	0x30($a_ptr), %xmm3
	movdqu	0x40($a_ptr), %xmm4
	movdqu	0x50($a_ptr), %xmm5
	mov	$a_ptr, $b_ptr		# reassign
	mov	$b_org, $a_ptr		# reassign
	movdqa	%xmm0, $in1_x(%rsp)
	movdqa	%xmm1, $in1_x+0x10(%rsp)
	movdqa	%xmm2, $in1_y(%rsp)
	movdqa	%xmm3, $in1_y+0x10(%rsp)
	movdqa	%xmm4, $in1_z(%rsp)
	movdqa	%xmm5, $in1_z+0x10(%rsp)

	movdqu	0x00($a_ptr), %xmm0	# copy	*(P256_POINT *)$b_ptr
	pshufd	\$0xb1, %xmm3, %xmm5
	movdqu	0x10($a_ptr), %xmm1
	movdqu	0x20($a_ptr), %xmm2
	movdqu	0x30($a_ptr), %xmm3
	mov	0x40+8*0($a_ptr), $src0	# load original in2_z
	mov	0x40+8*1($a_ptr), $acc6
	mov	0x40+8*2($a_ptr), $acc7
	mov	0x40+8*3($a_ptr), $acc0
	movdqa	%xmm0, $in2_x(%rsp)
	pshufd	\$0x1e, %xmm5, %xmm4
	movdqa	%xmm1, $in2_x+0x10(%rsp)
	movq	$r_ptr, %xmm0		# save $r_ptr
	movdqa	%xmm2, $in2_y(%rsp)
	movdqa	%xmm3, $in2_y+0x10(%rsp)

	lea	0x40-$bias($a_ptr), $a_ptr	# $a_ptr is still valid
	mov	$src0, $in2_z+8*0(%rsp)	# make in2_z copy
	mov	$acc6, $in2_z+8*1(%rsp)
	mov	$acc7, $in2_z+8*2(%rsp)
	mov	$acc0, $in2_z+8*3(%rsp)
	lea	$Z2sqr(%rsp), $r_ptr	# Z2^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Z2sqr, in2_z);

	pcmpeqd	%xmm4, %xmm5
	pshufd	\$0xb1, %xmm3, %xmm4
	pshufd	\$0, %xmm5, %xmm5	# in1infty
	pshufd	\$0x1e, %xmm4, %xmm3
	pcmpeqd	%xmm3, %xmm4
	pshufd	\$0, %xmm4, %xmm4	# in2infty
	mov	0x40+8*0($b_ptr), $src0	# load original in1_z
	mov	0x40+8*1($b_ptr), $acc6
	mov	0x40+8*2($b_ptr), $acc7
	mov	0x40+8*3($b_ptr), $acc0

	lea	0x40-$bias($b_ptr), $a_ptr
	lea	$Z1sqr(%rsp), $r_ptr	# Z1^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Z1sqr, in1_z);

	`&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
	lea	$S1(%rsp), $r_ptr	# S1 = Z2^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S1, Z2sqr, in2_z);

	`&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr	# S2 = Z1^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, Z1sqr, in1_z);

	`&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
	lea	$S1(%rsp), $r_ptr	# S1 = Y1*Z2^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S1, S1, in1_y);

	`&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr	# S2 = Y2*Z1^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, S2, in2_y);

	lea	$S1(%rsp), $b_ptr
	lea	$R(%rsp), $r_ptr	# R = S2 - S1
	call	__ecp_nistz256_sub_from$x	# p256_sub(R, S2, S1);

	or	$acc5, $acc4		# see if result is zero
	por	%xmm5, %xmm2		# in1infty || in2infty

	`&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
	lea	$U1(%rsp), $r_ptr	# U1 = X1*Z2^2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(U1, in1_x, Z2sqr);

	`&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
	lea	$U2(%rsp), $r_ptr	# U2 = X2*Z1^2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(U2, in2_x, Z1sqr);

	lea	$U1(%rsp), $b_ptr
	lea	$H(%rsp), $r_ptr	# H = U2 - U1
	call	__ecp_nistz256_sub_from$x	# p256_sub(H, U2, U1);

	or	$acc5, $acc4		# see if result is zero
	.byte	0x3e			# predict taken
	jnz	.Ladd_proceed$x		# is_equal(U1,U2)?
	jnz	.Ladd_proceed$x		# (in1infty || in2infty)?
	jz	.Ladd_proceed$x		# is_equal(S1,S2)?

	movq	%xmm0, $r_ptr		# restore $r_ptr
	movdqu	%xmm0, 0x00($r_ptr)
	movdqu	%xmm0, 0x10($r_ptr)
	movdqu	%xmm0, 0x20($r_ptr)
	movdqu	%xmm0, 0x30($r_ptr)
	movdqu	%xmm0, 0x40($r_ptr)
	movdqu	%xmm0, 0x50($r_ptr)

	`&load_for_sqr("$R(%rsp)", "$src0")`
	lea	$Rsqr(%rsp), $r_ptr	# R^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Rsqr, R);

	`&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
	lea	$res_z(%rsp), $r_ptr	# Z3 = H*Z1*Z2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(res_z, H, in1_z);

	`&load_for_sqr("$H(%rsp)", "$src0")`
	lea	$Hsqr(%rsp), $r_ptr	# H^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Hsqr, H);

	`&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
	lea	$res_z(%rsp), $r_ptr	# Z3 = H*Z1*Z2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(res_z, res_z, in2_z);

	`&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
	lea	$Hcub(%rsp), $r_ptr	# H^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(Hcub, Hsqr, H);

	`&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
	lea	$U2(%rsp), $r_ptr	# U1*H^2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(U2, U1, Hsqr);

#######################################################################
# operate in 4-5-0-1 "name space" that matches multiplication output
my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
my ($poly1, $poly3)=($acc6,$acc7);

	#lea	$U2(%rsp), $a_ptr
	#lea	$Hsqr(%rsp), $r_ptr	# 2*U1*H^2
	#call	__ecp_nistz256_mul_by_2	# ecp_nistz256_mul_by_2(Hsqr, U2);
	add	$acc0, $acc0		# a0:a3+a0:a3
	lea	$Rsqr(%rsp), $a_ptr
	mov	8*0($a_ptr), $t0
	mov	8*1($a_ptr), $t1
	mov	8*2($a_ptr), $t2
	mov	8*3($a_ptr), $t3
	call	__ecp_nistz256_sub$x	# p256_sub(res_x, Rsqr, Hsqr);

	lea	$Hcub(%rsp), $b_ptr
	lea	$res_x(%rsp), $r_ptr
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_x, res_x, Hcub);

	mov	$U2+8*0(%rsp), $t0
	mov	$U2+8*1(%rsp), $t1
	mov	$U2+8*2(%rsp), $t2
	mov	$U2+8*3(%rsp), $t3
	lea	$res_y(%rsp), $r_ptr
	call	__ecp_nistz256_sub$x	# p256_sub(res_y, U2, res_x);

	mov	$acc0, 8*0($r_ptr)	# save the result, as
	mov	$acc1, 8*1($r_ptr)	# __ecp_nistz256_sub doesn't
	mov	$acc2, 8*2($r_ptr)
	mov	$acc3, 8*3($r_ptr)

	`&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, S1, Hcub);

	`&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
	lea	$res_y(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(res_y, R, res_y);

	lea	$S2(%rsp), $b_ptr
	lea	$res_y(%rsp), $r_ptr
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_y, res_y, S2);

	movq	%xmm0, $r_ptr		# restore $r_ptr

	movdqa	%xmm5, %xmm0		# copy_conditional(res_z, in2_z, in1infty);
	pandn	$res_z(%rsp), %xmm0
	pandn	$res_z+0x10(%rsp), %xmm1
	pand	$in2_z(%rsp), %xmm2
	pand	$in2_z+0x10(%rsp), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_z, in1_z, in2infty);
	pand	$in1_z(%rsp), %xmm2
	pand	$in1_z+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x40($r_ptr)
	movdqu	%xmm3, 0x50($r_ptr)

	movdqa	%xmm5, %xmm0		# copy_conditional(res_x, in2_x, in1infty);
	pandn	$res_x(%rsp), %xmm0
	pandn	$res_x+0x10(%rsp), %xmm1
	pand	$in2_x(%rsp), %xmm2
	pand	$in2_x+0x10(%rsp), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_x, in1_x, in2infty);
	pand	$in1_x(%rsp), %xmm2
	pand	$in1_x+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x00($r_ptr)
	movdqu	%xmm3, 0x10($r_ptr)

	movdqa	%xmm5, %xmm0		# copy_conditional(res_y, in2_y, in1infty);
	pandn	$res_y(%rsp), %xmm0
	pandn	$res_y+0x10(%rsp), %xmm1
	pand	$in2_y(%rsp), %xmm2
	pand	$in2_y+0x10(%rsp), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_y, in1_y, in2infty);
	pand	$in1_y(%rsp), %xmm2
	pand	$in1_y+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x20($r_ptr)
	movdqu	%xmm3, 0x30($r_ptr)
.size	ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
sub gen_add_affine () {
my ($src0,$sfx,$bias);
my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
    $res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y)=map(32*$_,(0..14));
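
########################################################################
# Editorial summary (hedged): mixed addition with an affine second point,
# i.e. Z2 = 1, so U1 = X1 and S1 = Y1 come for free:
#	U2 = X2*Z1^2, H = U2 - X1
#	S2 = Y2*Z1^3, R = S2 - Y1
#	res_x = R^2 - H^3 - 2*X1*H^2
#	res_y = R*(X1*H^2 - res_x) - Y1*H^3
#	res_z = H*Z1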
.globl	ecp_nistz256_point_add_affine
.type	ecp_nistz256_point_add_affine,\@function,3
ecp_nistz256_point_add_affine:
$code.=<<___	if ($addx);
	and	OPENSSL_ia32cap_P+8(%rip), %ecx
	je	.Lpoint_add_affinex
.type	ecp_nistz256_point_add_affinex,\@function,3
ecp_nistz256_point_add_affinex:
.Lpoint_add_affinex:
	movdqu	0x00($a_ptr), %xmm0	# copy	*(P256_POINT *)$a_ptr
	mov	$b_org, $b_ptr		# reassign
	movdqu	0x10($a_ptr), %xmm1
	movdqu	0x20($a_ptr), %xmm2
	movdqu	0x30($a_ptr), %xmm3
	movdqu	0x40($a_ptr), %xmm4
	movdqu	0x50($a_ptr), %xmm5
	mov	0x40+8*0($a_ptr), $src0	# load original in1_z
	mov	0x40+8*1($a_ptr), $acc6
	mov	0x40+8*2($a_ptr), $acc7
	mov	0x40+8*3($a_ptr), $acc0
	movdqa	%xmm0, $in1_x(%rsp)
	movdqa	%xmm1, $in1_x+0x10(%rsp)
	movdqa	%xmm2, $in1_y(%rsp)
	movdqa	%xmm3, $in1_y+0x10(%rsp)
	movdqa	%xmm4, $in1_z(%rsp)
	movdqa	%xmm5, $in1_z+0x10(%rsp)

	movdqu	0x00($b_ptr), %xmm0	# copy	*(P256_POINT_AFFINE *)$b_ptr
	pshufd	\$0xb1, %xmm3, %xmm5
	movdqu	0x10($b_ptr), %xmm1
	movdqu	0x20($b_ptr), %xmm2
	movdqu	0x30($b_ptr), %xmm3
	movdqa	%xmm0, $in2_x(%rsp)
	pshufd	\$0x1e, %xmm5, %xmm4
	movdqa	%xmm1, $in2_x+0x10(%rsp)
	movq	$r_ptr, %xmm0		# save $r_ptr
	movdqa	%xmm2, $in2_y(%rsp)
	movdqa	%xmm3, $in2_y+0x10(%rsp)

	lea	0x40-$bias($a_ptr), $a_ptr	# $a_ptr is still valid
	lea	$Z1sqr(%rsp), $r_ptr	# Z1^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Z1sqr, in1_z);

	pcmpeqd	%xmm4, %xmm5
	pshufd	\$0xb1, %xmm3, %xmm4
	mov	0x00($b_ptr), $src0	# $b_ptr is still valid
	#lea	0x00($b_ptr), $b_ptr
	mov	$acc4, $acc1		# harmonize sqr output and mul input
	pshufd	\$0, %xmm5, %xmm5	# in1infty
	pshufd	\$0x1e, %xmm4, %xmm3
	pcmpeqd	%xmm3, %xmm4
	pshufd	\$0, %xmm4, %xmm4	# in2infty

	lea	$Z1sqr-$bias(%rsp), $a_ptr
	lea	$U2(%rsp), $r_ptr	# U2 = X2*Z1^2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(U2, Z1sqr, in2_x);

	lea	$in1_x(%rsp), $b_ptr
	lea	$H(%rsp), $r_ptr	# H = U2 - U1
	call	__ecp_nistz256_sub_from$x	# p256_sub(H, U2, in1_x);

	`&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr	# S2 = Z1^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, Z1sqr, in1_z);

	`&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
	lea	$res_z(%rsp), $r_ptr	# Z3 = H*Z1*Z2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(res_z, H, in1_z);

	`&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr	# S2 = Y2*Z1^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, S2, in2_y);

	lea	$in1_y(%rsp), $b_ptr
	lea	$R(%rsp), $r_ptr	# R = S2 - S1
	call	__ecp_nistz256_sub_from$x	# p256_sub(R, S2, in1_y);

	`&load_for_sqr("$H(%rsp)", "$src0")`
	lea	$Hsqr(%rsp), $r_ptr	# H^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Hsqr, H);

	`&load_for_sqr("$R(%rsp)", "$src0")`
	lea	$Rsqr(%rsp), $r_ptr	# R^2
	call	__ecp_nistz256_sqr_mont$x	# p256_sqr_mont(Rsqr, R);

	`&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
	lea	$Hcub(%rsp), $r_ptr	# H^3
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(Hcub, Hsqr, H);

	`&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
	lea	$U2(%rsp), $r_ptr	# U1*H^2
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(U2, in1_x, Hsqr);

#######################################################################
# operate in 4-5-0-1 "name space" that matches multiplication output
my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
my ($poly1, $poly3)=($acc6,$acc7);

	#lea	$U2(%rsp), $a_ptr
	#lea	$Hsqr(%rsp), $r_ptr	# 2*U1*H^2
	#call	__ecp_nistz256_mul_by_2	# ecp_nistz256_mul_by_2(Hsqr, U2);
	add	$acc0, $acc0		# a0:a3+a0:a3
	lea	$Rsqr(%rsp), $a_ptr
	mov	8*0($a_ptr), $t0
	mov	8*1($a_ptr), $t1
	mov	8*2($a_ptr), $t2
	mov	8*3($a_ptr), $t3
	call	__ecp_nistz256_sub$x	# p256_sub(res_x, Rsqr, Hsqr);

	lea	$Hcub(%rsp), $b_ptr
	lea	$res_x(%rsp), $r_ptr
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_x, res_x, Hcub);

	mov	$U2+8*0(%rsp), $t0
	mov	$U2+8*1(%rsp), $t1
	mov	$U2+8*2(%rsp), $t2
	mov	$U2+8*3(%rsp), $t3
	lea	$H(%rsp), $r_ptr
	call	__ecp_nistz256_sub$x	# p256_sub(H, U2, res_x);

	mov	$acc0, 8*0($r_ptr)	# save the result, as
	mov	$acc1, 8*1($r_ptr)	# __ecp_nistz256_sub doesn't
	mov	$acc2, 8*2($r_ptr)
	mov	$acc3, 8*3($r_ptr)

	`&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
	lea	$S2(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(S2, Hcub, in1_y);

	`&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
	lea	$H(%rsp), $r_ptr
	call	__ecp_nistz256_mul_mont$x	# p256_mul_mont(H, H, R);

	lea	$S2(%rsp), $b_ptr
	lea	$res_y(%rsp), $r_ptr
	call	__ecp_nistz256_sub_from$x	# p256_sub(res_y, H, S2);

	movq	%xmm0, $r_ptr		# restore $r_ptr

	movdqa	%xmm5, %xmm0		# copy_conditional(res_z, ONE, in1infty);
	pandn	$res_z(%rsp), %xmm0
	pandn	$res_z+0x10(%rsp), %xmm1
	pand	.LONE_mont(%rip), %xmm2
	pand	.LONE_mont+0x10(%rip), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_z, in1_z, in2infty);
	pand	$in1_z(%rsp), %xmm2
	pand	$in1_z+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x40($r_ptr)
	movdqu	%xmm3, 0x50($r_ptr)

	movdqa	%xmm5, %xmm0		# copy_conditional(res_x, in2_x, in1infty);
	pandn	$res_x(%rsp), %xmm0
	pandn	$res_x+0x10(%rsp), %xmm1
	pand	$in2_x(%rsp), %xmm2
	pand	$in2_x+0x10(%rsp), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_x, in1_x, in2infty);
	pand	$in1_x(%rsp), %xmm2
	pand	$in1_x+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x00($r_ptr)
	movdqu	%xmm3, 0x10($r_ptr)

	movdqa	%xmm5, %xmm0		# copy_conditional(res_y, in2_y, in1infty);
	pandn	$res_y(%rsp), %xmm0
	pandn	$res_y+0x10(%rsp), %xmm1
	pand	$in2_y(%rsp), %xmm2
	pand	$in2_y+0x10(%rsp), %xmm3

	movdqa	%xmm4, %xmm0		# copy_conditional(res_y, in1_y, in2infty);
	pand	$in1_y(%rsp), %xmm2
	pand	$in1_y+0x10(%rsp), %xmm3
	movdqu	%xmm2, 0x20($r_ptr)
	movdqu	%xmm3, 0x30($r_ptr)
.size	ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
2986 &gen_add_affine("q");
2988 ########################################################################
2992 ########################################################################
2993 # operate in 4-5-0-1 "name space" that matches multiplication output
2995 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2998 .type __ecp_nistz256_add_tox,\@abi-omnipotent
3000 __ecp_nistz256_add_tox:
3002 adc 8*0($b_ptr), $a0
3003 adc 8*1($b_ptr), $a1
3005 adc 8*2($b_ptr), $a2
3006 adc 8*3($b_ptr), $a3
3021 mov $a0, 8*0($r_ptr)
3023 mov $a1, 8*1($r_ptr)
3025 mov $a2, 8*2($r_ptr)
3026 mov $a3, 8*3($r_ptr)
3029 .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
3031 .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
3033 __ecp_nistz256_sub_fromx:
3035 sbb 8*0($b_ptr), $a0
3036 sbb 8*1($b_ptr), $a1
3038 sbb 8*2($b_ptr), $a2
3039 sbb 8*3($b_ptr), $a3
3054 mov $a0, 8*0($r_ptr)
3056 mov $a1, 8*1($r_ptr)
3058 mov $a2, 8*2($r_ptr)
3059 mov $a3, 8*3($r_ptr)
3062 .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
3064 .type __ecp_nistz256_subx,\@abi-omnipotent
3066 __ecp_nistz256_subx:
3091 .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
3093 .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
3095 __ecp_nistz256_mul_by_2x:
3097 adc $a0, $a0 # a0:a3+a0:a3
3116 mov $a0, 8*0($r_ptr)
3118 mov $a1, 8*1($r_ptr)
3120 mov $a2, 8*2($r_ptr)
3121 mov $a3, 8*3($r_ptr)
3124 .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
3129 &gen_add_affine("x");
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}/../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;

s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;

die "insane number of elements" if ($#arr != 64*16*37-1);
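
# Editorial note (hedged on the exact layout): the element count checks out
# as 37 windows x 64 points x 16 32-bit words, i.e. each precomputed point
# is stored as a 64-byte affine (x, y) pair in Montgomery form, which is
# what the w7 gather above indexes in 64-byte strides.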
.globl	ecp_nistz256_precomputed
.type	ecp_nistz256_precomputed,\@object
ecp_nistz256_precomputed:
while (@line=splice(@arr,0,16)) {
	print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed

$code =~ s/\`([^\`]*)\`/eval $1/gem;