2 # Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the Apache License 2.0 (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
17 # ECP_NISTZ256 module for SPARCv9.
21 # The original ECP_NISTZ256 submission targeting x86_64 is detailed in
22 # http://eprint.iacr.org/2013/816. In the process of adaptation the
23 # original .c module was made 32-bit savvy in order to make this
24 # implementation possible.
26 # with/without -DECP_NISTZ256_ASM
27 # UltraSPARC III +12-18%
28 # SPARC T4 +99-550% (+66-150% on 32-bit Solaris)
30 # Ranges denote minimum and maximum improvement coefficients depending
31 # on benchmark. Lower coefficients are for ECDSA sign, server-side
32 # operation. Keep in mind that +200% means 3x improvement.
34 $output = pop and open STDOUT,">$output";
37 #include "sparc_arch.h"
39 #define LOCALS (STACK_BIAS+STACK_FRAME)
41 .register %g2,#scratch
42 .register %g3,#scratch
43 # define STACK64_FRAME STACK_FRAME
44 # define LOCALS64 LOCALS
46 # define STACK64_FRAME (2047+192)
47 # define LOCALS64 STACK64_FRAME
50 .section ".text",#alloc,#execinstr
52 ########################################################################
53 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
55 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
56 open TABLE,"<ecp_nistz256_table.c" or
57 open TABLE,"<${dir}../ecp_nistz256_table.c" or
58 die "failed to open ecp_nistz256_table.c:",$!;
63 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
67 # See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
68 # 64*16*37-1 is used because $#arr returns the last valid index of @arr, not the number of elements.
70 die "insane number of elements" if ($#arr != 64*16*37-1);
73 .globl ecp_nistz256_precomputed
75 ecp_nistz256_precomputed:
77 ########################################################################
78 # this conversion smashes P256_POINT_AFFINE into individual bytes at a
79 # 64-byte interval, similar to
83 @tbl = splice(@arr,0,64*16);
84 for($i=0;$i<64;$i++) {
86 for($j=0;$j<64;$j++) {
87 push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
90 $code.=join(',',map { sprintf "0x%02x",$_} @line);
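#
# A hedged C sketch (illustrative only; the function and parameter names
# are assumptions, not this module's code) of how a constant-time w7
# gather reads entry "index" back out of one 64-point block of this
# byte-sliced layout: byte j of the selected point lives at
# table[64*j + index], so the same 64 rows are touched whatever the
# index is.  The actual gather routines below additionally handle a
# zero index and add prefetches.
#
#	void gather_w7_block(unsigned char out[64],
#	                     const unsigned char table[64*64], int index)
#	{
#	    for (int j = 0; j < 64; j++)
#	        out[j] = table[64*j + index];
#	}
#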
96 my ($rp,$ap,$bp)=map("%i$_",(0..2));
97 my @acc=map("%l$_",(0..7));
98 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
99 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
100 my ($rp_real,$ap_real)=("%g2","%g3");
103 .type ecp_nistz256_precomputed,#object
104 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
106 .LRR: ! 2^512 mod P precomputed for NIST P256 polynomial
107 .long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
108 .long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
110 .long 1,0,0,0,0,0,0,0
111 .asciz "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
113 ! void ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
114 .globl ecp_nistz256_to_mont
116 ecp_nistz256_to_mont:
117 save %sp,-STACK_FRAME,%sp
121 call __ecp_nistz256_mul_mont
125 .type ecp_nistz256_to_mont,#function
126 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
128 ! void ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
129 .globl ecp_nistz256_from_mont
131 ecp_nistz256_from_mont:
132 save %sp,-STACK_FRAME,%sp
136 call __ecp_nistz256_mul_mont
140 .type ecp_nistz256_from_mont,#function
141 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
143 ! void ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
144 ! const BN_ULONG %i2[8]);
145 .globl ecp_nistz256_mul_mont
147 ecp_nistz256_mul_mont:
148 save %sp,-STACK_FRAME,%sp
150 call __ecp_nistz256_mul_mont
154 .type ecp_nistz256_mul_mont,#function
155 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
157 ! void ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i2[8]);
158 .globl ecp_nistz256_sqr_mont
160 ecp_nistz256_sqr_mont:
161 save %sp,-STACK_FRAME,%sp
163 call __ecp_nistz256_mul_mont
167 .type ecp_nistz256_sqr_mont,#function
168 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
171 ########################################################################
172 # A special thing to keep in mind is that $t0-$t7 hold 64-bit values,
173 # while all others are meant to hold 32-bit ones. "Meant to" means that
174 # additions to @acc[0-7] do "contaminate" the upper bits, but those are
175 # cleared before they can affect the outcome (follow 'and' with $mask).
176 # Also keep in mind that addition with carry is addition with a 32-bit
177 # carry, even though the CPU is 64-bit. [Addition with 64-bit carry was
178 # introduced in T3, see below for VIS3 code paths.]
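#
# As a plain-C illustration of that convention (a sketch, not this
# module's code; x and y are arbitrary 32-bit inputs):
#
#	uint64_t acc = (uint64_t)x + y;           /* upper half "contaminated" */
#	uint32_t carry = (uint32_t)(acc >> 32);   /* the 32-bit carry out      */
#	acc &= 0xffffffff;                        /* follow 'and' with $mask   */
#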
182 __ecp_nistz256_mul_mont:
183 ld [$bp+0],$bi ! b[0]
186 srl $mask,0,$mask ! 0xffffffff
194 mulx $a0,$bi,$t0 ! a[0-7]*b[0], 64-bit results
202 srlx $t0,32,@acc[1] ! extract high parts
209 srlx $t7,32,@acc[0] ! "@acc[8]"
212 for($i=1;$i<8;$i++) {
214 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
215 ld [$bp+4*$i],$bi ! b[$i]
216 ld [$ap+4],$t1 ! re-load a[1-7]
217 addccc @acc[2],$t2,@acc[2]
218 addccc @acc[3],$t3,@acc[3]
221 addccc @acc[4],$t4,@acc[4]
222 addccc @acc[5],$t5,@acc[5]
225 addccc @acc[6],$t6,@acc[6]
226 addccc @acc[7],$t7,@acc[7]
229 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
232 # Reduction iteration is normally performed by accumulating
233 # result of multiplication of modulus by "magic" digit [and
234 # omitting least significant word, which is guaranteed to
235 # be 0], but thanks to special form of modulus and "magic"
236 # digit being equal to least significant word, it can be
237 # performed with additions and subtractions alone. Indeed:
239 # ffff.0001.0000.0000.0000.ffff.ffff.ffff
241 # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
243 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can rewrite the above as:
246 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
247 # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
248 # - abcd.0000.0000.0000.0000.0000.0000.abcd
250 # or marking redundant operations:
252 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
253 # + abcd.0000.abcd.0000.0000.abcd.----.----.----
254 # - abcd.----.----.----.----.----.----.----
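#
# The same arithmetic as a plain-C sketch (illustrative only, not the
# module's code; the 9-word layout w[0..8] with w[0] least significant
# is an assumption of the sketch).  One step adds w[0]*p, with
# p = 2^256-2^224+2^192+2^96-1, which zeroes the low word using only
# word-sized additions and subtractions, then drops that word.  Eight
# such steps, interleaved with the eight multiply-accumulate passes
# below, complete the Montgomery reduction.
#
#	static void reduce_step(uint32_t w[9])
#	{
#	    /* word-by-word signs of p: -1, +2^96, +2^192, -2^224, +2^256 */
#	    static const int sign[9] = { -1, 0, 0, +1, 0, 0, +1, -1, +1 };
#	    int64_t d = w[0], c = 0;
#	    int i;
#
#	    for (i = 0; i < 9; i++) {        /* w += d*p, word by word */
#	        c += (int64_t)w[i] + sign[i]*d;
#	        w[i] = (uint32_t)c;
#	        c >>= 32;                    /* signed carry: -1, 0 or +1 */
#	    }
#	    for (i = 0; i < 8; i++)          /* w[0] is now zero; drop it */
#	        w[i] = w[i+1];
#	    w[8] = (uint32_t)c;              /* carry out becomes top word */
#	}
#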
257 ! multiplication-less reduction
258 addcc @acc[3],$t0,@acc[3] ! r[3]+=r[0]
259 addccc @acc[4],%g0,@acc[4] ! r[4]+=0
260 and @acc[1],$mask,@acc[1]
261 and @acc[2],$mask,@acc[2]
262 addccc @acc[5],%g0,@acc[5] ! r[5]+=0
263 addccc @acc[6],$t0,@acc[6] ! r[6]+=r[0]
264 and @acc[3],$mask,@acc[3]
265 and @acc[4],$mask,@acc[4]
266 addccc @acc[7],%g0,@acc[7] ! r[7]+=0
267 addccc @acc[0],$t0,@acc[0] ! r[8]+=r[0] "@acc[8]"
268 and @acc[5],$mask,@acc[5]
269 and @acc[6],$mask,@acc[6]
270 addc $carry,%g0,$carry ! top-most carry
271 subcc @acc[7],$t0,@acc[7] ! r[7]-=r[0]
272 subccc @acc[0],%g0,@acc[0] ! r[8]-=0 "@acc[8]"
273 subc $carry,%g0,$carry ! top-most carry
274 and @acc[7],$mask,@acc[7]
275 and @acc[0],$mask,@acc[0] ! "@acc[8]"
277 push(@acc,shift(@acc)); # rotate registers to "omit" acc[0]
279 mulx $a0,$bi,$t0 ! a[0-7]*b[$i], 64-bit results
287 add @acc[0],$t0,$t0 ! accumulate low parts, can't overflow
289 srlx $t0,32,@acc[1] ! extract high parts
302 srlx $t7,32,@acc[0] ! "@acc[8]"
306 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
307 addccc @acc[2],$t2,@acc[2]
308 addccc @acc[3],$t3,@acc[3]
309 addccc @acc[4],$t4,@acc[4]
310 addccc @acc[5],$t5,@acc[5]
311 addccc @acc[6],$t6,@acc[6]
312 addccc @acc[7],$t7,@acc[7]
313 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
316 addcc @acc[3],$t0,@acc[3] ! multiplication-less reduction
317 addccc @acc[4],%g0,@acc[4]
318 addccc @acc[5],%g0,@acc[5]
319 addccc @acc[6],$t0,@acc[6]
320 addccc @acc[7],%g0,@acc[7]
321 addccc @acc[0],$t0,@acc[0] ! "@acc[8]"
322 addc $carry,%g0,$carry
323 subcc @acc[7],$t0,@acc[7]
324 subccc @acc[0],%g0,@acc[0] ! "@acc[8]"
325 subc $carry,%g0,$carry ! top-most carry
327 push(@acc,shift(@acc)); # rotate registers to omit acc[0]
329 ! Final step is "if result > mod, subtract mod", but we do it
330 ! "other way around", namely subtract modulus from result
331 ! and if it borrowed, add modulus back.
333 subcc @acc[0],-1,@acc[0] ! subtract modulus
334 subccc @acc[1],-1,@acc[1]
335 subccc @acc[2],-1,@acc[2]
336 subccc @acc[3],0,@acc[3]
337 subccc @acc[4],0,@acc[4]
338 subccc @acc[5],0,@acc[5]
339 subccc @acc[6],1,@acc[6]
340 subccc @acc[7],-1,@acc[7]
341 subc $carry,0,$carry ! broadcast borrow bit
343 ! Note that because mod has special form, i.e. consists of
344 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
345 ! using value of broadcasted borrow and the borrow bit itself.
346 ! To minimize dependency chain we first broadcast and then
347 ! extract the bit by negating (follow $bi).
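!
! A plain-C sketch of this add-back (illustrative only; r[] and mask are
! assumed names, mask being the broadcast borrow, 0 or 0xffffffff).  The
! words of the synthesized addend are mask,mask,mask,0,0,0,mask&1,mask,
! i.e. the modulus when the subtraction borrowed and zero otherwise:
!
!	uint32_t bit = mask & 1;
!	uint64_t c;
!	c  = (uint64_t)r[0] + mask; r[0] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[1] + mask; r[1] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[2] + mask; r[2] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[3];        r[3] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[4];        r[4] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[5];        r[5] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[6] + bit;  r[6] = (uint32_t)c; c >>= 32;
!	c += (uint64_t)r[7] + mask; r[7] = (uint32_t)c;
!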
349 addcc @acc[0],$carry,@acc[0] ! add modulus or zero
350 addccc @acc[1],$carry,@acc[1]
353 addccc @acc[2],$carry,@acc[2]
355 addccc @acc[3],0,@acc[3]
357 addccc @acc[4],0,@acc[4]
359 addccc @acc[5],0,@acc[5]
361 addccc @acc[6],$bi,@acc[6]
363 addc @acc[7],$carry,@acc[7]
367 .type __ecp_nistz256_mul_mont,#function
368 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
370 ! void ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
371 ! const BN_ULONG %i2[8]);
372 .globl ecp_nistz256_add
375 save %sp,-STACK_FRAME,%sp
383 call __ecp_nistz256_add
387 .type ecp_nistz256_add,#function
388 .size ecp_nistz256_add,.-ecp_nistz256_add
392 ld [$bp+0],$t0 ! b[0]
396 addcc @acc[0],$t0,@acc[0]
399 addccc @acc[1],$t1,@acc[1]
402 addccc @acc[2],$t2,@acc[2]
403 addccc @acc[3],$t3,@acc[3]
404 addccc @acc[4],$t4,@acc[4]
405 addccc @acc[5],$t5,@acc[5]
406 addccc @acc[6],$t6,@acc[6]
407 addccc @acc[7],$t7,@acc[7]
412 ! if a+b >= modulus, subtract modulus.
414 ! But since comparison implies subtraction, we subtract
415 ! modulus and then add it back if subtraction borrowed.
417 subcc @acc[0],-1,@acc[0]
418 subccc @acc[1],-1,@acc[1]
419 subccc @acc[2],-1,@acc[2]
420 subccc @acc[3], 0,@acc[3]
421 subccc @acc[4], 0,@acc[4]
422 subccc @acc[5], 0,@acc[5]
423 subccc @acc[6], 1,@acc[6]
424 subccc @acc[7],-1,@acc[7]
427 ! Note that because mod has special form, i.e. consists of
428 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
429 ! using value of borrow and its negative.
431 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
432 addccc @acc[1],$carry,@acc[1]
435 addccc @acc[2],$carry,@acc[2]
437 addccc @acc[3],0,@acc[3]
439 addccc @acc[4],0,@acc[4]
441 addccc @acc[5],0,@acc[5]
443 addccc @acc[6],$bi,@acc[6]
445 addc @acc[7],$carry,@acc[7]
449 .type __ecp_nistz256_add,#function
450 .size __ecp_nistz256_add,.-__ecp_nistz256_add
452 ! void ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
453 .globl ecp_nistz256_mul_by_2
455 ecp_nistz256_mul_by_2:
456 save %sp,-STACK_FRAME,%sp
464 call __ecp_nistz256_mul_by_2
468 .type ecp_nistz256_mul_by_2,#function
469 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
472 __ecp_nistz256_mul_by_2:
473 addcc @acc[0],@acc[0],@acc[0] ! a+a=2*a
474 addccc @acc[1],@acc[1],@acc[1]
475 addccc @acc[2],@acc[2],@acc[2]
476 addccc @acc[3],@acc[3],@acc[3]
477 addccc @acc[4],@acc[4],@acc[4]
478 addccc @acc[5],@acc[5],@acc[5]
479 addccc @acc[6],@acc[6],@acc[6]
480 addccc @acc[7],@acc[7],@acc[7]
483 .type __ecp_nistz256_mul_by_2,#function
484 .size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
486 ! void ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
487 .globl ecp_nistz256_mul_by_3
489 ecp_nistz256_mul_by_3:
490 save %sp,-STACK_FRAME,%sp
498 call __ecp_nistz256_mul_by_3
502 .type ecp_nistz256_mul_by_3,#function
503 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
506 __ecp_nistz256_mul_by_3:
507 addcc @acc[0],@acc[0],$t0 ! a+a=2*a
508 addccc @acc[1],@acc[1],$t1
509 addccc @acc[2],@acc[2],$t2
510 addccc @acc[3],@acc[3],$t3
511 addccc @acc[4],@acc[4],$t4
512 addccc @acc[5],@acc[5],$t5
513 addccc @acc[6],@acc[6],$t6
514 addccc @acc[7],@acc[7],$t7
517 subcc $t0,-1,$t0 ! .Lreduce_by_sub but without stores
527 addcc $t0,$carry,$t0 ! add synthesized modulus
528 addccc $t1,$carry,$t1
530 addccc $t2,$carry,$t2
537 addcc $t0,@acc[0],@acc[0] ! 2*a+a=3*a
538 addccc $t1,@acc[1],@acc[1]
539 addccc $t2,@acc[2],@acc[2]
540 addccc $t3,@acc[3],@acc[3]
541 addccc $t4,@acc[4],@acc[4]
542 addccc $t5,@acc[5],@acc[5]
543 addccc $t6,@acc[6],@acc[6]
544 addccc $t7,@acc[7],@acc[7]
547 .type __ecp_nistz256_mul_by_3,#function
548 .size __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
550 ! void ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
551 ! const BN_ULONG %i2[8]);
552 .globl ecp_nistz256_sub
555 save %sp,-STACK_FRAME,%sp
563 call __ecp_nistz256_sub_from
567 .type ecp_nistz256_sub,#function
568 .size ecp_nistz256_sub,.-ecp_nistz256_sub
570 ! void ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
571 .globl ecp_nistz256_neg
574 save %sp,-STACK_FRAME,%sp
583 call __ecp_nistz256_sub_from
587 .type ecp_nistz256_neg,#function
588 .size ecp_nistz256_neg,.-ecp_nistz256_neg
591 __ecp_nistz256_sub_from:
592 ld [$bp+0],$t0 ! b[0]
596 subcc @acc[0],$t0,@acc[0]
599 subccc @acc[1],$t1,@acc[1]
600 subccc @acc[2],$t2,@acc[2]
603 subccc @acc[3],$t3,@acc[3]
604 subccc @acc[4],$t4,@acc[4]
605 subccc @acc[5],$t5,@acc[5]
606 subccc @acc[6],$t6,@acc[6]
607 subccc @acc[7],$t7,@acc[7]
608 subc %g0,%g0,$carry ! broadcast borrow bit
612 ! if a-b borrows, add modulus.
614 ! Note that because mod has special form, i.e. consists of
615 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
616 ! using value of broadcasted borrow and the borrow bit itself.
617 ! To minimize dependency chain we first broadcast and then
618 ! extract the bit by negating (follow $bi).
620 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
621 addccc @acc[1],$carry,@acc[1]
624 addccc @acc[2],$carry,@acc[2]
626 addccc @acc[3],0,@acc[3]
628 addccc @acc[4],0,@acc[4]
630 addccc @acc[5],0,@acc[5]
632 addccc @acc[6],$bi,@acc[6]
634 addc @acc[7],$carry,@acc[7]
638 .type __ecp_nistz256_sub_from,#function
639 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
642 __ecp_nistz256_sub_morf:
643 ld [$bp+0],$t0 ! b[0]
647 subcc $t0,@acc[0],@acc[0]
650 subccc $t1,@acc[1],@acc[1]
651 subccc $t2,@acc[2],@acc[2]
654 subccc $t3,@acc[3],@acc[3]
655 subccc $t4,@acc[4],@acc[4]
656 subccc $t5,@acc[5],@acc[5]
657 subccc $t6,@acc[6],@acc[6]
658 subccc $t7,@acc[7],@acc[7]
660 subc %g0,%g0,$carry ! broadcast borrow bit
661 .type __ecp_nistz256_sub_morf,#function
662 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
664 ! void ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
665 .globl ecp_nistz256_div_by_2
667 ecp_nistz256_div_by_2:
668 save %sp,-STACK_FRAME,%sp
676 call __ecp_nistz256_div_by_2
680 .type ecp_nistz256_div_by_2,#function
681 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
684 __ecp_nistz256_div_by_2:
685 ! ret = (a is odd ? a+mod : a) >> 1
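!
! The identity works because when a is odd, a+mod is even and (a+mod)/2
! is congruent to a/2 modulo mod; the carry out of the addition becomes
! the top bit after the shift.  A hedged C sketch of the same computation
! (cond_add_mod is a hypothetical helper, not part of this module):
!
!	uint32_t carry = cond_add_mod(a, a[0] & 1);  /* add mod if a is odd */
!	for (int i = 0; i < 7; i++)
!	    a[i] = (a[i] >> 1) | (a[i+1] << 31);
!	a[7] = (a[7] >> 1) | (carry << 31);
!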
689 addcc @acc[0],$carry,@acc[0]
690 addccc @acc[1],$carry,@acc[1]
691 addccc @acc[2],$carry,@acc[2]
692 addccc @acc[3],0,@acc[3]
693 addccc @acc[4],0,@acc[4]
694 addccc @acc[5],0,@acc[5]
695 addccc @acc[6],$bi,@acc[6]
696 addccc @acc[7],$carry,@acc[7]
701 srl @acc[0],1,@acc[0]
703 srl @acc[1],1,@acc[1]
704 or @acc[0],$t0,@acc[0]
706 srl @acc[2],1,@acc[2]
707 or @acc[1],$t1,@acc[1]
710 srl @acc[3],1,@acc[3]
711 or @acc[2],$t2,@acc[2]
714 srl @acc[4],1,@acc[4]
715 or @acc[3],$t3,@acc[3]
718 srl @acc[5],1,@acc[5]
719 or @acc[4],$t4,@acc[4]
722 srl @acc[6],1,@acc[6]
723 or @acc[5],$t5,@acc[5]
726 srl @acc[7],1,@acc[7]
727 or @acc[6],$t6,@acc[6]
730 or @acc[7],$t7,@acc[7]
734 .type __ecp_nistz256_div_by_2,#function
735 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
738 ########################################################################
739 # following subroutines are a "literal" implementation of those found in ecp_nistz256.c.
742 ########################################################################
743 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
746 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
747 # above map() describes stack layout with 4 temporary
748 # 256-bit vectors on top.
755 .globl ecp_nistz256_point_double
757 ecp_nistz256_point_double:
758 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
759 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
760 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
761 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
762 be ecp_nistz256_point_double_vis3
765 save %sp,-STACK_FRAME-32*4,%sp
770 .Lpoint_double_shortcut:
772 ld [$ap+32+4],@acc[1]
773 ld [$ap+32+8],@acc[2]
774 ld [$ap+32+12],@acc[3]
775 ld [$ap+32+16],@acc[4]
776 ld [$ap+32+20],@acc[5]
777 ld [$ap+32+24],@acc[6]
778 ld [$ap+32+28],@acc[7]
779 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
780 add %sp,LOCALS+$S,$rp
784 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
785 add %sp,LOCALS+$Zsqr,$rp
788 call __ecp_nistz256_add ! p256_add(M, Zsqr, in_x);
789 add %sp,LOCALS+$M,$rp
791 add %sp,LOCALS+$S,$bp
792 add %sp,LOCALS+$S,$ap
793 call __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
794 add %sp,LOCALS+$S,$rp
796 ld [$ap_real],@acc[0]
797 add %sp,LOCALS+$Zsqr,$bp
798 ld [$ap_real+4],@acc[1]
799 ld [$ap_real+8],@acc[2]
800 ld [$ap_real+12],@acc[3]
801 ld [$ap_real+16],@acc[4]
802 ld [$ap_real+20],@acc[5]
803 ld [$ap_real+24],@acc[6]
804 ld [$ap_real+28],@acc[7]
805 call __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
806 add %sp,LOCALS+$Zsqr,$rp
810 call __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
811 add %sp,LOCALS+$tmp0,$rp
813 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
816 add %sp,LOCALS+$Zsqr,$bp
817 add %sp,LOCALS+$M,$ap
818 call __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
819 add %sp,LOCALS+$M,$rp
821 call __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
822 add %sp,LOCALS+$M,$rp
824 add %sp,LOCALS+$S,$bp
825 add %sp,LOCALS+$S,$ap
826 call __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
827 add %sp,LOCALS+$tmp0,$rp
829 call __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
833 add %sp,LOCALS+$S,$ap
834 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
835 add %sp,LOCALS+$S,$rp
837 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
838 add %sp,LOCALS+$tmp0,$rp
840 add %sp,LOCALS+$M,$bp
841 add %sp,LOCALS+$M,$ap
842 call __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
845 add %sp,LOCALS+$tmp0,$bp
846 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
849 add %sp,LOCALS+$S,$bp
850 call __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
851 add %sp,LOCALS+$S,$rp
853 add %sp,LOCALS+$M,$bp
854 add %sp,LOCALS+$S,$ap
855 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
856 add %sp,LOCALS+$S,$rp
859 call __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
864 .type ecp_nistz256_point_double,#function
865 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
869 ########################################################################
870 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
871 # const P256_POINT *in2);
873 my ($res_x,$res_y,$res_z,
874 $H,$Hsqr,$R,$Rsqr,$Hcub,
875 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
876 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
878 # above map() describes stack layout with 12 temporary
879 # 256-bit vectors on top. Then we reserve some space for
880 # !in1infty, !in2infty, result of check for zero and return pointer.
882 my $bp_real=$rp_real;
885 .globl ecp_nistz256_point_add
887 ecp_nistz256_point_add:
888 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
889 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
890 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
891 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
892 be ecp_nistz256_point_add_vis3
895 save %sp,-STACK_FRAME-32*12-32,%sp
897 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
901 ld [$bp+64],$t0 ! in2_z
915 or $t4,$t0,$t0 ! !in2infty
917 st $t0,[%fp+STACK_BIAS-12]
919 ld [$ap+64],$t0 ! in1_z
933 or $t4,$t0,$t0 ! !in1infty
935 st $t0,[%fp+STACK_BIAS-16]
939 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
940 add %sp,LOCALS+$Z2sqr,$rp
944 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
945 add %sp,LOCALS+$Z1sqr,$rp
948 add %sp,LOCALS+$Z2sqr,$ap
949 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
950 add %sp,LOCALS+$S1,$rp
953 add %sp,LOCALS+$Z1sqr,$ap
954 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
955 add %sp,LOCALS+$S2,$rp
958 add %sp,LOCALS+$S1,$ap
959 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
960 add %sp,LOCALS+$S1,$rp
963 add %sp,LOCALS+$S2,$ap
964 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
965 add %sp,LOCALS+$S2,$rp
967 add %sp,LOCALS+$S1,$bp
968 call __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
969 add %sp,LOCALS+$R,$rp
971 or @acc[1],@acc[0],@acc[0] ! see if result is zero
972 or @acc[3],@acc[2],@acc[2]
973 or @acc[5],@acc[4],@acc[4]
974 or @acc[7],@acc[6],@acc[6]
975 or @acc[2],@acc[0],@acc[0]
976 or @acc[6],@acc[4],@acc[4]
977 or @acc[4],@acc[0],@acc[0]
978 st @acc[0],[%fp+STACK_BIAS-20]
981 add %sp,LOCALS+$Z2sqr,$ap
982 call __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
983 add %sp,LOCALS+$U1,$rp
986 add %sp,LOCALS+$Z1sqr,$ap
987 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
988 add %sp,LOCALS+$U2,$rp
990 add %sp,LOCALS+$U1,$bp
991 call __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
992 add %sp,LOCALS+$H,$rp
994 or @acc[1],@acc[0],@acc[0] ! see if result is zero
995 or @acc[3],@acc[2],@acc[2]
996 or @acc[5],@acc[4],@acc[4]
997 or @acc[7],@acc[6],@acc[6]
998 or @acc[2],@acc[0],@acc[0]
999 or @acc[6],@acc[4],@acc[4]
1000 orcc @acc[4],@acc[0],@acc[0]
1002 bne,pt %icc,.Ladd_proceed ! is_equal(U1,U2)?
1005 ld [%fp+STACK_BIAS-12],$t0
1006 ld [%fp+STACK_BIAS-16],$t1
1007 ld [%fp+STACK_BIAS-20],$t2
1009 be,pt %icc,.Ladd_proceed ! (in1infty || in2infty)?
1012 be,pt %icc,.Ladd_double ! is_equal(S1,S2)?
1015 ldx [%fp+STACK_BIAS-8],$rp
1045 ldx [%fp+STACK_BIAS-8],$rp_real
1047 b .Lpoint_double_shortcut
1048 add %sp,32*(12-4)+32,%sp ! difference in frame sizes
1052 add %sp,LOCALS+$R,$bp
1053 add %sp,LOCALS+$R,$ap
1054 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1055 add %sp,LOCALS+$Rsqr,$rp
1058 add %sp,LOCALS+$H,$ap
1059 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1060 add %sp,LOCALS+$res_z,$rp
1062 add %sp,LOCALS+$H,$bp
1063 add %sp,LOCALS+$H,$ap
1064 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1065 add %sp,LOCALS+$Hsqr,$rp
1068 add %sp,LOCALS+$res_z,$ap
1069 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1070 add %sp,LOCALS+$res_z,$rp
1072 add %sp,LOCALS+$H,$bp
1073 add %sp,LOCALS+$Hsqr,$ap
1074 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1075 add %sp,LOCALS+$Hcub,$rp
1077 add %sp,LOCALS+$U1,$bp
1078 add %sp,LOCALS+$Hsqr,$ap
1079 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1080 add %sp,LOCALS+$U2,$rp
1082 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1083 add %sp,LOCALS+$Hsqr,$rp
1085 add %sp,LOCALS+$Rsqr,$bp
1086 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1087 add %sp,LOCALS+$res_x,$rp
1089 add %sp,LOCALS+$Hcub,$bp
1090 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1091 add %sp,LOCALS+$res_x,$rp
1093 add %sp,LOCALS+$U2,$bp
1094 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1095 add %sp,LOCALS+$res_y,$rp
1097 add %sp,LOCALS+$Hcub,$bp
1098 add %sp,LOCALS+$S1,$ap
1099 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1100 add %sp,LOCALS+$S2,$rp
1102 add %sp,LOCALS+$R,$bp
1103 add %sp,LOCALS+$res_y,$ap
1104 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1105 add %sp,LOCALS+$res_y,$rp
1107 add %sp,LOCALS+$S2,$bp
1108 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1109 add %sp,LOCALS+$res_y,$rp
1111 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1112 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1113 ldx [%fp+STACK_BIAS-8],$rp
1115 for($i=0;$i<96;$i+=8) { # conditional moves
1117 ld [%sp+LOCALS+$i],@acc[0] ! res
1118 ld [%sp+LOCALS+$i+4],@acc[1]
1119 ld [$bp_real+$i],@acc[2] ! in2
1120 ld [$bp_real+$i+4],@acc[3]
1121 ld [$ap_real+$i],@acc[4] ! in1
1122 ld [$ap_real+$i+4],@acc[5]
1123 movrz $t1,@acc[2],@acc[0]
1124 movrz $t1,@acc[3],@acc[1]
1125 movrz $t2,@acc[4],@acc[0]
1126 movrz $t2,@acc[5],@acc[1]
1128 st @acc[1],[$rp+$i+4]
1135 .type ecp_nistz256_point_add,#function
1136 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1140 ########################################################################
1141 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1142 # const P256_POINT_AFFINE *in2);
1144 my ($res_x,$res_y,$res_z,
1145 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1147 # above map() describes stack layout with 10 temporary
1148 # 256-bit vectors on top. Then we reserve some space for
1149 # !in1infty, !in2infty, result of check for zero and return pointer.
1151 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
1152 my $bp_real=$rp_real;
1155 .globl ecp_nistz256_point_add_affine
1157 ecp_nistz256_point_add_affine:
1158 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1159 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
1160 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1161 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1162 be ecp_nistz256_point_add_affine_vis3
1165 save %sp,-STACK_FRAME-32*10-32,%sp
1167 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
1171 ld [$ap+64],$t0 ! in1_z
1185 or $t4,$t0,$t0 ! !in1infty
1187 st $t0,[%fp+STACK_BIAS-16]
1189 ld [$bp],@acc[0] ! in2_x
1197 ld [$bp+32],$t0 ! in2_y
1205 or @acc[1],@acc[0],@acc[0]
1206 or @acc[3],@acc[2],@acc[2]
1207 or @acc[5],@acc[4],@acc[4]
1208 or @acc[7],@acc[6],@acc[6]
1209 or @acc[2],@acc[0],@acc[0]
1210 or @acc[6],@acc[4],@acc[4]
1211 or @acc[4],@acc[0],@acc[0]
1219 or @acc[0],$t0,$t0 ! !in2infty
1221 st $t0,[%fp+STACK_BIAS-12]
1225 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1226 add %sp,LOCALS+$Z1sqr,$rp
1229 add %sp,LOCALS+$Z1sqr,$ap
1230 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1231 add %sp,LOCALS+$U2,$rp
1234 call __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1235 add %sp,LOCALS+$H,$rp
1238 add %sp,LOCALS+$Z1sqr,$ap
1239 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1240 add %sp,LOCALS+$S2,$rp
1243 add %sp,LOCALS+$H,$ap
1244 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1245 add %sp,LOCALS+$res_z,$rp
1248 add %sp,LOCALS+$S2,$ap
1249 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1250 add %sp,LOCALS+$S2,$rp
1253 call __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1254 add %sp,LOCALS+$R,$rp
1256 add %sp,LOCALS+$H,$bp
1257 add %sp,LOCALS+$H,$ap
1258 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1259 add %sp,LOCALS+$Hsqr,$rp
1261 add %sp,LOCALS+$R,$bp
1262 add %sp,LOCALS+$R,$ap
1263 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1264 add %sp,LOCALS+$Rsqr,$rp
1266 add %sp,LOCALS+$H,$bp
1267 add %sp,LOCALS+$Hsqr,$ap
1268 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1269 add %sp,LOCALS+$Hcub,$rp
1272 add %sp,LOCALS+$Hsqr,$ap
1273 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1274 add %sp,LOCALS+$U2,$rp
1276 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1277 add %sp,LOCALS+$Hsqr,$rp
1279 add %sp,LOCALS+$Rsqr,$bp
1280 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1281 add %sp,LOCALS+$res_x,$rp
1283 add %sp,LOCALS+$Hcub,$bp
1284 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1285 add %sp,LOCALS+$res_x,$rp
1287 add %sp,LOCALS+$U2,$bp
1288 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1289 add %sp,LOCALS+$res_y,$rp
1292 add %sp,LOCALS+$Hcub,$ap
1293 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1294 add %sp,LOCALS+$S2,$rp
1296 add %sp,LOCALS+$R,$bp
1297 add %sp,LOCALS+$res_y,$ap
1298 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1299 add %sp,LOCALS+$res_y,$rp
1301 add %sp,LOCALS+$S2,$bp
1302 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1303 add %sp,LOCALS+$res_y,$rp
1305 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1306 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1307 ldx [%fp+STACK_BIAS-8],$rp
1309 for($i=0;$i<64;$i+=8) { # conditional moves
1311 ld [%sp+LOCALS+$i],@acc[0] ! res
1312 ld [%sp+LOCALS+$i+4],@acc[1]
1313 ld [$bp_real+$i],@acc[2] ! in2
1314 ld [$bp_real+$i+4],@acc[3]
1315 ld [$ap_real+$i],@acc[4] ! in1
1316 ld [$ap_real+$i+4],@acc[5]
1317 movrz $t1,@acc[2],@acc[0]
1318 movrz $t1,@acc[3],@acc[1]
1319 movrz $t2,@acc[4],@acc[0]
1320 movrz $t2,@acc[5],@acc[1]
1322 st @acc[1],[$rp+$i+4]
1328 ld [%sp+LOCALS+$i],@acc[0] ! res
1329 ld [%sp+LOCALS+$i+4],@acc[1]
1330 ld [$ap_real+$i],@acc[4] ! in1
1331 ld [$ap_real+$i+4],@acc[5]
1332 movrz $t1,@ONE_mont[$j],@acc[0]
1333 movrz $t1,@ONE_mont[$j+1],@acc[1]
1334 movrz $t2,@acc[4],@acc[0]
1335 movrz $t2,@acc[5],@acc[1]
1337 st @acc[1],[$rp+$i+4]
1343 .type ecp_nistz256_point_add_affine,#function
1344 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1348 my ($out,$inp,$index)=map("%i$_",(0..2));
1352 ! void ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1354 .globl ecp_nistz256_scatter_w5
1356 ecp_nistz256_scatter_w5:
1357 save %sp,-STACK_FRAME,%sp
1360 add $out,$index,$out
1371 st %l0,[$out+64*0-4]
1372 st %l1,[$out+64*1-4]
1373 st %l2,[$out+64*2-4]
1374 st %l3,[$out+64*3-4]
1375 st %l4,[$out+64*4-4]
1376 st %l5,[$out+64*5-4]
1377 st %l6,[$out+64*6-4]
1378 st %l7,[$out+64*7-4]
1390 st %l0,[$out+64*0-4]
1391 st %l1,[$out+64*1-4]
1392 st %l2,[$out+64*2-4]
1393 st %l3,[$out+64*3-4]
1394 st %l4,[$out+64*4-4]
1395 st %l5,[$out+64*5-4]
1396 st %l6,[$out+64*6-4]
1397 st %l7,[$out+64*7-4]
1408 st %l0,[$out+64*0-4]
1409 st %l1,[$out+64*1-4]
1410 st %l2,[$out+64*2-4]
1411 st %l3,[$out+64*3-4]
1412 st %l4,[$out+64*4-4]
1413 st %l5,[$out+64*5-4]
1414 st %l6,[$out+64*6-4]
1415 st %l7,[$out+64*7-4]
1419 .type ecp_nistz256_scatter_w5,#function
1420 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1422 ! void ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1424 .globl ecp_nistz256_gather_w5
1426 ecp_nistz256_gather_w5:
1427 save %sp,-STACK_FRAME,%sp
1432 add $index,$mask,$index
1434 add $inp,$index,$inp
1517 .type ecp_nistz256_gather_w5,#function
1518 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1520 ! void ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1522 .globl ecp_nistz256_scatter_w7
1524 ecp_nistz256_scatter_w7:
1525 save %sp,-STACK_FRAME,%sp
1527 add $out,$index,$out
1532 subcc $index,1,$index
1540 bne .Loop_scatter_w7
1545 .type ecp_nistz256_scatter_w7,#function
1546 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1548 ! void ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1550 .globl ecp_nistz256_gather_w7
1552 ecp_nistz256_gather_w7:
1553 save %sp,-STACK_FRAME,%sp
1558 add $index,$mask,$index
1559 add $inp,$index,$inp
1563 ldub [$inp+64*0],%l0
1564 prefetch [$inp+3840+64*0],1
1565 subcc $index,1,$index
1566 ldub [$inp+64*1],%l1
1567 prefetch [$inp+3840+64*1],1
1568 ldub [$inp+64*2],%l2
1569 prefetch [$inp+3840+64*2],1
1570 ldub [$inp+64*3],%l3
1571 prefetch [$inp+3840+64*3],1
1586 .type ecp_nistz256_gather_w7,#function
1587 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1591 ########################################################################
1592 # Following subroutines are VIS3 counterparts of those above that
1593 # implement ones found in ecp_nistz256.c. The key difference is that they
1594 # use 128-bit multiplication and addition with 64-bit carry, and in order
1595 # to do that they perform conversion from uint32_t[8] to uint64_t[4] upon
1596 # entry and vice versa on return.
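#
# As a plain-C illustration of that conversion (a sketch reflecting the
# intended layout, not a quote of this module's code; a32[]/a64[] are
# assumed names): adjacent 32-bit words of the little-endian-by-word
# value are paired into 64-bit limbs on entry and split again on return:
#
#	for (i = 0; i < 4; i++)                    /* uint32_t[8] -> uint64_t[4] */
#	    a64[i] = ((uint64_t)a32[2*i+1] << 32) | a32[2*i];
#	for (i = 0; i < 4; i++) {                  /* uint64_t[4] -> uint32_t[8] */
#	    a32[2*i]   = (uint32_t)a64[i];
#	    a32[2*i+1] = (uint32_t)(a64[i] >> 32);
#	}
#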
1598 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1599 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1600 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1601 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1602 my ($rp_real,$ap_real)=("%g2","%g3");
1603 my ($acc6,$acc7)=($bp,$bi); # used in squaring
1607 __ecp_nistz256_mul_by_2_vis3:
1608 addcc $acc0,$acc0,$acc0
1609 addxccc $acc1,$acc1,$acc1
1610 addxccc $acc2,$acc2,$acc2
1611 addxccc $acc3,$acc3,$acc3
1612 b .Lreduce_by_sub_vis3
1613 addxc %g0,%g0,$acc4 ! did it carry?
1614 .type __ecp_nistz256_mul_by_2_vis3,#function
1615 .size __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1618 __ecp_nistz256_add_vis3:
1624 __ecp_nistz256_add_noload_vis3:
1626 addcc $t0,$acc0,$acc0
1627 addxccc $t1,$acc1,$acc1
1628 addxccc $t2,$acc2,$acc2
1629 addxccc $t3,$acc3,$acc3
1630 addxc %g0,%g0,$acc4 ! did it carry?
1632 .Lreduce_by_sub_vis3:
1634 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
1635 addxccc $acc1,$poly1,$t1
1636 addxccc $acc2,$minus1,$t2
1637 addxccc $acc3,$poly3,$t3
1638 addxc $acc4,$minus1,$acc4
1640 movrz $acc4,$t0,$acc0 ! ret = borrow ? ret : ret-modulus
1641 movrz $acc4,$t1,$acc1
1643 movrz $acc4,$t2,$acc2
1645 movrz $acc4,$t3,$acc3
1649 .type __ecp_nistz256_add_vis3,#function
1650 .size __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
1652 ! Trouble with subtraction is that there is no subtraction with 64-bit
1653 ! borrow, only with a 32-bit one. For this reason we "decompose" the
1654 ! 64-bit $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
1655 ! recall that SPARC is big-endian, which is why you'll observe that
1656 ! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
1657 ! "collect" the result back to 64-bit $acc0-$acc3.
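!
! A hedged C sketch of that decomposition (illustrative only; a64[],
! blo[] and bhi[] are assumed names for the 64-bit limbs of a and the
! 32-bit halves of b): each limb is subtracted in two 32-bit steps and
! re-glued, leaving a single 0/-1 borrow for the add-back below:
!
!	int64_t c = 0;
!	for (int i = 0; i < 4; i++) {
!	    uint32_t lo, hi;
!	    c += (int64_t)(uint32_t)a64[i] - blo[i];
!	    lo = (uint32_t)c; c >>= 32;
!	    c += (int64_t)(a64[i] >> 32)   - bhi[i];
!	    hi = (uint32_t)c; c >>= 32;
!	    a64[i] = ((uint64_t)hi << 32) | lo;
!	}
!	/* c is now 0 or -1, the borrow consumed by .Lreduce_by_add_vis3 */
!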
1659 __ecp_nistz256_sub_from_vis3:
1668 subcc $acc0,$t0,$acc0
1670 subccc $acc4,$t1,$acc4
1672 subccc $acc1,$t2,$acc1
1674 and $acc0,$poly1,$acc0
1675 subccc $acc5,$t3,$acc5
1678 and $acc1,$poly1,$acc1
1680 or $acc0,$acc4,$acc0
1682 or $acc1,$acc5,$acc1
1684 subccc $acc2,$t0,$acc2
1685 subccc $acc4,$t1,$acc4
1686 subccc $acc3,$t2,$acc3
1687 and $acc2,$poly1,$acc2
1688 subccc $acc5,$t3,$acc5
1690 and $acc3,$poly1,$acc3
1692 or $acc2,$acc4,$acc2
1693 subc %g0,%g0,$acc4 ! did it borrow?
1694 b .Lreduce_by_add_vis3
1695 or $acc3,$acc5,$acc3
1696 .type __ecp_nistz256_sub_from_vis3,#function
1697 .size __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1700 __ecp_nistz256_sub_morf_vis3:
1709 subcc $t0,$acc0,$acc0
1711 subccc $t1,$acc4,$acc4
1713 subccc $t2,$acc1,$acc1
1715 and $acc0,$poly1,$acc0
1716 subccc $t3,$acc5,$acc5
1719 and $acc1,$poly1,$acc1
1721 or $acc0,$acc4,$acc0
1723 or $acc1,$acc5,$acc1
1725 subccc $t0,$acc2,$acc2
1726 subccc $t1,$acc4,$acc4
1727 subccc $t2,$acc3,$acc3
1728 and $acc2,$poly1,$acc2
1729 subccc $t3,$acc5,$acc5
1731 and $acc3,$poly1,$acc3
1733 or $acc2,$acc4,$acc2
1734 subc %g0,%g0,$acc4 ! did it borrow?
1735 or $acc3,$acc5,$acc3
1737 .Lreduce_by_add_vis3:
1739 addcc $acc0,-1,$t0 ! add modulus
1741 addxccc $acc1,$poly1,$t1
1742 not $poly1,$poly1 ! restore $poly1
1743 addxccc $acc2,%g0,$t2
1746 movrnz $acc4,$t0,$acc0 ! if a-b borrowed, ret = ret+mod
1747 movrnz $acc4,$t1,$acc1
1749 movrnz $acc4,$t2,$acc2
1751 movrnz $acc4,$t3,$acc3
1755 .type __ecp_nistz256_sub_morf_vis3,#function
1756 .size __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1759 __ecp_nistz256_div_by_2_vis3:
1760 ! ret = (a is odd ? a+mod : a) >> 1
1765 addcc $acc0,-1,$t0 ! add modulus
1766 addxccc $acc1,$t1,$t1
1767 addxccc $acc2,%g0,$t2
1768 addxccc $acc3,$t3,$t3
1769 addxc %g0,%g0,$acc4 ! carry bit
1771 movrnz $acc5,$t0,$acc0
1772 movrnz $acc5,$t1,$acc1
1773 movrnz $acc5,$t2,$acc2
1774 movrnz $acc5,$t3,$acc3
1775 movrz $acc5,%g0,$acc4
1790 sllx $acc4,63,$t3 ! don't forget carry bit
1796 .type __ecp_nistz256_div_by_2_vis3,#function
1797 .size __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1799 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1800 ! 4x faster [on T4]...
1802 __ecp_nistz256_mul_mont_vis3:
1804 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1812 ldx [$bp+8],$bi ! b[1]
1814 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1816 addxccc $acc2,$t1,$acc2
1818 addxccc $acc3,$t2,$acc3
1822 for($i=1;$i<4;$i++) {
1823 # Reduction iteration is normally performed by accumulating
1824 # result of multiplication of modulus by "magic" digit [and
1825 # omitting least significant word, which is guaranteed to
1826 # be 0], but thanks to special form of modulus and "magic"
1827 # digit being equal to least significant word, it can be
1828 # performed with additions and subtractions alone. Indeed:
1830 # ffff0001.00000000.0000ffff.ffffffff
1832 # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1834 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can rewrite the above as:
1837 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1838 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1839 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1841 # or marking redundant operations:
1843 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1844 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1845 # - 0000abcd.efgh0000.--------.--------.--------
1846 # ^^^^^^^^ but this word is calculated with umulxhi, because
1847 # there is no subtract with 64-bit borrow:-(
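#
# A plain-C sketch of one such 64-bit reduction step (illustrative only;
# acc[] is just the limb array of the sketch, and unsigned __int128 is
# used where the assembly pairs mulx/umulxhi):
#
#	typedef unsigned __int128 u128;
#	uint64_t d = acc[0];                       /* limb to eliminate       */
#	u128 m = (u128)d * 0xFFFFFFFF00000001ULL;  /* == d*2^64 - d*2^32 + d  */
#	u128 c;
#	c  = (u128)acc[1] + (uint64_t)(d << 32);   /* += low  part of d*2^96  */
#	acc[0] = (uint64_t)c; c >>= 64;
#	c += (u128)acc[2] + (d >> 32);             /* += high part of d*2^96  */
#	acc[1] = (uint64_t)c; c >>= 64;
#	c += (u128)acc[3] + (uint64_t)m;           /* += low  half of d*p[3]  */
#	acc[2] = (uint64_t)c; c >>= 64;
#	c += (u128)acc[4] + (uint64_t)(m >> 64);   /* += high half of d*p[3]  */
#	acc[3] = (uint64_t)c; c >>= 64;
#	acc[4] = acc[5] + (uint64_t)c;             /* next limb plus carry    */
#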
1850 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1851 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1852 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1854 addxccc $acc2,$t1,$acc1
1856 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1858 addxccc $acc4,$t3,$acc3
1860 addxc $acc5,%g0,$acc4
1862 addcc $acc0,$t0,$acc0 ! accumulate low parts of multiplication
1864 addxccc $acc1,$t1,$acc1
1866 addxccc $acc2,$t2,$acc2
1868 addxccc $acc3,$t3,$acc3
1870 addxc $acc4,%g0,$acc4
1872 $code.=<<___ if ($i<3);
1873 ldx [$bp+8*($i+1)],$bi ! bp[$i+1]
1876 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1878 addxccc $acc2,$t1,$acc2
1880 addxccc $acc3,$t2,$acc3
1881 addxccc $acc4,$t3,$acc4
1886 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1887 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1888 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1889 addxccc $acc2,$t1,$acc1
1890 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1891 addxccc $acc4,$t3,$acc3
1892 b .Lmul_final_vis3 ! see below
1893 addxc $acc5,%g0,$acc4
1894 .type __ecp_nistz256_mul_mont_vis3,#function
1895 .size __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1897 ! compared to __ecp_nistz256_mul_mont_vis3 above it has 21% fewer
1898 ! instructions, but is only 14% faster [on T4]...
1900 __ecp_nistz256_sqr_mont_vis3:
1901 ! | | | | | |a1*a0| |
1902 ! | | | | |a2*a0| | |
1903 ! | |a3*a2|a3*a0| | | |
1904 ! | | | |a2*a1| | | |
1905 ! | | |a3*a1| | | | |
1906 ! *| | | | | | | | 2|
1907 ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1908 ! |--+--+--+--+--+--+--+--|
1909 ! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1911 ! the "can't overflow" remarks below mark carries into the high part
1912 ! of a multiplication result, which cannot overflow, because the high
1913 ! part can never be all ones.
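!
! A hedged C sketch of the schoolbook squaring in the diagram
! (illustrative only; a[] and r[] are assumed 4- and 8-limb arrays):
! off-diagonal products are taken once, doubled, then the diagonal
! squares are added and carries propagated:
!
!	typedef unsigned __int128 u128;
!	u128 col[8] = {0}, c = 0;
!	for (int i = 0; i < 4; i++)
!	    for (int j = i+1; j < 4; j++) {
!	        u128 p = (u128)a[i] * a[j];
!	        col[i+j]   += (uint64_t)p;
!	        col[i+j+1] += (uint64_t)(p >> 64);
!	    }
!	for (int k = 0; k < 8; k++)
!	    col[k] <<= 1;                        /* acc[1-6]*=2            */
!	for (int i = 0; i < 4; i++) {            /* +a[i]*a[i]             */
!	    u128 p = (u128)a[i] * a[i];
!	    col[2*i]   += (uint64_t)p;
!	    col[2*i+1] += (uint64_t)(p >> 64);
!	}
!	for (int k = 0; k < 8; k++) {            /* propagate carries      */
!	    c += col[k]; r[k] = (uint64_t)c; c >>= 64;
!	}
!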
1915 mulx $a1,$a0,$acc1 ! a[1]*a[0]
1917 mulx $a2,$a0,$acc2 ! a[2]*a[0]
1919 mulx $a3,$a0,$acc3 ! a[3]*a[0]
1920 umulxhi $a3,$a0,$acc4
1922 addcc $acc2,$t1,$acc2 ! accumulate high parts of multiplication
1923 mulx $a2,$a1,$t0 ! a[2]*a[1]
1925 addxccc $acc3,$t2,$acc3
1926 mulx $a3,$a1,$t2 ! a[3]*a[1]
1928 addxc $acc4,%g0,$acc4 ! can't overflow
1930 mulx $a3,$a2,$acc5 ! a[3]*a[2]
1931 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1932 umulxhi $a3,$a2,$acc6
1934 addcc $t2,$t1,$t1 ! accumulate high parts of multiplication
1935 mulx $a0,$a0,$acc0 ! a[0]*a[0]
1936 addxc $t3,%g0,$t2 ! can't overflow
1938 addcc $acc3,$t0,$acc3 ! accumulate low parts of multiplication
1940 addxccc $acc4,$t1,$acc4
1941 mulx $a1,$a1,$t1 ! a[1]*a[1]
1942 addxccc $acc5,$t2,$acc5
1944 addxc $acc6,%g0,$acc6 ! can't overflow
1946 addcc $acc1,$acc1,$acc1 ! acc[1-6]*=2
1947 mulx $a2,$a2,$t2 ! a[2]*a[2]
1948 addxccc $acc2,$acc2,$acc2
1950 addxccc $acc3,$acc3,$acc3
1951 mulx $a3,$a3,$t3 ! a[3]*a[3]
1952 addxccc $acc4,$acc4,$acc4
1954 addxccc $acc5,$acc5,$acc5
1955 addxccc $acc6,$acc6,$acc6
1958 addcc $acc1,$a0,$acc1 ! +a[i]*a[i]
1959 addxccc $acc2,$t1,$acc2
1960 addxccc $acc3,$a1,$acc3
1961 addxccc $acc4,$t2,$acc4
1963 addxccc $acc5,$a2,$acc5
1965 addxccc $acc6,$t3,$acc6
1966 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1967 addxc $acc7,$a3,$acc7
1969 for($i=0;$i<3;$i++) { # reductions, see commentary
1970 # in multiplication for details
1972 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1973 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1975 addxccc $acc2,$t1,$acc1
1977 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1978 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1979 addxc %g0,$t3,$acc3 ! can't overflow
1983 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1984 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1985 addxccc $acc2,$t1,$acc1
1986 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1987 addxc %g0,$t3,$acc3 ! can't overflow
1989 addcc $acc0,$acc4,$acc0 ! accumulate upper half
1990 addxccc $acc1,$acc5,$acc1
1991 addxccc $acc2,$acc6,$acc2
1992 addxccc $acc3,$acc7,$acc3
1997 ! Final step is "if result > mod, subtract mod", but as comparison
1998 ! means subtraction, we do the subtraction and then copy the outcome
1999 ! if it didn't borrow. But note that as we [have to] replace
2000 ! subtraction with addition of the negative, the carry/borrow logic is inverted: carry set means the subtraction did not borrow (follow movcs).
2003 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
2004 not $poly3,$poly3 ! restore 0x00000000FFFFFFFE
2005 addxccc $acc1,$poly1,$t1
2006 addxccc $acc2,$minus1,$t2
2007 addxccc $acc3,$poly3,$t3
2008 addxccc $acc4,$minus1,%g0 ! did it carry?
2010 movcs %xcc,$t0,$acc0
2011 movcs %xcc,$t1,$acc1
2013 movcs %xcc,$t2,$acc2
2015 movcs %xcc,$t3,$acc3
2019 .type __ecp_nistz256_sqr_mont_vis3,#function
2020 .size __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2023 ########################################################################
2024 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2027 my ($res_x,$res_y,$res_z,
2029 $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2030 # above map() describes stack layout with 10 temporary
2031 # 256-bit vectors on top.
2035 ecp_nistz256_point_double_vis3:
2036 save %sp,-STACK64_FRAME-32*10,%sp
2039 .Ldouble_shortcut_vis3:
2042 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2043 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2045 ! convert input to uint64_t[4]
2056 ld [$ap+32],$acc0 ! in_y
2064 ld [$ap+32+16],$acc2
2068 ld [$ap+32+24],$acc3
2072 stx $a0,[%sp+LOCALS64+$in_x]
2074 stx $a1,[%sp+LOCALS64+$in_x+8]
2076 stx $a2,[%sp+LOCALS64+$in_x+16]
2078 stx $a3,[%sp+LOCALS64+$in_x+24]
2080 stx $acc0,[%sp+LOCALS64+$in_y]
2082 stx $acc1,[%sp+LOCALS64+$in_y+8]
2084 stx $acc2,[%sp+LOCALS64+$in_y+16]
2085 stx $acc3,[%sp+LOCALS64+$in_y+24]
2087 ld [$ap+64],$a0 ! in_z
2105 stx $a0,[%sp+LOCALS64+$in_z]
2107 stx $a1,[%sp+LOCALS64+$in_z+8]
2109 stx $a2,[%sp+LOCALS64+$in_z+16]
2110 stx $a3,[%sp+LOCALS64+$in_z+24]
2112 ! in_y is still in $acc0-$acc3
2113 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(S, in_y);
2114 add %sp,LOCALS64+$S,$rp
2116 ! in_z is still in $a0-$a3
2117 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Zsqr, in_z);
2118 add %sp,LOCALS64+$Zsqr,$rp
2120 mov $acc0,$a0 ! put Zsqr aside
2125 add %sp,LOCALS64+$in_x,$bp
2126 call __ecp_nistz256_add_vis3 ! p256_add(M, Zsqr, in_x);
2127 add %sp,LOCALS64+$M,$rp
2129 mov $a0,$acc0 ! restore Zsqr
2130 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2132 ldx [%sp+LOCALS64+$S+8],$a1
2134 ldx [%sp+LOCALS64+$S+16],$a2
2136 ldx [%sp+LOCALS64+$S+24],$a3
2138 add %sp,LOCALS64+$in_x,$bp
2139 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(Zsqr, in_x, Zsqr);
2140 add %sp,LOCALS64+$Zsqr,$rp
2142 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(S, S);
2143 add %sp,LOCALS64+$S,$rp
2145 ldx [%sp+LOCALS64+$in_z],$bi
2146 ldx [%sp+LOCALS64+$in_y],$a0
2147 ldx [%sp+LOCALS64+$in_y+8],$a1
2148 ldx [%sp+LOCALS64+$in_y+16],$a2
2149 ldx [%sp+LOCALS64+$in_y+24],$a3
2150 add %sp,LOCALS64+$in_z,$bp
2151 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(tmp0, in_z, in_y);
2152 add %sp,LOCALS64+$tmp0,$rp
2154 ldx [%sp+LOCALS64+$M],$bi ! forward load
2155 ldx [%sp+LOCALS64+$Zsqr],$a0
2156 ldx [%sp+LOCALS64+$Zsqr+8],$a1
2157 ldx [%sp+LOCALS64+$Zsqr+16],$a2
2158 ldx [%sp+LOCALS64+$Zsqr+24],$a3
2160 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(res_z, tmp0);
2161 add %sp,LOCALS64+$res_z,$rp
2163 add %sp,LOCALS64+$M,$bp
2164 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(M, M, Zsqr);
2165 add %sp,LOCALS64+$M,$rp
2167 mov $acc0,$a0 ! put aside M
2171 call __ecp_nistz256_mul_by_2_vis3
2172 add %sp,LOCALS64+$M,$rp
2173 mov $a0,$t0 ! copy M
2174 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2176 ldx [%sp+LOCALS64+$S+8],$a1
2178 ldx [%sp+LOCALS64+$S+16],$a2
2180 ldx [%sp+LOCALS64+$S+24],$a3
2181 call __ecp_nistz256_add_noload_vis3 ! p256_mul_by_3(M, M);
2182 add %sp,LOCALS64+$M,$rp
2184 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(tmp0, S);
2185 add %sp,LOCALS64+$tmp0,$rp
2187 ldx [%sp+LOCALS64+$S],$bi ! forward load
2188 ldx [%sp+LOCALS64+$in_x],$a0
2189 ldx [%sp+LOCALS64+$in_x+8],$a1
2190 ldx [%sp+LOCALS64+$in_x+16],$a2
2191 ldx [%sp+LOCALS64+$in_x+24],$a3
2193 call __ecp_nistz256_div_by_2_vis3 ! p256_div_by_2(res_y, tmp0);
2194 add %sp,LOCALS64+$res_y,$rp
2196 add %sp,LOCALS64+$S,$bp
2197 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, in_x);
2198 add %sp,LOCALS64+$S,$rp
2200 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2201 ldx [%sp+LOCALS64+$M+8],$a1
2202 ldx [%sp+LOCALS64+$M+16],$a2
2203 ldx [%sp+LOCALS64+$M+24],$a3
2205 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(tmp0, S);
2206 add %sp,LOCALS64+$tmp0,$rp
2208 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(res_x, M);
2209 add %sp,LOCALS64+$res_x,$rp
2211 add %sp,LOCALS64+$tmp0,$bp
2212 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, tmp0);
2213 add %sp,LOCALS64+$res_x,$rp
2215 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2216 ldx [%sp+LOCALS64+$M+8],$a1
2217 ldx [%sp+LOCALS64+$M+16],$a2
2218 ldx [%sp+LOCALS64+$M+24],$a3
2220 add %sp,LOCALS64+$S,$bp
2221 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(S, S, res_x);
2222 add %sp,LOCALS64+$S,$rp
2225 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, M);
2226 add %sp,LOCALS64+$S,$rp
2228 ldx [%sp+LOCALS64+$res_x],$a0 ! forward load
2229 ldx [%sp+LOCALS64+$res_x+8],$a1
2230 ldx [%sp+LOCALS64+$res_x+16],$a2
2231 ldx [%sp+LOCALS64+$res_x+24],$a3
2233 add %sp,LOCALS64+$res_y,$bp
2234 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, S, res_y);
2235 add %sp,LOCALS64+$res_y,$rp
2237 ! convert output to uint32_t[8]
2240 st $a0,[$rp_real] ! res_x
2245 st $t1,[$rp_real+12]
2246 st $a2,[$rp_real+16]
2247 st $t2,[$rp_real+20]
2248 st $a3,[$rp_real+24]
2249 st $t3,[$rp_real+28]
2251 ldx [%sp+LOCALS64+$res_z],$a0 ! forward load
2253 ldx [%sp+LOCALS64+$res_z+8],$a1
2255 ldx [%sp+LOCALS64+$res_z+16],$a2
2257 ldx [%sp+LOCALS64+$res_z+24],$a3
2259 st $acc0,[$rp_real+32] ! res_y
2260 st $t0, [$rp_real+32+4]
2261 st $acc1,[$rp_real+32+8]
2262 st $t1, [$rp_real+32+12]
2263 st $acc2,[$rp_real+32+16]
2264 st $t2, [$rp_real+32+20]
2265 st $acc3,[$rp_real+32+24]
2266 st $t3, [$rp_real+32+28]
2270 st $a0,[$rp_real+64] ! res_z
2272 st $t0,[$rp_real+64+4]
2274 st $a1,[$rp_real+64+8]
2275 st $t1,[$rp_real+64+12]
2276 st $a2,[$rp_real+64+16]
2277 st $t2,[$rp_real+64+20]
2278 st $a3,[$rp_real+64+24]
2279 st $t3,[$rp_real+64+28]
2283 .type ecp_nistz256_point_double_vis3,#function
2284 .size ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2287 ########################################################################
2288 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2289 # const P256_POINT *in2);
2291 my ($res_x,$res_y,$res_z,
2292 $in1_x,$in1_y,$in1_z,
2293 $in2_x,$in2_y,$in2_z,
2294 $H,$Hsqr,$R,$Rsqr,$Hcub,
2295 $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2296 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2298 # above map() describes stack layout with 18 temporary
2299 # 256-bit vectors on top. Then we reserve some space for
2300 # !in1infty, !in2infty and result of check for zero.
2304 ecp_nistz256_point_add_vis3:
2305 save %sp,-STACK64_FRAME-32*18-32,%sp
2310 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2311 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2313 ! convert input to uint64_t[4]
2314 ld [$bp],$a0 ! in2_x
2324 ld [$bp+32],$acc0 ! in2_y
2332 ld [$bp+32+16],$acc2
2336 ld [$bp+32+24],$acc3
2340 stx $a0,[%sp+LOCALS64+$in2_x]
2342 stx $a1,[%sp+LOCALS64+$in2_x+8]
2344 stx $a2,[%sp+LOCALS64+$in2_x+16]
2346 stx $a3,[%sp+LOCALS64+$in2_x+24]
2348 stx $acc0,[%sp+LOCALS64+$in2_y]
2350 stx $acc1,[%sp+LOCALS64+$in2_y+8]
2352 stx $acc2,[%sp+LOCALS64+$in2_y+16]
2353 stx $acc3,[%sp+LOCALS64+$in2_y+24]
2355 ld [$bp+64],$acc0 ! in2_z
2359 ld [$bp+64+16],$acc2
2361 ld [$bp+64+24],$acc3
2365 ld [$ap],$a0 ! in1_x
2381 stx $acc0,[%sp+LOCALS64+$in2_z]
2383 stx $acc1,[%sp+LOCALS64+$in2_z+8]
2385 stx $acc2,[%sp+LOCALS64+$in2_z+16]
2386 stx $acc3,[%sp+LOCALS64+$in2_z+24]
2388 or $acc1,$acc0,$acc0
2389 or $acc3,$acc2,$acc2
2390 or $acc2,$acc0,$acc0
2391 movrnz $acc0,-1,$acc0 ! !in2infty
2392 stx $acc0,[%fp+STACK_BIAS-8]
2395 ld [$ap+32],$acc0 ! in1_y
2402 ld [$ap+32+16],$acc2
2404 ld [$ap+32+24],$acc3
2408 stx $a0,[%sp+LOCALS64+$in1_x]
2410 stx $a1,[%sp+LOCALS64+$in1_x+8]
2412 stx $a2,[%sp+LOCALS64+$in1_x+16]
2414 stx $a3,[%sp+LOCALS64+$in1_x+24]
2416 stx $acc0,[%sp+LOCALS64+$in1_y]
2418 stx $acc1,[%sp+LOCALS64+$in1_y+8]
2420 stx $acc2,[%sp+LOCALS64+$in1_y+16]
2421 stx $acc3,[%sp+LOCALS64+$in1_y+24]
2423 ldx [%sp+LOCALS64+$in2_z],$a0 ! forward load
2424 ldx [%sp+LOCALS64+$in2_z+8],$a1
2425 ldx [%sp+LOCALS64+$in2_z+16],$a2
2426 ldx [%sp+LOCALS64+$in2_z+24],$a3
2428 ld [$ap+64],$acc0 ! in1_z
2432 ld [$ap+64+16],$acc2
2434 ld [$ap+64+24],$acc3
2442 stx $acc0,[%sp+LOCALS64+$in1_z]
2444 stx $acc1,[%sp+LOCALS64+$in1_z+8]
2446 stx $acc2,[%sp+LOCALS64+$in1_z+16]
2447 stx $acc3,[%sp+LOCALS64+$in1_z+24]
2449 or $acc1,$acc0,$acc0
2450 or $acc3,$acc2,$acc2
2451 or $acc2,$acc0,$acc0
2452 movrnz $acc0,-1,$acc0 ! !in1infty
2453 stx $acc0,[%fp+STACK_BIAS-16]
2455 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z2sqr, in2_z);
2456 add %sp,LOCALS64+$Z2sqr,$rp
2458 ldx [%sp+LOCALS64+$in1_z],$a0
2459 ldx [%sp+LOCALS64+$in1_z+8],$a1
2460 ldx [%sp+LOCALS64+$in1_z+16],$a2
2461 ldx [%sp+LOCALS64+$in1_z+24],$a3
2462 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
2463 add %sp,LOCALS64+$Z1sqr,$rp
2465 ldx [%sp+LOCALS64+$Z2sqr],$bi
2466 ldx [%sp+LOCALS64+$in2_z],$a0
2467 ldx [%sp+LOCALS64+$in2_z+8],$a1
2468 ldx [%sp+LOCALS64+$in2_z+16],$a2
2469 ldx [%sp+LOCALS64+$in2_z+24],$a3
2470 add %sp,LOCALS64+$Z2sqr,$bp
2471 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, Z2sqr, in2_z);
2472 add %sp,LOCALS64+$S1,$rp
2474 ldx [%sp+LOCALS64+$Z1sqr],$bi
2475 ldx [%sp+LOCALS64+$in1_z],$a0
2476 ldx [%sp+LOCALS64+$in1_z+8],$a1
2477 ldx [%sp+LOCALS64+$in1_z+16],$a2
2478 ldx [%sp+LOCALS64+$in1_z+24],$a3
2479 add %sp,LOCALS64+$Z1sqr,$bp
2480 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
2481 add %sp,LOCALS64+$S2,$rp
2483 ldx [%sp+LOCALS64+$S1],$bi
2484 ldx [%sp+LOCALS64+$in1_y],$a0
2485 ldx [%sp+LOCALS64+$in1_y+8],$a1
2486 ldx [%sp+LOCALS64+$in1_y+16],$a2
2487 ldx [%sp+LOCALS64+$in1_y+24],$a3
2488 add %sp,LOCALS64+$S1,$bp
2489 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, S1, in1_y);
2490 add %sp,LOCALS64+$S1,$rp
2492 ldx [%sp+LOCALS64+$S2],$bi
2493 ldx [%sp+LOCALS64+$in2_y],$a0
2494 ldx [%sp+LOCALS64+$in2_y+8],$a1
2495 ldx [%sp+LOCALS64+$in2_y+16],$a2
2496 ldx [%sp+LOCALS64+$in2_y+24],$a3
2497 add %sp,LOCALS64+$S2,$bp
2498 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
2499 add %sp,LOCALS64+$S2,$rp
2501 ldx [%sp+LOCALS64+$Z2sqr],$bi ! forward load
2502 ldx [%sp+LOCALS64+$in1_x],$a0
2503 ldx [%sp+LOCALS64+$in1_x+8],$a1
2504 ldx [%sp+LOCALS64+$in1_x+16],$a2
2505 ldx [%sp+LOCALS64+$in1_x+24],$a3
2507 add %sp,LOCALS64+$S1,$bp
2508 call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, S1);
2509 add %sp,LOCALS64+$R,$rp
2511 or $acc1,$acc0,$acc0 ! see if result is zero
2512 or $acc3,$acc2,$acc2
2513 or $acc2,$acc0,$acc0
2514 stx $acc0,[%fp+STACK_BIAS-24]
2516 add %sp,LOCALS64+$Z2sqr,$bp
2517 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U1, in1_x, Z2sqr);
2518 add %sp,LOCALS64+$U1,$rp
2520 ldx [%sp+LOCALS64+$Z1sqr],$bi
2521 ldx [%sp+LOCALS64+$in2_x],$a0
2522 ldx [%sp+LOCALS64+$in2_x+8],$a1
2523 ldx [%sp+LOCALS64+$in2_x+16],$a2
2524 ldx [%sp+LOCALS64+$in2_x+24],$a3
2525 add %sp,LOCALS64+$Z1sqr,$bp
2526 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in2_x, Z1sqr);
2527 add %sp,LOCALS64+$U2,$rp
2529 ldx [%sp+LOCALS64+$R],$a0 ! forward load
2530 ldx [%sp+LOCALS64+$R+8],$a1
2531 ldx [%sp+LOCALS64+$R+16],$a2
2532 ldx [%sp+LOCALS64+$R+24],$a3
2534 add %sp,LOCALS64+$U1,$bp
2535 call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, U1);
2536 add %sp,LOCALS64+$H,$rp
2538 or $acc1,$acc0,$acc0 ! see if result is zero
2539 or $acc3,$acc2,$acc2
2540 orcc $acc2,$acc0,$acc0
2542 bne,pt %xcc,.Ladd_proceed_vis3 ! is_equal(U1,U2)?
2545 ldx [%fp+STACK_BIAS-8],$t0
2546 ldx [%fp+STACK_BIAS-16],$t1
2547 ldx [%fp+STACK_BIAS-24],$t2
2549 be,pt %xcc,.Ladd_proceed_vis3 ! (in1infty || in2infty)?
2552 be,a,pt %xcc,.Ldouble_shortcut_vis3 ! is_equal(S1,S2)?
2553 add %sp,32*(18-10)+32,%sp ! difference in frame sizes
2558 st %g0,[$rp_real+12]
2559 st %g0,[$rp_real+16]
2560 st %g0,[$rp_real+20]
2561 st %g0,[$rp_real+24]
2562 st %g0,[$rp_real+28]
2563 st %g0,[$rp_real+32]
2564 st %g0,[$rp_real+32+4]
2565 st %g0,[$rp_real+32+8]
2566 st %g0,[$rp_real+32+12]
2567 st %g0,[$rp_real+32+16]
2568 st %g0,[$rp_real+32+20]
2569 st %g0,[$rp_real+32+24]
2570 st %g0,[$rp_real+32+28]
2571 st %g0,[$rp_real+64]
2572 st %g0,[$rp_real+64+4]
2573 st %g0,[$rp_real+64+8]
2574 st %g0,[$rp_real+64+12]
2575 st %g0,[$rp_real+64+16]
2576 st %g0,[$rp_real+64+20]
2577 st %g0,[$rp_real+64+24]
2578 st %g0,[$rp_real+64+28]
2584 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2585 add %sp,LOCALS64+$Rsqr,$rp
2587 ldx [%sp+LOCALS64+$H],$bi
2588 ldx [%sp+LOCALS64+$in1_z],$a0
2589 ldx [%sp+LOCALS64+$in1_z+8],$a1
2590 ldx [%sp+LOCALS64+$in1_z+16],$a2
2591 ldx [%sp+LOCALS64+$in1_z+24],$a3
2592 add %sp,LOCALS64+$H,$bp
2593 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2594 add %sp,LOCALS64+$res_z,$rp
2596 ldx [%sp+LOCALS64+$H],$a0
2597 ldx [%sp+LOCALS64+$H+8],$a1
2598 ldx [%sp+LOCALS64+$H+16],$a2
2599 ldx [%sp+LOCALS64+$H+24],$a3
2600 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2601 add %sp,LOCALS64+$Hsqr,$rp
2603 ldx [%sp+LOCALS64+$res_z],$bi
2604 ldx [%sp+LOCALS64+$in2_z],$a0
2605 ldx [%sp+LOCALS64+$in2_z+8],$a1
2606 ldx [%sp+LOCALS64+$in2_z+16],$a2
2607 ldx [%sp+LOCALS64+$in2_z+24],$a3
2608 add %sp,LOCALS64+$res_z,$bp
2609 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, res_z, in2_z);
2610 add %sp,LOCALS64+$res_z,$rp
2612 ldx [%sp+LOCALS64+$H],$bi
2613 ldx [%sp+LOCALS64+$Hsqr],$a0
2614 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2615 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2616 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2617 add %sp,LOCALS64+$H,$bp
2618 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2619 add %sp,LOCALS64+$Hcub,$rp
2621 ldx [%sp+LOCALS64+$U1],$bi
2622 ldx [%sp+LOCALS64+$Hsqr],$a0
2623 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2624 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2625 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2626 add %sp,LOCALS64+$U1,$bp
2627 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, U1, Hsqr);
2628 add %sp,LOCALS64+$U2,$rp
2630 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2631 add %sp,LOCALS64+$Hsqr,$rp
2633 add %sp,LOCALS64+$Rsqr,$bp
2634 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2635 add %sp,LOCALS64+$res_x,$rp
2637 add %sp,LOCALS64+$Hcub,$bp
2638 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2639 add %sp,LOCALS64+$res_x,$rp
2641 ldx [%sp+LOCALS64+$S1],$bi ! forward load
2642 ldx [%sp+LOCALS64+$Hcub],$a0
2643 ldx [%sp+LOCALS64+$Hcub+8],$a1
2644 ldx [%sp+LOCALS64+$Hcub+16],$a2
2645 ldx [%sp+LOCALS64+$Hcub+24],$a3
2647 add %sp,LOCALS64+$U2,$bp
2648 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
2649 add %sp,LOCALS64+$res_y,$rp
2651 add %sp,LOCALS64+$S1,$bp
2652 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S1, Hcub);
2653 add %sp,LOCALS64+$S2,$rp
2655 ldx [%sp+LOCALS64+$R],$bi
2656 ldx [%sp+LOCALS64+$res_y],$a0
2657 ldx [%sp+LOCALS64+$res_y+8],$a1
2658 ldx [%sp+LOCALS64+$res_y+16],$a2
2659 ldx [%sp+LOCALS64+$res_y+24],$a3
2660 add %sp,LOCALS64+$R,$bp
2661 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
2662 add %sp,LOCALS64+$res_y,$rp
2664 add %sp,LOCALS64+$S2,$bp
2665 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
2666 add %sp,LOCALS64+$res_y,$rp
2668 ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
2669 ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
2671 for($i=0;$i<96;$i+=16) { # conditional moves
ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
movrz $t1,$acc2,$acc0
movrz $t1,$acc3,$acc1
movrz $t2,$acc4,$acc0
movrz $t2,$acc5,$acc1
st $acc0,[$rp_real+$i]
st $acc2,[$rp_real+$i+4]
st $acc1,[$rp_real+$i+8]
st $acc3,[$rp_real+$i+12]
.type ecp_nistz256_point_add_vis3,#function
.size ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
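# The conditional-move loops above (and the analogous ones in the affine
# variant below) select the final output without branches: $t1/$t2 hold -1
# when the corresponding input is *not* the point at infinity and 0 when it
# is, so movrz overrides the computed limb with in2 when in1 is at infinity
# and with in1 when in2 is at infinity. In the affine routine the missing
# in2_z limbs are taken from .Lone_mont_vis3 instead. A hypothetical plain
# Perl model of the per-limb selection (ref_select_limb is illustrative only
# and is never called by this generator):
sub ref_select_limb {
	my ($res, $in1, $in2, $not_in1infty, $not_in2infty) = @_;
	my $out = $res;
	$out = $in2 if ($not_in1infty == 0);	# movrz $t1,<in2 limb>,<out>
	$out = $in1 if ($not_in2infty == 0);	# movrz $t2,<in1 limb>,<out>
	return $out;
}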
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
# const P256_POINT_AFFINE *in2);
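# As a rough guide to the schedule of p256_mul_mont/p256_sqr_mont/p256_sub
# calls emitted below, here is a hypothetical plain-Perl model of the mixed
# Jacobian plus affine addition (Math::BigInt, no Montgomery domain, and no
# handling of the point-at-infinity cases that the conditional moves at the
# end of the routine take care of). ref_point_add_affine is illustrative
# only and is never called by this generator.
use Math::BigInt;
sub ref_point_add_affine {
	my ($X1,$Y1,$Z1,$x2,$y2) = map { Math::BigInt->new($_) } @_;
	my $p = Math::BigInt->from_hex(
	    "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff");
	my $msub = sub { ($_[0] - $_[1]) % $p };	# bmod keeps result >= 0

	my $Z1sqr = ($Z1 * $Z1) % $p;		# p256_sqr_mont(Z1sqr, in1_z)
	my $U2    = ($Z1sqr * $x2) % $p;	# p256_mul_mont(U2, Z1sqr, in2_x)
	my $H     = $msub->($U2, $X1);		# p256_sub(H, U2, in1_x)
	my $S2    = ($Z1sqr * $Z1) % $p;	# p256_mul_mont(S2, Z1sqr, in1_z)
	my $Z3    = ($H * $Z1) % $p;		# p256_mul_mont(res_z, H, in1_z)
	$S2       = ($S2 * $y2) % $p;		# p256_mul_mont(S2, S2, in2_y)
	my $R     = $msub->($S2, $Y1);		# p256_sub(R, S2, in1_y)
	my $Hsqr  = ($H * $H) % $p;		# p256_sqr_mont(Hsqr, H)
	my $Rsqr  = ($R * $R) % $p;		# p256_sqr_mont(Rsqr, R)
	my $Hcub  = ($Hsqr * $H) % $p;		# p256_mul_mont(Hcub, Hsqr, H)
	$U2       = ($X1 * $Hsqr) % $p;		# p256_mul_mont(U2, in1_x, Hsqr)
	my $X3    = $msub->($Rsqr, (2 * $U2) % $p);	# mul_by_2 + sub
	$X3       = $msub->($X3, $Hcub);	# p256_sub(res_x, res_x, Hcub)
	my $Y3    = $msub->($U2, $X3);		# p256_sub(res_y, U2, res_x)
	$S2       = ($Y1 * $Hcub) % $p;		# p256_mul_mont(S2, in1_y, Hcub)
	$Y3       = ($Y3 * $R) % $p;		# p256_mul_mont(res_y, res_y, R)
	$Y3       = $msub->($Y3, $S2);		# p256_sub(res_y, res_y, S2)
	return ($X3,$Y3,$Z3);
}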
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;
# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty and !in2infty.
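# A hypothetical debugging aid (the ECP_NISTZ256_DEBUG_LAYOUT knob is an
# assumption, not an existing interface); it writes to STDERR only, so it
# cannot leak into the generated assembly, and simply dumps the offsets
# implied by the map() above together with the two flag slots kept just
# below the frame pointer.
if ($ENV{ECP_NISTZ256_DEBUG_LAYOUT}) {
	my @slots = qw(res_x res_y res_z in1_x in1_y in1_z in2_x in2_y
		       U2 S2 H R Hsqr Hcub Rsqr);
	printf STDERR "%-5s = LOCALS64+%3d\n", $slots[$_], 32*$_ for (0..14);
	print STDERR "!in1infty at [%fp+STACK_BIAS-16], !in2infty at [%fp+STACK_BIAS-8]\n";
}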
ecp_nistz256_point_add_affine_vis3:
save %sp,-STACK64_FRAME-32*15-32,%sp
sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
! convert input to uint64_t[4]
ld [$bp],$a0 ! in2_x
ld [$bp+32],$acc0 ! in2_y
ld [$bp+32+16],$acc2
ld [$bp+32+24],$acc3
stx $a0,[%sp+LOCALS64+$in2_x]
stx $a1,[%sp+LOCALS64+$in2_x+8]
stx $a2,[%sp+LOCALS64+$in2_x+16]
stx $a3,[%sp+LOCALS64+$in2_x+24]
stx $acc0,[%sp+LOCALS64+$in2_y]
stx $acc1,[%sp+LOCALS64+$in2_y+8]
stx $acc2,[%sp+LOCALS64+$in2_y+16]
stx $acc3,[%sp+LOCALS64+$in2_y+24]
or $acc1,$acc0,$acc0
or $acc3,$acc2,$acc2
or $acc2,$acc0,$acc0
movrnz $a0,-1,$a0 ! !in2infty
stx $a0,[%fp+STACK_BIAS-8]
ld [$ap],$a0 ! in1_x
ld [$ap+32],$acc0 ! in1_y
ld [$ap+32+16],$acc2
ld [$ap+32+24],$acc3
stx $a0,[%sp+LOCALS64+$in1_x]
stx $a1,[%sp+LOCALS64+$in1_x+8]
stx $a2,[%sp+LOCALS64+$in1_x+16]
stx $a3,[%sp+LOCALS64+$in1_x+24]
stx $acc0,[%sp+LOCALS64+$in1_y]
stx $acc1,[%sp+LOCALS64+$in1_y+8]
stx $acc2,[%sp+LOCALS64+$in1_y+16]
stx $acc3,[%sp+LOCALS64+$in1_y+24]
ld [$ap+64],$a0 ! in1_z
stx $a0,[%sp+LOCALS64+$in1_z]
stx $a1,[%sp+LOCALS64+$in1_z+8]
stx $a2,[%sp+LOCALS64+$in1_z+16]
stx $a3,[%sp+LOCALS64+$in1_z+24]
movrnz $t0,-1,$t0 ! !in1infty
stx $t0,[%fp+STACK_BIAS-16]
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
add %sp,LOCALS64+$Z1sqr,$rp
ldx [%sp+LOCALS64+$in2_x],$bi
add %sp,LOCALS64+$in2_x,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, Z1sqr, in2_x);
add %sp,LOCALS64+$U2,$rp
ldx [%sp+LOCALS64+$Z1sqr],$bi ! forward load
ldx [%sp+LOCALS64+$in1_z],$a0
ldx [%sp+LOCALS64+$in1_z+8],$a1
ldx [%sp+LOCALS64+$in1_z+16],$a2
ldx [%sp+LOCALS64+$in1_z+24],$a3
add %sp,LOCALS64+$in1_x,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, in1_x);
add %sp,LOCALS64+$H,$rp
add %sp,LOCALS64+$Z1sqr,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$H],$bi
ldx [%sp+LOCALS64+$in1_z],$a0
ldx [%sp+LOCALS64+$in1_z+8],$a1
ldx [%sp+LOCALS64+$in1_z+16],$a2
ldx [%sp+LOCALS64+$in1_z+24],$a3
add %sp,LOCALS64+$H,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
add %sp,LOCALS64+$res_z,$rp
ldx [%sp+LOCALS64+$S2],$bi
ldx [%sp+LOCALS64+$in2_y],$a0
ldx [%sp+LOCALS64+$in2_y+8],$a1
ldx [%sp+LOCALS64+$in2_y+16],$a2
ldx [%sp+LOCALS64+$in2_y+24],$a3
add %sp,LOCALS64+$S2,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$H],$a0 ! forward load
ldx [%sp+LOCALS64+$H+8],$a1
ldx [%sp+LOCALS64+$H+16],$a2
ldx [%sp+LOCALS64+$H+24],$a3
add %sp,LOCALS64+$in1_y,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, in1_y);
add %sp,LOCALS64+$R,$rp
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
add %sp,LOCALS64+$Hsqr,$rp
ldx [%sp+LOCALS64+$R],$a0
ldx [%sp+LOCALS64+$R+8],$a1
ldx [%sp+LOCALS64+$R+16],$a2
ldx [%sp+LOCALS64+$R+24],$a3
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
add %sp,LOCALS64+$Rsqr,$rp
ldx [%sp+LOCALS64+$H],$bi
ldx [%sp+LOCALS64+$Hsqr],$a0
ldx [%sp+LOCALS64+$Hsqr+8],$a1
ldx [%sp+LOCALS64+$Hsqr+16],$a2
ldx [%sp+LOCALS64+$Hsqr+24],$a3
add %sp,LOCALS64+$H,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
add %sp,LOCALS64+$Hcub,$rp
ldx [%sp+LOCALS64+$Hsqr],$bi
ldx [%sp+LOCALS64+$in1_x],$a0
ldx [%sp+LOCALS64+$in1_x+8],$a1
ldx [%sp+LOCALS64+$in1_x+16],$a2
ldx [%sp+LOCALS64+$in1_x+24],$a3
add %sp,LOCALS64+$Hsqr,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in1_x, Hsqr);
add %sp,LOCALS64+$U2,$rp
call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
add %sp,LOCALS64+$Hsqr,$rp
add %sp,LOCALS64+$Rsqr,$bp
call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
add %sp,LOCALS64+$res_x,$rp
add %sp,LOCALS64+$Hcub,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
add %sp,LOCALS64+$res_x,$rp
ldx [%sp+LOCALS64+$Hcub],$bi ! forward load
ldx [%sp+LOCALS64+$in1_y],$a0
ldx [%sp+LOCALS64+$in1_y+8],$a1
ldx [%sp+LOCALS64+$in1_y+16],$a2
ldx [%sp+LOCALS64+$in1_y+24],$a3
add %sp,LOCALS64+$U2,$bp
call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
add %sp,LOCALS64+$res_y,$rp
add %sp,LOCALS64+$Hcub,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, in1_y, Hcub);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$R],$bi
ldx [%sp+LOCALS64+$res_y],$a0
ldx [%sp+LOCALS64+$res_y+8],$a1
ldx [%sp+LOCALS64+$res_y+16],$a2
ldx [%sp+LOCALS64+$res_y+24],$a3
add %sp,LOCALS64+$R,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
add %sp,LOCALS64+$res_y,$rp
add %sp,LOCALS64+$S2,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
add %sp,LOCALS64+$res_y,$rp
ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
1: call .+8
add %o7,.Lone_mont_vis3-1b,$bp
for($i=0;$i<64;$i+=16) { # conditional moves
ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
movrz $t1,$acc2,$acc0
movrz $t1,$acc3,$acc1
movrz $t2,$acc4,$acc0
movrz $t2,$acc5,$acc1
st $acc0,[$rp_real+$i]
st $acc2,[$rp_real+$i+4]
st $acc1,[$rp_real+$i+8]
st $acc3,[$rp_real+$i+12]
for(;$i<96;$i+=16) {
ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
ldx [$bp+$i-64],$acc2 ! "in2"
ldx [$bp+$i-64+8],$acc3
ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
movrz $t1,$acc2,$acc0
movrz $t1,$acc3,$acc1
movrz $t2,$acc4,$acc0
movrz $t2,$acc5,$acc1
st $acc0,[$rp_real+$i]
st $acc2,[$rp_real+$i+4]
st $acc1,[$rp_real+$i+8]
st $acc3,[$rp_real+$i+12]
.type ecp_nistz256_point_add_affine_vis3,#function
.size ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
.Lone_mont_vis3:
.long 0x00000000,0x00000001, 0xffffffff,0x00000000
.long 0xffffffff,0xffffffff, 0x00000000,0xfffffffe
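! The two rows above encode 2^256 mod P, i.e. the value 1 in the Montgomery
! domain; it stands in for the implicit Z coordinate of the affine in2 in
! the conditional moves above.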
# The purpose of these subroutines is to encode VIS instructions explicitly,
# so that the module can be compiled without specifying VIS extensions on
# the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. The idea is to
# keep open the option of producing a "universal" binary and letting the
# program detect at run-time whether the current CPU is VIS-capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = ( "addxc" => 0x011,
               "addxccc" => 0x013,
               "umulxhi" => 0x016 );
    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}
	return sprintf ".word\t0x%08x !%s",
		       0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2, $ref;
    } else {
	return $ref;
    }
}
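# Hypothetical self-test hook (ECP_NISTZ256_SELFTEST is an assumption, not
# an existing switch); it writes to STDERR only, so the generated assembly
# is unaffected. Per the encoding above, "addxc %o0,%o1,%o2" becomes
# 0x81b00000|10<<25|8<<14|0x011<<5|9 = 0x95b20229.
if ($ENV{ECP_NISTZ256_SELFTEST}) {
	print STDERR unvis3("addxc","%o0","%o1","%o2"), "\n";
	# expected: .word 0x95b20229 !addxc %o0,%o1,%o2
	print STDERR unvis3("fictitious","%o0","%o1","%o2"), "\n";
	# unknown mnemonics fall through unchanged
}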
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;
	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)/ge;
	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";