2 # Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
17 # ECP_NISTZ256 module for SPARCv9.
21 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
22 # http://eprint.iacr.org/2013/816. In the process of adaptation
23 # original .c module was made 32-bit savvy in order to make this
24 # implementation possible.
26 # with/without -DECP_NISTZ256_ASM
27 # UltraSPARC III +12-18%
28 # SPARC T4 +99-550% (+66-150% on 32-bit Solaris)
30 # Ranges denote minimum and maximum improvement coefficients depending
31 # on benchmark. Lower coefficients are for ECDSA sign, server-side
32 # operation. Keep in mind that +200% means 3x improvement.
35 open STDOUT,">$output";
38 #include "sparc_arch.h"
40 #define LOCALS (STACK_BIAS+STACK_FRAME)
42 .register %g2,#scratch
43 .register %g3,#scratch
44 # define STACK64_FRAME STACK_FRAME
45 # define LOCALS64 LOCALS
47 # define STACK64_FRAME (2047+192)
48 # define LOCALS64 STACK64_FRAME
51 .section ".text",#alloc,#execinstr
53 ########################################################################
54 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
56 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
57 open TABLE,"<ecp_nistz256_table.c" or
58 open TABLE,"<${dir}../ecp_nistz256_table.c" or
59 die "failed to open ecp_nistz256_table.c:",$!;
64 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
68 # See ecp_nistz256_table.c for the explanation of why it's 64*16*37.
69 # 64*16*37-1 is because $#arr returns the last valid index of @arr, not
70 # the number of elements.
71 die "insane number of elements" if ($#arr != 64*16*37-1);
74 .globl ecp_nistz256_precomputed
76 ecp_nistz256_precomputed:
78 ########################################################################
79 # this conversion smashes P256_POINT_AFFINE by individual bytes with
80 # 64 byte interval, similar to
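#	1111222233334444
#	1234123412341234
#
# i.e. (roughly) byte i of table entry j ends up at offset i*64+j of the
# output block, so that ecp_nistz256_gather_w7 below can pick one entry
# with 64 byte-granular loads at stride 64.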
84 @tbl = splice(@arr,0,64*16);
85 for($i=0;$i<64;$i++) {
87 for($j=0;$j<64;$j++) {
88 push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
91 $code.=join(',',map { sprintf "0x%02x",$_} @line);
97 my ($rp,$ap,$bp)=map("%i$_",(0..2));
98 my @acc=map("%l$_",(0..7));
99 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
100 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
101 my ($rp_real,$ap_real)=("%g2","%g3");
104 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
106 .LRR: ! 2^512 mod P precomputed for NIST P256 polynomial
107 .long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
108 .long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
110 .long 1,0,0,0,0,0,0,0
111 .asciz "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
113 ! void ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
114 .globl ecp_nistz256_to_mont
116 ecp_nistz256_to_mont:
117 save %sp,-STACK_FRAME,%sp
121 call __ecp_nistz256_mul_mont
125 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
127 ! void ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
128 .globl ecp_nistz256_from_mont
130 ecp_nistz256_from_mont:
131 save %sp,-STACK_FRAME,%sp
135 call __ecp_nistz256_mul_mont
139 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
141 ! void ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
142 ! const BN_ULONG %i2[8]);
143 .globl ecp_nistz256_mul_mont
145 ecp_nistz256_mul_mont:
146 save %sp,-STACK_FRAME,%sp
148 call __ecp_nistz256_mul_mont
152 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
154 ! void ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
155 .globl ecp_nistz256_sqr_mont
157 ecp_nistz256_sqr_mont:
158 save %sp,-STACK_FRAME,%sp
160 call __ecp_nistz256_mul_mont
164 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
167 ########################################################################
168 # Special thing to keep in mind is that $t0-$t7 hold 64-bit values,
169 # while all others are meant to keep 32. "Meant to" means that additions
170 # to @acc[0-7] do "contaminate" upper bits, but they are cleared before
171 # they can affect outcome (follow 'and' with $mask). Also keep in mind
172 # that addition with carry is addition with 32-bit carry, even though
173 # CPU is 64-bit. [Addition with 64-bit carry was introduced in T3, see
174 # below for VIS3 code paths.]
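# For example (illustration only): after "addcc @acc[1],$t1,@acc[1]" the
# upper half of the 64-bit register @acc[1] is garbage, but only the
# 32-bit carry is consumed by the following addccc, and the subsequent
# "and @acc[1],$mask,@acc[1]" clears the upper half before @acc[1] is
# used as a 32-bit word again; in C terms roughly
#	acc1 = (acc1 + t1) & 0xffffffff;	/* carry goes on to acc2 */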
178 __ecp_nistz256_mul_mont:
179 ld [$bp+0],$bi ! b[0]
182 srl $mask,0,$mask ! 0xffffffff
190 mulx $a0,$bi,$t0 ! a[0-7]*b[0], 64-bit results
198 srlx $t0,32,@acc[1] ! extract high parts
205 srlx $t7,32,@acc[0] ! "@acc[8]"
208 for($i=1;$i<8;$i++) {
210 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
211 ld [$bp+4*$i],$bi ! b[$i]
212 ld [$ap+4],$t1 ! re-load a[1-7]
213 addccc @acc[2],$t2,@acc[2]
214 addccc @acc[3],$t3,@acc[3]
217 addccc @acc[4],$t4,@acc[4]
218 addccc @acc[5],$t5,@acc[5]
221 addccc @acc[6],$t6,@acc[6]
222 addccc @acc[7],$t7,@acc[7]
225 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
228 # Reduction iteration is normally performed by accumulating
229 # result of multiplication of modulus by "magic" digit [and
230 # omitting least significant word, which is guaranteed to
231 # be 0], but thanks to special form of modulus and "magic"
232 # digit being equal to least significant word, it can be
233 # performed with additions and subtractions alone. Indeed:
235 # ffff.0001.0000.0000.0000.ffff.ffff.ffff
236 # *                                        abcd
237 # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
239 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
240 # rewrite the above as:
242 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
243 # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
244 # - abcd.0000.0000.0000.0000.0000.0000.abcd
246 # or marking redundant operations:
248 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
249 # + abcd.0000.abcd.0000.0000.abcd.----.----.----
250 # - abcd.----.----.----.----.----.----.----
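# With r[0]=abcd the columns above boil down to (illustration only):
#
#	r[3] += r[0];  r[6] += r[0];  r[8] += r[0];	/* "+ abcd" columns */
#	r[7] -= r[0];					/* "- abcd" column  */
#
# with the carries/borrows rippling up to the top-most carry word,
# which is exactly the addcc/addccc/subcc chain emitted below.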
253 ! multiplication-less reduction
254 addcc @acc[3],$t0,@acc[3] ! r[3]+=r[0]
255 addccc @acc[4],%g0,@acc[4] ! r[4]+=0
256 and @acc[1],$mask,@acc[1]
257 and @acc[2],$mask,@acc[2]
258 addccc @acc[5],%g0,@acc[5] ! r[5]+=0
259 addccc @acc[6],$t0,@acc[6] ! r[6]+=r[0]
260 and @acc[3],$mask,@acc[3]
261 and @acc[4],$mask,@acc[4]
262 addccc @acc[7],%g0,@acc[7] ! r[7]+=0
263 addccc @acc[0],$t0,@acc[0] ! r[8]+=r[0] "@acc[8]"
264 and @acc[5],$mask,@acc[5]
265 and @acc[6],$mask,@acc[6]
266 addc $carry,%g0,$carry ! top-most carry
267 subcc @acc[7],$t0,@acc[7] ! r[7]-=r[0]
268 subccc @acc[0],%g0,@acc[0] ! r[8]-=0 "@acc[8]"
269 subc $carry,%g0,$carry ! top-most carry
270 and @acc[7],$mask,@acc[7]
271 and @acc[0],$mask,@acc[0] ! "@acc[8]"
273 push(@acc,shift(@acc)); # rotate registers to "omit" acc[0]
275 mulx $a0,$bi,$t0 ! a[0-7]*b[$i], 64-bit results
283 add @acc[0],$t0,$t0 ! accumulate low parts, can't overflow
285 srlx $t0,32,@acc[1] ! extract high parts
298 srlx $t7,32,@acc[0] ! "@acc[8]"
302 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
303 addccc @acc[2],$t2,@acc[2]
304 addccc @acc[3],$t3,@acc[3]
305 addccc @acc[4],$t4,@acc[4]
306 addccc @acc[5],$t5,@acc[5]
307 addccc @acc[6],$t6,@acc[6]
308 addccc @acc[7],$t7,@acc[7]
309 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
312 addcc @acc[3],$t0,@acc[3] ! multiplication-less reduction
313 addccc @acc[4],%g0,@acc[4]
314 addccc @acc[5],%g0,@acc[5]
315 addccc @acc[6],$t0,@acc[6]
316 addccc @acc[7],%g0,@acc[7]
317 addccc @acc[0],$t0,@acc[0] ! "@acc[8]"
318 addc $carry,%g0,$carry
319 subcc @acc[7],$t0,@acc[7]
320 subccc @acc[0],%g0,@acc[0] ! "@acc[8]"
321 subc $carry,%g0,$carry ! top-most carry
323 push(@acc,shift(@acc)); # rotate registers to omit acc[0]
325 ! Final step is "if result > mod, subtract mod", but we do it
326 ! "other way around", namely subtract modulus from result
327 ! and if it borrowed, add modulus back.
329 subcc @acc[0],-1,@acc[0] ! subtract modulus
330 subccc @acc[1],-1,@acc[1]
331 subccc @acc[2],-1,@acc[2]
332 subccc @acc[3],0,@acc[3]
333 subccc @acc[4],0,@acc[4]
334 subccc @acc[5],0,@acc[5]
335 subccc @acc[6],1,@acc[6]
336 subccc @acc[7],-1,@acc[7]
337 subc $carry,0,$carry ! broadcast borrow bit
339 ! Note that because mod has special form, i.e. consists of
340 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
341 ! using value of broadcasted borrow and the borrow bit itself.
342 ! To minimize dependency chain we first broadcast and then
343 ! extract the bit by negating (follow $bi).
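! E.g. if the subtraction above did borrow, $carry is the broadcast
! value 0xffffffff and $bi=-$carry is 1, so the words added back below
! are ffffffff,ffffffff,ffffffff,0,0,0,1,ffffffff, i.e. exactly the
! modulus; otherwise both are 0 and nothing is added.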
345 addcc @acc[0],$carry,@acc[0] ! add modulus or zero
346 addccc @acc[1],$carry,@acc[1]
349 addccc @acc[2],$carry,@acc[2]
351 addccc @acc[3],0,@acc[3]
353 addccc @acc[4],0,@acc[4]
355 addccc @acc[5],0,@acc[5]
357 addccc @acc[6],$bi,@acc[6]
359 addc @acc[7],$carry,@acc[7]
363 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
365 ! void ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
366 ! const BN_ULONG %i2[8]);
367 .globl ecp_nistz256_add
370 save %sp,-STACK_FRAME,%sp
378 call __ecp_nistz256_add
382 .size ecp_nistz256_add,.-ecp_nistz256_add
386 ld [$bp+0],$t0 ! b[0]
390 addcc @acc[0],$t0,@acc[0]
393 addccc @acc[1],$t1,@acc[1]
396 addccc @acc[2],$t2,@acc[2]
397 addccc @acc[3],$t3,@acc[3]
398 addccc @acc[4],$t4,@acc[4]
399 addccc @acc[5],$t5,@acc[5]
400 addccc @acc[6],$t6,@acc[6]
401 addccc @acc[7],$t7,@acc[7]
402 subc %g0,%g0,$carry ! broadcast carry bit
406 ! if a+b carries, subtract modulus.
408 ! Note that because mod has special form, i.e. consists of
409 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
410 ! using value of broadcasted carry and the carry bit itself.
411 ! To minimize dependency chain we first broadcast and then
412 ! extract the bit by negating (follow $bi).
414 subcc @acc[0],$carry,@acc[0] ! subtract synthesized modulus
415 subccc @acc[1],$carry,@acc[1]
418 subccc @acc[2],$carry,@acc[2]
420 subccc @acc[3],0,@acc[3]
422 subccc @acc[4],0,@acc[4]
424 subccc @acc[5],0,@acc[5]
426 subccc @acc[6],$bi,@acc[6]
428 subc @acc[7],$carry,@acc[7]
432 .size __ecp_nistz256_add,.-__ecp_nistz256_add
434 ! void ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
435 .globl ecp_nistz256_mul_by_2
437 ecp_nistz256_mul_by_2:
438 save %sp,-STACK_FRAME,%sp
446 call __ecp_nistz256_mul_by_2
450 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
453 __ecp_nistz256_mul_by_2:
454 addcc @acc[0],@acc[0],@acc[0] ! a+a=2*a
455 addccc @acc[1],@acc[1],@acc[1]
456 addccc @acc[2],@acc[2],@acc[2]
457 addccc @acc[3],@acc[3],@acc[3]
458 addccc @acc[4],@acc[4],@acc[4]
459 addccc @acc[5],@acc[5],@acc[5]
460 addccc @acc[6],@acc[6],@acc[6]
461 addccc @acc[7],@acc[7],@acc[7]
463 subc %g0,%g0,$carry ! broadcast carry bit
464 .size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
466 ! void ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
467 .globl ecp_nistz256_mul_by_3
469 ecp_nistz256_mul_by_3:
470 save %sp,-STACK_FRAME,%sp
478 call __ecp_nistz256_mul_by_3
482 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
485 __ecp_nistz256_mul_by_3:
486 addcc @acc[0],@acc[0],$t0 ! a+a=2*a
487 addccc @acc[1],@acc[1],$t1
488 addccc @acc[2],@acc[2],$t2
489 addccc @acc[3],@acc[3],$t3
490 addccc @acc[4],@acc[4],$t4
491 addccc @acc[5],@acc[5],$t5
492 addccc @acc[6],@acc[6],$t6
493 addccc @acc[7],@acc[7],$t7
494 subc %g0,%g0,$carry ! broadcast carry bit
496 subcc $t0,$carry,$t0 ! .Lreduce_by_sub but without stores
498 subccc $t1,$carry,$t1
499 subccc $t2,$carry,$t2
506 addcc $t0,@acc[0],@acc[0] ! 2*a+a=3*a
507 addccc $t1,@acc[1],@acc[1]
508 addccc $t2,@acc[2],@acc[2]
509 addccc $t3,@acc[3],@acc[3]
510 addccc $t4,@acc[4],@acc[4]
511 addccc $t5,@acc[5],@acc[5]
512 addccc $t6,@acc[6],@acc[6]
513 addccc $t7,@acc[7],@acc[7]
515 subc %g0,%g0,$carry ! broadcast carry bit
516 .size __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
518 ! void ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
519 ! const BN_ULONG %i2[8]);
520 .globl ecp_nistz256_sub
523 save %sp,-STACK_FRAME,%sp
531 call __ecp_nistz256_sub_from
535 .size ecp_nistz256_sub,.-ecp_nistz256_sub
537 ! void ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
538 .globl ecp_nistz256_neg
541 save %sp,-STACK_FRAME,%sp
550 call __ecp_nistz256_sub_from
554 .size ecp_nistz256_neg,.-ecp_nistz256_neg
557 __ecp_nistz256_sub_from:
558 ld [$bp+0],$t0 ! b[0]
562 subcc @acc[0],$t0,@acc[0]
565 subccc @acc[1],$t1,@acc[1]
566 subccc @acc[2],$t2,@acc[2]
569 subccc @acc[3],$t3,@acc[3]
570 subccc @acc[4],$t4,@acc[4]
571 subccc @acc[5],$t5,@acc[5]
572 subccc @acc[6],$t6,@acc[6]
573 subccc @acc[7],$t7,@acc[7]
574 subc %g0,%g0,$carry ! broadcast borrow bit
578 ! if a-b borrows, add modulus.
580 ! Note that because mod has special form, i.e. consists of
581 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
582 ! using value of broadcasted borrow and the borrow bit itself.
583 ! To minimize dependency chain we first broadcast and then
584 ! extract the bit by negating (follow $bi).
586 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
587 addccc @acc[1],$carry,@acc[1]
590 addccc @acc[2],$carry,@acc[2]
592 addccc @acc[3],0,@acc[3]
594 addccc @acc[4],0,@acc[4]
596 addccc @acc[5],0,@acc[5]
598 addccc @acc[6],$bi,@acc[6]
600 addc @acc[7],$carry,@acc[7]
604 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
607 __ecp_nistz256_sub_morf:
608 ld [$bp+0],$t0 ! b[0]
612 subcc $t0,@acc[0],@acc[0]
615 subccc $t1,@acc[1],@acc[1]
616 subccc $t2,@acc[2],@acc[2]
619 subccc $t3,@acc[3],@acc[3]
620 subccc $t4,@acc[4],@acc[4]
621 subccc $t5,@acc[5],@acc[5]
622 subccc $t6,@acc[6],@acc[6]
623 subccc $t7,@acc[7],@acc[7]
625 subc %g0,%g0,$carry ! broadcast borrow bit
626 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
628 ! void ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
629 .globl ecp_nistz256_div_by_2
631 ecp_nistz256_div_by_2:
632 save %sp,-STACK_FRAME,%sp
640 call __ecp_nistz256_div_by_2
644 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
647 __ecp_nistz256_div_by_2:
648 ! ret = (a is odd ? a+mod : a) >> 1
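! (if a is odd, a+mod is even and (a+mod)/2 is congruent to a/2 mod P,
! so the shift below loses nothing; the carry out of the conditional
! addition supplies the top bit of the result)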
652 addcc @acc[0],$carry,@acc[0]
653 addccc @acc[1],$carry,@acc[1]
654 addccc @acc[2],$carry,@acc[2]
655 addccc @acc[3],0,@acc[3]
656 addccc @acc[4],0,@acc[4]
657 addccc @acc[5],0,@acc[5]
658 addccc @acc[6],$bi,@acc[6]
659 addccc @acc[7],$carry,@acc[7]
664 srl @acc[0],1,@acc[0]
666 srl @acc[1],1,@acc[1]
667 or @acc[0],$t0,@acc[0]
669 srl @acc[2],1,@acc[2]
670 or @acc[1],$t1,@acc[1]
673 srl @acc[3],1,@acc[3]
674 or @acc[2],$t2,@acc[2]
677 srl @acc[4],1,@acc[4]
678 or @acc[3],$t3,@acc[3]
681 srl @acc[5],1,@acc[5]
682 or @acc[4],$t4,@acc[4]
685 srl @acc[6],1,@acc[6]
686 or @acc[5],$t5,@acc[5]
689 srl @acc[7],1,@acc[7]
690 or @acc[6],$t6,@acc[6]
693 or @acc[7],$t7,@acc[7]
697 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
700 ########################################################################
701 # following subroutines are "literal" implementation of those found in ecp_nistz256.c.
704 ########################################################################
705 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
708 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
709 # above map() describes stack layout with 4 temporary
710 # 256-bit vectors on top.
717 .globl ecp_nistz256_point_double
719 ecp_nistz256_point_double:
720 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
721 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
722 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
723 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
724 be ecp_nistz256_point_double_vis3
727 save %sp,-STACK_FRAME-32*4,%sp
732 .Lpoint_double_shortcut:
734 ld [$ap+32+4],@acc[1]
735 ld [$ap+32+8],@acc[2]
736 ld [$ap+32+12],@acc[3]
737 ld [$ap+32+16],@acc[4]
738 ld [$ap+32+20],@acc[5]
739 ld [$ap+32+24],@acc[6]
740 ld [$ap+32+28],@acc[7]
741 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
742 add %sp,LOCALS+$S,$rp
746 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
747 add %sp,LOCALS+$Zsqr,$rp
750 call __ecp_nistz256_add ! p256_add(M, Zsqr, in_x);
751 add %sp,LOCALS+$M,$rp
753 add %sp,LOCALS+$S,$bp
754 add %sp,LOCALS+$S,$ap
755 call __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
756 add %sp,LOCALS+$S,$rp
758 ld [$ap_real],@acc[0]
759 add %sp,LOCALS+$Zsqr,$bp
760 ld [$ap_real+4],@acc[1]
761 ld [$ap_real+8],@acc[2]
762 ld [$ap_real+12],@acc[3]
763 ld [$ap_real+16],@acc[4]
764 ld [$ap_real+20],@acc[5]
765 ld [$ap_real+24],@acc[6]
766 ld [$ap_real+28],@acc[7]
767 call __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
768 add %sp,LOCALS+$Zsqr,$rp
772 call __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
773 add %sp,LOCALS+$tmp0,$rp
775 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
778 add %sp,LOCALS+$Zsqr,$bp
779 add %sp,LOCALS+$M,$ap
780 call __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
781 add %sp,LOCALS+$M,$rp
783 call __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
784 add %sp,LOCALS+$M,$rp
786 add %sp,LOCALS+$S,$bp
787 add %sp,LOCALS+$S,$ap
788 call __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
789 add %sp,LOCALS+$tmp0,$rp
791 call __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
795 add %sp,LOCALS+$S,$ap
796 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
797 add %sp,LOCALS+$S,$rp
799 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
800 add %sp,LOCALS+$tmp0,$rp
802 add %sp,LOCALS+$M,$bp
803 add %sp,LOCALS+$M,$ap
804 call __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
807 add %sp,LOCALS+$tmp0,$bp
808 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
811 add %sp,LOCALS+$S,$bp
812 call __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
813 add %sp,LOCALS+$S,$rp
815 add %sp,LOCALS+$M,$bp
816 add %sp,LOCALS+$S,$ap
817 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
818 add %sp,LOCALS+$S,$rp
821 call __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
826 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
830 ########################################################################
831 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
832 # const P256_POINT *in2);
834 my ($res_x,$res_y,$res_z,
835 $H,$Hsqr,$R,$Rsqr,$Hcub,
836 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
837 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
839 # above map() describes stack layout with 12 temporary
840 # 256-bit vectors on top. Then we reserve some space for
841 # !in1infty, !in2infty, result of check for zero and return pointer.
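# The infinity flags drive the conditional moves at the very end:
# roughly  out = !in2infty ? (!in1infty ? result : in2) : in1,
# i.e. P+infinity=P and infinity+Q=Q, selected with conditional moves
# rather than branches.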
843 my $bp_real=$rp_real;
846 .globl ecp_nistz256_point_add
848 ecp_nistz256_point_add:
849 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
850 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
851 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
852 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
853 be ecp_nistz256_point_add_vis3
856 save %sp,-STACK_FRAME-32*12-32,%sp
858 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
862 ld [$bp],@acc[0] ! in2_x
870 ld [$bp+32],$t0 ! in2_y
878 or @acc[1],@acc[0],@acc[0]
879 or @acc[3],@acc[2],@acc[2]
880 or @acc[5],@acc[4],@acc[4]
881 or @acc[7],@acc[6],@acc[6]
882 or @acc[2],@acc[0],@acc[0]
883 or @acc[6],@acc[4],@acc[4]
884 or @acc[4],@acc[0],@acc[0]
892 or @acc[0],$t0,$t0 ! !in2infty
894 st $t0,[%fp+STACK_BIAS-12]
896 ld [$ap],@acc[0] ! in1_x
904 ld [$ap+32],$t0 ! in1_y
912 or @acc[1],@acc[0],@acc[0]
913 or @acc[3],@acc[2],@acc[2]
914 or @acc[5],@acc[4],@acc[4]
915 or @acc[7],@acc[6],@acc[6]
916 or @acc[2],@acc[0],@acc[0]
917 or @acc[6],@acc[4],@acc[4]
918 or @acc[4],@acc[0],@acc[0]
926 or @acc[0],$t0,$t0 ! !in1infty
928 st $t0,[%fp+STACK_BIAS-16]
932 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
933 add %sp,LOCALS+$Z2sqr,$rp
937 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
938 add %sp,LOCALS+$Z1sqr,$rp
941 add %sp,LOCALS+$Z2sqr,$ap
942 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
943 add %sp,LOCALS+$S1,$rp
946 add %sp,LOCALS+$Z1sqr,$ap
947 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
948 add %sp,LOCALS+$S2,$rp
951 add %sp,LOCALS+$S1,$ap
952 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
953 add %sp,LOCALS+$S1,$rp
956 add %sp,LOCALS+$S2,$ap
957 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
958 add %sp,LOCALS+$S2,$rp
960 add %sp,LOCALS+$S1,$bp
961 call __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
962 add %sp,LOCALS+$R,$rp
964 or @acc[1],@acc[0],@acc[0] ! see if result is zero
965 or @acc[3],@acc[2],@acc[2]
966 or @acc[5],@acc[4],@acc[4]
967 or @acc[7],@acc[6],@acc[6]
968 or @acc[2],@acc[0],@acc[0]
969 or @acc[6],@acc[4],@acc[4]
970 or @acc[4],@acc[0],@acc[0]
971 st @acc[0],[%fp+STACK_BIAS-20]
974 add %sp,LOCALS+$Z2sqr,$ap
975 call __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
976 add %sp,LOCALS+$U1,$rp
979 add %sp,LOCALS+$Z1sqr,$ap
980 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
981 add %sp,LOCALS+$U2,$rp
983 add %sp,LOCALS+$U1,$bp
984 call __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
985 add %sp,LOCALS+$H,$rp
987 or @acc[1],@acc[0],@acc[0] ! see if result is zero
988 or @acc[3],@acc[2],@acc[2]
989 or @acc[5],@acc[4],@acc[4]
990 or @acc[7],@acc[6],@acc[6]
991 or @acc[2],@acc[0],@acc[0]
992 or @acc[6],@acc[4],@acc[4]
993 orcc @acc[4],@acc[0],@acc[0]
995 bne,pt %icc,.Ladd_proceed ! is_equal(U1,U2)?
998 ld [%fp+STACK_BIAS-12],$t0
999 ld [%fp+STACK_BIAS-16],$t1
1000 ld [%fp+STACK_BIAS-20],$t2
1002 be,pt %icc,.Ladd_proceed ! (in1infty || in2infty)?
1005 be,pt %icc,.Ladd_double ! is_equal(S1,S2)?
1008 ldx [%fp+STACK_BIAS-8],$rp
1038 ldx [%fp+STACK_BIAS-8],$rp_real
1040 b .Lpoint_double_shortcut
1041 add %sp,32*(12-4)+32,%sp ! difference in frame sizes
1045 add %sp,LOCALS+$R,$bp
1046 add %sp,LOCALS+$R,$ap
1047 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1048 add %sp,LOCALS+$Rsqr,$rp
1051 add %sp,LOCALS+$H,$ap
1052 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1053 add %sp,LOCALS+$res_z,$rp
1055 add %sp,LOCALS+$H,$bp
1056 add %sp,LOCALS+$H,$ap
1057 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1058 add %sp,LOCALS+$Hsqr,$rp
1061 add %sp,LOCALS+$res_z,$ap
1062 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1063 add %sp,LOCALS+$res_z,$rp
1065 add %sp,LOCALS+$H,$bp
1066 add %sp,LOCALS+$Hsqr,$ap
1067 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1068 add %sp,LOCALS+$Hcub,$rp
1070 add %sp,LOCALS+$U1,$bp
1071 add %sp,LOCALS+$Hsqr,$ap
1072 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1073 add %sp,LOCALS+$U2,$rp
1075 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1076 add %sp,LOCALS+$Hsqr,$rp
1078 add %sp,LOCALS+$Rsqr,$bp
1079 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1080 add %sp,LOCALS+$res_x,$rp
1082 add %sp,LOCALS+$Hcub,$bp
1083 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1084 add %sp,LOCALS+$res_x,$rp
1086 add %sp,LOCALS+$U2,$bp
1087 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1088 add %sp,LOCALS+$res_y,$rp
1090 add %sp,LOCALS+$Hcub,$bp
1091 add %sp,LOCALS+$S1,$ap
1092 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1093 add %sp,LOCALS+$S2,$rp
1095 add %sp,LOCALS+$R,$bp
1096 add %sp,LOCALS+$res_y,$ap
1097 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1098 add %sp,LOCALS+$res_y,$rp
1100 add %sp,LOCALS+$S2,$bp
1101 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1102 add %sp,LOCALS+$res_y,$rp
1104 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1105 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1106 ldx [%fp+STACK_BIAS-8],$rp
1108 for($i=0;$i<96;$i+=8) { # conditional moves
1110 ld [%sp+LOCALS+$i],@acc[0] ! res
1111 ld [%sp+LOCALS+$i+4],@acc[1]
1112 ld [$bp_real+$i],@acc[2] ! in2
1113 ld [$bp_real+$i+4],@acc[3]
1114 ld [$ap_real+$i],@acc[4] ! in1
1115 ld [$ap_real+$i+4],@acc[5]
1116 movrz $t1,@acc[2],@acc[0]
1117 movrz $t1,@acc[3],@acc[1]
1118 movrz $t2,@acc[4],@acc[0]
1119 movrz $t2,@acc[5],@acc[1]
1121 st @acc[1],[$rp+$i+4]
1128 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1132 ########################################################################
1133 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1134 # const P256_POINT_AFFINE *in2);
1136 my ($res_x,$res_y,$res_z,
1137 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1139 # above map() describes stack layout with 10 temporary
1140 # 256-bit vectors on top. Then we reserve some space for
1141 # !in1infty, !in2infty, result of check for zero and return pointer.
1143 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
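# @ONE_mont is 1 in the Montgomery domain, i.e. 2^256 mod P, written as
# signed 32-bit words; an affine in2 has an implicit Z coordinate of 1,
# so this is what gets selected for res_z when in1 turns out to be the
# point at infinity (see the conditional moves at the end).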
1144 my $bp_real=$rp_real;
1147 .globl ecp_nistz256_point_add_affine
1149 ecp_nistz256_point_add_affine:
1150 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1151 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
1152 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1153 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1154 be ecp_nistz256_point_add_affine_vis3
1157 save %sp,-STACK_FRAME-32*10-32,%sp
1159 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
1163 ld [$ap],@acc[0] ! in1_x
1171 ld [$ap+32],$t0 ! in1_y
1179 or @acc[1],@acc[0],@acc[0]
1180 or @acc[3],@acc[2],@acc[2]
1181 or @acc[5],@acc[4],@acc[4]
1182 or @acc[7],@acc[6],@acc[6]
1183 or @acc[2],@acc[0],@acc[0]
1184 or @acc[6],@acc[4],@acc[4]
1185 or @acc[4],@acc[0],@acc[0]
1193 or @acc[0],$t0,$t0 ! !in1infty
1195 st $t0,[%fp+STACK_BIAS-16]
1197 ld [$bp],@acc[0] ! in2_x
1205 ld [$bp+32],$t0 ! in2_y
1213 or @acc[1],@acc[0],@acc[0]
1214 or @acc[3],@acc[2],@acc[2]
1215 or @acc[5],@acc[4],@acc[4]
1216 or @acc[7],@acc[6],@acc[6]
1217 or @acc[2],@acc[0],@acc[0]
1218 or @acc[6],@acc[4],@acc[4]
1219 or @acc[4],@acc[0],@acc[0]
1227 or @acc[0],$t0,$t0 ! !in2infty
1229 st $t0,[%fp+STACK_BIAS-12]
1233 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1234 add %sp,LOCALS+$Z1sqr,$rp
1237 add %sp,LOCALS+$Z1sqr,$ap
1238 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1239 add %sp,LOCALS+$U2,$rp
1242 call __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1243 add %sp,LOCALS+$H,$rp
1246 add %sp,LOCALS+$Z1sqr,$ap
1247 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1248 add %sp,LOCALS+$S2,$rp
1251 add %sp,LOCALS+$H,$ap
1252 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1253 add %sp,LOCALS+$res_z,$rp
1256 add %sp,LOCALS+$S2,$ap
1257 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1258 add %sp,LOCALS+$S2,$rp
1261 call __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1262 add %sp,LOCALS+$R,$rp
1264 add %sp,LOCALS+$H,$bp
1265 add %sp,LOCALS+$H,$ap
1266 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1267 add %sp,LOCALS+$Hsqr,$rp
1269 add %sp,LOCALS+$R,$bp
1270 add %sp,LOCALS+$R,$ap
1271 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1272 add %sp,LOCALS+$Rsqr,$rp
1274 add %sp,LOCALS+$H,$bp
1275 add %sp,LOCALS+$Hsqr,$ap
1276 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1277 add %sp,LOCALS+$Hcub,$rp
1280 add %sp,LOCALS+$Hsqr,$ap
1281 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1282 add %sp,LOCALS+$U2,$rp
1284 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1285 add %sp,LOCALS+$Hsqr,$rp
1287 add %sp,LOCALS+$Rsqr,$bp
1288 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1289 add %sp,LOCALS+$res_x,$rp
1291 add %sp,LOCALS+$Hcub,$bp
1292 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1293 add %sp,LOCALS+$res_x,$rp
1295 add %sp,LOCALS+$U2,$bp
1296 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1297 add %sp,LOCALS+$res_y,$rp
1300 add %sp,LOCALS+$Hcub,$ap
1301 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1302 add %sp,LOCALS+$S2,$rp
1304 add %sp,LOCALS+$R,$bp
1305 add %sp,LOCALS+$res_y,$ap
1306 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1307 add %sp,LOCALS+$res_y,$rp
1309 add %sp,LOCALS+$S2,$bp
1310 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1311 add %sp,LOCALS+$res_y,$rp
1313 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1314 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1315 ldx [%fp+STACK_BIAS-8],$rp
1317 for($i=0;$i<64;$i+=8) { # conditional moves
1319 ld [%sp+LOCALS+$i],@acc[0] ! res
1320 ld [%sp+LOCALS+$i+4],@acc[1]
1321 ld [$bp_real+$i],@acc[2] ! in2
1322 ld [$bp_real+$i+4],@acc[3]
1323 ld [$ap_real+$i],@acc[4] ! in1
1324 ld [$ap_real+$i+4],@acc[5]
1325 movrz $t1,@acc[2],@acc[0]
1326 movrz $t1,@acc[3],@acc[1]
1327 movrz $t2,@acc[4],@acc[0]
1328 movrz $t2,@acc[5],@acc[1]
1330 st @acc[1],[$rp+$i+4]
1336 ld [%sp+LOCALS+$i],@acc[0] ! res
1337 ld [%sp+LOCALS+$i+4],@acc[1]
1338 ld [$ap_real+$i],@acc[4] ! in1
1339 ld [$ap_real+$i+4],@acc[5]
1340 movrz $t1,@ONE_mont[$j],@acc[0]
1341 movrz $t1,@ONE_mont[$j+1],@acc[1]
1342 movrz $t2,@acc[4],@acc[0]
1343 movrz $t2,@acc[5],@acc[1]
1345 st @acc[1],[$rp+$i+4]
1351 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1355 my ($out,$inp,$index)=map("%i$_",(0..2));
1359 ! void ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1361 .globl ecp_nistz256_scatter_w5
1363 ecp_nistz256_scatter_w5:
1364 save %sp,-STACK_FRAME,%sp
1367 add $out,$index,$out
1378 st %l0,[$out+64*0-4]
1379 st %l1,[$out+64*1-4]
1380 st %l2,[$out+64*2-4]
1381 st %l3,[$out+64*3-4]
1382 st %l4,[$out+64*4-4]
1383 st %l5,[$out+64*5-4]
1384 st %l6,[$out+64*6-4]
1385 st %l7,[$out+64*7-4]
1397 st %l0,[$out+64*0-4]
1398 st %l1,[$out+64*1-4]
1399 st %l2,[$out+64*2-4]
1400 st %l3,[$out+64*3-4]
1401 st %l4,[$out+64*4-4]
1402 st %l5,[$out+64*5-4]
1403 st %l6,[$out+64*6-4]
1404 st %l7,[$out+64*7-4]
1415 st %l0,[$out+64*0-4]
1416 st %l1,[$out+64*1-4]
1417 st %l2,[$out+64*2-4]
1418 st %l3,[$out+64*3-4]
1419 st %l4,[$out+64*4-4]
1420 st %l5,[$out+64*5-4]
1421 st %l6,[$out+64*6-4]
1422 st %l7,[$out+64*7-4]
1426 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1428 ! void ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1430 .globl ecp_nistz256_gather_w5
1432 ecp_nistz256_gather_w5:
1433 save %sp,-STACK_FRAME,%sp
1438 add $index,$mask,$index
1440 add $inp,$index,$inp
1523 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1525 ! void ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1527 .globl ecp_nistz256_scatter_w7
1529 ecp_nistz256_scatter_w7:
1530 save %sp,-STACK_FRAME,%sp
1532 add $out,$index,$out
1537 subcc $index,1,$index
1538 stb %l0,[$out+64*0-1]
1540 stb %l1,[$out+64*1-1]
1542 stb %l2,[$out+64*2-1]
1544 stb %l3,[$out+64*3-1]
1545 bne .Loop_scatter_w7
1550 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1552 ! void ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1554 .globl ecp_nistz256_gather_w7
1556 ecp_nistz256_gather_w7:
1557 save %sp,-STACK_FRAME,%sp
1562 add $index,$mask,$index
1563 add $inp,$index,$inp
1567 ldub [$inp+64*0],%l0
1568 prefetch [$inp+3840+64*0],1
1569 subcc $index,1,$index
1570 ldub [$inp+64*1],%l1
1571 prefetch [$inp+3840+64*1],1
1572 ldub [$inp+64*2],%l2
1573 prefetch [$inp+3840+64*2],1
1574 ldub [$inp+64*3],%l3
1575 prefetch [$inp+3840+64*3],1
1590 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1594 ########################################################################
1595 # Following subroutines are VIS3 counterparts of those above that
1596 # implement ones found in ecp_nistz256.c. Key difference is that they
1597 # use 128-bit multiplication and addition with 64-bit carry, and in order
1598 # to do that they perform conversion from uint32_t[8] to uint64_t[4] upon
1599 # entry and vice versa on return.
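# The conversion itself is cheap: two adjacent 32-bit words of the
# little-endian uint32_t[8] value are packed as (sketch; ld/lduw
# zero-extends on SPARCv9)
#	limb[i] = ((uint64_t)w[2*i+1]<<32) | w[2*i];
# and unpacked again with srlx/st on the way out.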
1601 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1602 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1603 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1604 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1605 my ($rp_real,$ap_real)=("%g2","%g3");
1606 my ($acc6,$acc7)=($bp,$bi); # used in squaring
1610 __ecp_nistz256_mul_by_2_vis3:
1611 addcc $acc0,$acc0,$acc0
1612 addxccc $acc1,$acc1,$acc1
1613 addxccc $acc2,$acc2,$acc2
1614 addxccc $acc3,$acc3,$acc3
1615 b .Lreduce_by_sub_vis3
1616 addxc %g0,%g0,$acc4 ! did it carry?
1617 .size __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1620 __ecp_nistz256_add_vis3:
1626 __ecp_nistz256_add_noload_vis3:
1628 addcc $t0,$acc0,$acc0
1629 addxccc $t1,$acc1,$acc1
1630 addxccc $t2,$acc2,$acc2
1631 addxccc $t3,$acc3,$acc3
1632 addxc %g0,%g0,$acc4 ! did it carry?
1634 .Lreduce_by_sub_vis3:
1636 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
1637 addxccc $acc1,$poly1,$t1
1638 addxccc $acc2,$minus1,$t2
1639 addxc $acc3,$poly3,$t3
1641 movrnz $acc4,$t0,$acc0 ! if a+b carried, ret = ret-mod
1642 movrnz $acc4,$t1,$acc1
1644 movrnz $acc4,$t2,$acc2
1646 movrnz $acc4,$t3,$acc3
1650 .size __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
1652 ! Trouble with subtraction is that there is no subtraction with 64-bit
1653 ! borrow, only with 32-bit one. For this reason we "decompose" 64-bit
1654 ! $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
1655 ! recall that SPARC is big-endian, which is why you'll observe that
1656 ! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
1657 ! "collect" result back to 64-bit $acc0-$acc3.
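! (each 64-bit limb of b[4] keeps its high half at offset 8*i and its
! low half at offset 8*i+4, and the borrow chain has to start from the
! low halves, hence the 4-0-12-8-... access order)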
1659 __ecp_nistz256_sub_from_vis3:
1668 subcc $acc0,$t0,$acc0
1670 subccc $acc4,$t1,$acc4
1672 subccc $acc1,$t2,$acc1
1674 and $acc0,$poly1,$acc0
1675 subccc $acc5,$t3,$acc5
1678 and $acc1,$poly1,$acc1
1680 or $acc0,$acc4,$acc0
1682 or $acc1,$acc5,$acc1
1684 subccc $acc2,$t0,$acc2
1685 subccc $acc4,$t1,$acc4
1686 subccc $acc3,$t2,$acc3
1687 and $acc2,$poly1,$acc2
1688 subccc $acc5,$t3,$acc5
1690 and $acc3,$poly1,$acc3
1692 or $acc2,$acc4,$acc2
1693 subc %g0,%g0,$acc4 ! did it borrow?
1694 b .Lreduce_by_add_vis3
1695 or $acc3,$acc5,$acc3
1696 .size __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1699 __ecp_nistz256_sub_morf_vis3:
1708 subcc $t0,$acc0,$acc0
1710 subccc $t1,$acc4,$acc4
1712 subccc $t2,$acc1,$acc1
1714 and $acc0,$poly1,$acc0
1715 subccc $t3,$acc5,$acc5
1718 and $acc1,$poly1,$acc1
1720 or $acc0,$acc4,$acc0
1722 or $acc1,$acc5,$acc1
1724 subccc $t0,$acc2,$acc2
1725 subccc $t1,$acc4,$acc4
1726 subccc $t2,$acc3,$acc3
1727 and $acc2,$poly1,$acc2
1728 subccc $t3,$acc5,$acc5
1730 and $acc3,$poly1,$acc3
1732 or $acc2,$acc4,$acc2
1733 subc %g0,%g0,$acc4 ! did it borrow?
1734 or $acc3,$acc5,$acc3
1736 .Lreduce_by_add_vis3:
1738 addcc $acc0,-1,$t0 ! add modulus
1740 addxccc $acc1,$poly1,$t1
1741 not $poly1,$poly1 ! restore $poly1
1742 addxccc $acc2,%g0,$t2
1745 movrnz $acc4,$t0,$acc0 ! if a-b borrowed, ret = ret+mod
1746 movrnz $acc4,$t1,$acc1
1748 movrnz $acc4,$t2,$acc2
1750 movrnz $acc4,$t3,$acc3
1754 .size __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1757 __ecp_nistz256_div_by_2_vis3:
1758 ! ret = (a is odd ? a+mod : a) >> 1
1763 addcc $acc0,-1,$t0 ! add modulus
1764 addxccc $acc1,$t1,$t1
1765 addxccc $acc2,%g0,$t2
1766 addxccc $acc3,$t3,$t3
1767 addxc %g0,%g0,$acc4 ! carry bit
1769 movrnz $acc5,$t0,$acc0
1770 movrnz $acc5,$t1,$acc1
1771 movrnz $acc5,$t2,$acc2
1772 movrnz $acc5,$t3,$acc3
1773 movrz $acc5,%g0,$acc4
1788 sllx $acc4,63,$t3 ! don't forget carry bit
1794 .size __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1796 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1797 ! 4x faster [on T4]...
1799 __ecp_nistz256_mul_mont_vis3:
1801 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1809 ldx [$bp+8],$bi ! b[1]
1811 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1813 addxccc $acc2,$t1,$acc2
1815 addxccc $acc3,$t2,$acc3
1819 for($i=1;$i<4;$i++) {
1820 # Reduction iteration is normally performed by accumulating
1821 # result of multiplication of modulus by "magic" digit [and
1822 # omitting least significant word, which is guaranteed to
1823 # be 0], but thanks to special form of modulus and "magic"
1824 # digit being equal to least significant word, it can be
1825 # performed with additions and subtractions alone. Indeed:
1827 # ffff0001.00000000.0000ffff.ffffffff
1828 # *                                abcdefgh
1829 # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1831 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
1832 # rewrite the above as:
1834 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1835 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1836 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1838 # or marking redundant operations:
1840 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1841 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1842 # - 0000abcd.efgh0000.--------.--------.--------
1843 # ^^^^^^^^ but this word is calculated with umulxhi, because
1844 # there is no subtract with 64-bit borrow:-(
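# In other words, with m=acc[0] each iteration adds m*2^96 (the $t0/$t1
# pair) to acc[1-2] and m*0xFFFFFFFF00000001 (the sub/umulxhi pair) to
# acc[3-4], then drops acc[0]; this works because (illustration only)
#	m*P = -m + m*2^96 + m*0xFFFFFFFF00000001*2^192
# and the -m cancels acc[0] exactly.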
1847 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1848 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1849 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1851 addxccc $acc2,$t1,$acc1
1853 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1855 addxccc $acc4,$t3,$acc3
1857 addxc $acc5,%g0,$acc4
1859 addcc $acc0,$t0,$acc0 ! accumulate low parts of multiplication
1861 addxccc $acc1,$t1,$acc1
1863 addxccc $acc2,$t2,$acc2
1865 addxccc $acc3,$t3,$acc3
1867 addxc $acc4,%g0,$acc4
1869 $code.=<<___ if ($i<3);
1870 ldx [$bp+8*($i+1)],$bi ! bp[$i+1]
1873 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1875 addxccc $acc2,$t1,$acc2
1877 addxccc $acc3,$t2,$acc3
1878 addxccc $acc4,$t3,$acc4
1883 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1884 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1885 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1886 addxccc $acc2,$t1,$acc1
1887 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1888 addxccc $acc4,$t3,$acc3
1889 b .Lmul_final_vis3 ! see below
1890 addxc $acc5,%g0,$acc4
1891 .size __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1893 ! compared to above __ecp_nistz256_mul_mont_vis3 it's 21% fewer
1894 ! instructions, but only 14% faster [on T4]...
1896 __ecp_nistz256_sqr_mont_vis3:
1897 ! | | | | | |a1*a0| |
1898 ! | | | | |a2*a0| | |
1899 ! | |a3*a2|a3*a0| | | |
1900 ! | | | |a2*a1| | | |
1901 ! | | |a3*a1| | | | |
1902 ! *| | | | | | | | 2|
1903 ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1904 ! |--+--+--+--+--+--+--+--|
1905 ! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1907 ! the "can't overflow" notes below mark carries into the high part
1908 ! of a multiplication result, which can't overflow, because it
1909 ! can never be all ones.
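! i.e. compute each off-diagonal product a[i]*a[j] (i>j) once,
! double the whole accumulator, then add the squares a[i]*a[i]
! on the diagonal.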
1911 mulx $a1,$a0,$acc1 ! a[1]*a[0]
1913 mulx $a2,$a0,$acc2 ! a[2]*a[0]
1915 mulx $a3,$a0,$acc3 ! a[3]*a[0]
1916 umulxhi $a3,$a0,$acc4
1918 addcc $acc2,$t1,$acc2 ! accumulate high parts of multiplication
1919 mulx $a2,$a1,$t0 ! a[2]*a[1]
1921 addxccc $acc3,$t2,$acc3
1922 mulx $a3,$a1,$t2 ! a[3]*a[1]
1924 addxc $acc4,%g0,$acc4 ! can't overflow
1926 mulx $a3,$a2,$acc5 ! a[3]*a[2]
1927 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1928 umulxhi $a3,$a2,$acc6
1930 addcc $t2,$t1,$t1 ! accumulate high parts of multiplication
1931 mulx $a0,$a0,$acc0 ! a[0]*a[0]
1932 addxc $t3,%g0,$t2 ! can't overflow
1934 addcc $acc3,$t0,$acc3 ! accumulate low parts of multiplication
1936 addxccc $acc4,$t1,$acc4
1937 mulx $a1,$a1,$t1 ! a[1]*a[1]
1938 addxccc $acc5,$t2,$acc5
1940 addxc $acc6,%g0,$acc6 ! can't overflow
1942 addcc $acc1,$acc1,$acc1 ! acc[1-6]*=2
1943 mulx $a2,$a2,$t2 ! a[2]*a[2]
1944 addxccc $acc2,$acc2,$acc2
1946 addxccc $acc3,$acc3,$acc3
1947 mulx $a3,$a3,$t3 ! a[3]*a[3]
1948 addxccc $acc4,$acc4,$acc4
1950 addxccc $acc5,$acc5,$acc5
1951 addxccc $acc6,$acc6,$acc6
1954 addcc $acc1,$a0,$acc1 ! +a[i]*a[i]
1955 addxccc $acc2,$t1,$acc2
1956 addxccc $acc3,$a1,$acc3
1957 addxccc $acc4,$t2,$acc4
1959 addxccc $acc5,$a2,$acc5
1961 addxccc $acc6,$t3,$acc6
1962 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1963 addxc $acc7,$a3,$acc7
1965 for($i=0;$i<3;$i++) { # reductions, see commentary
1966 # in multiplication for details
1968 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1969 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1971 addxccc $acc2,$t1,$acc1
1973 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1974 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1975 addxc %g0,$t3,$acc3 ! can't overflow
1979 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1980 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1981 addxccc $acc2,$t1,$acc1
1982 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1983 addxc %g0,$t3,$acc3 ! can't overflow
1985 addcc $acc0,$acc4,$acc0 ! accumulate upper half
1986 addxccc $acc1,$acc5,$acc1
1987 addxccc $acc2,$acc6,$acc2
1988 addxccc $acc3,$acc7,$acc3
1993 ! Final step is "if result > mod, subtract mod", but as comparison
1994 ! means subtraction, we do the subtraction and then copy outcome
1995 ! if it didn't borrow. But note that as we [have to] replace
1996 ! subtraction with addition of the negated modulus, the carry/borrow
1997 ! logic is inverted: a carry out plays the role of "no borrow".
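! (-modulus mod 2^256 is 1, 0xFFFFFFFF00000000, -1, 0x00000000FFFFFFFE
! in 64-bit limbs, i.e. the 1, $poly1, $minus1 and $poly3 operands
! below; carry out of the whole chain means the value was >= modulus,
! and movcs then keeps the reduced copy)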
1999 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
2000 not $poly3,$poly3 ! restore 0x00000000FFFFFFFE
2001 addxccc $acc1,$poly1,$t1
2002 addxccc $acc2,$minus1,$t2
2003 addxccc $acc3,$poly3,$t3
2004 addxccc $acc4,$minus1,%g0 ! did it carry?
2006 movcs %xcc,$t0,$acc0
2007 movcs %xcc,$t1,$acc1
2009 movcs %xcc,$t2,$acc2
2011 movcs %xcc,$t3,$acc3
2015 .size __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2018 ########################################################################
2019 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2022 my ($res_x,$res_y,$res_z,
2024 $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2025 # above map() describes stack layout with 10 temporary
2026 # 256-bit vectors on top.
2030 ecp_nistz256_point_double_vis3:
2031 save %sp,-STACK64_FRAME-32*10,%sp
2034 .Ldouble_shortcut_vis3:
2037 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2038 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2040 ! convert input to uint64_t[4]
2051 ld [$ap+32],$acc0 ! in_y
2059 ld [$ap+32+16],$acc2
2063 ld [$ap+32+24],$acc3
2067 stx $a0,[%sp+LOCALS64+$in_x]
2069 stx $a1,[%sp+LOCALS64+$in_x+8]
2071 stx $a2,[%sp+LOCALS64+$in_x+16]
2073 stx $a3,[%sp+LOCALS64+$in_x+24]
2075 stx $acc0,[%sp+LOCALS64+$in_y]
2077 stx $acc1,[%sp+LOCALS64+$in_y+8]
2079 stx $acc2,[%sp+LOCALS64+$in_y+16]
2080 stx $acc3,[%sp+LOCALS64+$in_y+24]
2082 ld [$ap+64],$a0 ! in_z
2100 stx $a0,[%sp+LOCALS64+$in_z]
2102 stx $a1,[%sp+LOCALS64+$in_z+8]
2104 stx $a2,[%sp+LOCALS64+$in_z+16]
2105 stx $a3,[%sp+LOCALS64+$in_z+24]
2107 ! in_y is still in $acc0-$acc3
2108 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(S, in_y);
2109 add %sp,LOCALS64+$S,$rp
2111 ! in_z is still in $a0-$a3
2112 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Zsqr, in_z);
2113 add %sp,LOCALS64+$Zsqr,$rp
2115 mov $acc0,$a0 ! put Zsqr aside
2120 add %sp,LOCALS64+$in_x,$bp
2121 call __ecp_nistz256_add_vis3 ! p256_add(M, Zsqr, in_x);
2122 add %sp,LOCALS64+$M,$rp
2124 mov $a0,$acc0 ! restore Zsqr
2125 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2127 ldx [%sp+LOCALS64+$S+8],$a1
2129 ldx [%sp+LOCALS64+$S+16],$a2
2131 ldx [%sp+LOCALS64+$S+24],$a3
2133 add %sp,LOCALS64+$in_x,$bp
2134 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(Zsqr, in_x, Zsqr);
2135 add %sp,LOCALS64+$Zsqr,$rp
2137 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(S, S);
2138 add %sp,LOCALS64+$S,$rp
2140 ldx [%sp+LOCALS64+$in_z],$bi
2141 ldx [%sp+LOCALS64+$in_y],$a0
2142 ldx [%sp+LOCALS64+$in_y+8],$a1
2143 ldx [%sp+LOCALS64+$in_y+16],$a2
2144 ldx [%sp+LOCALS64+$in_y+24],$a3
2145 add %sp,LOCALS64+$in_z,$bp
2146 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(tmp0, in_z, in_y);
2147 add %sp,LOCALS64+$tmp0,$rp
2149 ldx [%sp+LOCALS64+$M],$bi ! forward load
2150 ldx [%sp+LOCALS64+$Zsqr],$a0
2151 ldx [%sp+LOCALS64+$Zsqr+8],$a1
2152 ldx [%sp+LOCALS64+$Zsqr+16],$a2
2153 ldx [%sp+LOCALS64+$Zsqr+24],$a3
2155 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(res_z, tmp0);
2156 add %sp,LOCALS64+$res_z,$rp
2158 add %sp,LOCALS64+$M,$bp
2159 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(M, M, Zsqr);
2160 add %sp,LOCALS64+$M,$rp
2162 mov $acc0,$a0 ! put aside M
2166 call __ecp_nistz256_mul_by_2_vis3
2167 add %sp,LOCALS64+$M,$rp
2168 mov $a0,$t0 ! copy M
2169 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2171 ldx [%sp+LOCALS64+$S+8],$a1
2173 ldx [%sp+LOCALS64+$S+16],$a2
2175 ldx [%sp+LOCALS64+$S+24],$a3
2176 call __ecp_nistz256_add_noload_vis3 ! p256_mul_by_3(M, M);
2177 add %sp,LOCALS64+$M,$rp
2179 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(tmp0, S);
2180 add %sp,LOCALS64+$tmp0,$rp
2182 ldx [%sp+LOCALS64+$S],$bi ! forward load
2183 ldx [%sp+LOCALS64+$in_x],$a0
2184 ldx [%sp+LOCALS64+$in_x+8],$a1
2185 ldx [%sp+LOCALS64+$in_x+16],$a2
2186 ldx [%sp+LOCALS64+$in_x+24],$a3
2188 call __ecp_nistz256_div_by_2_vis3 ! p256_div_by_2(res_y, tmp0);
2189 add %sp,LOCALS64+$res_y,$rp
2191 add %sp,LOCALS64+$S,$bp
2192 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, in_x);
2193 add %sp,LOCALS64+$S,$rp
2195 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2196 ldx [%sp+LOCALS64+$M+8],$a1
2197 ldx [%sp+LOCALS64+$M+16],$a2
2198 ldx [%sp+LOCALS64+$M+24],$a3
2200 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(tmp0, S);
2201 add %sp,LOCALS64+$tmp0,$rp
2203 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(res_x, M);
2204 add %sp,LOCALS64+$res_x,$rp
2206 add %sp,LOCALS64+$tmp0,$bp
2207 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, tmp0);
2208 add %sp,LOCALS64+$res_x,$rp
2210 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2211 ldx [%sp+LOCALS64+$M+8],$a1
2212 ldx [%sp+LOCALS64+$M+16],$a2
2213 ldx [%sp+LOCALS64+$M+24],$a3
2215 add %sp,LOCALS64+$S,$bp
2216 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(S, S, res_x);
2217 add %sp,LOCALS64+$S,$rp
2220 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, M);
2221 add %sp,LOCALS64+$S,$rp
2223 ldx [%sp+LOCALS64+$res_x],$a0 ! forward load
2224 ldx [%sp+LOCALS64+$res_x+8],$a1
2225 ldx [%sp+LOCALS64+$res_x+16],$a2
2226 ldx [%sp+LOCALS64+$res_x+24],$a3
2228 add %sp,LOCALS64+$res_y,$bp
2229 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, S, res_y);
2230 add %sp,LOCALS64+$res_y,$rp
2232 ! convert output to uint32_t[8]
2235 st $a0,[$rp_real] ! res_x
2240 st $t1,[$rp_real+12]
2241 st $a2,[$rp_real+16]
2242 st $t2,[$rp_real+20]
2243 st $a3,[$rp_real+24]
2244 st $t3,[$rp_real+28]
2246 ldx [%sp+LOCALS64+$res_z],$a0 ! forward load
2248 ldx [%sp+LOCALS64+$res_z+8],$a1
2250 ldx [%sp+LOCALS64+$res_z+16],$a2
2252 ldx [%sp+LOCALS64+$res_z+24],$a3
2254 st $acc0,[$rp_real+32] ! res_y
2255 st $t0, [$rp_real+32+4]
2256 st $acc1,[$rp_real+32+8]
2257 st $t1, [$rp_real+32+12]
2258 st $acc2,[$rp_real+32+16]
2259 st $t2, [$rp_real+32+20]
2260 st $acc3,[$rp_real+32+24]
2261 st $t3, [$rp_real+32+28]
2265 st $a0,[$rp_real+64] ! res_z
2267 st $t0,[$rp_real+64+4]
2269 st $a1,[$rp_real+64+8]
2270 st $t1,[$rp_real+64+12]
2271 st $a2,[$rp_real+64+16]
2272 st $t2,[$rp_real+64+20]
2273 st $a3,[$rp_real+64+24]
2274 st $t3,[$rp_real+64+28]
2278 .size ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2281 ########################################################################
2282 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2283 # const P256_POINT *in2);
2285 my ($res_x,$res_y,$res_z,
2286 $in1_x,$in1_y,$in1_z,
2287 $in2_x,$in2_y,$in2_z,
2288 $H,$Hsqr,$R,$Rsqr,$Hcub,
2289 $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2290 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2292 # above map() describes stack layout with 18 temporary
2293 # 256-bit vectors on top. Then we reserve some space for
2294 # !in1infty, !in2infty and result of check for zero.
2297 .globl ecp_nistz256_point_add_vis3
2299 ecp_nistz256_point_add_vis3:
2300 save %sp,-STACK64_FRAME-32*18-32,%sp
2305 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2306 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2308 ! convert input to uint64_t[4]
2309 ld [$bp],$a0 ! in2_x
2319 ld [$bp+32],$acc0 ! in2_y
2327 ld [$bp+32+16],$acc2
2331 ld [$bp+32+24],$acc3
2335 stx $a0,[%sp+LOCALS64+$in2_x]
2337 stx $a1,[%sp+LOCALS64+$in2_x+8]
2339 stx $a2,[%sp+LOCALS64+$in2_x+16]
2341 stx $a3,[%sp+LOCALS64+$in2_x+24]
2343 stx $acc0,[%sp+LOCALS64+$in2_y]
2345 stx $acc1,[%sp+LOCALS64+$in2_y+8]
2347 stx $acc2,[%sp+LOCALS64+$in2_y+16]
2348 stx $acc3,[%sp+LOCALS64+$in2_y+24]
2352 or $acc1,$acc0,$acc0
2353 or $acc3,$acc2,$acc2
2355 or $acc2,$acc0,$acc0
2357 movrnz $a0,-1,$a0 ! !in2infty
2358 stx $a0,[%fp+STACK_BIAS-8]
2360 ld [$bp+64],$acc0 ! in2_z
2364 ld [$bp+64+16],$acc2
2366 ld [$bp+64+24],$acc3
2370 ld [$ap],$a0 ! in1_x
2386 stx $acc0,[%sp+LOCALS64+$in2_z]
2388 stx $acc1,[%sp+LOCALS64+$in2_z+8]
2390 stx $acc2,[%sp+LOCALS64+$in2_z+16]
2391 stx $acc3,[%sp+LOCALS64+$in2_z+24]
2394 ld [$ap+32],$acc0 ! in1_y
2401 ld [$ap+32+16],$acc2
2403 ld [$ap+32+24],$acc3
2407 stx $a0,[%sp+LOCALS64+$in1_x]
2409 stx $a1,[%sp+LOCALS64+$in1_x+8]
2411 stx $a2,[%sp+LOCALS64+$in1_x+16]
2413 stx $a3,[%sp+LOCALS64+$in1_x+24]
2415 stx $acc0,[%sp+LOCALS64+$in1_y]
2417 stx $acc1,[%sp+LOCALS64+$in1_y+8]
2419 stx $acc2,[%sp+LOCALS64+$in1_y+16]
2420 stx $acc3,[%sp+LOCALS64+$in1_y+24]
2424 or $acc1,$acc0,$acc0
2425 or $acc3,$acc2,$acc2
2427 or $acc2,$acc0,$acc0
2429 movrnz $a0,-1,$a0 ! !in1infty
2430 stx $a0,[%fp+STACK_BIAS-16]
2432 ldx [%sp+LOCALS64+$in2_z],$a0 ! forward load
2433 ldx [%sp+LOCALS64+$in2_z+8],$a1
2434 ldx [%sp+LOCALS64+$in2_z+16],$a2
2435 ldx [%sp+LOCALS64+$in2_z+24],$a3
2437 ld [$ap+64],$acc0 ! in1_z
2441 ld [$ap+64+16],$acc2
2443 ld [$ap+64+24],$acc3
2451 stx $acc0,[%sp+LOCALS64+$in1_z]
2453 stx $acc1,[%sp+LOCALS64+$in1_z+8]
2455 stx $acc2,[%sp+LOCALS64+$in1_z+16]
2456 stx $acc3,[%sp+LOCALS64+$in1_z+24]
2458 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z2sqr, in2_z);
2459 add %sp,LOCALS64+$Z2sqr,$rp
2461 ldx [%sp+LOCALS64+$in1_z],$a0
2462 ldx [%sp+LOCALS64+$in1_z+8],$a1
2463 ldx [%sp+LOCALS64+$in1_z+16],$a2
2464 ldx [%sp+LOCALS64+$in1_z+24],$a3
2465 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
2466 add %sp,LOCALS64+$Z1sqr,$rp
2468 ldx [%sp+LOCALS64+$Z2sqr],$bi
2469 ldx [%sp+LOCALS64+$in2_z],$a0
2470 ldx [%sp+LOCALS64+$in2_z+8],$a1
2471 ldx [%sp+LOCALS64+$in2_z+16],$a2
2472 ldx [%sp+LOCALS64+$in2_z+24],$a3
2473 add %sp,LOCALS64+$Z2sqr,$bp
2474 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, Z2sqr, in2_z);
2475 add %sp,LOCALS64+$S1,$rp
2477 ldx [%sp+LOCALS64+$Z1sqr],$bi
2478 ldx [%sp+LOCALS64+$in1_z],$a0
2479 ldx [%sp+LOCALS64+$in1_z+8],$a1
2480 ldx [%sp+LOCALS64+$in1_z+16],$a2
2481 ldx [%sp+LOCALS64+$in1_z+24],$a3
2482 add %sp,LOCALS64+$Z1sqr,$bp
2483 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
2484 add %sp,LOCALS64+$S2,$rp
2486 ldx [%sp+LOCALS64+$S1],$bi
2487 ldx [%sp+LOCALS64+$in1_y],$a0
2488 ldx [%sp+LOCALS64+$in1_y+8],$a1
2489 ldx [%sp+LOCALS64+$in1_y+16],$a2
2490 ldx [%sp+LOCALS64+$in1_y+24],$a3
2491 add %sp,LOCALS64+$S1,$bp
2492 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, S1, in1_y);
2493 add %sp,LOCALS64+$S1,$rp
2495 ldx [%sp+LOCALS64+$S2],$bi
2496 ldx [%sp+LOCALS64+$in2_y],$a0
2497 ldx [%sp+LOCALS64+$in2_y+8],$a1
2498 ldx [%sp+LOCALS64+$in2_y+16],$a2
2499 ldx [%sp+LOCALS64+$in2_y+24],$a3
2500 add %sp,LOCALS64+$S2,$bp
2501 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
2502 add %sp,LOCALS64+$S2,$rp
2504 ldx [%sp+LOCALS64+$Z2sqr],$bi ! forward load
2505 ldx [%sp+LOCALS64+$in1_x],$a0
2506 ldx [%sp+LOCALS64+$in1_x+8],$a1
2507 ldx [%sp+LOCALS64+$in1_x+16],$a2
2508 ldx [%sp+LOCALS64+$in1_x+24],$a3
2510 add %sp,LOCALS64+$S1,$bp
2511 call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, S1);
2512 add %sp,LOCALS64+$R,$rp
2514 or $acc1,$acc0,$acc0 ! see if result is zero
2515 or $acc3,$acc2,$acc2
2516 or $acc2,$acc0,$acc0
2517 stx $acc0,[%fp+STACK_BIAS-24]
2519 add %sp,LOCALS64+$Z2sqr,$bp
2520 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U1, in1_x, Z2sqr);
2521 add %sp,LOCALS64+$U1,$rp
2523 ldx [%sp+LOCALS64+$Z1sqr],$bi
2524 ldx [%sp+LOCALS64+$in2_x],$a0
2525 ldx [%sp+LOCALS64+$in2_x+8],$a1
2526 ldx [%sp+LOCALS64+$in2_x+16],$a2
2527 ldx [%sp+LOCALS64+$in2_x+24],$a3
2528 add %sp,LOCALS64+$Z1sqr,$bp
2529 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in2_x, Z1sqr);
2530 add %sp,LOCALS64+$U2,$rp
2532 ldx [%sp+LOCALS64+$R],$a0 ! forward load
2533 ldx [%sp+LOCALS64+$R+8],$a1
2534 ldx [%sp+LOCALS64+$R+16],$a2
2535 ldx [%sp+LOCALS64+$R+24],$a3
2537 add %sp,LOCALS64+$U1,$bp
2538 call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, U1);
2539 add %sp,LOCALS64+$H,$rp
2541 or $acc1,$acc0,$acc0 ! see if result is zero
2542 or $acc3,$acc2,$acc2
2543 orcc $acc2,$acc0,$acc0
2545 bne,pt %xcc,.Ladd_proceed_vis3 ! is_equal(U1,U2)?
2548 ldx [%fp+STACK_BIAS-8],$t0
2549 ldx [%fp+STACK_BIAS-16],$t1
2550 ldx [%fp+STACK_BIAS-24],$t2
2552 be,pt %xcc,.Ladd_proceed_vis3 ! (in1infty || in2infty)?
2555 be,a,pt %xcc,.Ldouble_shortcut_vis3 ! is_equal(S1,S2)?
2556 add %sp,32*(12-10)+32,%sp ! difference in frame sizes
2561 st %g0,[$rp_real+12]
2562 st %g0,[$rp_real+16]
2563 st %g0,[$rp_real+20]
2564 st %g0,[$rp_real+24]
2565 st %g0,[$rp_real+28]
2566 st %g0,[$rp_real+32]
2567 st %g0,[$rp_real+32+4]
2568 st %g0,[$rp_real+32+8]
2569 st %g0,[$rp_real+32+12]
2570 st %g0,[$rp_real+32+16]
2571 st %g0,[$rp_real+32+20]
2572 st %g0,[$rp_real+32+24]
2573 st %g0,[$rp_real+32+28]
2574 st %g0,[$rp_real+64]
2575 st %g0,[$rp_real+64+4]
2576 st %g0,[$rp_real+64+8]
2577 st %g0,[$rp_real+64+12]
2578 st %g0,[$rp_real+64+16]
2579 st %g0,[$rp_real+64+20]
2580 st %g0,[$rp_real+64+24]
2581 st %g0,[$rp_real+64+28]
2587 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2588 add %sp,LOCALS64+$Rsqr,$rp
2590 ldx [%sp+LOCALS64+$H],$bi
2591 ldx [%sp+LOCALS64+$in1_z],$a0
2592 ldx [%sp+LOCALS64+$in1_z+8],$a1
2593 ldx [%sp+LOCALS64+$in1_z+16],$a2
2594 ldx [%sp+LOCALS64+$in1_z+24],$a3
2595 add %sp,LOCALS64+$H,$bp
2596 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2597 add %sp,LOCALS64+$res_z,$rp
2599 ldx [%sp+LOCALS64+$H],$a0
2600 ldx [%sp+LOCALS64+$H+8],$a1
2601 ldx [%sp+LOCALS64+$H+16],$a2
2602 ldx [%sp+LOCALS64+$H+24],$a3
2603 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2604 add %sp,LOCALS64+$Hsqr,$rp
2606 ldx [%sp+LOCALS64+$res_z],$bi
2607 ldx [%sp+LOCALS64+$in2_z],$a0
2608 ldx [%sp+LOCALS64+$in2_z+8],$a1
2609 ldx [%sp+LOCALS64+$in2_z+16],$a2
2610 ldx [%sp+LOCALS64+$in2_z+24],$a3
2611 add %sp,LOCALS64+$res_z,$bp
2612 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, res_z, in2_z);
2613 add %sp,LOCALS64+$res_z,$rp
2615 ldx [%sp+LOCALS64+$H],$bi
2616 ldx [%sp+LOCALS64+$Hsqr],$a0
2617 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2618 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2619 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2620 add %sp,LOCALS64+$H,$bp
2621 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2622 add %sp,LOCALS64+$Hcub,$rp
2624 ldx [%sp+LOCALS64+$U1],$bi
2625 ldx [%sp+LOCALS64+$Hsqr],$a0
2626 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2627 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2628 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2629 add %sp,LOCALS64+$U1,$bp
2630 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, U1, Hsqr);
2631 add %sp,LOCALS64+$U2,$rp
2633 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2634 add %sp,LOCALS64+$Hsqr,$rp
2636 add %sp,LOCALS64+$Rsqr,$bp
2637 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2638 add %sp,LOCALS64+$res_x,$rp
2640 add %sp,LOCALS64+$Hcub,$bp
2641 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2642 add %sp,LOCALS64+$res_x,$rp
2644 ldx [%sp+LOCALS64+$S1],$bi ! forward load
2645 ldx [%sp+LOCALS64+$Hcub],$a0
2646 ldx [%sp+LOCALS64+$Hcub+8],$a1
2647 ldx [%sp+LOCALS64+$Hcub+16],$a2
2648 ldx [%sp+LOCALS64+$Hcub+24],$a3
2650 add %sp,LOCALS64+$U2,$bp
2651 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
2652 add %sp,LOCALS64+$res_y,$rp
2654 add %sp,LOCALS64+$S1,$bp
2655 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S1, Hcub);
2656 add %sp,LOCALS64+$S2,$rp
2658 ldx [%sp+LOCALS64+$R],$bi
2659 ldx [%sp+LOCALS64+$res_y],$a0
2660 ldx [%sp+LOCALS64+$res_y+8],$a1
2661 ldx [%sp+LOCALS64+$res_y+16],$a2
2662 ldx [%sp+LOCALS64+$res_y+24],$a3
2663 add %sp,LOCALS64+$R,$bp
2664 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
2665 add %sp,LOCALS64+$res_y,$rp
2667 add %sp,LOCALS64+$S2,$bp
2668 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
2669 add %sp,LOCALS64+$res_y,$rp
2671 ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
2672 ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
2674 for($i=0;$i<96;$i+=16) { # conditional moves
2676 ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
2677 ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
2678 ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
2679 ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
2680 ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
2681 ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
2682 movrz $t1,$acc2,$acc0
2683 movrz $t1,$acc3,$acc1
2684 movrz $t2,$acc4,$acc0
2685 movrz $t2,$acc5,$acc1
2688 st $acc0,[$rp_real+$i]
2689 st $acc2,[$rp_real+$i+4]
2690 st $acc1,[$rp_real+$i+8]
2691 st $acc3,[$rp_real+$i+12]
2698 .size ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#                                    const P256_POINT_AFFINE *in2);
{
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;
# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty and !in2infty.
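#
# As a rough sketch of what the map() above yields (byte offsets from
# %sp+LOCALS64): res_x=0, res_y=32, res_z=64, in1_x=96, in1_y=128,
# in1_z=160, in2_x=192, in2_y=224, U2=256, S2=288, H=320, R=352,
# Hsqr=384, Hcub=416, Rsqr=448, i.e. 15*32=480 bytes of temporaries,
# while the two flags live at the top of the frame, at
# [%fp+STACK_BIAS-16] (!in1infty) and [%fp+STACK_BIAS-8] (!in2infty).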

$code.=<<___;
.align	32
ecp_nistz256_point_add_affine_vis3:
	save	%sp,-STACK64_FRAME-32*15-32,%sp

	mov	$rp,$rp_real
	mov	-1,$minus1
	mov	-2,$poly3
	sllx	$minus1,32,$poly1		! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3			! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
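	! (each coordinate is read as eight 32-bit words and packed into
	! four 64-bit limbs before being stashed at %sp+LOCALS64+...)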
	ld	[$bp],$a0			! in2_x
	ld	[$bp+32],$acc0			! in2_y
	ld	[$bp+32+16],$acc2
	ld	[$bp+32+24],$acc3
	stx	$a0,[%sp+LOCALS64+$in2_x]
	stx	$a1,[%sp+LOCALS64+$in2_x+8]
	stx	$a2,[%sp+LOCALS64+$in2_x+16]
	stx	$a3,[%sp+LOCALS64+$in2_x+24]
	stx	$acc0,[%sp+LOCALS64+$in2_y]
	stx	$acc1,[%sp+LOCALS64+$in2_y+8]
	stx	$acc2,[%sp+LOCALS64+$in2_y+16]
	stx	$acc3,[%sp+LOCALS64+$in2_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0			! !in2infty
	stx	$a0,[%fp+STACK_BIAS-8]
	ld	[$ap],$a0			! in1_x
	ld	[$ap+32],$acc0			! in1_y
	ld	[$ap+32+16],$acc2
	ld	[$ap+32+24],$acc3
	stx	$a0,[%sp+LOCALS64+$in1_x]
	stx	$a1,[%sp+LOCALS64+$in1_x+8]
	stx	$a2,[%sp+LOCALS64+$in1_x+16]
	stx	$a3,[%sp+LOCALS64+$in1_x+24]
	stx	$acc0,[%sp+LOCALS64+$in1_y]
	stx	$acc1,[%sp+LOCALS64+$in1_y+8]
	stx	$acc2,[%sp+LOCALS64+$in1_y+16]
	stx	$acc3,[%sp+LOCALS64+$in1_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0			! !in1infty
	stx	$a0,[%fp+STACK_BIAS-16]
	ld	[$ap+64],$a0			! in1_z
	stx	$a0,[%sp+LOCALS64+$in1_z]
	stx	$a1,[%sp+LOCALS64+$in1_z+8]
	stx	$a2,[%sp+LOCALS64+$in1_z+16]
	stx	$a3,[%sp+LOCALS64+$in1_z+24]

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS64+$Z1sqr,$rp

	ldx	[%sp+LOCALS64+$in2_x],$bi
	add	%sp,LOCALS64+$in2_x,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS64+$U2,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3

	add	%sp,LOCALS64+$in1_x,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS64+$H,$rp

	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$a0		! forward load
	ldx	[%sp+LOCALS64+$H+8],$a1
	ldx	[%sp+LOCALS64+$H+16],$a2
	ldx	[%sp+LOCALS64+$H+24],$a3

	add	%sp,LOCALS64+$in1_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS64+$R,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS64+$Hsqr,$rp

	ldx	[%sp+LOCALS64+$R],$a0
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS64+$Rsqr,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS64+$Hcub,$rp

	ldx	[%sp+LOCALS64+$Hsqr],$bi
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3
	add	%sp,LOCALS64+$Hsqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS64+$U2,$rp

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS64+$Hsqr,$rp

	add	%sp,LOCALS64+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$Hcub],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp

	ldx	[%fp+STACK_BIAS-16],$t1		! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2		! !in2infty
1:	call	.+8
	add	%o7,.Lone_mont_vis3-1b,$bp
___
for($i=0;$i<64;$i+=16) {			# conditional moves
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0	! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2	! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4	! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
for(;$i<96;$i+=16) {
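# Bytes 64..95 of the result are the z coordinate. The affine input in2
# carries no stored z (its implicit value is 1), so when in1 turns out to
# be the point at infinity the constant at .Lone_mont_vis3, i.e. 1 in
# Montgomery representation, stands in as "in2" for these limbs.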
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0	! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[$bp+$i-64],$acc2		! "in2"
	ldx	[$bp+$i-64+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4	! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}

$code.=<<___;
	ret
	restore
.size	ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
.align	64
.Lone_mont_vis3:	! 1 in Montgomery representation, i.e. 2^256 mod P
.long	0x00000000,0x00000001, 0xffffffff,0x00000000
.long	0xffffffff,0xffffffff, 0x00000000,0xfffffffe
___
}
########################################################################
# The purpose of these subroutines is to explicitly encode VIS instructions,
# so that one can compile the module without having to specify VIS
# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
# The idea is to reserve the option of producing a "universal" binary and
# letting the programmer detect at run-time whether the current CPU is
# VIS-capable.
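#
# As a worked example of the encoding performed below (instruction and
# register choice are arbitrary): for "addxc %o1,%o2,%o3" the register
# bias maps %o1/%o2/%o3 to rs1=9, rs2=10, rd=11, opf is 0x011, and
#	0x81b00000 | 11<<25 | 9<<14 | 0x011<<5 | 10 = 0x97b2422a
# so the line is rewritten as ".word 0x97b2422a !addxc %o1,%o2,%o3".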
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)