ec/asm/ecp_nistz256-sparcv9.pl: get corner logic right.
1 #!/usr/bin/env perl
2
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # ECP_NISTZ256 module for SPARCv9.
11 #
12 # February 2015.
13 #
14 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
15 # http://eprint.iacr.org/2013/816. In the process of adaptation
16 # original .c module was made 32-bit savvy in order to make this
17 # implementation possible.
18 #
19 #                       with/without -DECP_NISTZ256_ASM
20 # UltraSPARC III        +12-18%
21 # SPARC T4              +99-550% (+66-150% on 32-bit Solaris)
22 #
23 # Ranges denote minimum and maximum improvement coefficients depending
24 # on benchmark. Lower coefficients are for ECDSA sign, server-side
25 # operation. Keep in mind that +200% means 3x improvement.
26
27 $output = pop;
28 open STDOUT,">$output" or die "can't open $output: $!";
29
30 $code.=<<___;
31 #include "sparc_arch.h"
32
33 #define LOCALS  (STACK_BIAS+STACK_FRAME)
34 #ifdef  __arch64__
35 .register       %g2,#scratch
36 .register       %g3,#scratch
37 # define STACK64_FRAME  STACK_FRAME
38 # define LOCALS64       LOCALS
39 #else
40 # define STACK64_FRAME  (2047+192)
41 # define LOCALS64       STACK64_FRAME
42 #endif
43
44 .section        ".text",#alloc,#execinstr
45 ___
46 ########################################################################
47 # Convert ecp_nistz256_table.c to the layout expected by ecp_nistz256_gather_w7
48 #
49 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
50 open TABLE,"<ecp_nistz256_table.c"              or
51 open TABLE,"<${dir}../ecp_nistz256_table.c"     or
52 die "failed to open ecp_nistz256_table.c:",$!;
53
54 use integer;
55
56 foreach(<TABLE>) {
57         s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
58 }
59 close TABLE;
60
61 # See ecp_nistz256_table.c for explanation for why it's 64*16*37.
62 # 64*16*37-1 is because $#arr returns the last valid index of @arr, not
63 # the number of elements.
64 die "insane number of elements" if ($#arr != 64*16*37-1);
65
66 $code.=<<___;
67 .globl  ecp_nistz256_precomputed
68 .align  4096
69 ecp_nistz256_precomputed:
70 ___
71 ########################################################################
72 # this conversion scatters each P256_POINT_AFFINE as individual bytes at
73 # a 64-byte interval, similar to
74 #       1111222233334444
75 #       1234123412341234
76 for(1..37) {
77         @tbl = splice(@arr,0,64*16);
78         for($i=0;$i<64;$i++) {
79                 undef @line;
80                 for($j=0;$j<64;$j++) {
81                         push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
82                 }
83                 $code.=".byte\t";
84                 $code.=join(',',map { sprintf "0x%02x",$_} @line);
85                 $code.="\n";
86         }
87 }
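
# In other words, byte $i of 64-byte entry $j within a block ends up at
# offset 64*$i+$j, so ecp_nistz256_gather_w7 below can read an entry back
# one byte per 64-byte stride. A minimal sketch of that mapping
# (illustration only, not used by the generator; the helper name
# entry_byte is hypothetical):
sub entry_byte {
	my ($tbl,$j,$i) = @_;	# $tbl: ref to one 64*16-word block,
				# $j: entry index (0..63), $i: byte index (0..63)
	return ($tbl->[$j*16+$i/4]>>(($i%4)*8))&0xff;
}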
88
89 {{{
90 my ($rp,$ap,$bp)=map("%i$_",(0..2));
91 my @acc=map("%l$_",(0..7));
92 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
93 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
94 my ($rp_real,$ap_real)=("%g2","%g3");
95
96 $code.=<<___;
97 .size   ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
98 .align  64
99 .LRR:   ! 2^512 mod P precomputed for NIST P256 polynomial
100 .long   0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
101 .long   0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
102 .Lone:
103 .long   1,0,0,0,0,0,0,0
104 .asciz  "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
105
106 ! void  ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
107 .globl  ecp_nistz256_to_mont
108 .align  64
109 ecp_nistz256_to_mont:
110         save    %sp,-STACK_FRAME,%sp
111         nop
112 1:      call    .+8
113         add     %o7,.LRR-1b,$bp
114         call    __ecp_nistz256_mul_mont
115         nop
116         ret
117         restore
118 .size   ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
119
120 ! void  ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
121 .globl  ecp_nistz256_from_mont
122 .align  32
123 ecp_nistz256_from_mont:
124         save    %sp,-STACK_FRAME,%sp
125         nop
126 1:      call    .+8
127         add     %o7,.Lone-1b,$bp
128         call    __ecp_nistz256_mul_mont
129         nop
130         ret
131         restore
132 .size   ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
133
134 ! void  ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
135 !                                             const BN_ULONG %i2[8]);
136 .globl  ecp_nistz256_mul_mont
137 .align  32
138 ecp_nistz256_mul_mont:
139         save    %sp,-STACK_FRAME,%sp
140         nop
141         call    __ecp_nistz256_mul_mont
142         nop
143         ret
144         restore
145 .size   ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
146
147 ! void  ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
148 .globl  ecp_nistz256_sqr_mont
149 .align  32
150 ecp_nistz256_sqr_mont:
151         save    %sp,-STACK_FRAME,%sp
152         mov     $ap,$bp
153         call    __ecp_nistz256_mul_mont
154         nop
155         ret
156         restore
157 .size   ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
158 ___
159
160 ########################################################################
161 # A special thing to keep in mind is that $t0-$t7 hold 64-bit values,
162 # while all other registers are meant to hold 32-bit ones. "Meant to"
163 # means that additions to @acc[0-7] do "contaminate" the upper bits, but
164 # those are cleared before they can affect the outcome (follow the 'and'
165 # with $mask). Also keep in mind that addition with carry is addition
166 # with 32-bit carry, even though the CPU is 64-bit. [Addition with
167 # 64-bit carry was introduced in T3; see below for the VIS3 code paths.]
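#
# A one-line illustration of the above (never executed by the generator):
# adding two 32-bit limbs in a 64-bit register can set bit 32, which the
# subsequent 'and' with $mask discards:
if (0) { die unless ((0xffffffff + 0xfffffffe) & 0xffffffff) == 0xfffffffd; }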
168
169 $code.=<<___;
170 .align  32
171 __ecp_nistz256_mul_mont:
172         ld      [$bp+0],$bi             ! b[0]
173         mov     -1,$mask
174         ld      [$ap+0],$a0
175         srl     $mask,0,$mask           ! 0xffffffff
176         ld      [$ap+4],$t1
177         ld      [$ap+8],$t2
178         ld      [$ap+12],$t3
179         ld      [$ap+16],$t4
180         ld      [$ap+20],$t5
181         ld      [$ap+24],$t6
182         ld      [$ap+28],$t7
183         mulx    $a0,$bi,$t0             ! a[0-7]*b[0], 64-bit results
184         mulx    $t1,$bi,$t1
185         mulx    $t2,$bi,$t2
186         mulx    $t3,$bi,$t3
187         mulx    $t4,$bi,$t4
188         mulx    $t5,$bi,$t5
189         mulx    $t6,$bi,$t6
190         mulx    $t7,$bi,$t7
191         srlx    $t0,32,@acc[1]          ! extract high parts
192         srlx    $t1,32,@acc[2]
193         srlx    $t2,32,@acc[3]
194         srlx    $t3,32,@acc[4]
195         srlx    $t4,32,@acc[5]
196         srlx    $t5,32,@acc[6]
197         srlx    $t6,32,@acc[7]
198         srlx    $t7,32,@acc[0]          ! "@acc[8]"
199         mov     0,$carry
200 ___
201 for($i=1;$i<8;$i++) {
202 $code.=<<___;
203         addcc   @acc[1],$t1,@acc[1]     ! accumulate high parts
204         ld      [$bp+4*$i],$bi          ! b[$i]
205         ld      [$ap+4],$t1             ! re-load a[1-7]
206         addccc  @acc[2],$t2,@acc[2]
207         addccc  @acc[3],$t3,@acc[3]
208         ld      [$ap+8],$t2
209         ld      [$ap+12],$t3
210         addccc  @acc[4],$t4,@acc[4]
211         addccc  @acc[5],$t5,@acc[5]
212         ld      [$ap+16],$t4
213         ld      [$ap+20],$t5
214         addccc  @acc[6],$t6,@acc[6]
215         addccc  @acc[7],$t7,@acc[7]
216         ld      [$ap+24],$t6
217         ld      [$ap+28],$t7
218         addccc  @acc[0],$carry,@acc[0]  ! "@acc[8]"
219         addc    %g0,%g0,$carry
220 ___
221         # A reduction iteration is normally performed by accumulating the
222         # result of multiplying the modulus by the "magic" digit [and
223         # omitting the least significant word, which is guaranteed to
224         # be 0], but thanks to the special form of the modulus, and the
225         # "magic" digit being equal to the least significant word, it can
226         # be performed with additions and subtractions alone. Indeed:
227         #
228         #        ffff.0001.0000.0000.0000.ffff.ffff.ffff
229         # *                                         abcd
230         # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
231         #
232         # Now, observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can
233         # rewrite the above as:
234         #
235         #   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
236         # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
237         # -      abcd.0000.0000.0000.0000.0000.0000.abcd
238         #
239         # or marking redundant operations:
240         #
241         #   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
242         # + abcd.0000.abcd.0000.0000.abcd.----.----.----
243         # -      abcd.----.----.----.----.----.----.----
244
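	# A minimal self-check of the identity pictured above (illustration
	# only, never executed by the generator; assumes Math::BigInt is
	# available): adding the digit at words 3, 6 and 8 and subtracting it
	# at words 7 and 0 is the same as adding modulus*digit.
	if (0) {
		require Math::BigInt;
		my $p = Math::BigInt->from_hex(
		    "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff");
		my $d = Math::BigInt->new(0xabcd);
		my $sum = ($d<<256) - ($d<<224) + ($d<<192) + ($d<<96) - $d;
		die "reduction identity broken" unless $sum == $p * $d;
	}
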
245 $code.=<<___;
246         ! multiplication-less reduction
247         addcc   @acc[3],$t0,@acc[3]     ! r[3]+=r[0]
248         addccc  @acc[4],%g0,@acc[4]     ! r[4]+=0
249          and    @acc[1],$mask,@acc[1]
250          and    @acc[2],$mask,@acc[2]
251         addccc  @acc[5],%g0,@acc[5]     ! r[5]+=0
252         addccc  @acc[6],$t0,@acc[6]     ! r[6]+=r[0]
253          and    @acc[3],$mask,@acc[3]
254          and    @acc[4],$mask,@acc[4]
255         addccc  @acc[7],%g0,@acc[7]     ! r[7]+=0
256         addccc  @acc[0],$t0,@acc[0]     ! r[8]+=r[0]    "@acc[8]"
257          and    @acc[5],$mask,@acc[5]
258          and    @acc[6],$mask,@acc[6]
259         addc    $carry,%g0,$carry       ! top-most carry
260         subcc   @acc[7],$t0,@acc[7]     ! r[7]-=r[0]
261         subccc  @acc[0],%g0,@acc[0]     ! r[8]-=0       "@acc[8]"
262         subc    $carry,%g0,$carry       ! top-most carry
263          and    @acc[7],$mask,@acc[7]
264          and    @acc[0],$mask,@acc[0]   ! "@acc[8]"
265 ___
266         push(@acc,shift(@acc));         # rotate registers to "omit" acc[0]
267 $code.=<<___;
268         mulx    $a0,$bi,$t0             ! a[0-7]*b[$i], 64-bit results
269         mulx    $t1,$bi,$t1
270         mulx    $t2,$bi,$t2
271         mulx    $t3,$bi,$t3
272         mulx    $t4,$bi,$t4
273         mulx    $t5,$bi,$t5
274         mulx    $t6,$bi,$t6
275         mulx    $t7,$bi,$t7
276         add     @acc[0],$t0,$t0         ! accumulate low parts, can't overflow
277         add     @acc[1],$t1,$t1
278         srlx    $t0,32,@acc[1]          ! extract high parts
279         add     @acc[2],$t2,$t2
280         srlx    $t1,32,@acc[2]
281         add     @acc[3],$t3,$t3
282         srlx    $t2,32,@acc[3]
283         add     @acc[4],$t4,$t4
284         srlx    $t3,32,@acc[4]
285         add     @acc[5],$t5,$t5
286         srlx    $t4,32,@acc[5]
287         add     @acc[6],$t6,$t6
288         srlx    $t5,32,@acc[6]
289         add     @acc[7],$t7,$t7
290         srlx    $t6,32,@acc[7]
291         srlx    $t7,32,@acc[0]          ! "@acc[8]"
292 ___
293 }
294 $code.=<<___;
295         addcc   @acc[1],$t1,@acc[1]     ! accumulate high parts
296         addccc  @acc[2],$t2,@acc[2]
297         addccc  @acc[3],$t3,@acc[3]
298         addccc  @acc[4],$t4,@acc[4]
299         addccc  @acc[5],$t5,@acc[5]
300         addccc  @acc[6],$t6,@acc[6]
301         addccc  @acc[7],$t7,@acc[7]
302         addccc  @acc[0],$carry,@acc[0]  ! "@acc[8]"
303         addc    %g0,%g0,$carry
304
305         addcc   @acc[3],$t0,@acc[3]     ! multiplication-less reduction
306         addccc  @acc[4],%g0,@acc[4]
307         addccc  @acc[5],%g0,@acc[5]
308         addccc  @acc[6],$t0,@acc[6]
309         addccc  @acc[7],%g0,@acc[7]
310         addccc  @acc[0],$t0,@acc[0]     ! "@acc[8]"
311         addc    $carry,%g0,$carry
312         subcc   @acc[7],$t0,@acc[7]
313         subccc  @acc[0],%g0,@acc[0]     ! "@acc[8]"
314         subc    $carry,%g0,$carry       ! top-most carry
315 ___
316         push(@acc,shift(@acc));         # rotate registers to omit acc[0]
317 $code.=<<___;
318         ! The final step is "if result >= mod, subtract mod", but we do
319         ! it the "other way around", namely subtract the modulus from the
320         ! result and, if that borrowed, add the modulus back.
321
322         subcc   @acc[0],-1,@acc[0]      ! subtract modulus
323         subccc  @acc[1],-1,@acc[1]
324         subccc  @acc[2],-1,@acc[2]
325         subccc  @acc[3],0,@acc[3]
326         subccc  @acc[4],0,@acc[4]
327         subccc  @acc[5],0,@acc[5]
328         subccc  @acc[6],1,@acc[6]
329         subccc  @acc[7],-1,@acc[7]
330         subc    $carry,0,$carry         ! broadcast borrow bit
331
332         ! Note that because mod has special form, i.e. consists of
333         ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
334         ! using value of broadcasted borrow and the borrow bit itself.
335         ! To minimize dependency chain we first broadcast and then
336         ! extract the bit by negating (follow $bi).
337
338         addcc   @acc[0],$carry,@acc[0]  ! add modulus or zero
339         addccc  @acc[1],$carry,@acc[1]
340         neg     $carry,$bi
341         st      @acc[0],[$rp]
342         addccc  @acc[2],$carry,@acc[2]
343         st      @acc[1],[$rp+4]
344         addccc  @acc[3],0,@acc[3]
345         st      @acc[2],[$rp+8]
346         addccc  @acc[4],0,@acc[4]
347         st      @acc[3],[$rp+12]
348         addccc  @acc[5],0,@acc[5]
349         st      @acc[4],[$rp+16]
350         addccc  @acc[6],$bi,@acc[6]
351         st      @acc[5],[$rp+20]
352         addc    @acc[7],$carry,@acc[7]
353         st      @acc[6],[$rp+24]
354         retl
355         st      @acc[7],[$rp+28]
356 .size   __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
357
358 ! void  ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
359 !                                        const BN_ULONG %i2[8]);
360 .globl  ecp_nistz256_add
361 .align  32
362 ecp_nistz256_add:
363         save    %sp,-STACK_FRAME,%sp
364         ld      [$ap],@acc[0]
365         ld      [$ap+4],@acc[1]
366         ld      [$ap+8],@acc[2]
367         ld      [$ap+12],@acc[3]
368         ld      [$ap+16],@acc[4]
369         ld      [$ap+20],@acc[5]
370         ld      [$ap+24],@acc[6]
371         call    __ecp_nistz256_add
372         ld      [$ap+28],@acc[7]
373         ret
374         restore
375 .size   ecp_nistz256_add,.-ecp_nistz256_add
376
377 .align  32
378 __ecp_nistz256_add:
379         ld      [$bp+0],$t0             ! b[0]
380         ld      [$bp+4],$t1
381         ld      [$bp+8],$t2
382         ld      [$bp+12],$t3
383         addcc   @acc[0],$t0,@acc[0]
384         ld      [$bp+16],$t4
385         ld      [$bp+20],$t5
386         addccc  @acc[1],$t1,@acc[1]
387         ld      [$bp+24],$t6
388         ld      [$bp+28],$t7
389         addccc  @acc[2],$t2,@acc[2]
390         addccc  @acc[3],$t3,@acc[3]
391         addccc  @acc[4],$t4,@acc[4]
392         addccc  @acc[5],$t5,@acc[5]
393         addccc  @acc[6],$t6,@acc[6]
394         addccc  @acc[7],$t7,@acc[7]
395         subc    %g0,%g0,$carry          ! broadcast carry bit
396
397 .Lreduce_by_sub:
398
399         ! if a+b carries, subtract modulus.
400         !
401         ! Note that because the modulus has a special form, i.e. consists
402         ! of 0xffffffff, 1 and 0s, we can conditionally synthesize it from
403         ! the broadcast carry and the carry bit itself. To minimize the
404         ! dependency chain we first broadcast and then extract the bit by
405         ! negating (follow $bi).
406
407         subcc   @acc[0],$carry,@acc[0]  ! subtract synthesized modulus
408         subccc  @acc[1],$carry,@acc[1]
409         neg     $carry,$bi
410         st      @acc[0],[$rp]
411         subccc  @acc[2],$carry,@acc[2]
412         st      @acc[1],[$rp+4]
413         subccc  @acc[3],0,@acc[3]
414         st      @acc[2],[$rp+8]
415         subccc  @acc[4],0,@acc[4]
416         st      @acc[3],[$rp+12]
417         subccc  @acc[5],0,@acc[5]
418         st      @acc[4],[$rp+16]
419         subccc  @acc[6],$bi,@acc[6]
420         st      @acc[5],[$rp+20]
421         subc    @acc[7],$carry,@acc[7]
422         st      @acc[6],[$rp+24]
423         retl
424         st      @acc[7],[$rp+28]
425 .size   __ecp_nistz256_add,.-__ecp_nistz256_add
426
427 ! void  ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
428 .globl  ecp_nistz256_mul_by_2
429 .align  32
430 ecp_nistz256_mul_by_2:
431         save    %sp,-STACK_FRAME,%sp
432         ld      [$ap],@acc[0]
433         ld      [$ap+4],@acc[1]
434         ld      [$ap+8],@acc[2]
435         ld      [$ap+12],@acc[3]
436         ld      [$ap+16],@acc[4]
437         ld      [$ap+20],@acc[5]
438         ld      [$ap+24],@acc[6]
439         call    __ecp_nistz256_mul_by_2
440         ld      [$ap+28],@acc[7]
441         ret
442         restore
443 .size   ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
444
445 .align  32
446 __ecp_nistz256_mul_by_2:
447         addcc   @acc[0],@acc[0],@acc[0] ! a+a=2*a
448         addccc  @acc[1],@acc[1],@acc[1]
449         addccc  @acc[2],@acc[2],@acc[2]
450         addccc  @acc[3],@acc[3],@acc[3]
451         addccc  @acc[4],@acc[4],@acc[4]
452         addccc  @acc[5],@acc[5],@acc[5]
453         addccc  @acc[6],@acc[6],@acc[6]
454         addccc  @acc[7],@acc[7],@acc[7]
455         b       .Lreduce_by_sub
456         subc    %g0,%g0,$carry          ! broadcast carry bit
457 .size   __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
458
459 ! void  ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
460 .globl  ecp_nistz256_mul_by_3
461 .align  32
462 ecp_nistz256_mul_by_3:
463         save    %sp,-STACK_FRAME,%sp
464         ld      [$ap],@acc[0]
465         ld      [$ap+4],@acc[1]
466         ld      [$ap+8],@acc[2]
467         ld      [$ap+12],@acc[3]
468         ld      [$ap+16],@acc[4]
469         ld      [$ap+20],@acc[5]
470         ld      [$ap+24],@acc[6]
471         call    __ecp_nistz256_mul_by_3
472         ld      [$ap+28],@acc[7]
473         ret
474         restore
475 .size   ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
476
477 .align  32
478 __ecp_nistz256_mul_by_3:
479         addcc   @acc[0],@acc[0],$t0     ! a+a=2*a
480         addccc  @acc[1],@acc[1],$t1
481         addccc  @acc[2],@acc[2],$t2
482         addccc  @acc[3],@acc[3],$t3
483         addccc  @acc[4],@acc[4],$t4
484         addccc  @acc[5],@acc[5],$t5
485         addccc  @acc[6],@acc[6],$t6
486         addccc  @acc[7],@acc[7],$t7
487         subc    %g0,%g0,$carry          ! broadcast carry bit
488
489         subcc   $t0,$carry,$t0          ! .Lreduce_by_sub but without stores
490         neg     $carry,$bi
491         subccc  $t1,$carry,$t1
492         subccc  $t2,$carry,$t2
493         subccc  $t3,0,$t3
494         subccc  $t4,0,$t4
495         subccc  $t5,0,$t5
496         subccc  $t6,$bi,$t6
497         subc    $t7,$carry,$t7
498
499         addcc   $t0,@acc[0],@acc[0]     ! 2*a+a=3*a
500         addccc  $t1,@acc[1],@acc[1]
501         addccc  $t2,@acc[2],@acc[2]
502         addccc  $t3,@acc[3],@acc[3]
503         addccc  $t4,@acc[4],@acc[4]
504         addccc  $t5,@acc[5],@acc[5]
505         addccc  $t6,@acc[6],@acc[6]
506         addccc  $t7,@acc[7],@acc[7]
507         b       .Lreduce_by_sub
508         subc    %g0,%g0,$carry          ! broadcast carry bit
509 .size   __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
510
511 ! void  ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
512 !                                        const BN_ULONG %i2[8]);
513 .globl  ecp_nistz256_sub
514 .align  32
515 ecp_nistz256_sub:
516         save    %sp,-STACK_FRAME,%sp
517         ld      [$ap],@acc[0]
518         ld      [$ap+4],@acc[1]
519         ld      [$ap+8],@acc[2]
520         ld      [$ap+12],@acc[3]
521         ld      [$ap+16],@acc[4]
522         ld      [$ap+20],@acc[5]
523         ld      [$ap+24],@acc[6]
524         call    __ecp_nistz256_sub_from
525         ld      [$ap+28],@acc[7]
526         ret
527         restore
528 .size   ecp_nistz256_sub,.-ecp_nistz256_sub
529
530 ! void  ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
531 .globl  ecp_nistz256_neg
532 .align  32
533 ecp_nistz256_neg:
534         save    %sp,-STACK_FRAME,%sp
535         mov     $ap,$bp
536         mov     0,@acc[0]
537         mov     0,@acc[1]
538         mov     0,@acc[2]
539         mov     0,@acc[3]
540         mov     0,@acc[4]
541         mov     0,@acc[5]
542         mov     0,@acc[6]
543         call    __ecp_nistz256_sub_from
544         mov     0,@acc[7]
545         ret
546         restore
547 .size   ecp_nistz256_neg,.-ecp_nistz256_neg
548
549 .align  32
550 __ecp_nistz256_sub_from:
551         ld      [$bp+0],$t0             ! b[0]
552         ld      [$bp+4],$t1
553         ld      [$bp+8],$t2
554         ld      [$bp+12],$t3
555         subcc   @acc[0],$t0,@acc[0]
556         ld      [$bp+16],$t4
557         ld      [$bp+20],$t5
558         subccc  @acc[1],$t1,@acc[1]
559         subccc  @acc[2],$t2,@acc[2]
560         ld      [$bp+24],$t6
561         ld      [$bp+28],$t7
562         subccc  @acc[3],$t3,@acc[3]
563         subccc  @acc[4],$t4,@acc[4]
564         subccc  @acc[5],$t5,@acc[5]
565         subccc  @acc[6],$t6,@acc[6]
566         subccc  @acc[7],$t7,@acc[7]
567         subc    %g0,%g0,$carry          ! broadcast borrow bit
568
569 .Lreduce_by_add:
570
571         ! if a-b borrows, add modulus.
572         !
573         ! Note that because mod has special form, i.e. consists of
574         ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
575         ! using value of broadcasted borrow and the borrow bit itself.
576         ! To minimize dependency chain we first broadcast and then
577         ! extract the bit by negating (follow $bi).
578
579         addcc   @acc[0],$carry,@acc[0]  ! add synthesized modulus
580         addccc  @acc[1],$carry,@acc[1]
581         neg     $carry,$bi
582         st      @acc[0],[$rp]
583         addccc  @acc[2],$carry,@acc[2]
584         st      @acc[1],[$rp+4]
585         addccc  @acc[3],0,@acc[3]
586         st      @acc[2],[$rp+8]
587         addccc  @acc[4],0,@acc[4]
588         st      @acc[3],[$rp+12]
589         addccc  @acc[5],0,@acc[5]
590         st      @acc[4],[$rp+16]
591         addccc  @acc[6],$bi,@acc[6]
592         st      @acc[5],[$rp+20]
593         addc    @acc[7],$carry,@acc[7]
594         st      @acc[6],[$rp+24]
595         retl
596         st      @acc[7],[$rp+28]
597 .size   __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
598
599 .align  32
600 __ecp_nistz256_sub_morf:
601         ld      [$bp+0],$t0             ! b[0]
602         ld      [$bp+4],$t1
603         ld      [$bp+8],$t2
604         ld      [$bp+12],$t3
605         subcc   $t0,@acc[0],@acc[0]
606         ld      [$bp+16],$t4
607         ld      [$bp+20],$t5
608         subccc  $t1,@acc[1],@acc[1]
609         subccc  $t2,@acc[2],@acc[2]
610         ld      [$bp+24],$t6
611         ld      [$bp+28],$t7
612         subccc  $t3,@acc[3],@acc[3]
613         subccc  $t4,@acc[4],@acc[4]
614         subccc  $t5,@acc[5],@acc[5]
615         subccc  $t6,@acc[6],@acc[6]
616         subccc  $t7,@acc[7],@acc[7]
617         b       .Lreduce_by_add
618         subc    %g0,%g0,$carry          ! broadcast borrow bit
619 .size   __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
620
621 ! void  ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
622 .globl  ecp_nistz256_div_by_2
623 .align  32
624 ecp_nistz256_div_by_2:
625         save    %sp,-STACK_FRAME,%sp
626         ld      [$ap],@acc[0]
627         ld      [$ap+4],@acc[1]
628         ld      [$ap+8],@acc[2]
629         ld      [$ap+12],@acc[3]
630         ld      [$ap+16],@acc[4]
631         ld      [$ap+20],@acc[5]
632         ld      [$ap+24],@acc[6]
633         call    __ecp_nistz256_div_by_2
634         ld      [$ap+28],@acc[7]
635         ret
636         restore
637 .size   ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
638
639 .align  32
640 __ecp_nistz256_div_by_2:
641         ! ret = (a is odd ? a+mod : a) >> 1
642
643         and     @acc[0],1,$bi
644         neg     $bi,$carry
645         addcc   @acc[0],$carry,@acc[0]
646         addccc  @acc[1],$carry,@acc[1]
647         addccc  @acc[2],$carry,@acc[2]
648         addccc  @acc[3],0,@acc[3]
649         addccc  @acc[4],0,@acc[4]
650         addccc  @acc[5],0,@acc[5]
651         addccc  @acc[6],$bi,@acc[6]
652         addccc  @acc[7],$carry,@acc[7]
653         addc    %g0,%g0,$carry
654
655         ! ret >>= 1
656
657         srl     @acc[0],1,@acc[0]
658         sll     @acc[1],31,$t0
659         srl     @acc[1],1,@acc[1]
660         or      @acc[0],$t0,@acc[0]
661         sll     @acc[2],31,$t1
662         srl     @acc[2],1,@acc[2]
663         or      @acc[1],$t1,@acc[1]
664         sll     @acc[3],31,$t2
665         st      @acc[0],[$rp]
666         srl     @acc[3],1,@acc[3]
667         or      @acc[2],$t2,@acc[2]
668         sll     @acc[4],31,$t3
669         st      @acc[1],[$rp+4]
670         srl     @acc[4],1,@acc[4]
671         or      @acc[3],$t3,@acc[3]
672         sll     @acc[5],31,$t4
673         st      @acc[2],[$rp+8]
674         srl     @acc[5],1,@acc[5]
675         or      @acc[4],$t4,@acc[4]
676         sll     @acc[6],31,$t5
677         st      @acc[3],[$rp+12]
678         srl     @acc[6],1,@acc[6]
679         or      @acc[5],$t5,@acc[5]
680         sll     @acc[7],31,$t6
681         st      @acc[4],[$rp+16]
682         srl     @acc[7],1,@acc[7]
683         or      @acc[6],$t6,@acc[6]
684         sll     $carry,31,$t7
685         st      @acc[5],[$rp+20]
686         or      @acc[7],$t7,@acc[7]
687         st      @acc[6],[$rp+24]
688         retl
689         st      @acc[7],[$rp+28]
690 .size   __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
691 ___
692
693 ########################################################################
694 # The following subroutines are "literal" implementations of those
695 # found in ecp_nistz256.c
696 #
697 ########################################################################
698 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
699 #
700 {
701 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
702 # above map() describes stack layout with 4 temporary
703 # 256-bit vectors on top.
704
705 $code.=<<___;
706 #ifdef __PIC__
707 SPARC_PIC_THUNK(%g1)
708 #endif
709
710 .globl  ecp_nistz256_point_double
711 .align  32
712 ecp_nistz256_point_double:
713         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
714         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
715         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
716         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
717         be      ecp_nistz256_point_double_vis3
718         nop
719
720         save    %sp,-STACK_FRAME-32*4,%sp
721
722         mov     $rp,$rp_real
723         mov     $ap,$ap_real
724
725 .Lpoint_double_shortcut:
726         ld      [$ap+32],@acc[0]
727         ld      [$ap+32+4],@acc[1]
728         ld      [$ap+32+8],@acc[2]
729         ld      [$ap+32+12],@acc[3]
730         ld      [$ap+32+16],@acc[4]
731         ld      [$ap+32+20],@acc[5]
732         ld      [$ap+32+24],@acc[6]
733         ld      [$ap+32+28],@acc[7]
734         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
735         add     %sp,LOCALS+$S,$rp
736
737         add     $ap_real,64,$bp
738         add     $ap_real,64,$ap
739         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
740         add     %sp,LOCALS+$Zsqr,$rp
741
742         add     $ap_real,0,$bp
743         call    __ecp_nistz256_add      ! p256_add(M, Zsqr, in_x);
744         add     %sp,LOCALS+$M,$rp
745
746         add     %sp,LOCALS+$S,$bp
747         add     %sp,LOCALS+$S,$ap
748         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
749         add     %sp,LOCALS+$S,$rp
750
751         ld      [$ap_real],@acc[0]
752         add     %sp,LOCALS+$Zsqr,$bp
753         ld      [$ap_real+4],@acc[1]
754         ld      [$ap_real+8],@acc[2]
755         ld      [$ap_real+12],@acc[3]
756         ld      [$ap_real+16],@acc[4]
757         ld      [$ap_real+20],@acc[5]
758         ld      [$ap_real+24],@acc[6]
759         ld      [$ap_real+28],@acc[7]
760         call    __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
761         add     %sp,LOCALS+$Zsqr,$rp
762
763         add     $ap_real,32,$bp
764         add     $ap_real,64,$ap
765         call    __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
766         add     %sp,LOCALS+$tmp0,$rp
767
768         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
769         add     $rp_real,64,$rp
770
771         add     %sp,LOCALS+$Zsqr,$bp
772         add     %sp,LOCALS+$M,$ap
773         call    __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
774         add     %sp,LOCALS+$M,$rp
775
776         call    __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
777         add     %sp,LOCALS+$M,$rp
778
779         add     %sp,LOCALS+$S,$bp
780         add     %sp,LOCALS+$S,$ap
781         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
782         add     %sp,LOCALS+$tmp0,$rp
783
784         call    __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
785         add     $rp_real,32,$rp
786
787         add     $ap_real,0,$bp
788         add     %sp,LOCALS+$S,$ap
789         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
790         add     %sp,LOCALS+$S,$rp
791
792         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
793         add     %sp,LOCALS+$tmp0,$rp
794
795         add     %sp,LOCALS+$M,$bp
796         add     %sp,LOCALS+$M,$ap
797         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
798         add     $rp_real,0,$rp
799
800         add     %sp,LOCALS+$tmp0,$bp
801         call    __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
802         add     $rp_real,0,$rp
803
804         add     %sp,LOCALS+$S,$bp
805         call    __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
806         add     %sp,LOCALS+$S,$rp
807
808         add     %sp,LOCALS+$M,$bp
809         add     %sp,LOCALS+$S,$ap
810         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
811         add     %sp,LOCALS+$S,$rp
812
813         add     $rp_real,32,$bp
814         call    __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
815         add     $rp_real,32,$rp
816
817         ret
818         restore
819 .size   ecp_nistz256_point_double,.-ecp_nistz256_point_double
820 ___
821 }
822
823 ########################################################################
824 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
825 #                             const P256_POINT *in2);
826 {
827 my ($res_x,$res_y,$res_z,
828     $H,$Hsqr,$R,$Rsqr,$Hcub,
829     $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
830 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
831
832 # above map() describes stack layout with 12 temporary
833 # 256-bit vectors on top. Then we reserve some space for
834 # !in1infty, !in2infty, result of check for zero and return pointer.
835
836 my $bp_real=$rp_real;
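
# For orientation, the scratch slots reserved just below the frame pointer
# and used by the code that follows (offsets relative to %fp+STACK_BIAS):
#   -8  : saved result pointer $rp (stx/ldx)
#   -12 : !in2infty flag
#   -16 : !in1infty flag
#   -20 : or-ed limbs of R, i.e. the is_equal(S1,S2) check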
837
838 $code.=<<___;
839 .globl  ecp_nistz256_point_add
840 .align  32
841 ecp_nistz256_point_add:
842         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
843         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
844         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
845         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
846         be      ecp_nistz256_point_add_vis3
847         nop
848
849         save    %sp,-STACK_FRAME-32*12-32,%sp
850
851         stx     $rp,[%fp+STACK_BIAS-8]  ! off-load $rp
852         mov     $ap,$ap_real
853         mov     $bp,$bp_real
854
855         ld      [$bp],@acc[0]           ! in2_x
856         ld      [$bp+4],@acc[1]
857         ld      [$bp+8],@acc[2]
858         ld      [$bp+12],@acc[3]
859         ld      [$bp+16],@acc[4]
860         ld      [$bp+20],@acc[5]
861         ld      [$bp+24],@acc[6]
862         ld      [$bp+28],@acc[7]
863         ld      [$bp+32],$t0            ! in2_y
864         ld      [$bp+32+4],$t1
865         ld      [$bp+32+8],$t2
866         ld      [$bp+32+12],$t3
867         ld      [$bp+32+16],$t4
868         ld      [$bp+32+20],$t5
869         ld      [$bp+32+24],$t6
870         ld      [$bp+32+28],$t7
871         or      @acc[1],@acc[0],@acc[0]
872         or      @acc[3],@acc[2],@acc[2]
873         or      @acc[5],@acc[4],@acc[4]
874         or      @acc[7],@acc[6],@acc[6]
875         or      @acc[2],@acc[0],@acc[0]
876         or      @acc[6],@acc[4],@acc[4]
877         or      @acc[4],@acc[0],@acc[0]
878         or      $t1,$t0,$t0
879         or      $t3,$t2,$t2
880         or      $t5,$t4,$t4
881         or      $t7,$t6,$t6
882         or      $t2,$t0,$t0
883         or      $t6,$t4,$t4
884         or      $t4,$t0,$t0
885         or      @acc[0],$t0,$t0         ! !in2infty
886         movrnz  $t0,-1,$t0
887         st      $t0,[%fp+STACK_BIAS-12]
888
889         ld      [$ap],@acc[0]           ! in1_x
890         ld      [$ap+4],@acc[1]
891         ld      [$ap+8],@acc[2]
892         ld      [$ap+12],@acc[3]
893         ld      [$ap+16],@acc[4]
894         ld      [$ap+20],@acc[5]
895         ld      [$ap+24],@acc[6]
896         ld      [$ap+28],@acc[7]
897         ld      [$ap+32],$t0            ! in1_y
898         ld      [$ap+32+4],$t1
899         ld      [$ap+32+8],$t2
900         ld      [$ap+32+12],$t3
901         ld      [$ap+32+16],$t4
902         ld      [$ap+32+20],$t5
903         ld      [$ap+32+24],$t6
904         ld      [$ap+32+28],$t7
905         or      @acc[1],@acc[0],@acc[0]
906         or      @acc[3],@acc[2],@acc[2]
907         or      @acc[5],@acc[4],@acc[4]
908         or      @acc[7],@acc[6],@acc[6]
909         or      @acc[2],@acc[0],@acc[0]
910         or      @acc[6],@acc[4],@acc[4]
911         or      @acc[4],@acc[0],@acc[0]
912         or      $t1,$t0,$t0
913         or      $t3,$t2,$t2
914         or      $t5,$t4,$t4
915         or      $t7,$t6,$t6
916         or      $t2,$t0,$t0
917         or      $t6,$t4,$t4
918         or      $t4,$t0,$t0
919         or      @acc[0],$t0,$t0         ! !in1infty
920         movrnz  $t0,-1,$t0
921         st      $t0,[%fp+STACK_BIAS-16]
922
923         add     $bp_real,64,$bp
924         add     $bp_real,64,$ap
925         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
926         add     %sp,LOCALS+$Z2sqr,$rp
927
928         add     $ap_real,64,$bp
929         add     $ap_real,64,$ap
930         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
931         add     %sp,LOCALS+$Z1sqr,$rp
932
933         add     $bp_real,64,$bp
934         add     %sp,LOCALS+$Z2sqr,$ap
935         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
936         add     %sp,LOCALS+$S1,$rp
937
938         add     $ap_real,64,$bp
939         add     %sp,LOCALS+$Z1sqr,$ap
940         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
941         add     %sp,LOCALS+$S2,$rp
942
943         add     $ap_real,32,$bp
944         add     %sp,LOCALS+$S1,$ap
945         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
946         add     %sp,LOCALS+$S1,$rp
947
948         add     $bp_real,32,$bp
949         add     %sp,LOCALS+$S2,$ap
950         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
951         add     %sp,LOCALS+$S2,$rp
952
953         add     %sp,LOCALS+$S1,$bp
954         call    __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
955         add     %sp,LOCALS+$R,$rp
956
957         or      @acc[1],@acc[0],@acc[0] ! see if result is zero
958         or      @acc[3],@acc[2],@acc[2]
959         or      @acc[5],@acc[4],@acc[4]
960         or      @acc[7],@acc[6],@acc[6]
961         or      @acc[2],@acc[0],@acc[0]
962         or      @acc[6],@acc[4],@acc[4]
963         or      @acc[4],@acc[0],@acc[0]
964         st      @acc[0],[%fp+STACK_BIAS-20]
965
966         add     $ap_real,0,$bp
967         add     %sp,LOCALS+$Z2sqr,$ap
968         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
969         add     %sp,LOCALS+$U1,$rp
970
971         add     $bp_real,0,$bp
972         add     %sp,LOCALS+$Z1sqr,$ap
973         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
974         add     %sp,LOCALS+$U2,$rp
975
976         add     %sp,LOCALS+$U1,$bp
977         call    __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
978         add     %sp,LOCALS+$H,$rp
979
980         or      @acc[1],@acc[0],@acc[0] ! see if result is zero
981         or      @acc[3],@acc[2],@acc[2]
982         or      @acc[5],@acc[4],@acc[4]
983         or      @acc[7],@acc[6],@acc[6]
984         or      @acc[2],@acc[0],@acc[0]
985         or      @acc[6],@acc[4],@acc[4]
986         orcc    @acc[4],@acc[0],@acc[0]
987
988         bne,pt  %icc,.Ladd_proceed      ! is_equal(U1,U2)?
989         nop
990
991         ld      [%fp+STACK_BIAS-12],$t0
992         ld      [%fp+STACK_BIAS-16],$t1
993         ld      [%fp+STACK_BIAS-20],$t2
994         andcc   $t0,$t1,%g0
995         be,pt   %icc,.Ladd_proceed      ! (in1infty || in2infty)?
996         nop
997         andcc   $t2,$t2,%g0
998         be,pt   %icc,.Ladd_double       ! is_equal(S1,S2)?
999         nop
1000
1001         ldx     [%fp+STACK_BIAS-8],$rp
1002         st      %g0,[$rp]
1003         st      %g0,[$rp+4]
1004         st      %g0,[$rp+8]
1005         st      %g0,[$rp+12]
1006         st      %g0,[$rp+16]
1007         st      %g0,[$rp+20]
1008         st      %g0,[$rp+24]
1009         st      %g0,[$rp+28]
1010         st      %g0,[$rp+32]
1011         st      %g0,[$rp+32+4]
1012         st      %g0,[$rp+32+8]
1013         st      %g0,[$rp+32+12]
1014         st      %g0,[$rp+32+16]
1015         st      %g0,[$rp+32+20]
1016         st      %g0,[$rp+32+24]
1017         st      %g0,[$rp+32+28]
1018         st      %g0,[$rp+64]
1019         st      %g0,[$rp+64+4]
1020         st      %g0,[$rp+64+8]
1021         st      %g0,[$rp+64+12]
1022         st      %g0,[$rp+64+16]
1023         st      %g0,[$rp+64+20]
1024         st      %g0,[$rp+64+24]
1025         st      %g0,[$rp+64+28]
1026         b       .Ladd_done
1027         nop
1028
1029 .align  16
1030 .Ladd_double:
1031         ldx     [%fp+STACK_BIAS-8],$rp_real
1032         mov     $ap_real,$ap
1033         b       .Lpoint_double_shortcut
1034         add     %sp,32*(12-4)+32,%sp    ! difference in frame sizes
1035
1036 .align  16
1037 .Ladd_proceed:
1038         add     %sp,LOCALS+$R,$bp
1039         add     %sp,LOCALS+$R,$ap
1040         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1041         add     %sp,LOCALS+$Rsqr,$rp
1042
1043         add     $ap_real,64,$bp
1044         add     %sp,LOCALS+$H,$ap
1045         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1046         add     %sp,LOCALS+$res_z,$rp
1047
1048         add     %sp,LOCALS+$H,$bp
1049         add     %sp,LOCALS+$H,$ap
1050         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1051         add     %sp,LOCALS+$Hsqr,$rp
1052
1053         add     $bp_real,64,$bp
1054         add     %sp,LOCALS+$res_z,$ap
1055         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1056         add     %sp,LOCALS+$res_z,$rp
1057
1058         add     %sp,LOCALS+$H,$bp
1059         add     %sp,LOCALS+$Hsqr,$ap
1060         call    __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1061         add     %sp,LOCALS+$Hcub,$rp
1062
1063         add     %sp,LOCALS+$U1,$bp
1064         add     %sp,LOCALS+$Hsqr,$ap
1065         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1066         add     %sp,LOCALS+$U2,$rp
1067
1068         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1069         add     %sp,LOCALS+$Hsqr,$rp
1070
1071         add     %sp,LOCALS+$Rsqr,$bp
1072         call    __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1073         add     %sp,LOCALS+$res_x,$rp
1074
1075         add     %sp,LOCALS+$Hcub,$bp
1076         call    __ecp_nistz256_sub_from !  p256_sub(res_x, res_x, Hcub);
1077         add     %sp,LOCALS+$res_x,$rp
1078
1079         add     %sp,LOCALS+$U2,$bp
1080         call    __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1081         add     %sp,LOCALS+$res_y,$rp
1082
1083         add     %sp,LOCALS+$Hcub,$bp
1084         add     %sp,LOCALS+$S1,$ap
1085         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1086         add     %sp,LOCALS+$S2,$rp
1087
1088         add     %sp,LOCALS+$R,$bp
1089         add     %sp,LOCALS+$res_y,$ap
1090         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1091         add     %sp,LOCALS+$res_y,$rp
1092
1093         add     %sp,LOCALS+$S2,$bp
1094         call    __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1095         add     %sp,LOCALS+$res_y,$rp
1096
1097         ld      [%fp+STACK_BIAS-16],$t1 ! !in1infty
1098         ld      [%fp+STACK_BIAS-12],$t2 ! !in2infty
1099         ldx     [%fp+STACK_BIAS-8],$rp
1100 ___
1101 for($i=0;$i<96;$i+=8) {                 # conditional moves
1102 $code.=<<___;
1103         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1104         ld      [%sp+LOCALS+$i+4],@acc[1]
1105         ld      [$bp_real+$i],@acc[2]           ! in2
1106         ld      [$bp_real+$i+4],@acc[3]
1107         ld      [$ap_real+$i],@acc[4]           ! in1
1108         ld      [$ap_real+$i+4],@acc[5]
1109         movrz   $t1,@acc[2],@acc[0]
1110         movrz   $t1,@acc[3],@acc[1]
1111         movrz   $t2,@acc[4],@acc[0]
1112         movrz   $t2,@acc[5],@acc[1]
1113         st      @acc[0],[$rp+$i]
1114         st      @acc[1],[$rp+$i+4]
1115 ___
1116 }
1117 $code.=<<___;
1118 .Ladd_done:
1119         ret
1120         restore
1121 .size   ecp_nistz256_point_add,.-ecp_nistz256_point_add
1122 ___
1123 }
1124
1125 ########################################################################
1126 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1127 #                                    const P256_POINT_AFFINE *in2);
1128 {
1129 my ($res_x,$res_y,$res_z,
1130     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1131 my $Z1sqr = $S2;
1132 # above map() describes stack layout with 10 temporary
1133 # 256-bit vectors on top. Then we reserve some space for
1134 # !in1infty, !in2infty, result of check for zero and return pointer.
1135
1136 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
1137 my $bp_real=$rp_real;
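
# @ONE_mont above is the number 1 in Montgomery representation, i.e.
# 2^256 mod P-256; the -1/-2 entries become the 32-bit words 0xffffffff
# and 0xfffffffe. A minimal self-check, illustration only, never executed
# by the generator (assumes Math::BigInt is available):
if (0) {
	require Math::BigInt;
	my $p = Math::BigInt->from_hex(
	    "ffffffff00000001000000000000000000000000ffffffffffffffffffffffff");
	my $one = Math::BigInt->bzero();
	$one += Math::BigInt->new($ONE_mont[$_] & 0xffffffff) << (32*$_) for (0..7);
	die "ONE_mont mismatch" unless $one == (Math::BigInt->bone() << 256) % $p;
}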
1138
1139 $code.=<<___;
1140 .globl  ecp_nistz256_point_add_affine
1141 .align  32
1142 ecp_nistz256_point_add_affine:
1143         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1144         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
1145         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1146         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1147         be      ecp_nistz256_point_add_affine_vis3
1148         nop
1149
1150         save    %sp,-STACK_FRAME-32*10-32,%sp
1151
1152         stx     $rp,[%fp+STACK_BIAS-8]  ! off-load $rp
1153         mov     $ap,$ap_real
1154         mov     $bp,$bp_real
1155
1156         ld      [$ap],@acc[0]           ! in1_x
1157         ld      [$ap+4],@acc[1]
1158         ld      [$ap+8],@acc[2]
1159         ld      [$ap+12],@acc[3]
1160         ld      [$ap+16],@acc[4]
1161         ld      [$ap+20],@acc[5]
1162         ld      [$ap+24],@acc[6]
1163         ld      [$ap+28],@acc[7]
1164         ld      [$ap+32],$t0            ! in1_y
1165         ld      [$ap+32+4],$t1
1166         ld      [$ap+32+8],$t2
1167         ld      [$ap+32+12],$t3
1168         ld      [$ap+32+16],$t4
1169         ld      [$ap+32+20],$t5
1170         ld      [$ap+32+24],$t6
1171         ld      [$ap+32+28],$t7
1172         or      @acc[1],@acc[0],@acc[0]
1173         or      @acc[3],@acc[2],@acc[2]
1174         or      @acc[5],@acc[4],@acc[4]
1175         or      @acc[7],@acc[6],@acc[6]
1176         or      @acc[2],@acc[0],@acc[0]
1177         or      @acc[6],@acc[4],@acc[4]
1178         or      @acc[4],@acc[0],@acc[0]
1179         or      $t1,$t0,$t0
1180         or      $t3,$t2,$t2
1181         or      $t5,$t4,$t4
1182         or      $t7,$t6,$t6
1183         or      $t2,$t0,$t0
1184         or      $t6,$t4,$t4
1185         or      $t4,$t0,$t0
1186         or      @acc[0],$t0,$t0         ! !in1infty
1187         movrnz  $t0,-1,$t0
1188         st      $t0,[%fp+STACK_BIAS-16]
1189
1190         ld      [$bp],@acc[0]           ! in2_x
1191         ld      [$bp+4],@acc[1]
1192         ld      [$bp+8],@acc[2]
1193         ld      [$bp+12],@acc[3]
1194         ld      [$bp+16],@acc[4]
1195         ld      [$bp+20],@acc[5]
1196         ld      [$bp+24],@acc[6]
1197         ld      [$bp+28],@acc[7]
1198         ld      [$bp+32],$t0            ! in2_y
1199         ld      [$bp+32+4],$t1
1200         ld      [$bp+32+8],$t2
1201         ld      [$bp+32+12],$t3
1202         ld      [$bp+32+16],$t4
1203         ld      [$bp+32+20],$t5
1204         ld      [$bp+32+24],$t6
1205         ld      [$bp+32+28],$t7
1206         or      @acc[1],@acc[0],@acc[0]
1207         or      @acc[3],@acc[2],@acc[2]
1208         or      @acc[5],@acc[4],@acc[4]
1209         or      @acc[7],@acc[6],@acc[6]
1210         or      @acc[2],@acc[0],@acc[0]
1211         or      @acc[6],@acc[4],@acc[4]
1212         or      @acc[4],@acc[0],@acc[0]
1213         or      $t1,$t0,$t0
1214         or      $t3,$t2,$t2
1215         or      $t5,$t4,$t4
1216         or      $t7,$t6,$t6
1217         or      $t2,$t0,$t0
1218         or      $t6,$t4,$t4
1219         or      $t4,$t0,$t0
1220         or      @acc[0],$t0,$t0         ! !in2infty
1221         movrnz  $t0,-1,$t0
1222         st      $t0,[%fp+STACK_BIAS-12]
1223
1224         add     $ap_real,64,$bp
1225         add     $ap_real,64,$ap
1226         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1227         add     %sp,LOCALS+$Z1sqr,$rp
1228
1229         add     $bp_real,0,$bp
1230         add     %sp,LOCALS+$Z1sqr,$ap
1231         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1232         add     %sp,LOCALS+$U2,$rp
1233
1234         add     $ap_real,0,$bp
1235         call    __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1236         add     %sp,LOCALS+$H,$rp
1237
1238         add     $ap_real,64,$bp
1239         add     %sp,LOCALS+$Z1sqr,$ap
1240         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1241         add     %sp,LOCALS+$S2,$rp
1242
1243         add     $ap_real,64,$bp
1244         add     %sp,LOCALS+$H,$ap
1245         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1246         add     %sp,LOCALS+$res_z,$rp
1247
1248         add     $bp_real,32,$bp
1249         add     %sp,LOCALS+$S2,$ap
1250         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1251         add     %sp,LOCALS+$S2,$rp
1252
1253         add     $ap_real,32,$bp
1254         call    __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1255         add     %sp,LOCALS+$R,$rp
1256
1257         add     %sp,LOCALS+$H,$bp
1258         add     %sp,LOCALS+$H,$ap
1259         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1260         add     %sp,LOCALS+$Hsqr,$rp
1261
1262         add     %sp,LOCALS+$R,$bp
1263         add     %sp,LOCALS+$R,$ap
1264         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1265         add     %sp,LOCALS+$Rsqr,$rp
1266
1267         add     %sp,LOCALS+$H,$bp
1268         add     %sp,LOCALS+$Hsqr,$ap
1269         call    __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1270         add     %sp,LOCALS+$Hcub,$rp
1271
1272         add     $ap_real,0,$bp
1273         add     %sp,LOCALS+$Hsqr,$ap
1274         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1275         add     %sp,LOCALS+$U2,$rp
1276
1277         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1278         add     %sp,LOCALS+$Hsqr,$rp
1279
1280         add     %sp,LOCALS+$Rsqr,$bp
1281         call    __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1282         add     %sp,LOCALS+$res_x,$rp
1283
1284         add     %sp,LOCALS+$Hcub,$bp
1285         call    __ecp_nistz256_sub_from !  p256_sub(res_x, res_x, Hcub);
1286         add     %sp,LOCALS+$res_x,$rp
1287
1288         add     %sp,LOCALS+$U2,$bp
1289         call    __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1290         add     %sp,LOCALS+$res_y,$rp
1291
1292         add     $ap_real,32,$bp
1293         add     %sp,LOCALS+$Hcub,$ap
1294         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1295         add     %sp,LOCALS+$S2,$rp
1296
1297         add     %sp,LOCALS+$R,$bp
1298         add     %sp,LOCALS+$res_y,$ap
1299         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1300         add     %sp,LOCALS+$res_y,$rp
1301
1302         add     %sp,LOCALS+$S2,$bp
1303         call    __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1304         add     %sp,LOCALS+$res_y,$rp
1305
1306         ld      [%fp+STACK_BIAS-16],$t1 ! !in1infty
1307         ld      [%fp+STACK_BIAS-12],$t2 ! !in2infty
1308         ldx     [%fp+STACK_BIAS-8],$rp
1309 ___
1310 for($i=0;$i<64;$i+=8) {                 # conditional moves
1311 $code.=<<___;
1312         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1313         ld      [%sp+LOCALS+$i+4],@acc[1]
1314         ld      [$bp_real+$i],@acc[2]           ! in2
1315         ld      [$bp_real+$i+4],@acc[3]
1316         ld      [$ap_real+$i],@acc[4]           ! in1
1317         ld      [$ap_real+$i+4],@acc[5]
1318         movrz   $t1,@acc[2],@acc[0]
1319         movrz   $t1,@acc[3],@acc[1]
1320         movrz   $t2,@acc[4],@acc[0]
1321         movrz   $t2,@acc[5],@acc[1]
1322         st      @acc[0],[$rp+$i]
1323         st      @acc[1],[$rp+$i+4]
1324 ___
1325 }
1326 for(;$i<96;$i+=8) {
1327 my $j=($i-64)/4;
1328 $code.=<<___;
1329         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1330         ld      [%sp+LOCALS+$i+4],@acc[1]
1331         ld      [$ap_real+$i],@acc[4]           ! in1
1332         ld      [$ap_real+$i+4],@acc[5]
1333         movrz   $t1,@ONE_mont[$j],@acc[0]
1334         movrz   $t1,@ONE_mont[$j+1],@acc[1]
1335         movrz   $t2,@acc[4],@acc[0]
1336         movrz   $t2,@acc[5],@acc[1]
1337         st      @acc[0],[$rp+$i]
1338         st      @acc[1],[$rp+$i+4]
1339 ___
1340 }
1341 $code.=<<___;
1342         ret
1343         restore
1344 .size   ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1345 ___
1346 }                                                               }}}
1347 {{{
1348 my ($out,$inp,$index)=map("%i$_",(0..2));
1349 my $mask="%o0";
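
# Note on the gather routines below: the neg/srax pair turns the (1-based)
# $index into an all-ones mask when it is non-zero and an all-zero mask
# when it is zero; adding the mask to the index decrements it, and every
# loaded word is and-ed with the mask, so index 0 returns an all-zero
# point instead of reading entry 0.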
1350
1351 $code.=<<___;
1352 ! void  ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1353 !                                         int %i2);
1354 .globl  ecp_nistz256_scatter_w5
1355 .align  32
1356 ecp_nistz256_scatter_w5:
1357         save    %sp,-STACK_FRAME,%sp
1358
1359         sll     $index,2,$index
1360         add     $out,$index,$out
1361
1362         ld      [$inp],%l0              ! X
1363         ld      [$inp+4],%l1
1364         ld      [$inp+8],%l2
1365         ld      [$inp+12],%l3
1366         ld      [$inp+16],%l4
1367         ld      [$inp+20],%l5
1368         ld      [$inp+24],%l6
1369         ld      [$inp+28],%l7
1370         add     $inp,32,$inp
1371         st      %l0,[$out+64*0-4]
1372         st      %l1,[$out+64*1-4]
1373         st      %l2,[$out+64*2-4]
1374         st      %l3,[$out+64*3-4]
1375         st      %l4,[$out+64*4-4]
1376         st      %l5,[$out+64*5-4]
1377         st      %l6,[$out+64*6-4]
1378         st      %l7,[$out+64*7-4]
1379         add     $out,64*8,$out
1380
1381         ld      [$inp],%l0              ! Y
1382         ld      [$inp+4],%l1
1383         ld      [$inp+8],%l2
1384         ld      [$inp+12],%l3
1385         ld      [$inp+16],%l4
1386         ld      [$inp+20],%l5
1387         ld      [$inp+24],%l6
1388         ld      [$inp+28],%l7
1389         add     $inp,32,$inp
1390         st      %l0,[$out+64*0-4]
1391         st      %l1,[$out+64*1-4]
1392         st      %l2,[$out+64*2-4]
1393         st      %l3,[$out+64*3-4]
1394         st      %l4,[$out+64*4-4]
1395         st      %l5,[$out+64*5-4]
1396         st      %l6,[$out+64*6-4]
1397         st      %l7,[$out+64*7-4]
1398         add     $out,64*8,$out
1399
1400         ld      [$inp],%l0              ! Z
1401         ld      [$inp+4],%l1
1402         ld      [$inp+8],%l2
1403         ld      [$inp+12],%l3
1404         ld      [$inp+16],%l4
1405         ld      [$inp+20],%l5
1406         ld      [$inp+24],%l6
1407         ld      [$inp+28],%l7
1408         st      %l0,[$out+64*0-4]
1409         st      %l1,[$out+64*1-4]
1410         st      %l2,[$out+64*2-4]
1411         st      %l3,[$out+64*3-4]
1412         st      %l4,[$out+64*4-4]
1413         st      %l5,[$out+64*5-4]
1414         st      %l6,[$out+64*6-4]
1415         st      %l7,[$out+64*7-4]
1416
1417         ret
1418         restore
1419 .size   ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1420
1421 ! void  ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1422 !                                              int %i2);
1423 .globl  ecp_nistz256_gather_w5
1424 .align  32
1425 ecp_nistz256_gather_w5:
1426         save    %sp,-STACK_FRAME,%sp
1427
1428         neg     $index,$mask
1429         srax    $mask,63,$mask
1430
1431         add     $index,$mask,$index
1432         sll     $index,2,$index
1433         add     $inp,$index,$inp
1434
1435         ld      [$inp+64*0],%l0
1436         ld      [$inp+64*1],%l1
1437         ld      [$inp+64*2],%l2
1438         ld      [$inp+64*3],%l3
1439         ld      [$inp+64*4],%l4
1440         ld      [$inp+64*5],%l5
1441         ld      [$inp+64*6],%l6
1442         ld      [$inp+64*7],%l7
1443         add     $inp,64*8,$inp
1444         and     %l0,$mask,%l0
1445         and     %l1,$mask,%l1
1446         st      %l0,[$out]              ! X
1447         and     %l2,$mask,%l2
1448         st      %l1,[$out+4]
1449         and     %l3,$mask,%l3
1450         st      %l2,[$out+8]
1451         and     %l4,$mask,%l4
1452         st      %l3,[$out+12]
1453         and     %l5,$mask,%l5
1454         st      %l4,[$out+16]
1455         and     %l6,$mask,%l6
1456         st      %l5,[$out+20]
1457         and     %l7,$mask,%l7
1458         st      %l6,[$out+24]
1459         st      %l7,[$out+28]
1460         add     $out,32,$out
1461
1462         ld      [$inp+64*0],%l0
1463         ld      [$inp+64*1],%l1
1464         ld      [$inp+64*2],%l2
1465         ld      [$inp+64*3],%l3
1466         ld      [$inp+64*4],%l4
1467         ld      [$inp+64*5],%l5
1468         ld      [$inp+64*6],%l6
1469         ld      [$inp+64*7],%l7
1470         add     $inp,64*8,$inp
1471         and     %l0,$mask,%l0
1472         and     %l1,$mask,%l1
1473         st      %l0,[$out]              ! Y
1474         and     %l2,$mask,%l2
1475         st      %l1,[$out+4]
1476         and     %l3,$mask,%l3
1477         st      %l2,[$out+8]
1478         and     %l4,$mask,%l4
1479         st      %l3,[$out+12]
1480         and     %l5,$mask,%l5
1481         st      %l4,[$out+16]
1482         and     %l6,$mask,%l6
1483         st      %l5,[$out+20]
1484         and     %l7,$mask,%l7
1485         st      %l6,[$out+24]
1486         st      %l7,[$out+28]
1487         add     $out,32,$out
1488
1489         ld      [$inp+64*0],%l0
1490         ld      [$inp+64*1],%l1
1491         ld      [$inp+64*2],%l2
1492         ld      [$inp+64*3],%l3
1493         ld      [$inp+64*4],%l4
1494         ld      [$inp+64*5],%l5
1495         ld      [$inp+64*6],%l6
1496         ld      [$inp+64*7],%l7
1497         and     %l0,$mask,%l0
1498         and     %l1,$mask,%l1
1499         st      %l0,[$out]              ! Z
1500         and     %l2,$mask,%l2
1501         st      %l1,[$out+4]
1502         and     %l3,$mask,%l3
1503         st      %l2,[$out+8]
1504         and     %l4,$mask,%l4
1505         st      %l3,[$out+12]
1506         and     %l5,$mask,%l5
1507         st      %l4,[$out+16]
1508         and     %l6,$mask,%l6
1509         st      %l5,[$out+20]
1510         and     %l7,$mask,%l7
1511         st      %l6,[$out+24]
1512         st      %l7,[$out+28]
1513
1514         ret
1515         restore
1516 .size   ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1517
1518 ! void  ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1519 !                                         int %i2);
1520 .globl  ecp_nistz256_scatter_w7
1521 .align  32
1522 ecp_nistz256_scatter_w7:
1523         save    %sp,-STACK_FRAME,%sp
1524         nop
1525         add     $out,$index,$out
1526         mov     64/4,$index
1527 .Loop_scatter_w7:
1528         ld      [$inp],%l0
1529         add     $inp,4,$inp
1530         subcc   $index,1,$index
1531         stb     %l0,[$out+64*0-1]
1532         srl     %l0,8,%l1
1533         stb     %l1,[$out+64*1-1]
1534         srl     %l0,16,%l2
1535         stb     %l2,[$out+64*2-1]
1536         srl     %l0,24,%l3
1537         stb     %l3,[$out+64*3-1]
1538         bne     .Loop_scatter_w7
1539         add     $out,64*4,$out
1540
1541         ret
1542         restore
1543 .size   ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1544
1545 ! void  ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1546 !                                                     int %i2);
1547 .globl  ecp_nistz256_gather_w7
1548 .align  32
1549 ecp_nistz256_gather_w7:
1550         save    %sp,-STACK_FRAME,%sp
1551
1552         neg     $index,$mask
1553         srax    $mask,63,$mask
1554
1555         add     $index,$mask,$index
1556         add     $inp,$index,$inp
1557         mov     64/4,$index
1558
1559 .Loop_gather_w7:
1560         ldub    [$inp+64*0],%l0
1561         prefetch [$inp+3840+64*0],1
1562         subcc   $index,1,$index
1563         ldub    [$inp+64*1],%l1
1564         prefetch [$inp+3840+64*1],1
1565         ldub    [$inp+64*2],%l2
1566         prefetch [$inp+3840+64*2],1
1567         ldub    [$inp+64*3],%l3
1568         prefetch [$inp+3840+64*3],1
1569         add     $inp,64*4,$inp
1570         sll     %l1,8,%l1
1571         sll     %l2,16,%l2
1572         or      %l0,%l1,%l0
1573         sll     %l3,24,%l3
1574         or      %l0,%l2,%l0
1575         or      %l0,%l3,%l0
1576         and     %l0,$mask,%l0
1577         st      %l0,[$out]
1578         bne     .Loop_gather_w7
1579         add     $out,4,$out
1580
1581         ret
1582         restore
1583 .size   ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1584 ___
1585 }}}
1586 {{{
1587 ########################################################################
1588 # Following subroutines are VIS3 counterparts of those above that
1589 # implement ones found in ecp_nistz256.c. Key difference is that they
1590 # use 128-bit muliplication and addition with 64-bit carry, and in order
1591 # to do that they perform conversion from uin32_t[8] to uint64_t[4] upon
1592 # entry and vice versa on return.
1593 #
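# As a rough illustration of that conversion (hedged C sketch, not part of this
# module; needs <stdint.h>): eight 32-bit words, least significant first, are
# packed into four 64-bit limbs on entry and unpacked again on return.
#
#     void to_u64x4(uint64_t out[4], const uint32_t in[8])
#     {
#         for (int i = 0; i < 4; i++)
#             out[i] = (uint64_t)in[2*i] | (uint64_t)in[2*i+1] << 32;
#     }
#
#     void to_u32x8(uint32_t out[8], const uint64_t in[4])
#     {
#         for (int i = 0; i < 4; i++) {
#             out[2*i]   = (uint32_t)in[i];
#             out[2*i+1] = (uint32_t)(in[i] >> 32);
#         }
#     }
#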
1594 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1595 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1596 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1597 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1598 my ($rp_real,$ap_real)=("%g2","%g3");
1599 my ($acc6,$acc7)=($bp,$bi);     # used in squaring
1600
1601 $code.=<<___;
1602 .align  32
1603 __ecp_nistz256_mul_by_2_vis3:
1604         addcc   $acc0,$acc0,$acc0
1605         addxccc $acc1,$acc1,$acc1
1606         addxccc $acc2,$acc2,$acc2
1607         addxccc $acc3,$acc3,$acc3
1608         b       .Lreduce_by_sub_vis3
1609         addxc   %g0,%g0,$acc4           ! did it carry?
1610 .size   __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1611
1612 .align  32
1613 __ecp_nistz256_add_vis3:
1614         ldx     [$bp+0],$t0
1615         ldx     [$bp+8],$t1
1616         ldx     [$bp+16],$t2
1617         ldx     [$bp+24],$t3
1618
1619 __ecp_nistz256_add_noload_vis3:
1620
1621         addcc   $t0,$acc0,$acc0
1622         addxccc $t1,$acc1,$acc1
1623         addxccc $t2,$acc2,$acc2
1624         addxccc $t3,$acc3,$acc3
1625         addxc   %g0,%g0,$acc4           ! did it carry?
1626
1627 .Lreduce_by_sub_vis3:
1628
1629         addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
1630         addxccc $acc1,$poly1,$t1
1631         addxccc $acc2,$minus1,$t2
1632         addxc   $acc3,$poly3,$t3
1633
1634         movrnz  $acc4,$t0,$acc0         ! if a+b carried, ret = ret-mod
1635         movrnz  $acc4,$t1,$acc1
1636         stx     $acc0,[$rp]
1637         movrnz  $acc4,$t2,$acc2
1638         stx     $acc1,[$rp+8]
1639         movrnz  $acc4,$t3,$acc3
1640         stx     $acc2,[$rp+16]
1641         retl
1642         stx     $acc3,[$rp+24]
1643 .size   __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
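!
! A rough C model of .Lreduce_by_sub_vis3 above (hedged sketch with
! hypothetical names, not part of this module; needs <stdint.h> and gcc-style
! unsigned __int128): once a 256-bit addition has left its carry-out in
! "carry", -modulus is added and that result is kept only if the addition
! carried, just like the movrnz sequence above.
!
!     static const uint64_t NEG_P[4] = {     /* 2^256 - P, least significant first */
!         1, 0xffffffff00000000ULL, 0xffffffffffffffffULL, 0x00000000fffffffeULL
!     };
!
!     void reduce_by_sub(uint64_t acc[4], uint64_t carry)
!     {
!         unsigned __int128 s = 0;
!         uint64_t t[4];
!
!         for (int i = 0; i < 4; i++) {
!             s += (unsigned __int128)acc[i] + NEG_P[i];
!             t[i] = (uint64_t)s;
!             s >>= 64;
!         }
!         for (int i = 0; i < 4; i++)
!             if (carry) acc[i] = t[i];      /* conditional move */
!     }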
1644
1645 ! Trouble with subtraction is that there is no subtraction with 64-bit
1646 ! borrow, only with 32-bit one. For this reason we "decompose" 64-bit
1647 ! $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
1648 ! recall that SPARC is big-endian, which is why you'll observe that
1649 ! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
1650 ! "collect" result back to 64-bit $acc0-$acc3.
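!
! One 64-bit limb of that borrow chain as a hedged C sketch (hypothetical
! helper, not part of this module; needs <stdint.h>): subtract the two 32-bit
! halves separately and let bit 32 of each intermediate result carry the
! borrow, then recombine.
!
!     uint64_t sub64_via_32(uint64_t a, uint64_t b, unsigned *borrow)
!     {
!         uint64_t lo = (uint64_t)(uint32_t)a - (uint32_t)b - *borrow;
!         uint64_t hi = (a >> 32) - (b >> 32) - ((lo >> 32) & 1);
!
!         *borrow = (unsigned)((hi >> 32) & 1);
!         return (lo & 0xffffffff) | (hi << 32);
!     }
!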
1651 .align  32
1652 __ecp_nistz256_sub_from_vis3:
1653         ld      [$bp+4],$t0
1654         ld      [$bp+0],$t1
1655         ld      [$bp+12],$t2
1656         ld      [$bp+8],$t3
1657
1658         srlx    $acc0,32,$acc4
1659         not     $poly1,$poly1
1660         srlx    $acc1,32,$acc5
1661         subcc   $acc0,$t0,$acc0
1662         ld      [$bp+20],$t0
1663         subccc  $acc4,$t1,$acc4
1664         ld      [$bp+16],$t1
1665         subccc  $acc1,$t2,$acc1
1666         ld      [$bp+28],$t2
1667         and     $acc0,$poly1,$acc0
1668         subccc  $acc5,$t3,$acc5
1669         ld      [$bp+24],$t3
1670         sllx    $acc4,32,$acc4
1671         and     $acc1,$poly1,$acc1
1672         sllx    $acc5,32,$acc5
1673         or      $acc0,$acc4,$acc0
1674         srlx    $acc2,32,$acc4
1675         or      $acc1,$acc5,$acc1
1676         srlx    $acc3,32,$acc5
1677         subccc  $acc2,$t0,$acc2
1678         subccc  $acc4,$t1,$acc4
1679         subccc  $acc3,$t2,$acc3
1680         and     $acc2,$poly1,$acc2
1681         subccc  $acc5,$t3,$acc5
1682         sllx    $acc4,32,$acc4
1683         and     $acc3,$poly1,$acc3
1684         sllx    $acc5,32,$acc5
1685         or      $acc2,$acc4,$acc2
1686         subc    %g0,%g0,$acc4           ! did it borrow?
1687         b       .Lreduce_by_add_vis3
1688         or      $acc3,$acc5,$acc3
1689 .size   __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1690
1691 .align  32
1692 __ecp_nistz256_sub_morf_vis3:
1693         ld      [$bp+4],$t0
1694         ld      [$bp+0],$t1
1695         ld      [$bp+12],$t2
1696         ld      [$bp+8],$t3
1697
1698         srlx    $acc0,32,$acc4
1699         not     $poly1,$poly1
1700         srlx    $acc1,32,$acc5
1701         subcc   $t0,$acc0,$acc0
1702         ld      [$bp+20],$t0
1703         subccc  $t1,$acc4,$acc4
1704         ld      [$bp+16],$t1
1705         subccc  $t2,$acc1,$acc1
1706         ld      [$bp+28],$t2
1707         and     $acc0,$poly1,$acc0
1708         subccc  $t3,$acc5,$acc5
1709         ld      [$bp+24],$t3
1710         sllx    $acc4,32,$acc4
1711         and     $acc1,$poly1,$acc1
1712         sllx    $acc5,32,$acc5
1713         or      $acc0,$acc4,$acc0
1714         srlx    $acc2,32,$acc4
1715         or      $acc1,$acc5,$acc1
1716         srlx    $acc3,32,$acc5
1717         subccc  $t0,$acc2,$acc2
1718         subccc  $t1,$acc4,$acc4
1719         subccc  $t2,$acc3,$acc3
1720         and     $acc2,$poly1,$acc2
1721         subccc  $t3,$acc5,$acc5
1722         sllx    $acc4,32,$acc4
1723         and     $acc3,$poly1,$acc3
1724         sllx    $acc5,32,$acc5
1725         or      $acc2,$acc4,$acc2
1726         subc    %g0,%g0,$acc4           ! did it borrow?
1727         or      $acc3,$acc5,$acc3
1728
1729 .Lreduce_by_add_vis3:
1730
1731         addcc   $acc0,-1,$t0            ! add modulus
1732         not     $poly3,$t3
1733         addxccc $acc1,$poly1,$t1
1734         not     $poly1,$poly1           ! restore $poly1
1735         addxccc $acc2,%g0,$t2
1736         addxc   $acc3,$t3,$t3
1737
1738         movrnz  $acc4,$t0,$acc0         ! if a-b borrowed, ret = ret+mod
1739         movrnz  $acc4,$t1,$acc1
1740         stx     $acc0,[$rp]
1741         movrnz  $acc4,$t2,$acc2
1742         stx     $acc1,[$rp+8]
1743         movrnz  $acc4,$t3,$acc3
1744         stx     $acc2,[$rp+16]
1745         retl
1746         stx     $acc3,[$rp+24]
1747 .size   __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1748
1749 .align  32
1750 __ecp_nistz256_div_by_2_vis3:
1751         ! ret = (a is odd ? a+mod : a) >> 1
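        !
        ! A hedged C model of the same operation (hypothetical helper, not part
        ! of this module; needs <stdint.h> and gcc-style unsigned __int128).
        ! The code below adds the modulus unconditionally and selects with
        ! movrnz; the sketch simply conditions the addition on the parity.
        !
        !     void div_by_2_ref(uint64_t r[4], const uint64_t a[4])
        !     {
        !         static const uint64_t P[4] = {
        !             0xffffffffffffffffULL, 0x00000000ffffffffULL,
        !             0,                     0xffffffff00000001ULL
        !         };
        !         uint64_t odd = a[0] & 1, t[4], carry;
        !         unsigned __int128 s = 0;
        !
        !         for (int i = 0; i < 4; i++) {
        !             s += (unsigned __int128)a[i] + (odd ? P[i] : 0);
        !             t[i] = (uint64_t)s;
        !             s >>= 64;
        !         }
        !         carry = (uint64_t)s;
        !         for (int i = 0; i < 3; i++)
        !             r[i] = (t[i] >> 1) | (t[i+1] << 63);
        !         r[3] = (t[3] >> 1) | (carry << 63);
        !     }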
1752
1753         not     $poly1,$t1
1754         not     $poly3,$t3
1755         and     $acc0,1,$acc5
1756         addcc   $acc0,-1,$t0            ! add modulus
1757         addxccc $acc1,$t1,$t1
1758         addxccc $acc2,%g0,$t2
1759         addxccc $acc3,$t3,$t3
1760         addxc   %g0,%g0,$acc4           ! carry bit
1761
1762         movrnz  $acc5,$t0,$acc0
1763         movrnz  $acc5,$t1,$acc1
1764         movrnz  $acc5,$t2,$acc2
1765         movrnz  $acc5,$t3,$acc3
1766         movrz   $acc5,%g0,$acc4
1767
1768         ! ret >>= 1
1769
1770         srlx    $acc0,1,$acc0
1771         sllx    $acc1,63,$t0
1772         srlx    $acc1,1,$acc1
1773         or      $acc0,$t0,$acc0
1774         sllx    $acc2,63,$t1
1775         srlx    $acc2,1,$acc2
1776         or      $acc1,$t1,$acc1
1777         sllx    $acc3,63,$t2
1778         stx     $acc0,[$rp]
1779         srlx    $acc3,1,$acc3
1780         or      $acc2,$t2,$acc2
1781         sllx    $acc4,63,$t3            ! don't forget carry bit
1782         stx     $acc1,[$rp+8]
1783         or      $acc3,$t3,$acc3
1784         stx     $acc2,[$rp+16]
1785         retl
1786         stx     $acc3,[$rp+24]
1787 .size   __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1788
1789 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1790 ! 4x faster [on T4]...
1791 .align  32
1792 __ecp_nistz256_mul_mont_vis3:
1793         mulx    $a0,$bi,$acc0
1794         not     $poly3,$poly3           ! 0xFFFFFFFF00000001
1795         umulxhi $a0,$bi,$t0
1796         mulx    $a1,$bi,$acc1
1797         umulxhi $a1,$bi,$t1
1798         mulx    $a2,$bi,$acc2
1799         umulxhi $a2,$bi,$t2
1800         mulx    $a3,$bi,$acc3
1801         umulxhi $a3,$bi,$t3
1802         ldx     [$bp+8],$bi             ! b[1]
1803
1804         addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication
1805          sllx   $acc0,32,$t0
1806         addxccc $acc2,$t1,$acc2
1807          srlx   $acc0,32,$t1
1808         addxccc $acc3,$t2,$acc3
1809         addxc   %g0,$t3,$acc4
1810         mov     0,$acc5
1811 ___
1812 for($i=1;$i<4;$i++) {
1813         # Reduction iteration is normally performed by accumulating
1814         # result of multiplication of modulus by "magic" digit [and
1815         # omitting least significant word, which is guaranteed to
1816         # be 0], but thanks to special form of modulus and "magic"
1817         # digit being equal to least significant word, it can be
1818         # performed with additions and subtractions alone. Indeed:
1819         #
1820         #            ffff0001.00000000.0000ffff.ffffffff
1821         # *                                     abcdefgh
1822         # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1823         #
1824         # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
1825         # rewrite above as:
1826         #
1827         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1828         # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1829         # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1830         #
1831         # or marking redundant operations:
1832         #
1833         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1834         # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1835         # - 0000abcd.efgh0000.--------.--------.--------
1836         #   ^^^^^^^^ but this word is calculated with umulxhi, because
1837         #            there is no subtract with 64-bit borrow:-(
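        #
        # A quick hedged check of that trick (standalone C, not part of this
        # module; needs <stdint.h> and gcc-style unsigned __int128): for any
        # 64-bit "magic" digit m the low word of m*0xFFFFFFFF00000001 is just
        # m minus m<<32, which is what the following sub instruction computes;
        # only the high word still needs umulxhi.
        #
        #     int check_low_word(uint64_t m)
        #     {
        #         unsigned __int128 prod = (unsigned __int128)m * 0xffffffff00000001ULL;
        #         return (uint64_t)prod == m - (m << 32);   /* holds for all m */
        #     }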
1838
1839 $code.=<<___;
1840         sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1841         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1842         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1843         mulx    $a0,$bi,$t0
1844         addxccc $acc2,$t1,$acc1
1845         mulx    $a1,$bi,$t1
1846         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1847         mulx    $a2,$bi,$t2
1848         addxccc $acc4,$t3,$acc3
1849         mulx    $a3,$bi,$t3
1850         addxc   $acc5,%g0,$acc4
1851
1852         addcc   $acc0,$t0,$acc0         ! accumulate low parts of multiplication
1853         umulxhi $a0,$bi,$t0
1854         addxccc $acc1,$t1,$acc1
1855         umulxhi $a1,$bi,$t1
1856         addxccc $acc2,$t2,$acc2
1857         umulxhi $a2,$bi,$t2
1858         addxccc $acc3,$t3,$acc3
1859         umulxhi $a3,$bi,$t3
1860         addxc   $acc4,%g0,$acc4
1861 ___
1862 $code.=<<___    if ($i<3);
1863         ldx     [$bp+8*($i+1)],$bi      ! bp[$i+1]
1864 ___
1865 $code.=<<___;
1866         addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication 
1867          sllx   $acc0,32,$t0
1868         addxccc $acc2,$t1,$acc2
1869          srlx   $acc0,32,$t1
1870         addxccc $acc3,$t2,$acc3
1871         addxccc $acc4,$t3,$acc4
1872         addxc   %g0,%g0,$acc5
1873 ___
1874 }
1875 $code.=<<___;
1876         sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1877         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1878         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1879         addxccc $acc2,$t1,$acc1
1880         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1881         addxccc $acc4,$t3,$acc3
1882         b       .Lmul_final_vis3        ! see below
1883         addxc   $acc5,%g0,$acc4
1884 .size   __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1885
1886 ! compared to above __ecp_nistz256_mul_mont_vis3 it's 21% fewer
1887 ! instructions, but only 14% faster [on T4]...
1888 .align  32
1889 __ecp_nistz256_sqr_mont_vis3:
1890         !  |  |  |  |  |  |a1*a0|  |
1891         !  |  |  |  |  |a2*a0|  |  |
1892         !  |  |a3*a2|a3*a0|  |  |  |
1893         !  |  |  |  |a2*a1|  |  |  |
1894         !  |  |  |a3*a1|  |  |  |  |
1895         ! *|  |  |  |  |  |  |  | 2|
1896         ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1897         !  |--+--+--+--+--+--+--+--|
1898         !  |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1899         !
1900         !  "can't overflow" below marks carrying into the high part of a
1901         !  multiplication result, which can't overflow, because it
1902         !  can never be all ones.
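        !
        !  A hedged numeric check of that claim (standalone C fragment, not
        !  part of this module; needs <stdint.h>, <assert.h> and gcc-style
        !  unsigned __int128): the high word of a 64x64-bit product is at most
        !  2^64-2, so adding a single carry bit to it can never wrap around.
        !
        !      unsigned __int128 m = (unsigned __int128)UINT64_MAX * UINT64_MAX;
        !      assert((uint64_t)(m >> 64) == UINT64_MAX - 1);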
1903
1904         mulx    $a1,$a0,$acc1           ! a[1]*a[0]
1905         umulxhi $a1,$a0,$t1
1906         mulx    $a2,$a0,$acc2           ! a[2]*a[0]
1907         umulxhi $a2,$a0,$t2
1908         mulx    $a3,$a0,$acc3           ! a[3]*a[0]
1909         umulxhi $a3,$a0,$acc4
1910
1911         addcc   $acc2,$t1,$acc2         ! accumulate high parts of multiplication
1912         mulx    $a2,$a1,$t0             ! a[2]*a[1]
1913         umulxhi $a2,$a1,$t1
1914         addxccc $acc3,$t2,$acc3
1915         mulx    $a3,$a1,$t2             ! a[3]*a[1]
1916         umulxhi $a3,$a1,$t3
1917         addxc   $acc4,%g0,$acc4         ! can't overflow
1918
1919         mulx    $a3,$a2,$acc5           ! a[3]*a[2]
1920         not     $poly3,$poly3           ! 0xFFFFFFFF00000001
1921         umulxhi $a3,$a2,$acc6
1922
1923         addcc   $t2,$t1,$t1             ! accumulate high parts of multiplication
1924         mulx    $a0,$a0,$acc0           ! a[0]*a[0]
1925         addxc   $t3,%g0,$t2             ! can't overflow
1926
1927         addcc   $acc3,$t0,$acc3         ! accumulate low parts of multiplication
1928         umulxhi $a0,$a0,$a0
1929         addxccc $acc4,$t1,$acc4
1930         mulx    $a1,$a1,$t1             ! a[1]*a[1]
1931         addxccc $acc5,$t2,$acc5
1932         umulxhi $a1,$a1,$a1
1933         addxc   $acc6,%g0,$acc6         ! can't overflow
1934
1935         addcc   $acc1,$acc1,$acc1       ! acc[1-6]*=2
1936         mulx    $a2,$a2,$t2             ! a[2]*a[2]
1937         addxccc $acc2,$acc2,$acc2
1938         umulxhi $a2,$a2,$a2
1939         addxccc $acc3,$acc3,$acc3
1940         mulx    $a3,$a3,$t3             ! a[3]*a[3]
1941         addxccc $acc4,$acc4,$acc4
1942         umulxhi $a3,$a3,$a3
1943         addxccc $acc5,$acc5,$acc5
1944         addxccc $acc6,$acc6,$acc6
1945         addxc   %g0,%g0,$acc7
1946
1947         addcc   $acc1,$a0,$acc1         ! +a[i]*a[i]
1948         addxccc $acc2,$t1,$acc2
1949         addxccc $acc3,$a1,$acc3
1950         addxccc $acc4,$t2,$acc4
1951          sllx   $acc0,32,$t0
1952         addxccc $acc5,$a2,$acc5
1953          srlx   $acc0,32,$t1
1954         addxccc $acc6,$t3,$acc6
1955          sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1956         addxc   $acc7,$a3,$acc7
1957 ___
1958 for($i=0;$i<3;$i++) {                   # reductions, see commentary
1959                                         # in multiplication for details
1960 $code.=<<___;
1961         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1962         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1963          sllx   $acc0,32,$t0
1964         addxccc $acc2,$t1,$acc1
1965          srlx   $acc0,32,$t1
1966         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1967          sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1968         addxc   %g0,$t3,$acc3           ! can't overflow
1969 ___
1970 }
1971 $code.=<<___;
1972         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1973         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1974         addxccc $acc2,$t1,$acc1
1975         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1976         addxc   %g0,$t3,$acc3           ! can't overflow
1977
1978         addcc   $acc0,$acc4,$acc0       ! accumulate upper half
1979         addxccc $acc1,$acc5,$acc1
1980         addxccc $acc2,$acc6,$acc2
1981         addxccc $acc3,$acc7,$acc3
1982         addxc   %g0,%g0,$acc4
1983
1984 .Lmul_final_vis3:
1985
1986         ! Final step is "if result > mod, subtract mod", but as comparison
1987         ! means subtraction, we do the subtraction and then copy outcome
1988         ! if it didn't borrow. But note that as we [have to] replace
1989         ! subtraction with addition with negative, carry/borrow logic is
1990         ! inverse.
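        !
        ! A hedged C model of this final step (hypothetical helper, not part of
        ! this module; needs <stdint.h> and gcc-style unsigned __int128): add
        ! 2^256-P once and keep that result iff the addition, together with the
        ! pending carry word, carries out of bit 255, i.e. iff the accumulator
        ! is not smaller than the modulus.
        !
        !     void final_sub(uint64_t acc[5])       /* acc[4] is the carry word */
        !     {
        !         static const uint64_t NEG_P[4] = {
        !             1,                     0xffffffff00000000ULL,
        !             0xffffffffffffffffULL, 0x00000000fffffffeULL
        !         };
        !         unsigned __int128 s = 0;
        !         uint64_t t[4];
        !
        !         for (int i = 0; i < 4; i++) {
        !             s += (unsigned __int128)acc[i] + NEG_P[i];
        !             t[i] = (uint64_t)s;
        !             s >>= 64;
        !         }
        !         if ((uint64_t)s + acc[4])          /* movcs below */
        !             for (int i = 0; i < 4; i++) acc[i] = t[i];
        !     }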
1991
1992         addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
1993         not     $poly3,$poly3           ! restore 0x00000000FFFFFFFE
1994         addxccc $acc1,$poly1,$t1
1995         addxccc $acc2,$minus1,$t2
1996         addxccc $acc3,$poly3,$t3
1997         addxccc $acc4,$minus1,%g0       ! did it carry?
1998
1999         movcs   %xcc,$t0,$acc0
2000         movcs   %xcc,$t1,$acc1
2001         stx     $acc0,[$rp]
2002         movcs   %xcc,$t2,$acc2
2003         stx     $acc1,[$rp+8]
2004         movcs   %xcc,$t3,$acc3
2005         stx     $acc2,[$rp+16]
2006         retl
2007         stx     $acc3,[$rp+24]
2008 .size   __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2009 ___
2010
2011 ########################################################################
2012 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2013 #
2014 {
2015 my ($res_x,$res_y,$res_z,
2016     $in_x,$in_y,$in_z,
2017     $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2018 # above map() describes stack layout with 10 temporary
2019 # 256-bit vectors on top.
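#
# For orientation, the field-operation sequence below, transcribed from the
# p256_* annotations next to the calls (the helper names are those of the C
# reference code, not functions defined in this file), is:
#
#     S    = 2*in_y;        Zsqr  = in_z^2;
#     M    = in_x + Zsqr;   Zsqr  = in_x - Zsqr;   S     = S^2;
#     tmp0 = in_z*in_y;     res_z = 2*tmp0;
#     M    = M*Zsqr;        M     = 3*M;
#     tmp0 = S^2;           res_y = tmp0/2;        S     = S*in_x;
#     tmp0 = 2*S;           res_x = M^2;           res_x = res_x - tmp0;
#     S    = S - res_x;     S     = S*M;           res_y = S - res_y;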
2020
2021 $code.=<<___;
2022 .align  32
2023 ecp_nistz256_point_double_vis3:
2024         save    %sp,-STACK64_FRAME-32*10,%sp
2025
2026         mov     $rp,$rp_real
2027 .Ldouble_shortcut_vis3:
2028         mov     -1,$minus1
2029         mov     -2,$poly3
2030         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2031         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2032
2033         ! convert input to uint64_t[4]
2034         ld      [$ap],$a0                       ! in_x
2035         ld      [$ap+4],$t0
2036         ld      [$ap+8],$a1
2037         ld      [$ap+12],$t1
2038         ld      [$ap+16],$a2
2039         ld      [$ap+20],$t2
2040         ld      [$ap+24],$a3
2041         ld      [$ap+28],$t3
2042         sllx    $t0,32,$t0
2043         sllx    $t1,32,$t1
2044         ld      [$ap+32],$acc0                  ! in_y
2045         or      $a0,$t0,$a0
2046         ld      [$ap+32+4],$t0
2047         sllx    $t2,32,$t2
2048         ld      [$ap+32+8],$acc1
2049         or      $a1,$t1,$a1
2050         ld      [$ap+32+12],$t1
2051         sllx    $t3,32,$t3
2052         ld      [$ap+32+16],$acc2
2053         or      $a2,$t2,$a2
2054         ld      [$ap+32+20],$t2
2055         or      $a3,$t3,$a3
2056         ld      [$ap+32+24],$acc3
2057         sllx    $t0,32,$t0
2058         ld      [$ap+32+28],$t3
2059         sllx    $t1,32,$t1
2060         stx     $a0,[%sp+LOCALS64+$in_x]
2061         sllx    $t2,32,$t2
2062         stx     $a1,[%sp+LOCALS64+$in_x+8]
2063         sllx    $t3,32,$t3
2064         stx     $a2,[%sp+LOCALS64+$in_x+16]
2065         or      $acc0,$t0,$acc0
2066         stx     $a3,[%sp+LOCALS64+$in_x+24]
2067         or      $acc1,$t1,$acc1
2068         stx     $acc0,[%sp+LOCALS64+$in_y]
2069         or      $acc2,$t2,$acc2
2070         stx     $acc1,[%sp+LOCALS64+$in_y+8]
2071         or      $acc3,$t3,$acc3
2072         stx     $acc2,[%sp+LOCALS64+$in_y+16]
2073         stx     $acc3,[%sp+LOCALS64+$in_y+24]
2074
2075         ld      [$ap+64],$a0                    ! in_z
2076         ld      [$ap+64+4],$t0
2077         ld      [$ap+64+8],$a1
2078         ld      [$ap+64+12],$t1
2079         ld      [$ap+64+16],$a2
2080         ld      [$ap+64+20],$t2
2081         ld      [$ap+64+24],$a3
2082         ld      [$ap+64+28],$t3
2083         sllx    $t0,32,$t0
2084         sllx    $t1,32,$t1
2085         or      $a0,$t0,$a0
2086         sllx    $t2,32,$t2
2087         or      $a1,$t1,$a1
2088         sllx    $t3,32,$t3
2089         or      $a2,$t2,$a2
2090         or      $a3,$t3,$a3
2091         sllx    $t0,32,$t0
2092         sllx    $t1,32,$t1
2093         stx     $a0,[%sp+LOCALS64+$in_z]
2094         sllx    $t2,32,$t2
2095         stx     $a1,[%sp+LOCALS64+$in_z+8]
2096         sllx    $t3,32,$t3
2097         stx     $a2,[%sp+LOCALS64+$in_z+16]
2098         stx     $a3,[%sp+LOCALS64+$in_z+24]
2099
2100         ! in_y is still in $acc0-$acc3
2101         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(S, in_y);
2102         add     %sp,LOCALS64+$S,$rp
2103
2104         ! in_z is still in $a0-$a3
2105         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Zsqr, in_z);
2106         add     %sp,LOCALS64+$Zsqr,$rp
2107
2108         mov     $acc0,$a0                       ! put Zsqr aside
2109         mov     $acc1,$a1
2110         mov     $acc2,$a2
2111         mov     $acc3,$a3
2112
2113         add     %sp,LOCALS64+$in_x,$bp
2114         call    __ecp_nistz256_add_vis3         ! p256_add(M, Zsqr, in_x);
2115         add     %sp,LOCALS64+$M,$rp
2116
2117         mov     $a0,$acc0                       ! restore Zsqr
2118         ldx     [%sp+LOCALS64+$S],$a0           ! forward load
2119         mov     $a1,$acc1
2120         ldx     [%sp+LOCALS64+$S+8],$a1
2121         mov     $a2,$acc2
2122         ldx     [%sp+LOCALS64+$S+16],$a2
2123         mov     $a3,$acc3
2124         ldx     [%sp+LOCALS64+$S+24],$a3
2125
2126         add     %sp,LOCALS64+$in_x,$bp
2127         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(Zsqr, in_x, Zsqr);
2128         add     %sp,LOCALS64+$Zsqr,$rp
2129
2130         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(S, S);
2131         add     %sp,LOCALS64+$S,$rp
2132
2133         ldx     [%sp+LOCALS64+$in_z],$bi
2134         ldx     [%sp+LOCALS64+$in_y],$a0
2135         ldx     [%sp+LOCALS64+$in_y+8],$a1
2136         ldx     [%sp+LOCALS64+$in_y+16],$a2
2137         ldx     [%sp+LOCALS64+$in_y+24],$a3
2138         add     %sp,LOCALS64+$in_z,$bp
2139         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(tmp0, in_z, in_y);
2140         add     %sp,LOCALS64+$tmp0,$rp
2141
2142         ldx     [%sp+LOCALS64+$M],$bi           ! forward load
2143         ldx     [%sp+LOCALS64+$Zsqr],$a0
2144         ldx     [%sp+LOCALS64+$Zsqr+8],$a1
2145         ldx     [%sp+LOCALS64+$Zsqr+16],$a2
2146         ldx     [%sp+LOCALS64+$Zsqr+24],$a3
2147
2148         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(res_z, tmp0);
2149         add     %sp,LOCALS64+$res_z,$rp
2150
2151         add     %sp,LOCALS64+$M,$bp
2152         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(M, M, Zsqr);
2153         add     %sp,LOCALS64+$M,$rp
2154
2155         mov     $acc0,$a0                       ! put aside M
2156         mov     $acc1,$a1
2157         mov     $acc2,$a2
2158         mov     $acc3,$a3
2159         call    __ecp_nistz256_mul_by_2_vis3
2160         add     %sp,LOCALS64+$M,$rp
2161         mov     $a0,$t0                         ! copy M
2162         ldx     [%sp+LOCALS64+$S],$a0           ! forward load
2163         mov     $a1,$t1
2164         ldx     [%sp+LOCALS64+$S+8],$a1
2165         mov     $a2,$t2
2166         ldx     [%sp+LOCALS64+$S+16],$a2
2167         mov     $a3,$t3
2168         ldx     [%sp+LOCALS64+$S+24],$a3
2169         call    __ecp_nistz256_add_noload_vis3  ! p256_mul_by_3(M, M);
2170         add     %sp,LOCALS64+$M,$rp
2171
2172         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(tmp0, S);
2173         add     %sp,LOCALS64+$tmp0,$rp
2174
2175         ldx     [%sp+LOCALS64+$S],$bi           ! forward load
2176         ldx     [%sp+LOCALS64+$in_x],$a0
2177         ldx     [%sp+LOCALS64+$in_x+8],$a1
2178         ldx     [%sp+LOCALS64+$in_x+16],$a2
2179         ldx     [%sp+LOCALS64+$in_x+24],$a3
2180
2181         call    __ecp_nistz256_div_by_2_vis3    ! p256_div_by_2(res_y, tmp0);
2182         add     %sp,LOCALS64+$res_y,$rp
2183
2184         add     %sp,LOCALS64+$S,$bp
2185         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, in_x);
2186         add     %sp,LOCALS64+$S,$rp
2187
2188         ldx     [%sp+LOCALS64+$M],$a0           ! forward load
2189         ldx     [%sp+LOCALS64+$M+8],$a1
2190         ldx     [%sp+LOCALS64+$M+16],$a2
2191         ldx     [%sp+LOCALS64+$M+24],$a3
2192
2193         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(tmp0, S);
2194         add     %sp,LOCALS64+$tmp0,$rp
2195
2196         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(res_x, M);
2197         add     %sp,LOCALS64+$res_x,$rp
2198
2199         add     %sp,LOCALS64+$tmp0,$bp
2200         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_x, res_x, tmp0);
2201         add     %sp,LOCALS64+$res_x,$rp
2202
2203         ldx     [%sp+LOCALS64+$M],$a0           ! forward load
2204         ldx     [%sp+LOCALS64+$M+8],$a1
2205         ldx     [%sp+LOCALS64+$M+16],$a2
2206         ldx     [%sp+LOCALS64+$M+24],$a3
2207
2208         add     %sp,LOCALS64+$S,$bp
2209         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(S, S, res_x);
2210         add     %sp,LOCALS64+$S,$rp
2211
2212         mov     $acc0,$bi
2213         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, M);
2214         add     %sp,LOCALS64+$S,$rp
2215
2216         ldx     [%sp+LOCALS64+$res_x],$a0       ! forward load
2217         ldx     [%sp+LOCALS64+$res_x+8],$a1
2218         ldx     [%sp+LOCALS64+$res_x+16],$a2
2219         ldx     [%sp+LOCALS64+$res_x+24],$a3
2220
2221         add     %sp,LOCALS64+$res_y,$bp
2222         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, S, res_y);
2223         add     %sp,LOCALS64+$res_y,$rp
2224
2225         ! convert output to uint32_t[8]
2226         srlx    $a0,32,$t0
2227         srlx    $a1,32,$t1
2228         st      $a0,[$rp_real]                  ! res_x
2229         srlx    $a2,32,$t2
2230         st      $t0,[$rp_real+4]
2231         srlx    $a3,32,$t3
2232         st      $a1,[$rp_real+8]
2233         st      $t1,[$rp_real+12]
2234         st      $a2,[$rp_real+16]
2235         st      $t2,[$rp_real+20]
2236         st      $a3,[$rp_real+24]
2237         st      $t3,[$rp_real+28]
2238
2239         ldx     [%sp+LOCALS64+$res_z],$a0       ! forward load
2240         srlx    $acc0,32,$t0
2241         ldx     [%sp+LOCALS64+$res_z+8],$a1
2242         srlx    $acc1,32,$t1
2243         ldx     [%sp+LOCALS64+$res_z+16],$a2
2244         srlx    $acc2,32,$t2
2245         ldx     [%sp+LOCALS64+$res_z+24],$a3
2246         srlx    $acc3,32,$t3
2247         st      $acc0,[$rp_real+32]             ! res_y
2248         st      $t0,  [$rp_real+32+4]
2249         st      $acc1,[$rp_real+32+8]
2250         st      $t1,  [$rp_real+32+12]
2251         st      $acc2,[$rp_real+32+16]
2252         st      $t2,  [$rp_real+32+20]
2253         st      $acc3,[$rp_real+32+24]
2254         st      $t3,  [$rp_real+32+28]
2255
2256         srlx    $a0,32,$t0
2257         srlx    $a1,32,$t1
2258         st      $a0,[$rp_real+64]               ! res_z
2259         srlx    $a2,32,$t2
2260         st      $t0,[$rp_real+64+4]
2261         srlx    $a3,32,$t3
2262         st      $a1,[$rp_real+64+8]
2263         st      $t1,[$rp_real+64+12]
2264         st      $a2,[$rp_real+64+16]
2265         st      $t2,[$rp_real+64+20]
2266         st      $a3,[$rp_real+64+24]
2267         st      $t3,[$rp_real+64+28]
2268
2269         ret
2270         restore
2271 .size   ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2272 ___
2273 }
2274 ########################################################################
2275 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2276 #                             const P256_POINT *in2);
2277 {
2278 my ($res_x,$res_y,$res_z,
2279     $in1_x,$in1_y,$in1_z,
2280     $in2_x,$in2_y,$in2_z,
2281     $H,$Hsqr,$R,$Rsqr,$Hcub,
2282     $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2283 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2284
2285 # above map() describes stack layout with 18 temporary
2286 # 256-bit vectors on top. Then we reserve some space for
2287 # !in1infty, !in2infty and result of check for zero.
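#
# The corner cases handled below boil down to this hedged C-style sketch
# (is_equal() and the infinity flags stand for the values computed into the
# scratch stack slots, not for functions defined in this file):
#
#     if (!is_equal(U1, U2) || in1infty || in2infty)
#         ;                          /* .Ladd_proceed_vis3: generic addition,
#                                     * infinities are fixed up by the final
#                                     * conditional moves */
#     else if (is_equal(S1, S2))
#         goto double_shortcut;      /* same point: double it instead */
#     else
#         memset(out, 0, 96);        /* P + (-P): store the all-zero point */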
2288
2289 $code.=<<___;
2290 .globl  ecp_nistz256_point_add_vis3
2291 .align  32
2292 ecp_nistz256_point_add_vis3:
2293         save    %sp,-STACK64_FRAME-32*18-32,%sp
2294
2295         mov     $rp,$rp_real
2296         mov     -1,$minus1
2297         mov     -2,$poly3
2298         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2299         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2300
2301         ! convert input to uint64_t[4]
2302         ld      [$bp],$a0                       ! in2_x
2303         ld      [$bp+4],$t0
2304         ld      [$bp+8],$a1
2305         ld      [$bp+12],$t1
2306         ld      [$bp+16],$a2
2307         ld      [$bp+20],$t2
2308         ld      [$bp+24],$a3
2309         ld      [$bp+28],$t3
2310         sllx    $t0,32,$t0
2311         sllx    $t1,32,$t1
2312         ld      [$bp+32],$acc0                  ! in2_y
2313         or      $a0,$t0,$a0
2314         ld      [$bp+32+4],$t0
2315         sllx    $t2,32,$t2
2316         ld      [$bp+32+8],$acc1
2317         or      $a1,$t1,$a1
2318         ld      [$bp+32+12],$t1
2319         sllx    $t3,32,$t3
2320         ld      [$bp+32+16],$acc2
2321         or      $a2,$t2,$a2
2322         ld      [$bp+32+20],$t2
2323         or      $a3,$t3,$a3
2324         ld      [$bp+32+24],$acc3
2325         sllx    $t0,32,$t0
2326         ld      [$bp+32+28],$t3
2327         sllx    $t1,32,$t1
2328         stx     $a0,[%sp+LOCALS64+$in2_x]
2329         sllx    $t2,32,$t2
2330         stx     $a1,[%sp+LOCALS64+$in2_x+8]
2331         sllx    $t3,32,$t3
2332         stx     $a2,[%sp+LOCALS64+$in2_x+16]
2333         or      $acc0,$t0,$acc0
2334         stx     $a3,[%sp+LOCALS64+$in2_x+24]
2335         or      $acc1,$t1,$acc1
2336         stx     $acc0,[%sp+LOCALS64+$in2_y]
2337         or      $acc2,$t2,$acc2
2338         stx     $acc1,[%sp+LOCALS64+$in2_y+8]
2339         or      $acc3,$t3,$acc3
2340         stx     $acc2,[%sp+LOCALS64+$in2_y+16]
2341         stx     $acc3,[%sp+LOCALS64+$in2_y+24]
2342
2343         or      $a1,$a0,$a0
2344         or      $a3,$a2,$a2
2345         or      $acc1,$acc0,$acc0
2346         or      $acc3,$acc2,$acc2
2347         or      $a2,$a0,$a0
2348         or      $acc2,$acc0,$acc0
2349         or      $acc0,$a0,$a0
2350         movrnz  $a0,-1,$a0                      ! !in2infty
2351         stx     $a0,[%fp+STACK_BIAS-8]
2352
2353         ld      [$bp+64],$acc0                  ! in2_z
2354         ld      [$bp+64+4],$t0
2355         ld      [$bp+64+8],$acc1
2356         ld      [$bp+64+12],$t1
2357         ld      [$bp+64+16],$acc2
2358         ld      [$bp+64+20],$t2
2359         ld      [$bp+64+24],$acc3
2360         ld      [$bp+64+28],$t3
2361         sllx    $t0,32,$t0
2362         sllx    $t1,32,$t1
2363         ld      [$ap],$a0                       ! in1_x
2364         or      $acc0,$t0,$acc0
2365         ld      [$ap+4],$t0
2366         sllx    $t2,32,$t2
2367         ld      [$ap+8],$a1
2368         or      $acc1,$t1,$acc1
2369         ld      [$ap+12],$t1
2370         sllx    $t3,32,$t3
2371         ld      [$ap+16],$a2
2372         or      $acc2,$t2,$acc2
2373         ld      [$ap+20],$t2
2374         or      $acc3,$t3,$acc3
2375         ld      [$ap+24],$a3
2376         sllx    $t0,32,$t0
2377         ld      [$ap+28],$t3
2378         sllx    $t1,32,$t1
2379         stx     $acc0,[%sp+LOCALS64+$in2_z]
2380         sllx    $t2,32,$t2
2381         stx     $acc1,[%sp+LOCALS64+$in2_z+8]
2382         sllx    $t3,32,$t3
2383         stx     $acc2,[%sp+LOCALS64+$in2_z+16]
2384         stx     $acc3,[%sp+LOCALS64+$in2_z+24]
2385
2386         or      $a0,$t0,$a0
2387         ld      [$ap+32],$acc0                  ! in1_y
2388         or      $a1,$t1,$a1
2389         ld      [$ap+32+4],$t0
2390         or      $a2,$t2,$a2
2391         ld      [$ap+32+8],$acc1
2392         or      $a3,$t3,$a3
2393         ld      [$ap+32+12],$t1
2394         ld      [$ap+32+16],$acc2
2395         ld      [$ap+32+20],$t2
2396         ld      [$ap+32+24],$acc3
2397         sllx    $t0,32,$t0
2398         ld      [$ap+32+28],$t3
2399         sllx    $t1,32,$t1
2400         stx     $a0,[%sp+LOCALS64+$in1_x]
2401         sllx    $t2,32,$t2
2402         stx     $a1,[%sp+LOCALS64+$in1_x+8]
2403         sllx    $t3,32,$t3
2404         stx     $a2,[%sp+LOCALS64+$in1_x+16]
2405         or      $acc0,$t0,$acc0
2406         stx     $a3,[%sp+LOCALS64+$in1_x+24]
2407         or      $acc1,$t1,$acc1
2408         stx     $acc0,[%sp+LOCALS64+$in1_y]
2409         or      $acc2,$t2,$acc2
2410         stx     $acc1,[%sp+LOCALS64+$in1_y+8]
2411         or      $acc3,$t3,$acc3
2412         stx     $acc2,[%sp+LOCALS64+$in1_y+16]
2413         stx     $acc3,[%sp+LOCALS64+$in1_y+24]
2414
2415         or      $a1,$a0,$a0
2416         or      $a3,$a2,$a2
2417         or      $acc1,$acc0,$acc0
2418         or      $acc3,$acc2,$acc2
2419         or      $a2,$a0,$a0
2420         or      $acc2,$acc0,$acc0
2421         or      $acc0,$a0,$a0
2422         movrnz  $a0,-1,$a0                      ! !in1infty
2423         stx     $a0,[%fp+STACK_BIAS-16]
2424
2425         ldx     [%sp+LOCALS64+$in2_z],$a0       ! forward load
2426         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2427         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2428         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2429
2430         ld      [$ap+64],$acc0                  ! in1_z
2431         ld      [$ap+64+4],$t0
2432         ld      [$ap+64+8],$acc1
2433         ld      [$ap+64+12],$t1
2434         ld      [$ap+64+16],$acc2
2435         ld      [$ap+64+20],$t2
2436         ld      [$ap+64+24],$acc3
2437         ld      [$ap+64+28],$t3
2438         sllx    $t0,32,$t0
2439         sllx    $t1,32,$t1
2440         or      $acc0,$t0,$acc0
2441         sllx    $t2,32,$t2
2442         or      $acc1,$t1,$acc1
2443         sllx    $t3,32,$t3
2444         stx     $acc0,[%sp+LOCALS64+$in1_z]
2445         or      $acc2,$t2,$acc2
2446         stx     $acc1,[%sp+LOCALS64+$in1_z+8]
2447         or      $acc3,$t3,$acc3
2448         stx     $acc2,[%sp+LOCALS64+$in1_z+16]
2449         stx     $acc3,[%sp+LOCALS64+$in1_z+24]
2450
2451         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z2sqr, in2_z);
2452         add     %sp,LOCALS64+$Z2sqr,$rp
2453
2454         ldx     [%sp+LOCALS64+$in1_z],$a0
2455         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2456         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2457         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2458         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
2459         add     %sp,LOCALS64+$Z1sqr,$rp
2460
2461         ldx     [%sp+LOCALS64+$Z2sqr],$bi
2462         ldx     [%sp+LOCALS64+$in2_z],$a0
2463         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2464         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2465         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2466         add     %sp,LOCALS64+$Z2sqr,$bp
2467         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, Z2sqr, in2_z);
2468         add     %sp,LOCALS64+$S1,$rp
2469
2470         ldx     [%sp+LOCALS64+$Z1sqr],$bi
2471         ldx     [%sp+LOCALS64+$in1_z],$a0
2472         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2473         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2474         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2475         add     %sp,LOCALS64+$Z1sqr,$bp
2476         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, Z1sqr, in1_z);
2477         add     %sp,LOCALS64+$S2,$rp
2478
2479         ldx     [%sp+LOCALS64+$S1],$bi
2480         ldx     [%sp+LOCALS64+$in1_y],$a0
2481         ldx     [%sp+LOCALS64+$in1_y+8],$a1
2482         ldx     [%sp+LOCALS64+$in1_y+16],$a2
2483         ldx     [%sp+LOCALS64+$in1_y+24],$a3
2484         add     %sp,LOCALS64+$S1,$bp
2485         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, S1, in1_y);
2486         add     %sp,LOCALS64+$S1,$rp
2487
2488         ldx     [%sp+LOCALS64+$S2],$bi
2489         ldx     [%sp+LOCALS64+$in2_y],$a0
2490         ldx     [%sp+LOCALS64+$in2_y+8],$a1
2491         ldx     [%sp+LOCALS64+$in2_y+16],$a2
2492         ldx     [%sp+LOCALS64+$in2_y+24],$a3
2493         add     %sp,LOCALS64+$S2,$bp
2494         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S2, in2_y);
2495         add     %sp,LOCALS64+$S2,$rp
2496
2497         ldx     [%sp+LOCALS64+$Z2sqr],$bi       ! forward load
2498         ldx     [%sp+LOCALS64+$in1_x],$a0
2499         ldx     [%sp+LOCALS64+$in1_x+8],$a1
2500         ldx     [%sp+LOCALS64+$in1_x+16],$a2
2501         ldx     [%sp+LOCALS64+$in1_x+24],$a3
2502
2503         add     %sp,LOCALS64+$S1,$bp
2504         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(R, S2, S1);
2505         add     %sp,LOCALS64+$R,$rp
2506
2507         or      $acc1,$acc0,$acc0               ! see if result is zero
2508         or      $acc3,$acc2,$acc2
2509         or      $acc2,$acc0,$acc0
2510         stx     $acc0,[%fp+STACK_BIAS-24]
2511
2512         add     %sp,LOCALS64+$Z2sqr,$bp
2513         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U1, in1_x, Z2sqr);
2514         add     %sp,LOCALS64+$U1,$rp
2515
2516         ldx     [%sp+LOCALS64+$Z1sqr],$bi
2517         ldx     [%sp+LOCALS64+$in2_x],$a0
2518         ldx     [%sp+LOCALS64+$in2_x+8],$a1
2519         ldx     [%sp+LOCALS64+$in2_x+16],$a2
2520         ldx     [%sp+LOCALS64+$in2_x+24],$a3
2521         add     %sp,LOCALS64+$Z1sqr,$bp
2522         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, in2_x, Z1sqr);
2523         add     %sp,LOCALS64+$U2,$rp
2524
2525         ldx     [%sp+LOCALS64+$R],$a0           ! forward load
2526         ldx     [%sp+LOCALS64+$R+8],$a1
2527         ldx     [%sp+LOCALS64+$R+16],$a2
2528         ldx     [%sp+LOCALS64+$R+24],$a3
2529
2530         add     %sp,LOCALS64+$U1,$bp
2531         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(H, U2, U1);
2532         add     %sp,LOCALS64+$H,$rp
2533
2534         or      $acc1,$acc0,$acc0               ! see if result is zero
2535         or      $acc3,$acc2,$acc2
2536         orcc    $acc2,$acc0,$acc0
2537
2538         bne,pt  %xcc,.Ladd_proceed_vis3         ! is_equal(U1,U2)?
2539         nop
2540
2541         ldx     [%fp+STACK_BIAS-8],$t0
2542         ldx     [%fp+STACK_BIAS-16],$t1
2543         ldx     [%fp+STACK_BIAS-24],$t2
2544         andcc   $t0,$t1,%g0
2545         be,pt   %xcc,.Ladd_proceed_vis3         ! (in1infty || in2infty)?
2546         nop
2547         andcc   $t2,$t2,%g0
2548         be,a,pt %xcc,.Ldouble_shortcut_vis3     ! is_equal(S1,S2)?
2549         add     %sp,32*(18-10)+32,%sp           ! difference in frame sizes
2550
2551         st      %g0,[$rp_real]
2552         st      %g0,[$rp_real+4]
2553         st      %g0,[$rp_real+8]
2554         st      %g0,[$rp_real+12]
2555         st      %g0,[$rp_real+16]
2556         st      %g0,[$rp_real+20]
2557         st      %g0,[$rp_real+24]
2558         st      %g0,[$rp_real+28]
2559         st      %g0,[$rp_real+32]
2560         st      %g0,[$rp_real+32+4]
2561         st      %g0,[$rp_real+32+8]
2562         st      %g0,[$rp_real+32+12]
2563         st      %g0,[$rp_real+32+16]
2564         st      %g0,[$rp_real+32+20]
2565         st      %g0,[$rp_real+32+24]
2566         st      %g0,[$rp_real+32+28]
2567         st      %g0,[$rp_real+64]
2568         st      %g0,[$rp_real+64+4]
2569         st      %g0,[$rp_real+64+8]
2570         st      %g0,[$rp_real+64+12]
2571         st      %g0,[$rp_real+64+16]
2572         st      %g0,[$rp_real+64+20]
2573         st      %g0,[$rp_real+64+24]
2574         st      %g0,[$rp_real+64+28]
2575         b       .Ladd_done_vis3
2576         nop
2577
2578 .align  16
2579 .Ladd_proceed_vis3:
2580         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Rsqr, R);
2581         add     %sp,LOCALS64+$Rsqr,$rp
2582
2583         ldx     [%sp+LOCALS64+$H],$bi
2584         ldx     [%sp+LOCALS64+$in1_z],$a0
2585         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2586         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2587         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2588         add     %sp,LOCALS64+$H,$bp
2589         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, H, in1_z);
2590         add     %sp,LOCALS64+$res_z,$rp
2591
2592         ldx     [%sp+LOCALS64+$H],$a0
2593         ldx     [%sp+LOCALS64+$H+8],$a1
2594         ldx     [%sp+LOCALS64+$H+16],$a2
2595         ldx     [%sp+LOCALS64+$H+24],$a3
2596         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Hsqr, H);
2597         add     %sp,LOCALS64+$Hsqr,$rp
2598
2599         ldx     [%sp+LOCALS64+$res_z],$bi
2600         ldx     [%sp+LOCALS64+$in2_z],$a0
2601         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2602         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2603         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2604         add     %sp,LOCALS64+$res_z,$bp
2605         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, res_z, in2_z);
2606         add     %sp,LOCALS64+$res_z,$rp
2607
2608         ldx     [%sp+LOCALS64+$H],$bi
2609         ldx     [%sp+LOCALS64+$Hsqr],$a0
2610         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2611         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2612         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2613         add     %sp,LOCALS64+$H,$bp
2614         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(Hcub, Hsqr, H);
2615         add     %sp,LOCALS64+$Hcub,$rp
2616
2617         ldx     [%sp+LOCALS64+$U1],$bi
2618         ldx     [%sp+LOCALS64+$Hsqr],$a0
2619         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2620         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2621         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2622         add     %sp,LOCALS64+$U1,$bp
2623         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, U1, Hsqr);
2624         add     %sp,LOCALS64+$U2,$rp
2625
2626         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(Hsqr, U2);
2627         add     %sp,LOCALS64+$Hsqr,$rp
2628
2629         add     %sp,LOCALS64+$Rsqr,$bp
2630         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_x, Rsqr, Hsqr);
2631         add     %sp,LOCALS64+$res_x,$rp
2632
2633         add     %sp,LOCALS64+$Hcub,$bp
2634         call    __ecp_nistz256_sub_from_vis3    !  p256_sub(res_x, res_x, Hcub);
2635         add     %sp,LOCALS64+$res_x,$rp
2636
2637         ldx     [%sp+LOCALS64+$S1],$bi          ! forward load
2638         ldx     [%sp+LOCALS64+$Hcub],$a0
2639         ldx     [%sp+LOCALS64+$Hcub+8],$a1
2640         ldx     [%sp+LOCALS64+$Hcub+16],$a2
2641         ldx     [%sp+LOCALS64+$Hcub+24],$a3
2642
2643         add     %sp,LOCALS64+$U2,$bp
2644         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_y, U2, res_x);
2645         add     %sp,LOCALS64+$res_y,$rp
2646
2647         add     %sp,LOCALS64+$S1,$bp
2648         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S1, Hcub);
2649         add     %sp,LOCALS64+$S2,$rp
2650
2651         ldx     [%sp+LOCALS64+$R],$bi
2652         ldx     [%sp+LOCALS64+$res_y],$a0
2653         ldx     [%sp+LOCALS64+$res_y+8],$a1
2654         ldx     [%sp+LOCALS64+$res_y+16],$a2
2655         ldx     [%sp+LOCALS64+$res_y+24],$a3
2656         add     %sp,LOCALS64+$R,$bp
2657         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_y, res_y, R);
2658         add     %sp,LOCALS64+$res_y,$rp
2659
2660         add     %sp,LOCALS64+$S2,$bp
2661         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, res_y, S2);
2662         add     %sp,LOCALS64+$res_y,$rp
2663
2664         ldx     [%fp+STACK_BIAS-16],$t1         ! !in1infty
2665         ldx     [%fp+STACK_BIAS-8],$t2          ! !in2infty
2666 ___
2667 for($i=0;$i<96;$i+=16) {                        # conditional moves
2668 $code.=<<___;
2669         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2670         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2671         ldx     [%sp+LOCALS64+$in2_x+$i],$acc2  ! in2
2672         ldx     [%sp+LOCALS64+$in2_x+$i+8],$acc3
2673         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2674         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2675         movrz   $t1,$acc2,$acc0
2676         movrz   $t1,$acc3,$acc1
2677         movrz   $t2,$acc4,$acc0
2678         movrz   $t2,$acc5,$acc1
2679         srlx    $acc0,32,$acc2
2680         srlx    $acc1,32,$acc3
2681         st      $acc0,[$rp_real+$i]
2682         st      $acc2,[$rp_real+$i+4]
2683         st      $acc1,[$rp_real+$i+8]
2684         st      $acc3,[$rp_real+$i+12]
2685 ___
2686 }
2687 $code.=<<___;
2688 .Ladd_done_vis3:
2689         ret
2690         restore
2691 .size   ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
2692 ___
2693 }
2694 ########################################################################
2695 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
2696 #                                    const P256_POINT_AFFINE *in2);
2697 {
2698 my ($res_x,$res_y,$res_z,
2699     $in1_x,$in1_y,$in1_z,
2700     $in2_x,$in2_y,
2701     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
2702 my $Z1sqr = $S2;
2703 # above map() describes stack layout with 15 temporary
2704 # 256-bit vectors on top. Then we reserve some space for
2705 # !in1infty and !in2infty.
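#
# Because in2 is affine, Z2 is implicitly 1, so Z2sqr, U1 and S1 of the generic
# addition degenerate to 1, in1_x and in1_y respectively. A hedged sketch of
# the final selection performed by the movrz blocks, one 64-bit word at a time:
#
#     res = in1infty ? in2 : res;    /* in2's Z is taken from .Lone_mont_vis3,
#                                     * i.e. 1 in Montgomery representation */
#     res = in2infty ? in1 : res;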
2706
2707 $code.=<<___;
2708 .align  32
2709 ecp_nistz256_point_add_affine_vis3:
2710         save    %sp,-STACK64_FRAME-32*15-32,%sp
2711
2712         mov     $rp,$rp_real
2713         mov     -1,$minus1
2714         mov     -2,$poly3
2715         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2716         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2717
2718         ! convert input to uint64_t[4]
2719         ld      [$bp],$a0                       ! in2_x
2720         ld      [$bp+4],$t0
2721         ld      [$bp+8],$a1
2722         ld      [$bp+12],$t1
2723         ld      [$bp+16],$a2
2724         ld      [$bp+20],$t2
2725         ld      [$bp+24],$a3
2726         ld      [$bp+28],$t3
2727         sllx    $t0,32,$t0
2728         sllx    $t1,32,$t1
2729         ld      [$bp+32],$acc0                  ! in2_y
2730         or      $a0,$t0,$a0
2731         ld      [$bp+32+4],$t0
2732         sllx    $t2,32,$t2
2733         ld      [$bp+32+8],$acc1
2734         or      $a1,$t1,$a1
2735         ld      [$bp+32+12],$t1
2736         sllx    $t3,32,$t3
2737         ld      [$bp+32+16],$acc2
2738         or      $a2,$t2,$a2
2739         ld      [$bp+32+20],$t2
2740         or      $a3,$t3,$a3
2741         ld      [$bp+32+24],$acc3
2742         sllx    $t0,32,$t0
2743         ld      [$bp+32+28],$t3
2744         sllx    $t1,32,$t1
2745         stx     $a0,[%sp+LOCALS64+$in2_x]
2746         sllx    $t2,32,$t2
2747         stx     $a1,[%sp+LOCALS64+$in2_x+8]
2748         sllx    $t3,32,$t3
2749         stx     $a2,[%sp+LOCALS64+$in2_x+16]
2750         or      $acc0,$t0,$acc0
2751         stx     $a3,[%sp+LOCALS64+$in2_x+24]
2752         or      $acc1,$t1,$acc1
2753         stx     $acc0,[%sp+LOCALS64+$in2_y]
2754         or      $acc2,$t2,$acc2
2755         stx     $acc1,[%sp+LOCALS64+$in2_y+8]
2756         or      $acc3,$t3,$acc3
2757         stx     $acc2,[%sp+LOCALS64+$in2_y+16]
2758         stx     $acc3,[%sp+LOCALS64+$in2_y+24]
2759
2760         or      $a1,$a0,$a0
2761         or      $a3,$a2,$a2
2762         or      $acc1,$acc0,$acc0
2763         or      $acc3,$acc2,$acc2
2764         or      $a2,$a0,$a0
2765         or      $acc2,$acc0,$acc0
2766         or      $acc0,$a0,$a0
2767         movrnz  $a0,-1,$a0                      ! !in2infty
2768         stx     $a0,[%fp+STACK_BIAS-8]
2769
2770         ld      [$ap],$a0                       ! in1_x
2771         ld      [$ap+4],$t0
2772         ld      [$ap+8],$a1
2773         ld      [$ap+12],$t1
2774         ld      [$ap+16],$a2
2775         ld      [$ap+20],$t2
2776         ld      [$ap+24],$a3
2777         ld      [$ap+28],$t3
2778         sllx    $t0,32,$t0
2779         sllx    $t1,32,$t1
2780         ld      [$ap+32],$acc0                  ! in1_y
2781         or      $a0,$t0,$a0
2782         ld      [$ap+32+4],$t0
2783         sllx    $t2,32,$t2
2784         ld      [$ap+32+8],$acc1
2785         or      $a1,$t1,$a1
2786         ld      [$ap+32+12],$t1
2787         sllx    $t3,32,$t3
2788         ld      [$ap+32+16],$acc2
2789         or      $a2,$t2,$a2
2790         ld      [$ap+32+20],$t2
2791         or      $a3,$t3,$a3
2792         ld      [$ap+32+24],$acc3
2793         sllx    $t0,32,$t0
2794         ld      [$ap+32+28],$t3
2795         sllx    $t1,32,$t1
2796         stx     $a0,[%sp+LOCALS64+$in1_x]
2797         sllx    $t2,32,$t2
2798         stx     $a1,[%sp+LOCALS64+$in1_x+8]
2799         sllx    $t3,32,$t3
2800         stx     $a2,[%sp+LOCALS64+$in1_x+16]
2801         or      $acc0,$t0,$acc0
2802         stx     $a3,[%sp+LOCALS64+$in1_x+24]
2803         or      $acc1,$t1,$acc1
2804         stx     $acc0,[%sp+LOCALS64+$in1_y]
2805         or      $acc2,$t2,$acc2
2806         stx     $acc1,[%sp+LOCALS64+$in1_y+8]
2807         or      $acc3,$t3,$acc3
2808         stx     $acc2,[%sp+LOCALS64+$in1_y+16]
2809         stx     $acc3,[%sp+LOCALS64+$in1_y+24]
2810
2811         or      $a1,$a0,$a0
2812         or      $a3,$a2,$a2
2813         or      $acc1,$acc0,$acc0
2814         or      $acc3,$acc2,$acc2
2815         or      $a2,$a0,$a0
2816         or      $acc2,$acc0,$acc0
2817         or      $acc0,$a0,$a0
2818         movrnz  $a0,-1,$a0                      ! !in1infty
2819         stx     $a0,[%fp+STACK_BIAS-16]
2820
2821         ld      [$ap+64],$a0                    ! in1_z
2822         ld      [$ap+64+4],$t0
2823         ld      [$ap+64+8],$a1
2824         ld      [$ap+64+12],$t1
2825         ld      [$ap+64+16],$a2
2826         ld      [$ap+64+20],$t2
2827         ld      [$ap+64+24],$a3
2828         ld      [$ap+64+28],$t3
2829         sllx    $t0,32,$t0
2830         sllx    $t1,32,$t1
2831         or      $a0,$t0,$a0
2832         sllx    $t2,32,$t2
2833         or      $a1,$t1,$a1
2834         sllx    $t3,32,$t3
2835         stx     $a0,[%sp+LOCALS64+$in1_z]
2836         or      $a2,$t2,$a2
2837         stx     $a1,[%sp+LOCALS64+$in1_z+8]
2838         or      $a3,$t3,$a3
2839         stx     $a2,[%sp+LOCALS64+$in1_z+16]
2840         stx     $a3,[%sp+LOCALS64+$in1_z+24]
2841
2842         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
2843         add     %sp,LOCALS64+$Z1sqr,$rp
2844
2845         ldx     [%sp+LOCALS64+$in2_x],$bi
2846         mov     $acc0,$a0
2847         mov     $acc1,$a1
2848         mov     $acc2,$a2
2849         mov     $acc3,$a3
2850         add     %sp,LOCALS64+$in2_x,$bp
2851         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, Z1sqr, in2_x);
2852         add     %sp,LOCALS64+$U2,$rp
2853
2854         ldx     [%sp+LOCALS64+$Z1sqr],$bi       ! forward load
2855         ldx     [%sp+LOCALS64+$in1_z],$a0
2856         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2857         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2858         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2859
2860         add     %sp,LOCALS64+$in1_x,$bp
2861         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(H, U2, in1_x);
2862         add     %sp,LOCALS64+$H,$rp
2863
2864         add     %sp,LOCALS64+$Z1sqr,$bp
2865         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, Z1sqr, in1_z);
2866         add     %sp,LOCALS64+$S2,$rp
2867
2868         ldx     [%sp+LOCALS64+$H],$bi
2869         ldx     [%sp+LOCALS64+$in1_z],$a0
2870         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2871         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2872         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2873         add     %sp,LOCALS64+$H,$bp
2874         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, H, in1_z);
2875         add     %sp,LOCALS64+$res_z,$rp
2876
2877         ldx     [%sp+LOCALS64+$S2],$bi
2878         ldx     [%sp+LOCALS64+$in2_y],$a0
2879         ldx     [%sp+LOCALS64+$in2_y+8],$a1
2880         ldx     [%sp+LOCALS64+$in2_y+16],$a2
2881         ldx     [%sp+LOCALS64+$in2_y+24],$a3
2882         add     %sp,LOCALS64+$S2,$bp
2883         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S2, in2_y);
2884         add     %sp,LOCALS64+$S2,$rp
2885
2886         ldx     [%sp+LOCALS64+$H],$a0           ! forward load
2887         ldx     [%sp+LOCALS64+$H+8],$a1
2888         ldx     [%sp+LOCALS64+$H+16],$a2
2889         ldx     [%sp+LOCALS64+$H+24],$a3
2890
2891         add     %sp,LOCALS64+$in1_y,$bp
2892         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(R, S2, in1_y);
2893         add     %sp,LOCALS64+$R,$rp
2894
2895         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Hsqr, H);
2896         add     %sp,LOCALS64+$Hsqr,$rp
2897
2898         ldx     [%sp+LOCALS64+$R],$a0
2899         ldx     [%sp+LOCALS64+$R+8],$a1
2900         ldx     [%sp+LOCALS64+$R+16],$a2
2901         ldx     [%sp+LOCALS64+$R+24],$a3
2902         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Rsqr, R);
2903         add     %sp,LOCALS64+$Rsqr,$rp
2904
2905         ldx     [%sp+LOCALS64+$H],$bi
2906         ldx     [%sp+LOCALS64+$Hsqr],$a0
2907         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2908         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2909         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2910         add     %sp,LOCALS64+$H,$bp
2911         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(Hcub, Hsqr, H);
2912         add     %sp,LOCALS64+$Hcub,$rp
2913
2914         ldx     [%sp+LOCALS64+$Hsqr],$bi
2915         ldx     [%sp+LOCALS64+$in1_x],$a0
2916         ldx     [%sp+LOCALS64+$in1_x+8],$a1
2917         ldx     [%sp+LOCALS64+$in1_x+16],$a2
2918         ldx     [%sp+LOCALS64+$in1_x+24],$a3
2919         add     %sp,LOCALS64+$Hsqr,$bp
2920         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, in1_x, Hsqr);
2921         add     %sp,LOCALS64+$U2,$rp
2922
2923         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(Hsqr, U2);
2924         add     %sp,LOCALS64+$Hsqr,$rp
2925
2926         add     %sp,LOCALS64+$Rsqr,$bp
2927         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_x, Rsqr, Hsqr);
2928         add     %sp,LOCALS64+$res_x,$rp
2929
2930         add     %sp,LOCALS64+$Hcub,$bp
2931         call    __ecp_nistz256_sub_from_vis3    !  p256_sub(res_x, res_x, Hcub);
2932         add     %sp,LOCALS64+$res_x,$rp
2933
2934         ldx     [%sp+LOCALS64+$Hcub],$bi        ! forward load
2935         ldx     [%sp+LOCALS64+$in1_y],$a0
2936         ldx     [%sp+LOCALS64+$in1_y+8],$a1
2937         ldx     [%sp+LOCALS64+$in1_y+16],$a2
2938         ldx     [%sp+LOCALS64+$in1_y+24],$a3
2939
2940         add     %sp,LOCALS64+$U2,$bp
2941         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_y, U2, res_x);
2942         add     %sp,LOCALS64+$res_y,$rp
2943
2944         add     %sp,LOCALS64+$Hcub,$bp
2945         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, in1_y, Hcub);
2946         add     %sp,LOCALS64+$S2,$rp
2947
2948         ldx     [%sp+LOCALS64+$R],$bi
2949         ldx     [%sp+LOCALS64+$res_y],$a0
2950         ldx     [%sp+LOCALS64+$res_y+8],$a1
2951         ldx     [%sp+LOCALS64+$res_y+16],$a2
2952         ldx     [%sp+LOCALS64+$res_y+24],$a3
2953         add     %sp,LOCALS64+$R,$bp
2954         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_y, res_y, R);
2955         add     %sp,LOCALS64+$res_y,$rp
2956
2957         add     %sp,LOCALS64+$S2,$bp
2958         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, res_y, S2);
2959         add     %sp,LOCALS64+$res_y,$rp
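        ! res_y = R*(U2 - res_x) - S2, i.e. R*(U2 - res_x) - in1_y*H^3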
2960
2961         ldx     [%fp+STACK_BIAS-16],$t1         ! !in1infty
2962         ldx     [%fp+STACK_BIAS-8],$t2          ! !in2infty
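        ! "call .+8" simply falls through to the next instruction while
        ! depositing the address of label 1 in %o7, so the add below forms
        ! the absolute address of .Lone_mont_vis3 (1 in Montgomery
        ! representation) in a position-independent manner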
2963 1:      call    .+8
2964         add     %o7,.Lone_mont_vis3-1b,$bp
2965 ___
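# Corner cases: if in1 was the point at infinity the result must be in2
# (whose implied Z coordinate is 1 in Montgomery form), and if in2 was
# infinity the result must be in1; otherwise it is the sum just computed.
# $t1/$t2 hold the "not infinity" flags, so each movrz substitutes the
# other input whenever the corresponding flag is zero, with the in2infty
# case taking precedence.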
2966 for($i=0;$i<64;$i+=16) {                        # conditional moves
2967 $code.=<<___;
2968         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2969         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2970         ldx     [%sp+LOCALS64+$in2_x+$i],$acc2  ! in2
2971         ldx     [%sp+LOCALS64+$in2_x+$i+8],$acc3
2972         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2973         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2974         movrz   $t1,$acc2,$acc0
2975         movrz   $t1,$acc3,$acc1
2976         movrz   $t2,$acc4,$acc0
2977         movrz   $t2,$acc5,$acc1
2978         srlx    $acc0,32,$acc2
2979         srlx    $acc1,32,$acc3
2980         st      $acc0,[$rp_real+$i]
2981         st      $acc2,[$rp_real+$i+4]
2982         st      $acc1,[$rp_real+$i+8]
2983         st      $acc3,[$rp_real+$i+12]
2984 ___
2985 }
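# The last 32 bytes are the Z coordinate. The affine input in2 carries no
# Z of its own, so its stand-in is read from .Lone_mont_vis3 through $bp
# rather than from the stack copy of in2.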
2986 for(;$i<96;$i+=16) {
2987 $code.=<<___;
2988         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2989         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2990         ldx     [$bp+$i-64],$acc2               ! "in2"
2991         ldx     [$bp+$i-64+8],$acc3
2992         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2993         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2994         movrz   $t1,$acc2,$acc0
2995         movrz   $t1,$acc3,$acc1
2996         movrz   $t2,$acc4,$acc0
2997         movrz   $t2,$acc5,$acc1
2998         srlx    $acc0,32,$acc2
2999         srlx    $acc1,32,$acc3
3000         st      $acc0,[$rp_real+$i]
3001         st      $acc2,[$rp_real+$i+4]
3002         st      $acc1,[$rp_real+$i+8]
3003         st      $acc3,[$rp_real+$i+12]
3004 ___
3005 }
3006 $code.=<<___;
3007         ret
3008         restore
3009 .size   ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
3010 .align  64
3011 .Lone_mont_vis3:
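                        ! 2^256 mod P, i.e. 1 in Montgomery representation,
                        ! least significant 64-bit limb first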
3012 .long   0x00000000,0x00000001, 0xffffffff,0x00000000
3013 .long   0xffffffff,0xffffffff, 0x00000000,0xfffffffe
3014 .align  64
3015 ___
3016 }                                                               }}}
3017 \f
3018 # The purpose of these subroutines is to encode VIS instructions
3019 # explicitly, so that the module can be compiled without specifying VIS
3020 # extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
3021 # The idea is to keep open the option of producing a "universal" binary
3022 # and letting the programmer detect at run-time whether the CPU is VIS-capable.
3023 sub unvis3 {
3024 my ($mnemonic,$rs1,$rs2,$rd)=@_;
3025 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
3026 my ($ref,$opf);
3027 my %visopf = (  "addxc"         => 0x011,
3028                 "addxccc"       => 0x013,
3029                 "umulxhi"       => 0x016        );
3030
3031     $ref = "$mnemonic\t$rs1,$rs2,$rd";
3032
3033     if ($opf=$visopf{$mnemonic}) {
3034         foreach ($rs1,$rs2,$rd) {
3035             return $ref if (!/%([goli])([0-9])/);
3036             $_=$bias{$1}+$2;
3037         }
3038
3039         return  sprintf ".word\t0x%08x !%s",
3040                         0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
3041                         $ref;
3042     } else {
3043         return $ref;
3044     }
3045 }
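# For reference, a hand-worked example (not emitted by the module itself):
# unvis3("umulxhi","%o0","%o1","%o2") maps rs1=%o0(8), rs2=%o1(9), rd=%o2(10)
# onto IMPDEP1 with opf 0x016 and should return
#	.word	0x95b202c9 !umulxhi	%o0,%o1,%o2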
3046
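# Post-process the accumulated $code: evaluate any `...` expressions and
# rewrite the VIS3 mnemonics (umulxhi, addxc, addxccc) into raw .word
# encodings before printing the final assembly.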
3047 foreach (split("\n",$code)) {
3048         s/\`([^\`]*)\`/eval $1/ge;
3049
3050         s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
3051                 &unvis3($1,$2,$3,$4)
3052          /ge;
3053
3054         print $_,"\n";
3055 }
3056
3057 close STDOUT;