1 #! /usr/bin/env perl
2 # Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # ECP_NISTZ256 module for SPARCv9.
18 #
19 # February 2015.
20 #
21 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
22 # http://eprint.iacr.org/2013/816. In the process of adaptation
23 # original .c module was made 32-bit savvy in order to make this
24 # implementation possible.
25 #
26 #                       with/without -DECP_NISTZ256_ASM
27 # UltraSPARC III        +12-18%
28 # SPARC T4              +99-550% (+66-150% on 32-bit Solaris)
29 #
30 # Ranges denote minimum and maximum improvement coefficients depending
31 # on benchmark. Lower coefficients are for ECDSA sign, server-side
32 # operation. Keep in mind that +200% means 3x improvement.
33
34 $output = pop;
35 open STDOUT,">$output";
36
37 $code.=<<___;
38 #include "sparc_arch.h"
39
40 #define LOCALS  (STACK_BIAS+STACK_FRAME)
41 #ifdef  __arch64__
42 .register       %g2,#scratch
43 .register       %g3,#scratch
44 # define STACK64_FRAME  STACK_FRAME
45 # define LOCALS64       LOCALS
46 #else
47 # define STACK64_FRAME  (2047+192)
48 # define LOCALS64       STACK64_FRAME
49 #endif
50
51 .section        ".text",#alloc,#execinstr
52 ___
53 ########################################################################
54 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz256_gather_w7
55 #
56 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
57 open TABLE,"<ecp_nistz256_table.c"              or
58 open TABLE,"<${dir}../ecp_nistz256_table.c"     or
59 die "failed to open ecp_nistz256_table.c:",$!;
60
61 use integer;
62
63 foreach(<TABLE>) {
64         s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
65 }
66 close TABLE;
67
68 # See ecp_nistz256_table.c for explanation of why it's 64*16*37.
69 # 64*16*37-1 is because $#arr returns the last valid index of @arr, not
70 # the number of elements.
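# (E.g. for my @a = (7,8,9), $#a is 2 while scalar(@a) is 3.)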
71 die "insane number of elements" if ($#arr != 64*16*37-1);
72
73 $code.=<<___;
74 .globl  ecp_nistz256_precomputed
75 .align  4096
76 ecp_nistz256_precomputed:
77 ___
78 ########################################################################
79 # this conversion smashes P256_POINT_AFFINE by individual bytes with
80 # 64 byte interval, similar to
81 #       1111222233334444
82 #       1234123412341234
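# i.e. row $i of each 64x64-byte block holds byte $i of every one of the
# 64 points in that block, so that ecp_nistz256_gather_w7 can assemble a
# point by reading one byte per 64-byte row at a constant offset.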
83 for(1..37) {
84         @tbl = splice(@arr,0,64*16);
85         for($i=0;$i<64;$i++) {
86                 undef @line;
87                 for($j=0;$j<64;$j++) {
88                         push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
89                 }
90                 $code.=".byte\t";
91                 $code.=join(',',map { sprintf "0x%02x",$_} @line);
92                 $code.="\n";
93         }
94 }
95
96 {{{
97 my ($rp,$ap,$bp)=map("%i$_",(0..2));
98 my @acc=map("%l$_",(0..7));
99 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
100 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
101 my ($rp_real,$ap_real)=("%g2","%g3");
102
103 $code.=<<___;
104 .size   ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
105 .align  64
106 .LRR:   ! 2^512 mod P precomputed for NIST P256 polynomial
107 .long   0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
108 .long   0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
109 .Lone:
110 .long   1,0,0,0,0,0,0,0
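! .LRR is used by ecp_nistz256_to_mont below: mul_mont(r, a, 2^512 mod P)
! yields a*2^256 mod P, i.e. a in Montgomery form. .Lone is used by
! ecp_nistz256_from_mont for the reverse conversion.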
111 .asciz  "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
112
113 ! void  ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
114 .globl  ecp_nistz256_to_mont
115 .align  64
116 ecp_nistz256_to_mont:
117         save    %sp,-STACK_FRAME,%sp
118         nop
119 1:      call    .+8
120         add     %o7,.LRR-1b,$bp
121         call    __ecp_nistz256_mul_mont
122         nop
123         ret
124         restore
125 .size   ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
126
127 ! void  ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
128 .globl  ecp_nistz256_from_mont
129 .align  32
130 ecp_nistz256_from_mont:
131         save    %sp,-STACK_FRAME,%sp
132         nop
133 1:      call    .+8
134         add     %o7,.Lone-1b,$bp
135         call    __ecp_nistz256_mul_mont
136         nop
137         ret
138         restore
139 .size   ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
140
141 ! void  ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
142 !                                             const BN_ULONG %i2[8]);
143 .globl  ecp_nistz256_mul_mont
144 .align  32
145 ecp_nistz256_mul_mont:
146         save    %sp,-STACK_FRAME,%sp
147         nop
148         call    __ecp_nistz256_mul_mont
149         nop
150         ret
151         restore
152 .size   ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
153
154 ! void  ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
155 .globl  ecp_nistz256_sqr_mont
156 .align  32
157 ecp_nistz256_sqr_mont:
158         save    %sp,-STACK_FRAME,%sp
159         mov     $ap,$bp
160         call    __ecp_nistz256_mul_mont
161         nop
162         ret
163         restore
164 .size   ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
165 ___
166
167 ########################################################################
168 # Special thing to keep in mind is that $t0-$t7 hold 64-bit values,
169 # while all others are meant to hold 32-bit ones. "Meant to" means that
170 # additions to @acc[0-7] do "contaminate" upper bits, but they are
171 # cleared before they can affect the outcome (follow 'and' with $mask).
172 # Also keep in mind that addition with carry is addition with 32-bit
173 # carry, even though the CPU is 64-bit. [Addition with 64-bit carry was
174 # introduced in T3; see below for VIS3 code paths.]
175
176 $code.=<<___;
177 .align  32
178 __ecp_nistz256_mul_mont:
179         ld      [$bp+0],$bi             ! b[0]
180         mov     -1,$mask
181         ld      [$ap+0],$a0
182         srl     $mask,0,$mask           ! 0xffffffff
183         ld      [$ap+4],$t1
184         ld      [$ap+8],$t2
185         ld      [$ap+12],$t3
186         ld      [$ap+16],$t4
187         ld      [$ap+20],$t5
188         ld      [$ap+24],$t6
189         ld      [$ap+28],$t7
190         mulx    $a0,$bi,$t0             ! a[0-7]*b[0], 64-bit results
191         mulx    $t1,$bi,$t1
192         mulx    $t2,$bi,$t2
193         mulx    $t3,$bi,$t3
194         mulx    $t4,$bi,$t4
195         mulx    $t5,$bi,$t5
196         mulx    $t6,$bi,$t6
197         mulx    $t7,$bi,$t7
198         srlx    $t0,32,@acc[1]          ! extract high parts
199         srlx    $t1,32,@acc[2]
200         srlx    $t2,32,@acc[3]
201         srlx    $t3,32,@acc[4]
202         srlx    $t4,32,@acc[5]
203         srlx    $t5,32,@acc[6]
204         srlx    $t6,32,@acc[7]
205         srlx    $t7,32,@acc[0]          ! "@acc[8]"
206         mov     0,$carry
207 ___
208 for($i=1;$i<8;$i++) {
209 $code.=<<___;
210         addcc   @acc[1],$t1,@acc[1]     ! accumulate high parts
211         ld      [$bp+4*$i],$bi          ! b[$i]
212         ld      [$ap+4],$t1             ! re-load a[1-7]
213         addccc  @acc[2],$t2,@acc[2]
214         addccc  @acc[3],$t3,@acc[3]
215         ld      [$ap+8],$t2
216         ld      [$ap+12],$t3
217         addccc  @acc[4],$t4,@acc[4]
218         addccc  @acc[5],$t5,@acc[5]
219         ld      [$ap+16],$t4
220         ld      [$ap+20],$t5
221         addccc  @acc[6],$t6,@acc[6]
222         addccc  @acc[7],$t7,@acc[7]
223         ld      [$ap+24],$t6
224         ld      [$ap+28],$t7
225         addccc  @acc[0],$carry,@acc[0]  ! "@acc[8]"
226         addc    %g0,%g0,$carry
227 ___
228         # Reduction iteration is normally performed by accumulating
229         # result of multiplication of modulus by "magic" digit [and
230         # omitting least significant word, which is guaranteed to
231         # be 0], but thanks to special form of modulus and "magic"
232         # digit being equal to least significant word, it can be
233         # performed with additions and subtractions alone. Indeed:
234         #
235         #        ffff.0001.0000.0000.0000.ffff.ffff.ffff
236         # *                                         abcd
237         # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
238         #
239 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can
240 # rewrite the above as:
241         #
242         #   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
243         # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
244         # -      abcd.0000.0000.0000.0000.0000.0000.abcd
245         #
246         # or marking redundant operations:
247         #
248         #   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
249         # + abcd.0000.abcd.0000.0000.abcd.----.----.----
250         # -      abcd.----.----.----.----.----.----.----
251
252 $code.=<<___;
253         ! multiplication-less reduction
254         addcc   @acc[3],$t0,@acc[3]     ! r[3]+=r[0]
255         addccc  @acc[4],%g0,@acc[4]     ! r[4]+=0
256          and    @acc[1],$mask,@acc[1]
257          and    @acc[2],$mask,@acc[2]
258         addccc  @acc[5],%g0,@acc[5]     ! r[5]+=0
259         addccc  @acc[6],$t0,@acc[6]     ! r[6]+=r[0]
260          and    @acc[3],$mask,@acc[3]
261          and    @acc[4],$mask,@acc[4]
262         addccc  @acc[7],%g0,@acc[7]     ! r[7]+=0
263         addccc  @acc[0],$t0,@acc[0]     ! r[8]+=r[0]    "@acc[8]"
264          and    @acc[5],$mask,@acc[5]
265          and    @acc[6],$mask,@acc[6]
266         addc    $carry,%g0,$carry       ! top-most carry
267         subcc   @acc[7],$t0,@acc[7]     ! r[7]-=r[0]
268         subccc  @acc[0],%g0,@acc[0]     ! r[8]-=0       "@acc[8]"
269         subc    $carry,%g0,$carry       ! top-most carry
270          and    @acc[7],$mask,@acc[7]
271          and    @acc[0],$mask,@acc[0]   ! "@acc[8]"
272 ___
273         push(@acc,shift(@acc));         # rotate registers to "omit" acc[0]
274 $code.=<<___;
275         mulx    $a0,$bi,$t0             ! a[0-7]*b[$i], 64-bit results
276         mulx    $t1,$bi,$t1
277         mulx    $t2,$bi,$t2
278         mulx    $t3,$bi,$t3
279         mulx    $t4,$bi,$t4
280         mulx    $t5,$bi,$t5
281         mulx    $t6,$bi,$t6
282         mulx    $t7,$bi,$t7
283         add     @acc[0],$t0,$t0         ! accumulate low parts, can't overflow
284         add     @acc[1],$t1,$t1
285         srlx    $t0,32,@acc[1]          ! extract high parts
286         add     @acc[2],$t2,$t2
287         srlx    $t1,32,@acc[2]
288         add     @acc[3],$t3,$t3
289         srlx    $t2,32,@acc[3]
290         add     @acc[4],$t4,$t4
291         srlx    $t3,32,@acc[4]
292         add     @acc[5],$t5,$t5
293         srlx    $t4,32,@acc[5]
294         add     @acc[6],$t6,$t6
295         srlx    $t5,32,@acc[6]
296         add     @acc[7],$t7,$t7
297         srlx    $t6,32,@acc[7]
298         srlx    $t7,32,@acc[0]          ! "@acc[8]"
299 ___
300 }
301 $code.=<<___;
302         addcc   @acc[1],$t1,@acc[1]     ! accumulate high parts
303         addccc  @acc[2],$t2,@acc[2]
304         addccc  @acc[3],$t3,@acc[3]
305         addccc  @acc[4],$t4,@acc[4]
306         addccc  @acc[5],$t5,@acc[5]
307         addccc  @acc[6],$t6,@acc[6]
308         addccc  @acc[7],$t7,@acc[7]
309         addccc  @acc[0],$carry,@acc[0]  ! "@acc[8]"
310         addc    %g0,%g0,$carry
311
312         addcc   @acc[3],$t0,@acc[3]     ! multiplication-less reduction
313         addccc  @acc[4],%g0,@acc[4]
314         addccc  @acc[5],%g0,@acc[5]
315         addccc  @acc[6],$t0,@acc[6]
316         addccc  @acc[7],%g0,@acc[7]
317         addccc  @acc[0],$t0,@acc[0]     ! "@acc[8]"
318         addc    $carry,%g0,$carry
319         subcc   @acc[7],$t0,@acc[7]
320         subccc  @acc[0],%g0,@acc[0]     ! "@acc[8]"
321         subc    $carry,%g0,$carry       ! top-most carry
322 ___
323         push(@acc,shift(@acc));         # rotate registers to omit acc[0]
324 $code.=<<___;
325         ! Final step is "if result > mod, subtract mod", but we do it
326         ! "other way around", namely subtract modulus from result
327         ! and if it borrowed, add modulus back.
328
329         subcc   @acc[0],-1,@acc[0]      ! subtract modulus
330         subccc  @acc[1],-1,@acc[1]
331         subccc  @acc[2],-1,@acc[2]
332         subccc  @acc[3],0,@acc[3]
333         subccc  @acc[4],0,@acc[4]
334         subccc  @acc[5],0,@acc[5]
335         subccc  @acc[6],1,@acc[6]
336         subccc  @acc[7],-1,@acc[7]
337         subc    $carry,0,$carry         ! broadcast borrow bit
338
339         ! Note that because mod has special form, i.e. consists of
340         ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
341         ! using value of broadcasted borrow and the borrow bit itself.
342         ! To minimize dependency chain we first broadcast and then
343         ! extract the bit by negating (follow $bi).
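        ! Concretely: after the subtraction above $carry is 0 if no borrow
        ! occurred (result was already fully reduced) or all ones if it did,
        ! and $bi=-$carry is 0 or 1, so the words added below, i.e.
        ! ($carry,$carry,$carry,0,0,0,$bi,$carry), are either all zero or
        ! exactly the modulus in little-endian word order.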
344
345         addcc   @acc[0],$carry,@acc[0]  ! add modulus or zero
346         addccc  @acc[1],$carry,@acc[1]
347         neg     $carry,$bi
348         st      @acc[0],[$rp]
349         addccc  @acc[2],$carry,@acc[2]
350         st      @acc[1],[$rp+4]
351         addccc  @acc[3],0,@acc[3]
352         st      @acc[2],[$rp+8]
353         addccc  @acc[4],0,@acc[4]
354         st      @acc[3],[$rp+12]
355         addccc  @acc[5],0,@acc[5]
356         st      @acc[4],[$rp+16]
357         addccc  @acc[6],$bi,@acc[6]
358         st      @acc[5],[$rp+20]
359         addc    @acc[7],$carry,@acc[7]
360         st      @acc[6],[$rp+24]
361         retl
362         st      @acc[7],[$rp+28]
363 .size   __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
364
365 ! void  ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
366 !                                        const BN_ULONG %i2[8]);
367 .globl  ecp_nistz256_add
368 .align  32
369 ecp_nistz256_add:
370         save    %sp,-STACK_FRAME,%sp
371         ld      [$ap],@acc[0]
372         ld      [$ap+4],@acc[1]
373         ld      [$ap+8],@acc[2]
374         ld      [$ap+12],@acc[3]
375         ld      [$ap+16],@acc[4]
376         ld      [$ap+20],@acc[5]
377         ld      [$ap+24],@acc[6]
378         call    __ecp_nistz256_add
379         ld      [$ap+28],@acc[7]
380         ret
381         restore
382 .size   ecp_nistz256_add,.-ecp_nistz256_add
383
384 .align  32
385 __ecp_nistz256_add:
386         ld      [$bp+0],$t0             ! b[0]
387         ld      [$bp+4],$t1
388         ld      [$bp+8],$t2
389         ld      [$bp+12],$t3
390         addcc   @acc[0],$t0,@acc[0]
391         ld      [$bp+16],$t4
392         ld      [$bp+20],$t5
393         addccc  @acc[1],$t1,@acc[1]
394         ld      [$bp+24],$t6
395         ld      [$bp+28],$t7
396         addccc  @acc[2],$t2,@acc[2]
397         addccc  @acc[3],$t3,@acc[3]
398         addccc  @acc[4],$t4,@acc[4]
399         addccc  @acc[5],$t5,@acc[5]
400         addccc  @acc[6],$t6,@acc[6]
401         addccc  @acc[7],$t7,@acc[7]
402         subc    %g0,%g0,$carry          ! broadcast carry bit
403
404 .Lreduce_by_sub:
405
406         ! if a+b carries, subtract modulus.
407         !
408         ! Note that because mod has special form, i.e. consists of
409         ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
410         ! using the value of the broadcast carry and the carry bit itself.
411         ! To minimize dependency chain we first broadcast and then
412         ! extract the bit by negating (follow $bi).
413
414         subcc   @acc[0],$carry,@acc[0]  ! subtract synthesized modulus
415         subccc  @acc[1],$carry,@acc[1]
416         neg     $carry,$bi
417         st      @acc[0],[$rp]
418         subccc  @acc[2],$carry,@acc[2]
419         st      @acc[1],[$rp+4]
420         subccc  @acc[3],0,@acc[3]
421         st      @acc[2],[$rp+8]
422         subccc  @acc[4],0,@acc[4]
423         st      @acc[3],[$rp+12]
424         subccc  @acc[5],0,@acc[5]
425         st      @acc[4],[$rp+16]
426         subccc  @acc[6],$bi,@acc[6]
427         st      @acc[5],[$rp+20]
428         subc    @acc[7],$carry,@acc[7]
429         st      @acc[6],[$rp+24]
430         retl
431         st      @acc[7],[$rp+28]
432 .size   __ecp_nistz256_add,.-__ecp_nistz256_add
433
434 ! void  ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
435 .globl  ecp_nistz256_mul_by_2
436 .align  32
437 ecp_nistz256_mul_by_2:
438         save    %sp,-STACK_FRAME,%sp
439         ld      [$ap],@acc[0]
440         ld      [$ap+4],@acc[1]
441         ld      [$ap+8],@acc[2]
442         ld      [$ap+12],@acc[3]
443         ld      [$ap+16],@acc[4]
444         ld      [$ap+20],@acc[5]
445         ld      [$ap+24],@acc[6]
446         call    __ecp_nistz256_mul_by_2
447         ld      [$ap+28],@acc[7]
448         ret
449         restore
450 .size   ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
451
452 .align  32
453 __ecp_nistz256_mul_by_2:
454         addcc   @acc[0],@acc[0],@acc[0] ! a+a=2*a
455         addccc  @acc[1],@acc[1],@acc[1]
456         addccc  @acc[2],@acc[2],@acc[2]
457         addccc  @acc[3],@acc[3],@acc[3]
458         addccc  @acc[4],@acc[4],@acc[4]
459         addccc  @acc[5],@acc[5],@acc[5]
460         addccc  @acc[6],@acc[6],@acc[6]
461         addccc  @acc[7],@acc[7],@acc[7]
462         b       .Lreduce_by_sub
463         subc    %g0,%g0,$carry          ! broadcast carry bit
464 .size   __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
465
466 ! void  ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
467 .globl  ecp_nistz256_mul_by_3
468 .align  32
469 ecp_nistz256_mul_by_3:
470         save    %sp,-STACK_FRAME,%sp
471         ld      [$ap],@acc[0]
472         ld      [$ap+4],@acc[1]
473         ld      [$ap+8],@acc[2]
474         ld      [$ap+12],@acc[3]
475         ld      [$ap+16],@acc[4]
476         ld      [$ap+20],@acc[5]
477         ld      [$ap+24],@acc[6]
478         call    __ecp_nistz256_mul_by_3
479         ld      [$ap+28],@acc[7]
480         ret
481         restore
482 .size   ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
483
484 .align  32
485 __ecp_nistz256_mul_by_3:
486         addcc   @acc[0],@acc[0],$t0     ! a+a=2*a
487         addccc  @acc[1],@acc[1],$t1
488         addccc  @acc[2],@acc[2],$t2
489         addccc  @acc[3],@acc[3],$t3
490         addccc  @acc[4],@acc[4],$t4
491         addccc  @acc[5],@acc[5],$t5
492         addccc  @acc[6],@acc[6],$t6
493         addccc  @acc[7],@acc[7],$t7
494         subc    %g0,%g0,$carry          ! broadcast carry bit
495
496         subcc   $t0,$carry,$t0          ! .Lreduce_by_sub but without stores
497         neg     $carry,$bi
498         subccc  $t1,$carry,$t1
499         subccc  $t2,$carry,$t2
500         subccc  $t3,0,$t3
501         subccc  $t4,0,$t4
502         subccc  $t5,0,$t5
503         subccc  $t6,$bi,$t6
504         subc    $t7,$carry,$t7
505
506         addcc   $t0,@acc[0],@acc[0]     ! 2*a+a=3*a
507         addccc  $t1,@acc[1],@acc[1]
508         addccc  $t2,@acc[2],@acc[2]
509         addccc  $t3,@acc[3],@acc[3]
510         addccc  $t4,@acc[4],@acc[4]
511         addccc  $t5,@acc[5],@acc[5]
512         addccc  $t6,@acc[6],@acc[6]
513         addccc  $t7,@acc[7],@acc[7]
514         b       .Lreduce_by_sub
515         subc    %g0,%g0,$carry          ! broadcast carry bit
516 .size   __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
517
518 ! void  ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
519 !                                        const BN_ULONG %i2[8]);
520 .globl  ecp_nistz256_sub
521 .align  32
522 ecp_nistz256_sub:
523         save    %sp,-STACK_FRAME,%sp
524         ld      [$ap],@acc[0]
525         ld      [$ap+4],@acc[1]
526         ld      [$ap+8],@acc[2]
527         ld      [$ap+12],@acc[3]
528         ld      [$ap+16],@acc[4]
529         ld      [$ap+20],@acc[5]
530         ld      [$ap+24],@acc[6]
531         call    __ecp_nistz256_sub_from
532         ld      [$ap+28],@acc[7]
533         ret
534         restore
535 .size   ecp_nistz256_sub,.-ecp_nistz256_sub
536
537 ! void  ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
538 .globl  ecp_nistz256_neg
539 .align  32
540 ecp_nistz256_neg:
541         save    %sp,-STACK_FRAME,%sp
542         mov     $ap,$bp
543         mov     0,@acc[0]
544         mov     0,@acc[1]
545         mov     0,@acc[2]
546         mov     0,@acc[3]
547         mov     0,@acc[4]
548         mov     0,@acc[5]
549         mov     0,@acc[6]
550         call    __ecp_nistz256_sub_from
551         mov     0,@acc[7]
552         ret
553         restore
554 .size   ecp_nistz256_neg,.-ecp_nistz256_neg
555
556 .align  32
557 __ecp_nistz256_sub_from:
558         ld      [$bp+0],$t0             ! b[0]
559         ld      [$bp+4],$t1
560         ld      [$bp+8],$t2
561         ld      [$bp+12],$t3
562         subcc   @acc[0],$t0,@acc[0]
563         ld      [$bp+16],$t4
564         ld      [$bp+20],$t5
565         subccc  @acc[1],$t1,@acc[1]
566         subccc  @acc[2],$t2,@acc[2]
567         ld      [$bp+24],$t6
568         ld      [$bp+28],$t7
569         subccc  @acc[3],$t3,@acc[3]
570         subccc  @acc[4],$t4,@acc[4]
571         subccc  @acc[5],$t5,@acc[5]
572         subccc  @acc[6],$t6,@acc[6]
573         subccc  @acc[7],$t7,@acc[7]
574         subc    %g0,%g0,$carry          ! broadcast borrow bit
575
576 .Lreduce_by_add:
577
578         ! if a-b borrows, add modulus.
579         !
580         ! Note that because mod has special form, i.e. consists of
581         ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
582         ! using value of broadcasted borrow and the borrow bit itself.
583         ! To minimize dependency chain we first broadcast and then
584         ! extract the bit by negating (follow $bi).
585
586         addcc   @acc[0],$carry,@acc[0]  ! add synthesized modulus
587         addccc  @acc[1],$carry,@acc[1]
588         neg     $carry,$bi
589         st      @acc[0],[$rp]
590         addccc  @acc[2],$carry,@acc[2]
591         st      @acc[1],[$rp+4]
592         addccc  @acc[3],0,@acc[3]
593         st      @acc[2],[$rp+8]
594         addccc  @acc[4],0,@acc[4]
595         st      @acc[3],[$rp+12]
596         addccc  @acc[5],0,@acc[5]
597         st      @acc[4],[$rp+16]
598         addccc  @acc[6],$bi,@acc[6]
599         st      @acc[5],[$rp+20]
600         addc    @acc[7],$carry,@acc[7]
601         st      @acc[6],[$rp+24]
602         retl
603         st      @acc[7],[$rp+28]
604 .size   __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
605
606 .align  32
607 __ecp_nistz256_sub_morf:
608         ld      [$bp+0],$t0             ! b[0]
609         ld      [$bp+4],$t1
610         ld      [$bp+8],$t2
611         ld      [$bp+12],$t3
612         subcc   $t0,@acc[0],@acc[0]
613         ld      [$bp+16],$t4
614         ld      [$bp+20],$t5
615         subccc  $t1,@acc[1],@acc[1]
616         subccc  $t2,@acc[2],@acc[2]
617         ld      [$bp+24],$t6
618         ld      [$bp+28],$t7
619         subccc  $t3,@acc[3],@acc[3]
620         subccc  $t4,@acc[4],@acc[4]
621         subccc  $t5,@acc[5],@acc[5]
622         subccc  $t6,@acc[6],@acc[6]
623         subccc  $t7,@acc[7],@acc[7]
624         b       .Lreduce_by_add
625         subc    %g0,%g0,$carry          ! broadcast borrow bit
626 .size   __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
627
628 ! void  ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
629 .globl  ecp_nistz256_div_by_2
630 .align  32
631 ecp_nistz256_div_by_2:
632         save    %sp,-STACK_FRAME,%sp
633         ld      [$ap],@acc[0]
634         ld      [$ap+4],@acc[1]
635         ld      [$ap+8],@acc[2]
636         ld      [$ap+12],@acc[3]
637         ld      [$ap+16],@acc[4]
638         ld      [$ap+20],@acc[5]
639         ld      [$ap+24],@acc[6]
640         call    __ecp_nistz256_div_by_2
641         ld      [$ap+28],@acc[7]
642         ret
643         restore
644 .size   ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
645
646 .align  32
647 __ecp_nistz256_div_by_2:
648         ! ret = (a is odd ? a+mod : a) >> 1
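        ! (mod is odd, so a+mod is even whenever a is odd; the carry out of
        ! the addition below becomes bit 255 after the shift, and the result
        ! is a/2 mod P, i.e. a*2^-1 mod P.)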
649
650         and     @acc[0],1,$bi
651         neg     $bi,$carry
652         addcc   @acc[0],$carry,@acc[0]
653         addccc  @acc[1],$carry,@acc[1]
654         addccc  @acc[2],$carry,@acc[2]
655         addccc  @acc[3],0,@acc[3]
656         addccc  @acc[4],0,@acc[4]
657         addccc  @acc[5],0,@acc[5]
658         addccc  @acc[6],$bi,@acc[6]
659         addccc  @acc[7],$carry,@acc[7]
660         addc    %g0,%g0,$carry
661
662         ! ret >>= 1
663
664         srl     @acc[0],1,@acc[0]
665         sll     @acc[1],31,$t0
666         srl     @acc[1],1,@acc[1]
667         or      @acc[0],$t0,@acc[0]
668         sll     @acc[2],31,$t1
669         srl     @acc[2],1,@acc[2]
670         or      @acc[1],$t1,@acc[1]
671         sll     @acc[3],31,$t2
672         st      @acc[0],[$rp]
673         srl     @acc[3],1,@acc[3]
674         or      @acc[2],$t2,@acc[2]
675         sll     @acc[4],31,$t3
676         st      @acc[1],[$rp+4]
677         srl     @acc[4],1,@acc[4]
678         or      @acc[3],$t3,@acc[3]
679         sll     @acc[5],31,$t4
680         st      @acc[2],[$rp+8]
681         srl     @acc[5],1,@acc[5]
682         or      @acc[4],$t4,@acc[4]
683         sll     @acc[6],31,$t5
684         st      @acc[3],[$rp+12]
685         srl     @acc[6],1,@acc[6]
686         or      @acc[5],$t5,@acc[5]
687         sll     @acc[7],31,$t6
688         st      @acc[4],[$rp+16]
689         srl     @acc[7],1,@acc[7]
690         or      @acc[6],$t6,@acc[6]
691         sll     $carry,31,$t7
692         st      @acc[5],[$rp+20]
693         or      @acc[7],$t7,@acc[7]
694         st      @acc[6],[$rp+24]
695         retl
696         st      @acc[7],[$rp+28]
697 .size   __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
698 ___
699
700 ########################################################################
701 # following subroutines are "literal" implementations of those found in
702 # ecp_nistz256.c
703 #
704 ########################################################################
705 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
706 #
707 {
708 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
709 # above map() describes stack layout with 4 temporary
710 # 256-bit vectors on top.
711
712 $code.=<<___;
713 #ifdef __PIC__
714 SPARC_PIC_THUNK(%g1)
715 #endif
716
717 .globl  ecp_nistz256_point_double
718 .align  32
719 ecp_nistz256_point_double:
720         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
721         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
722         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
723         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
724         be      ecp_nistz256_point_double_vis3
725         nop
726
727         save    %sp,-STACK_FRAME-32*4,%sp
728
729         mov     $rp,$rp_real
730         mov     $ap,$ap_real
731
732 .Lpoint_double_shortcut:
733         ld      [$ap+32],@acc[0]
734         ld      [$ap+32+4],@acc[1]
735         ld      [$ap+32+8],@acc[2]
736         ld      [$ap+32+12],@acc[3]
737         ld      [$ap+32+16],@acc[4]
738         ld      [$ap+32+20],@acc[5]
739         ld      [$ap+32+24],@acc[6]
740         ld      [$ap+32+28],@acc[7]
741         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
742         add     %sp,LOCALS+$S,$rp
743
744         add     $ap_real,64,$bp
745         add     $ap_real,64,$ap
746         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
747         add     %sp,LOCALS+$Zsqr,$rp
748
749         add     $ap_real,0,$bp
750         call    __ecp_nistz256_add      ! p256_add(M, Zsqr, in_x);
751         add     %sp,LOCALS+$M,$rp
752
753         add     %sp,LOCALS+$S,$bp
754         add     %sp,LOCALS+$S,$ap
755         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
756         add     %sp,LOCALS+$S,$rp
757
758         ld      [$ap_real],@acc[0]
759         add     %sp,LOCALS+$Zsqr,$bp
760         ld      [$ap_real+4],@acc[1]
761         ld      [$ap_real+8],@acc[2]
762         ld      [$ap_real+12],@acc[3]
763         ld      [$ap_real+16],@acc[4]
764         ld      [$ap_real+20],@acc[5]
765         ld      [$ap_real+24],@acc[6]
766         ld      [$ap_real+28],@acc[7]
767         call    __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
768         add     %sp,LOCALS+$Zsqr,$rp
769
770         add     $ap_real,32,$bp
771         add     $ap_real,64,$ap
772         call    __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
773         add     %sp,LOCALS+$tmp0,$rp
774
775         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
776         add     $rp_real,64,$rp
777
778         add     %sp,LOCALS+$Zsqr,$bp
779         add     %sp,LOCALS+$M,$ap
780         call    __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
781         add     %sp,LOCALS+$M,$rp
782
783         call    __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
784         add     %sp,LOCALS+$M,$rp
785
786         add     %sp,LOCALS+$S,$bp
787         add     %sp,LOCALS+$S,$ap
788         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
789         add     %sp,LOCALS+$tmp0,$rp
790
791         call    __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
792         add     $rp_real,32,$rp
793
794         add     $ap_real,0,$bp
795         add     %sp,LOCALS+$S,$ap
796         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
797         add     %sp,LOCALS+$S,$rp
798
799         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
800         add     %sp,LOCALS+$tmp0,$rp
801
802         add     %sp,LOCALS+$M,$bp
803         add     %sp,LOCALS+$M,$ap
804         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
805         add     $rp_real,0,$rp
806
807         add     %sp,LOCALS+$tmp0,$bp
808         call    __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
809         add     $rp_real,0,$rp
810
811         add     %sp,LOCALS+$S,$bp
812         call    __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
813         add     %sp,LOCALS+$S,$rp
814
815         add     %sp,LOCALS+$M,$bp
816         add     %sp,LOCALS+$S,$ap
817         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
818         add     %sp,LOCALS+$S,$rp
819
820         add     $rp_real,32,$bp
821         call    __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
822         add     $rp_real,32,$rp
823
824         ret
825         restore
826 .size   ecp_nistz256_point_double,.-ecp_nistz256_point_double
827 ___
828 }
829
830 ########################################################################
831 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
832 #                             const P256_POINT *in2);
833 {
834 my ($res_x,$res_y,$res_z,
835     $H,$Hsqr,$R,$Rsqr,$Hcub,
836     $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
837 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
838
839 # above map() describes stack layout with 12 temporary
840 # 256-bit vectors on top. Then we reserve some space for
841 # !in1infty, !in2infty, result of check for zero and return pointer.
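# Within that space [%fp+STACK_BIAS-8] holds the return pointer, -12 holds
# !in2infty, -16 holds !in1infty and -20 holds the is-zero check of R
# (see the stores below).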
842
843 my $bp_real=$rp_real;
844
845 $code.=<<___;
846 .globl  ecp_nistz256_point_add
847 .align  32
848 ecp_nistz256_point_add:
849         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
850         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
851         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
852         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
853         be      ecp_nistz256_point_add_vis3
854         nop
855
856         save    %sp,-STACK_FRAME-32*12-32,%sp
857
858         stx     $rp,[%fp+STACK_BIAS-8]  ! off-load $rp
859         mov     $ap,$ap_real
860         mov     $bp,$bp_real
861
862         ld      [$bp],@acc[0]           ! in2_x
863         ld      [$bp+4],@acc[1]
864         ld      [$bp+8],@acc[2]
865         ld      [$bp+12],@acc[3]
866         ld      [$bp+16],@acc[4]
867         ld      [$bp+20],@acc[5]
868         ld      [$bp+24],@acc[6]
869         ld      [$bp+28],@acc[7]
870         ld      [$bp+32],$t0            ! in2_y
871         ld      [$bp+32+4],$t1
872         ld      [$bp+32+8],$t2
873         ld      [$bp+32+12],$t3
874         ld      [$bp+32+16],$t4
875         ld      [$bp+32+20],$t5
876         ld      [$bp+32+24],$t6
877         ld      [$bp+32+28],$t7
878         or      @acc[1],@acc[0],@acc[0]
879         or      @acc[3],@acc[2],@acc[2]
880         or      @acc[5],@acc[4],@acc[4]
881         or      @acc[7],@acc[6],@acc[6]
882         or      @acc[2],@acc[0],@acc[0]
883         or      @acc[6],@acc[4],@acc[4]
884         or      @acc[4],@acc[0],@acc[0]
885         or      $t1,$t0,$t0
886         or      $t3,$t2,$t2
887         or      $t5,$t4,$t4
888         or      $t7,$t6,$t6
889         or      $t2,$t0,$t0
890         or      $t6,$t4,$t4
891         or      $t4,$t0,$t0
892         or      @acc[0],$t0,$t0         ! !in2infty
893         movrnz  $t0,-1,$t0
894         st      $t0,[%fp+STACK_BIAS-12]
895
896         ld      [$ap],@acc[0]           ! in1_x
897         ld      [$ap+4],@acc[1]
898         ld      [$ap+8],@acc[2]
899         ld      [$ap+12],@acc[3]
900         ld      [$ap+16],@acc[4]
901         ld      [$ap+20],@acc[5]
902         ld      [$ap+24],@acc[6]
903         ld      [$ap+28],@acc[7]
904         ld      [$ap+32],$t0            ! in1_y
905         ld      [$ap+32+4],$t1
906         ld      [$ap+32+8],$t2
907         ld      [$ap+32+12],$t3
908         ld      [$ap+32+16],$t4
909         ld      [$ap+32+20],$t5
910         ld      [$ap+32+24],$t6
911         ld      [$ap+32+28],$t7
912         or      @acc[1],@acc[0],@acc[0]
913         or      @acc[3],@acc[2],@acc[2]
914         or      @acc[5],@acc[4],@acc[4]
915         or      @acc[7],@acc[6],@acc[6]
916         or      @acc[2],@acc[0],@acc[0]
917         or      @acc[6],@acc[4],@acc[4]
918         or      @acc[4],@acc[0],@acc[0]
919         or      $t1,$t0,$t0
920         or      $t3,$t2,$t2
921         or      $t5,$t4,$t4
922         or      $t7,$t6,$t6
923         or      $t2,$t0,$t0
924         or      $t6,$t4,$t4
925         or      $t4,$t0,$t0
926         or      @acc[0],$t0,$t0         ! !in1infty
927         movrnz  $t0,-1,$t0
928         st      $t0,[%fp+STACK_BIAS-16]
929
930         add     $bp_real,64,$bp
931         add     $bp_real,64,$ap
932         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
933         add     %sp,LOCALS+$Z2sqr,$rp
934
935         add     $ap_real,64,$bp
936         add     $ap_real,64,$ap
937         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
938         add     %sp,LOCALS+$Z1sqr,$rp
939
940         add     $bp_real,64,$bp
941         add     %sp,LOCALS+$Z2sqr,$ap
942         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
943         add     %sp,LOCALS+$S1,$rp
944
945         add     $ap_real,64,$bp
946         add     %sp,LOCALS+$Z1sqr,$ap
947         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
948         add     %sp,LOCALS+$S2,$rp
949
950         add     $ap_real,32,$bp
951         add     %sp,LOCALS+$S1,$ap
952         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
953         add     %sp,LOCALS+$S1,$rp
954
955         add     $bp_real,32,$bp
956         add     %sp,LOCALS+$S2,$ap
957         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
958         add     %sp,LOCALS+$S2,$rp
959
960         add     %sp,LOCALS+$S1,$bp
961         call    __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
962         add     %sp,LOCALS+$R,$rp
963
964         or      @acc[1],@acc[0],@acc[0] ! see if result is zero
965         or      @acc[3],@acc[2],@acc[2]
966         or      @acc[5],@acc[4],@acc[4]
967         or      @acc[7],@acc[6],@acc[6]
968         or      @acc[2],@acc[0],@acc[0]
969         or      @acc[6],@acc[4],@acc[4]
970         or      @acc[4],@acc[0],@acc[0]
971         st      @acc[0],[%fp+STACK_BIAS-20]
972
973         add     $ap_real,0,$bp
974         add     %sp,LOCALS+$Z2sqr,$ap
975         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
976         add     %sp,LOCALS+$U1,$rp
977
978         add     $bp_real,0,$bp
979         add     %sp,LOCALS+$Z1sqr,$ap
980         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
981         add     %sp,LOCALS+$U2,$rp
982
983         add     %sp,LOCALS+$U1,$bp
984         call    __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
985         add     %sp,LOCALS+$H,$rp
986
987         or      @acc[1],@acc[0],@acc[0] ! see if result is zero
988         or      @acc[3],@acc[2],@acc[2]
989         or      @acc[5],@acc[4],@acc[4]
990         or      @acc[7],@acc[6],@acc[6]
991         or      @acc[2],@acc[0],@acc[0]
992         or      @acc[6],@acc[4],@acc[4]
993         orcc    @acc[4],@acc[0],@acc[0]
994
995         bne,pt  %icc,.Ladd_proceed      ! is_equal(U1,U2)?
996         nop
997
998         ld      [%fp+STACK_BIAS-12],$t0
999         ld      [%fp+STACK_BIAS-16],$t1
1000         ld      [%fp+STACK_BIAS-20],$t2
1001         andcc   $t0,$t1,%g0
1002         be,pt   %icc,.Ladd_proceed      ! (in1infty || in2infty)?
1003         nop
1004         andcc   $t2,$t2,%g0
1005         be,pt   %icc,.Ladd_double       ! is_equal(S1,S2)?
1006         nop
1007
1008         ldx     [%fp+STACK_BIAS-8],$rp
1009         st      %g0,[$rp]
1010         st      %g0,[$rp+4]
1011         st      %g0,[$rp+8]
1012         st      %g0,[$rp+12]
1013         st      %g0,[$rp+16]
1014         st      %g0,[$rp+20]
1015         st      %g0,[$rp+24]
1016         st      %g0,[$rp+28]
1017         st      %g0,[$rp+32]
1018         st      %g0,[$rp+32+4]
1019         st      %g0,[$rp+32+8]
1020         st      %g0,[$rp+32+12]
1021         st      %g0,[$rp+32+16]
1022         st      %g0,[$rp+32+20]
1023         st      %g0,[$rp+32+24]
1024         st      %g0,[$rp+32+28]
1025         st      %g0,[$rp+64]
1026         st      %g0,[$rp+64+4]
1027         st      %g0,[$rp+64+8]
1028         st      %g0,[$rp+64+12]
1029         st      %g0,[$rp+64+16]
1030         st      %g0,[$rp+64+20]
1031         st      %g0,[$rp+64+24]
1032         st      %g0,[$rp+64+28]
1033         b       .Ladd_done
1034         nop
1035
1036 .align  16
1037 .Ladd_double:
1038         ldx     [%fp+STACK_BIAS-8],$rp_real
1039         mov     $ap_real,$ap
1040         b       .Lpoint_double_shortcut
1041         add     %sp,32*(12-4)+32,%sp    ! difference in frame sizes
1042
1043 .align  16
1044 .Ladd_proceed:
1045         add     %sp,LOCALS+$R,$bp
1046         add     %sp,LOCALS+$R,$ap
1047         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1048         add     %sp,LOCALS+$Rsqr,$rp
1049
1050         add     $ap_real,64,$bp
1051         add     %sp,LOCALS+$H,$ap
1052         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1053         add     %sp,LOCALS+$res_z,$rp
1054
1055         add     %sp,LOCALS+$H,$bp
1056         add     %sp,LOCALS+$H,$ap
1057         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1058         add     %sp,LOCALS+$Hsqr,$rp
1059
1060         add     $bp_real,64,$bp
1061         add     %sp,LOCALS+$res_z,$ap
1062         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1063         add     %sp,LOCALS+$res_z,$rp
1064
1065         add     %sp,LOCALS+$H,$bp
1066         add     %sp,LOCALS+$Hsqr,$ap
1067         call    __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1068         add     %sp,LOCALS+$Hcub,$rp
1069
1070         add     %sp,LOCALS+$U1,$bp
1071         add     %sp,LOCALS+$Hsqr,$ap
1072         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1073         add     %sp,LOCALS+$U2,$rp
1074
1075         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1076         add     %sp,LOCALS+$Hsqr,$rp
1077
1078         add     %sp,LOCALS+$Rsqr,$bp
1079         call    __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1080         add     %sp,LOCALS+$res_x,$rp
1081
1082         add     %sp,LOCALS+$Hcub,$bp
1083         call    __ecp_nistz256_sub_from !  p256_sub(res_x, res_x, Hcub);
1084         add     %sp,LOCALS+$res_x,$rp
1085
1086         add     %sp,LOCALS+$U2,$bp
1087         call    __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1088         add     %sp,LOCALS+$res_y,$rp
1089
1090         add     %sp,LOCALS+$Hcub,$bp
1091         add     %sp,LOCALS+$S1,$ap
1092         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1093         add     %sp,LOCALS+$S2,$rp
1094
1095         add     %sp,LOCALS+$R,$bp
1096         add     %sp,LOCALS+$res_y,$ap
1097         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1098         add     %sp,LOCALS+$res_y,$rp
1099
1100         add     %sp,LOCALS+$S2,$bp
1101         call    __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1102         add     %sp,LOCALS+$res_y,$rp
1103
1104         ld      [%fp+STACK_BIAS-16],$t1 ! !in1infty
1105         ld      [%fp+STACK_BIAS-12],$t2 ! !in2infty
1106         ldx     [%fp+STACK_BIAS-8],$rp
1107 ___
1108 for($i=0;$i<96;$i+=8) {                 # conditional moves
1109 $code.=<<___;
1110         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1111         ld      [%sp+LOCALS+$i+4],@acc[1]
1112         ld      [$bp_real+$i],@acc[2]           ! in2
1113         ld      [$bp_real+$i+4],@acc[3]
1114         ld      [$ap_real+$i],@acc[4]           ! in1
1115         ld      [$ap_real+$i+4],@acc[5]
1116         movrz   $t1,@acc[2],@acc[0]
1117         movrz   $t1,@acc[3],@acc[1]
1118         movrz   $t2,@acc[4],@acc[0]
1119         movrz   $t2,@acc[5],@acc[1]
1120         st      @acc[0],[$rp+$i]
1121         st      @acc[1],[$rp+$i+4]
1122 ___
1123 }
1124 $code.=<<___;
1125 .Ladd_done:
1126         ret
1127         restore
1128 .size   ecp_nistz256_point_add,.-ecp_nistz256_point_add
1129 ___
1130 }
1131
1132 ########################################################################
1133 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1134 #                                    const P256_POINT_AFFINE *in2);
1135 {
1136 my ($res_x,$res_y,$res_z,
1137     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1138 my $Z1sqr = $S2;
1139 # above map() describes stack layout with 10 temporary
1140 # 256-bit vectors on top. Then we reserve some space for
1141 # !in1infty, !in2infty, result of check for zero and return pointer.
1142
1143 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
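# @ONE_mont is 1 in Montgomery representation, i.e. 2^256 mod P, written as
# signed 32-bit words.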
1144 my $bp_real=$rp_real;
1145
1146 $code.=<<___;
1147 .globl  ecp_nistz256_point_add_affine
1148 .align  32
1149 ecp_nistz256_point_add_affine:
1150         SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1151         ld      [%g1],%g1               ! OPENSSL_sparcv9cap_P[0]
1152         and     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1153         cmp     %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1154         be      ecp_nistz256_point_add_affine_vis3
1155         nop
1156
1157         save    %sp,-STACK_FRAME-32*10-32,%sp
1158
1159         stx     $rp,[%fp+STACK_BIAS-8]  ! off-load $rp
1160         mov     $ap,$ap_real
1161         mov     $bp,$bp_real
1162
1163         ld      [$ap],@acc[0]           ! in1_x
1164         ld      [$ap+4],@acc[1]
1165         ld      [$ap+8],@acc[2]
1166         ld      [$ap+12],@acc[3]
1167         ld      [$ap+16],@acc[4]
1168         ld      [$ap+20],@acc[5]
1169         ld      [$ap+24],@acc[6]
1170         ld      [$ap+28],@acc[7]
1171         ld      [$ap+32],$t0            ! in1_y
1172         ld      [$ap+32+4],$t1
1173         ld      [$ap+32+8],$t2
1174         ld      [$ap+32+12],$t3
1175         ld      [$ap+32+16],$t4
1176         ld      [$ap+32+20],$t5
1177         ld      [$ap+32+24],$t6
1178         ld      [$ap+32+28],$t7
1179         or      @acc[1],@acc[0],@acc[0]
1180         or      @acc[3],@acc[2],@acc[2]
1181         or      @acc[5],@acc[4],@acc[4]
1182         or      @acc[7],@acc[6],@acc[6]
1183         or      @acc[2],@acc[0],@acc[0]
1184         or      @acc[6],@acc[4],@acc[4]
1185         or      @acc[4],@acc[0],@acc[0]
1186         or      $t1,$t0,$t0
1187         or      $t3,$t2,$t2
1188         or      $t5,$t4,$t4
1189         or      $t7,$t6,$t6
1190         or      $t2,$t0,$t0
1191         or      $t6,$t4,$t4
1192         or      $t4,$t0,$t0
1193         or      @acc[0],$t0,$t0         ! !in1infty
1194         movrnz  $t0,-1,$t0
1195         st      $t0,[%fp+STACK_BIAS-16]
1196
1197         ld      [$bp],@acc[0]           ! in2_x
1198         ld      [$bp+4],@acc[1]
1199         ld      [$bp+8],@acc[2]
1200         ld      [$bp+12],@acc[3]
1201         ld      [$bp+16],@acc[4]
1202         ld      [$bp+20],@acc[5]
1203         ld      [$bp+24],@acc[6]
1204         ld      [$bp+28],@acc[7]
1205         ld      [$bp+32],$t0            ! in2_y
1206         ld      [$bp+32+4],$t1
1207         ld      [$bp+32+8],$t2
1208         ld      [$bp+32+12],$t3
1209         ld      [$bp+32+16],$t4
1210         ld      [$bp+32+20],$t5
1211         ld      [$bp+32+24],$t6
1212         ld      [$bp+32+28],$t7
1213         or      @acc[1],@acc[0],@acc[0]
1214         or      @acc[3],@acc[2],@acc[2]
1215         or      @acc[5],@acc[4],@acc[4]
1216         or      @acc[7],@acc[6],@acc[6]
1217         or      @acc[2],@acc[0],@acc[0]
1218         or      @acc[6],@acc[4],@acc[4]
1219         or      @acc[4],@acc[0],@acc[0]
1220         or      $t1,$t0,$t0
1221         or      $t3,$t2,$t2
1222         or      $t5,$t4,$t4
1223         or      $t7,$t6,$t6
1224         or      $t2,$t0,$t0
1225         or      $t6,$t4,$t4
1226         or      $t4,$t0,$t0
1227         or      @acc[0],$t0,$t0         ! !in2infty
1228         movrnz  $t0,-1,$t0
1229         st      $t0,[%fp+STACK_BIAS-12]
1230
1231         add     $ap_real,64,$bp
1232         add     $ap_real,64,$ap
1233         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1234         add     %sp,LOCALS+$Z1sqr,$rp
1235
1236         add     $bp_real,0,$bp
1237         add     %sp,LOCALS+$Z1sqr,$ap
1238         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1239         add     %sp,LOCALS+$U2,$rp
1240
1241         add     $ap_real,0,$bp
1242         call    __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1243         add     %sp,LOCALS+$H,$rp
1244
1245         add     $ap_real,64,$bp
1246         add     %sp,LOCALS+$Z1sqr,$ap
1247         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1248         add     %sp,LOCALS+$S2,$rp
1249
1250         add     $ap_real,64,$bp
1251         add     %sp,LOCALS+$H,$ap
1252         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1253         add     %sp,LOCALS+$res_z,$rp
1254
1255         add     $bp_real,32,$bp
1256         add     %sp,LOCALS+$S2,$ap
1257         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1258         add     %sp,LOCALS+$S2,$rp
1259
1260         add     $ap_real,32,$bp
1261         call    __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1262         add     %sp,LOCALS+$R,$rp
1263
1264         add     %sp,LOCALS+$H,$bp
1265         add     %sp,LOCALS+$H,$ap
1266         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1267         add     %sp,LOCALS+$Hsqr,$rp
1268
1269         add     %sp,LOCALS+$R,$bp
1270         add     %sp,LOCALS+$R,$ap
1271         call    __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1272         add     %sp,LOCALS+$Rsqr,$rp
1273
1274         add     %sp,LOCALS+$H,$bp
1275         add     %sp,LOCALS+$Hsqr,$ap
1276         call    __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1277         add     %sp,LOCALS+$Hcub,$rp
1278
1279         add     $ap_real,0,$bp
1280         add     %sp,LOCALS+$Hsqr,$ap
1281         call    __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1282         add     %sp,LOCALS+$U2,$rp
1283
1284         call    __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1285         add     %sp,LOCALS+$Hsqr,$rp
1286
1287         add     %sp,LOCALS+$Rsqr,$bp
1288         call    __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1289         add     %sp,LOCALS+$res_x,$rp
1290
1291         add     %sp,LOCALS+$Hcub,$bp
1292         call    __ecp_nistz256_sub_from !  p256_sub(res_x, res_x, Hcub);
1293         add     %sp,LOCALS+$res_x,$rp
1294
1295         add     %sp,LOCALS+$U2,$bp
1296         call    __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1297         add     %sp,LOCALS+$res_y,$rp
1298
1299         add     $ap_real,32,$bp
1300         add     %sp,LOCALS+$Hcub,$ap
1301         call    __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1302         add     %sp,LOCALS+$S2,$rp
1303
1304         add     %sp,LOCALS+$R,$bp
1305         add     %sp,LOCALS+$res_y,$ap
1306         call    __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1307         add     %sp,LOCALS+$res_y,$rp
1308
1309         add     %sp,LOCALS+$S2,$bp
1310         call    __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1311         add     %sp,LOCALS+$res_y,$rp
1312
1313         ld      [%fp+STACK_BIAS-16],$t1 ! !in1infty
1314         ld      [%fp+STACK_BIAS-12],$t2 ! !in2infty
1315         ldx     [%fp+STACK_BIAS-8],$rp
1316 ___
1317 for($i=0;$i<64;$i+=8) {                 # conditional moves
1318 $code.=<<___;
1319         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1320         ld      [%sp+LOCALS+$i+4],@acc[1]
1321         ld      [$bp_real+$i],@acc[2]           ! in2
1322         ld      [$bp_real+$i+4],@acc[3]
1323         ld      [$ap_real+$i],@acc[4]           ! in1
1324         ld      [$ap_real+$i+4],@acc[5]
1325         movrz   $t1,@acc[2],@acc[0]
1326         movrz   $t1,@acc[3],@acc[1]
1327         movrz   $t2,@acc[4],@acc[0]
1328         movrz   $t2,@acc[5],@acc[1]
1329         st      @acc[0],[$rp+$i]
1330         st      @acc[1],[$rp+$i+4]
1331 ___
1332 }
1333 for(;$i<96;$i+=8) {
1334 my $j=($i-64)/4;
1335 $code.=<<___;
1336         ld      [%sp+LOCALS+$i],@acc[0]         ! res
1337         ld      [%sp+LOCALS+$i+4],@acc[1]
1338         ld      [$ap_real+$i],@acc[4]           ! in1
1339         ld      [$ap_real+$i+4],@acc[5]
1340         movrz   $t1,@ONE_mont[$j],@acc[0]
1341         movrz   $t1,@ONE_mont[$j+1],@acc[1]
1342         movrz   $t2,@acc[4],@acc[0]
1343         movrz   $t2,@acc[5],@acc[1]
1344         st      @acc[0],[$rp+$i]
1345         st      @acc[1],[$rp+$i+4]
1346 ___
1347 }
1348 $code.=<<___;
1349         ret
1350         restore
1351 .size   ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1352 ___
1353 }                                                               }}}
1354 {{{
1355 my ($out,$inp,$index)=map("%i$_",(0..2));
1356 my $mask="%o0";
1357
1358 $code.=<<___;
1359 ! void  ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1360 !                                         int %i2);
1361 .globl  ecp_nistz256_scatter_w5
1362 .align  32
1363 ecp_nistz256_scatter_w5:
1364         save    %sp,-STACK_FRAME,%sp
1365
1366         sll     $index,2,$index
1367         add     $out,$index,$out
1368
1369         ld      [$inp],%l0              ! X
1370         ld      [$inp+4],%l1
1371         ld      [$inp+8],%l2
1372         ld      [$inp+12],%l3
1373         ld      [$inp+16],%l4
1374         ld      [$inp+20],%l5
1375         ld      [$inp+24],%l6
1376         ld      [$inp+28],%l7
1377         add     $inp,32,$inp
1378         st      %l0,[$out+64*0-4]
1379         st      %l1,[$out+64*1-4]
1380         st      %l2,[$out+64*2-4]
1381         st      %l3,[$out+64*3-4]
1382         st      %l4,[$out+64*4-4]
1383         st      %l5,[$out+64*5-4]
1384         st      %l6,[$out+64*6-4]
1385         st      %l7,[$out+64*7-4]
1386         add     $out,64*8,$out
1387
1388         ld      [$inp],%l0              ! Y
1389         ld      [$inp+4],%l1
1390         ld      [$inp+8],%l2
1391         ld      [$inp+12],%l3
1392         ld      [$inp+16],%l4
1393         ld      [$inp+20],%l5
1394         ld      [$inp+24],%l6
1395         ld      [$inp+28],%l7
1396         add     $inp,32,$inp
1397         st      %l0,[$out+64*0-4]
1398         st      %l1,[$out+64*1-4]
1399         st      %l2,[$out+64*2-4]
1400         st      %l3,[$out+64*3-4]
1401         st      %l4,[$out+64*4-4]
1402         st      %l5,[$out+64*5-4]
1403         st      %l6,[$out+64*6-4]
1404         st      %l7,[$out+64*7-4]
1405         add     $out,64*8,$out
1406
1407         ld      [$inp],%l0              ! Z
1408         ld      [$inp+4],%l1
1409         ld      [$inp+8],%l2
1410         ld      [$inp+12],%l3
1411         ld      [$inp+16],%l4
1412         ld      [$inp+20],%l5
1413         ld      [$inp+24],%l6
1414         ld      [$inp+28],%l7
1415         st      %l0,[$out+64*0-4]
1416         st      %l1,[$out+64*1-4]
1417         st      %l2,[$out+64*2-4]
1418         st      %l3,[$out+64*3-4]
1419         st      %l4,[$out+64*4-4]
1420         st      %l5,[$out+64*5-4]
1421         st      %l6,[$out+64*6-4]
1422         st      %l7,[$out+64*7-4]
1423
1424         ret
1425         restore
1426 .size   ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1427
1428 ! void  ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1429 !                                              int %i2);
1430 .globl  ecp_nistz256_gather_w5
1431 .align  32
1432 ecp_nistz256_gather_w5:
1433         save    %sp,-STACK_FRAME,%sp
1434
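        ! An all-ones mask is synthesized for any non-zero index and an
        ! all-zero mask for index 0, and the gathered words are AND-ed with
        ! it below, so that index 0 (the point at infinity) reads back as
        ! zero without a data-dependent branch.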
1435         neg     $index,$mask
1436         srax    $mask,63,$mask
1437
1438         add     $index,$mask,$index
1439         sll     $index,2,$index
1440         add     $inp,$index,$inp
1441
1442         ld      [$inp+64*0],%l0
1443         ld      [$inp+64*1],%l1
1444         ld      [$inp+64*2],%l2
1445         ld      [$inp+64*3],%l3
1446         ld      [$inp+64*4],%l4
1447         ld      [$inp+64*5],%l5
1448         ld      [$inp+64*6],%l6
1449         ld      [$inp+64*7],%l7
1450         add     $inp,64*8,$inp
1451         and     %l0,$mask,%l0
1452         and     %l1,$mask,%l1
1453         st      %l0,[$out]              ! X
1454         and     %l2,$mask,%l2
1455         st      %l1,[$out+4]
1456         and     %l3,$mask,%l3
1457         st      %l2,[$out+8]
1458         and     %l4,$mask,%l4
1459         st      %l3,[$out+12]
1460         and     %l5,$mask,%l5
1461         st      %l4,[$out+16]
1462         and     %l6,$mask,%l6
1463         st      %l5,[$out+20]
1464         and     %l7,$mask,%l7
1465         st      %l6,[$out+24]
1466         st      %l7,[$out+28]
1467         add     $out,32,$out
1468
1469         ld      [$inp+64*0],%l0
1470         ld      [$inp+64*1],%l1
1471         ld      [$inp+64*2],%l2
1472         ld      [$inp+64*3],%l3
1473         ld      [$inp+64*4],%l4
1474         ld      [$inp+64*5],%l5
1475         ld      [$inp+64*6],%l6
1476         ld      [$inp+64*7],%l7
1477         add     $inp,64*8,$inp
1478         and     %l0,$mask,%l0
1479         and     %l1,$mask,%l1
1480         st      %l0,[$out]              ! Y
1481         and     %l2,$mask,%l2
1482         st      %l1,[$out+4]
1483         and     %l3,$mask,%l3
1484         st      %l2,[$out+8]
1485         and     %l4,$mask,%l4
1486         st      %l3,[$out+12]
1487         and     %l5,$mask,%l5
1488         st      %l4,[$out+16]
1489         and     %l6,$mask,%l6
1490         st      %l5,[$out+20]
1491         and     %l7,$mask,%l7
1492         st      %l6,[$out+24]
1493         st      %l7,[$out+28]
1494         add     $out,32,$out
1495
1496         ld      [$inp+64*0],%l0
1497         ld      [$inp+64*1],%l1
1498         ld      [$inp+64*2],%l2
1499         ld      [$inp+64*3],%l3
1500         ld      [$inp+64*4],%l4
1501         ld      [$inp+64*5],%l5
1502         ld      [$inp+64*6],%l6
1503         ld      [$inp+64*7],%l7
1504         and     %l0,$mask,%l0
1505         and     %l1,$mask,%l1
1506         st      %l0,[$out]              ! Z
1507         and     %l2,$mask,%l2
1508         st      %l1,[$out+4]
1509         and     %l3,$mask,%l3
1510         st      %l2,[$out+8]
1511         and     %l4,$mask,%l4
1512         st      %l3,[$out+12]
1513         and     %l5,$mask,%l5
1514         st      %l4,[$out+16]
1515         and     %l6,$mask,%l6
1516         st      %l5,[$out+20]
1517         and     %l7,$mask,%l7
1518         st      %l6,[$out+24]
1519         st      %l7,[$out+28]
1520
1521         ret
1522         restore
1523 .size   ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1524
1525 ! void  ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1526 !                                         int %i2);
1527 .globl  ecp_nistz256_scatter_w7
1528 .align  32
1529 ecp_nistz256_scatter_w7:
1530         save    %sp,-STACK_FRAME,%sp
1531         nop
1532         add     $out,$index,$out
1533         mov     64/4,$index
1534 .Loop_scatter_w7:
1535         ld      [$inp],%l0
1536         add     $inp,4,$inp
1537         subcc   $index,1,$index
1538         stb     %l0,[$out+64*0-1]
1539         srl     %l0,8,%l1
1540         stb     %l1,[$out+64*1-1]
1541         srl     %l0,16,%l2
1542         stb     %l2,[$out+64*2-1]
1543         srl     %l0,24,%l3
1544         stb     %l3,[$out+64*3-1]
1545         bne     .Loop_scatter_w7
1546         add     $out,64*4,$out
1547
1548         ret
1549         restore
1550 .size   ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1551
1552 ! void  ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1553 !                                                     int %i2);
1554 .globl  ecp_nistz256_gather_w7
1555 .align  32
1556 ecp_nistz256_gather_w7:
1557         save    %sp,-STACK_FRAME,%sp
1558
1559         neg     $index,$mask
1560         srax    $mask,63,$mask
1561
1562         add     $index,$mask,$index
1563         add     $inp,$index,$inp
1564         mov     64/4,$index
1565
1566 .Loop_gather_w7:
1567         ldub    [$inp+64*0],%l0
1568         prefetch [$inp+3840+64*0],1
1569         subcc   $index,1,$index
1570         ldub    [$inp+64*1],%l1
1571         prefetch [$inp+3840+64*1],1
1572         ldub    [$inp+64*2],%l2
1573         prefetch [$inp+3840+64*2],1
1574         ldub    [$inp+64*3],%l3
1575         prefetch [$inp+3840+64*3],1
1576         add     $inp,64*4,$inp
1577         sll     %l1,8,%l1
1578         sll     %l2,16,%l2
1579         or      %l0,%l1,%l0
1580         sll     %l3,24,%l3
1581         or      %l0,%l2,%l0
1582         or      %l0,%l3,%l0
1583         and     %l0,$mask,%l0
1584         st      %l0,[$out]
1585         bne     .Loop_gather_w7
1586         add     $out,4,$out
1587
1588         ret
1589         restore
1590 .size   ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1591 ___
1592 }}}
1593 {{{
1594 ########################################################################
1595 # The following subroutines are VIS3 counterparts of those above,
1596 # implementing the ones found in ecp_nistz256.c. The key difference is
1597 # that they use 64x64-bit multiplication with a full 128-bit result and
1598 # addition with 64-bit carry, and in order to do that they convert from
1599 # uint32_t[8] to uint64_t[4] upon entry and back again on return.
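#
# For reference, the conversion is plain limb repacking: 64-bit limb[i]
# is word[2*i] | (uint64_t)word[2*i+1]<<32, which is what the ld/sllx/or
# sequences at the top of the point routines below perform and what the
# srlx/st pairs at their ends undo.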
1600 #
1601 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1602 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1603 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1604 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1605 my ($rp_real,$ap_real)=("%g2","%g3");
1606 my ($acc6,$acc7)=($bp,$bi);     # used in squaring
1607
1608 $code.=<<___;
1609 .align  32
1610 __ecp_nistz256_mul_by_2_vis3:
1611         addcc   $acc0,$acc0,$acc0
1612         addxccc $acc1,$acc1,$acc1
1613         addxccc $acc2,$acc2,$acc2
1614         addxccc $acc3,$acc3,$acc3
1615         b       .Lreduce_by_sub_vis3
1616         addxc   %g0,%g0,$acc4           ! did it carry?
1617 .size   __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1618
1619 .align  32
1620 __ecp_nistz256_add_vis3:
1621         ldx     [$bp+0],$t0
1622         ldx     [$bp+8],$t1
1623         ldx     [$bp+16],$t2
1624         ldx     [$bp+24],$t3
1625
1626 __ecp_nistz256_add_noload_vis3:
1627
1628         addcc   $t0,$acc0,$acc0
1629         addxccc $t1,$acc1,$acc1
1630         addxccc $t2,$acc2,$acc2
1631         addxccc $t3,$acc3,$acc3
1632         addxc   %g0,%g0,$acc4           ! did it carry?
1633
1634 .Lreduce_by_sub_vis3:
1635
1636         addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
1637         addxccc $acc1,$poly1,$t1
1638         addxccc $acc2,$minus1,$t2
1639         addxc   $acc3,$poly3,$t3
1640
1641         movrnz  $acc4,$t0,$acc0         ! if a+b carried, ret = ret-mod
1642         movrnz  $acc4,$t1,$acc1
1643         stx     $acc0,[$rp]
1644         movrnz  $acc4,$t2,$acc2
1645         stx     $acc1,[$rp+8]
1646         movrnz  $acc4,$t3,$acc3
1647         stx     $acc2,[$rp+16]
1648         retl
1649         stx     $acc3,[$rp+24]
1650 .size   __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
1651
1652 ! The trouble with subtraction is that there is no subtract with 64-bit
1653 ! borrow, only with a 32-bit one. For this reason we "decompose" the
1654 ! 64-bit $acc0-$acc3 into 32-bit values and pick up b[4] in 32-bit
1655 ! pieces. But recall that SPARC is big-endian, which is why you'll
1656 ! observe that b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to
1657 ! reduction we "collect" the result back into 64-bit $acc0-$acc3.
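! For example, b[0] is picked up as [$bp+4] (its low 32 bits) followed by
! [$bp+0] (its high 32 bits), because the 64-bit limbs were stored with
! stx on a big-endian machine.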
1658 .align  32
1659 __ecp_nistz256_sub_from_vis3:
1660         ld      [$bp+4],$t0
1661         ld      [$bp+0],$t1
1662         ld      [$bp+12],$t2
1663         ld      [$bp+8],$t3
1664
1665         srlx    $acc0,32,$acc4
1666         not     $poly1,$poly1
1667         srlx    $acc1,32,$acc5
1668         subcc   $acc0,$t0,$acc0
1669         ld      [$bp+20],$t0
1670         subccc  $acc4,$t1,$acc4
1671         ld      [$bp+16],$t1
1672         subccc  $acc1,$t2,$acc1
1673         ld      [$bp+28],$t2
1674         and     $acc0,$poly1,$acc0
1675         subccc  $acc5,$t3,$acc5
1676         ld      [$bp+24],$t3
1677         sllx    $acc4,32,$acc4
1678         and     $acc1,$poly1,$acc1
1679         sllx    $acc5,32,$acc5
1680         or      $acc0,$acc4,$acc0
1681         srlx    $acc2,32,$acc4
1682         or      $acc1,$acc5,$acc1
1683         srlx    $acc3,32,$acc5
1684         subccc  $acc2,$t0,$acc2
1685         subccc  $acc4,$t1,$acc4
1686         subccc  $acc3,$t2,$acc3
1687         and     $acc2,$poly1,$acc2
1688         subccc  $acc5,$t3,$acc5
1689         sllx    $acc4,32,$acc4
1690         and     $acc3,$poly1,$acc3
1691         sllx    $acc5,32,$acc5
1692         or      $acc2,$acc4,$acc2
1693         subc    %g0,%g0,$acc4           ! did it borrow?
1694         b       .Lreduce_by_add_vis3
1695         or      $acc3,$acc5,$acc3
1696 .size   __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1697
1698 .align  32
1699 __ecp_nistz256_sub_morf_vis3:
1700         ld      [$bp+4],$t0
1701         ld      [$bp+0],$t1
1702         ld      [$bp+12],$t2
1703         ld      [$bp+8],$t3
1704
1705         srlx    $acc0,32,$acc4
1706         not     $poly1,$poly1
1707         srlx    $acc1,32,$acc5
1708         subcc   $t0,$acc0,$acc0
1709         ld      [$bp+20],$t0
1710         subccc  $t1,$acc4,$acc4
1711         ld      [$bp+16],$t1
1712         subccc  $t2,$acc1,$acc1
1713         ld      [$bp+28],$t2
1714         and     $acc0,$poly1,$acc0
1715         subccc  $t3,$acc5,$acc5
1716         ld      [$bp+24],$t3
1717         sllx    $acc4,32,$acc4
1718         and     $acc1,$poly1,$acc1
1719         sllx    $acc5,32,$acc5
1720         or      $acc0,$acc4,$acc0
1721         srlx    $acc2,32,$acc4
1722         or      $acc1,$acc5,$acc1
1723         srlx    $acc3,32,$acc5
1724         subccc  $t0,$acc2,$acc2
1725         subccc  $t1,$acc4,$acc4
1726         subccc  $t2,$acc3,$acc3
1727         and     $acc2,$poly1,$acc2
1728         subccc  $t3,$acc5,$acc5
1729         sllx    $acc4,32,$acc4
1730         and     $acc3,$poly1,$acc3
1731         sllx    $acc5,32,$acc5
1732         or      $acc2,$acc4,$acc2
1733         subc    %g0,%g0,$acc4           ! did it borrow?
1734         or      $acc3,$acc5,$acc3
1735
1736 .Lreduce_by_add_vis3:
1737
1738         addcc   $acc0,-1,$t0            ! add modulus
1739         not     $poly3,$t3
1740         addxccc $acc1,$poly1,$t1
1741         not     $poly1,$poly1           ! restore $poly1
1742         addxccc $acc2,%g0,$t2
1743         addxc   $acc3,$t3,$t3
1744
1745         movrnz  $acc4,$t0,$acc0         ! if a-b borrowed, ret = ret+mod
1746         movrnz  $acc4,$t1,$acc1
1747         stx     $acc0,[$rp]
1748         movrnz  $acc4,$t2,$acc2
1749         stx     $acc1,[$rp+8]
1750         movrnz  $acc4,$t3,$acc3
1751         stx     $acc2,[$rp+16]
1752         retl
1753         stx     $acc3,[$rp+24]
1754 .size   __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1755
1756 .align  32
1757 __ecp_nistz256_div_by_2_vis3:
1758         ! ret = (a is odd ? a+mod : a) >> 1
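        ! [adding the modulus to an odd value makes it even without
        ! changing it mod p, so the shift below loses nothing; the carry
        ! out of the addition becomes the new most significant bit]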
1759
1760         not     $poly1,$t1
1761         not     $poly3,$t3
1762         and     $acc0,1,$acc5
1763         addcc   $acc0,-1,$t0            ! add modulus
1764         addxccc $acc1,$t1,$t1
1765         addxccc $acc2,%g0,$t2
1766         addxccc $acc3,$t3,$t3
1767         addxc   %g0,%g0,$acc4           ! carry bit
1768
1769         movrnz  $acc5,$t0,$acc0
1770         movrnz  $acc5,$t1,$acc1
1771         movrnz  $acc5,$t2,$acc2
1772         movrnz  $acc5,$t3,$acc3
1773         movrz   $acc5,%g0,$acc4
1774
1775         ! ret >>= 1
1776
1777         srlx    $acc0,1,$acc0
1778         sllx    $acc1,63,$t0
1779         srlx    $acc1,1,$acc1
1780         or      $acc0,$t0,$acc0
1781         sllx    $acc2,63,$t1
1782         srlx    $acc2,1,$acc2
1783         or      $acc1,$t1,$acc1
1784         sllx    $acc3,63,$t2
1785         stx     $acc0,[$rp]
1786         srlx    $acc3,1,$acc3
1787         or      $acc2,$t2,$acc2
1788         sllx    $acc4,63,$t3            ! don't forget carry bit
1789         stx     $acc1,[$rp+8]
1790         or      $acc3,$t3,$acc3
1791         stx     $acc2,[$rp+16]
1792         retl
1793         stx     $acc3,[$rp+24]
1794 .size   __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1795
1796 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1797 ! 4x faster [on T4]...
1798 .align  32
1799 __ecp_nistz256_mul_mont_vis3:
1800         mulx    $a0,$bi,$acc0
1801         not     $poly3,$poly3           ! 0xFFFFFFFF00000001
1802         umulxhi $a0,$bi,$t0
1803         mulx    $a1,$bi,$acc1
1804         umulxhi $a1,$bi,$t1
1805         mulx    $a2,$bi,$acc2
1806         umulxhi $a2,$bi,$t2
1807         mulx    $a3,$bi,$acc3
1808         umulxhi $a3,$bi,$t3
1809         ldx     [$bp+8],$bi             ! b[1]
1810
1811         addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication
1812          sllx   $acc0,32,$t0
1813         addxccc $acc2,$t1,$acc2
1814          srlx   $acc0,32,$t1
1815         addxccc $acc3,$t2,$acc3
1816         addxc   %g0,$t3,$acc4
1817         mov     0,$acc5
1818 ___
1819 for($i=1;$i<4;$i++) {
1820         # A reduction iteration is normally performed by accumulating
1821         # the result of multiplying the modulus by the "magic" digit
1822         # [and omitting the least significant word, which is guaranteed
1823         # to be 0], but thanks to the special form of the modulus and
1824         # the "magic" digit being equal to the least significant word,
1825         # it can be performed with additions and subtractions alone. Indeed:
1826         #
1827         #            ffff0001.00000000.0000ffff.ffffffff
1828         # *                                     abcdefgh
1829         # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1830         #
1831         # Now, observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can
1832         # rewrite the above as:
1833         #
1834         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1835         # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1836         # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1837         #
1838         # or marking redundant operations:
1839         #
1840         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1841         # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1842         # - 0000abcd.efgh0000.--------.--------.--------
1843         #   ^^^^^^^^ but this word is calculated with umulxhi, because
1844         #            there is no subtract with 64-bit borrow:-(
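        #
        # In the code below this translates to: drop acc[0], add
        # acc[0]<<32 and acc[0]>>32 to the two least significant words
        # (the sllx/srlx pair), and add acc[0]*0xFFFFFFFF00000001, split
        # into low and high halves by sub and umulxhi, to the two most
        # significant ones.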
1845
1846 $code.=<<___;
1847         sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1848         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1849         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1850         mulx    $a0,$bi,$t0
1851         addxccc $acc2,$t1,$acc1
1852         mulx    $a1,$bi,$t1
1853         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1854         mulx    $a2,$bi,$t2
1855         addxccc $acc4,$t3,$acc3
1856         mulx    $a3,$bi,$t3
1857         addxc   $acc5,%g0,$acc4
1858
1859         addcc   $acc0,$t0,$acc0         ! accumulate low parts of multiplication
1860         umulxhi $a0,$bi,$t0
1861         addxccc $acc1,$t1,$acc1
1862         umulxhi $a1,$bi,$t1
1863         addxccc $acc2,$t2,$acc2
1864         umulxhi $a2,$bi,$t2
1865         addxccc $acc3,$t3,$acc3
1866         umulxhi $a3,$bi,$t3
1867         addxc   $acc4,%g0,$acc4
1868 ___
1869 $code.=<<___    if ($i<3);
1870         ldx     [$bp+8*($i+1)],$bi      ! bp[$i+1]
1871 ___
1872 $code.=<<___;
1873         addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication 
1874          sllx   $acc0,32,$t0
1875         addxccc $acc2,$t1,$acc2
1876          srlx   $acc0,32,$t1
1877         addxccc $acc3,$t2,$acc3
1878         addxccc $acc4,$t3,$acc4
1879         addxc   %g0,%g0,$acc5
1880 ___
1881 }
1882 $code.=<<___;
1883         sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1884         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1885         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1886         addxccc $acc2,$t1,$acc1
1887         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1888         addxccc $acc4,$t3,$acc3
1889         b       .Lmul_final_vis3        ! see below
1890         addxc   $acc5,%g0,$acc4
1891 .size   __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1892
1893 ! compared to above __ecp_nistz256_mul_mont_vis3 it's 21% less
1894 ! instructions, but only 14% faster [on T4]...
1895 .align  32
1896 __ecp_nistz256_sqr_mont_vis3:
1897         !  |  |  |  |  |  |a1*a0|  |
1898         !  |  |  |  |  |a2*a0|  |  |
1899         !  |  |a3*a2|a3*a0|  |  |  |
1900         !  |  |  |  |a2*a1|  |  |  |
1901         !  |  |  |a3*a1|  |  |  |  |
1902         ! *|  |  |  |  |  |  |  | 2|
1903         ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1904         !  |--+--+--+--+--+--+--+--|
1905         !  |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1906         !
1907         !  "can't overflow" remarks below mark carries into the high part
1908         !  of a multiplication result, which cannot overflow because the
1909         !  high part can never be all ones.
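        !
        !  In other words the off-diagonal products are computed once,
        !  doubled, and only then are the squares a[i]*a[i] added in.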
1910
1911         mulx    $a1,$a0,$acc1           ! a[1]*a[0]
1912         umulxhi $a1,$a0,$t1
1913         mulx    $a2,$a0,$acc2           ! a[2]*a[0]
1914         umulxhi $a2,$a0,$t2
1915         mulx    $a3,$a0,$acc3           ! a[3]*a[0]
1916         umulxhi $a3,$a0,$acc4
1917
1918         addcc   $acc2,$t1,$acc2         ! accumulate high parts of multiplication
1919         mulx    $a2,$a1,$t0             ! a[2]*a[1]
1920         umulxhi $a2,$a1,$t1
1921         addxccc $acc3,$t2,$acc3
1922         mulx    $a3,$a1,$t2             ! a[3]*a[1]
1923         umulxhi $a3,$a1,$t3
1924         addxc   $acc4,%g0,$acc4         ! can't overflow
1925
1926         mulx    $a3,$a2,$acc5           ! a[3]*a[2]
1927         not     $poly3,$poly3           ! 0xFFFFFFFF00000001
1928         umulxhi $a3,$a2,$acc6
1929
1930         addcc   $t2,$t1,$t1             ! accumulate high parts of multiplication
1931         mulx    $a0,$a0,$acc0           ! a[0]*a[0]
1932         addxc   $t3,%g0,$t2             ! can't overflow
1933
1934         addcc   $acc3,$t0,$acc3         ! accumulate low parts of multiplication
1935         umulxhi $a0,$a0,$a0
1936         addxccc $acc4,$t1,$acc4
1937         mulx    $a1,$a1,$t1             ! a[1]*a[1]
1938         addxccc $acc5,$t2,$acc5
1939         umulxhi $a1,$a1,$a1
1940         addxc   $acc6,%g0,$acc6         ! can't overflow
1941
1942         addcc   $acc1,$acc1,$acc1       ! acc[1-6]*=2
1943         mulx    $a2,$a2,$t2             ! a[2]*a[2]
1944         addxccc $acc2,$acc2,$acc2
1945         umulxhi $a2,$a2,$a2
1946         addxccc $acc3,$acc3,$acc3
1947         mulx    $a3,$a3,$t3             ! a[3]*a[3]
1948         addxccc $acc4,$acc4,$acc4
1949         umulxhi $a3,$a3,$a3
1950         addxccc $acc5,$acc5,$acc5
1951         addxccc $acc6,$acc6,$acc6
1952         addxc   %g0,%g0,$acc7
1953
1954         addcc   $acc1,$a0,$acc1         ! +a[i]*a[i]
1955         addxccc $acc2,$t1,$acc2
1956         addxccc $acc3,$a1,$acc3
1957         addxccc $acc4,$t2,$acc4
1958          sllx   $acc0,32,$t0
1959         addxccc $acc5,$a2,$acc5
1960          srlx   $acc0,32,$t1
1961         addxccc $acc6,$t3,$acc6
1962          sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1963         addxc   $acc7,$a3,$acc7
1964 ___
1965 for($i=0;$i<3;$i++) {                   # reductions, see commentary
1966                                         # in multiplication for details
1967 $code.=<<___;
1968         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1969         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1970          sllx   $acc0,32,$t0
1971         addxccc $acc2,$t1,$acc1
1972          srlx   $acc0,32,$t1
1973         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1974          sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
1975         addxc   %g0,$t3,$acc3           ! can't overflow
1976 ___
1977 }
1978 $code.=<<___;
1979         umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
1980         addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
1981         addxccc $acc2,$t1,$acc1
1982         addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
1983         addxc   %g0,$t3,$acc3           ! can't overflow
1984
1985         addcc   $acc0,$acc4,$acc0       ! accumulate upper half
1986         addxccc $acc1,$acc5,$acc1
1987         addxccc $acc2,$acc6,$acc2
1988         addxccc $acc3,$acc7,$acc3
1989         addxc   %g0,%g0,$acc4
1990
1991 .Lmul_final_vis3:
1992
1993         ! Final step is "if result >= mod, subtract mod", but since comparison
1994         ! implies subtraction, we do the subtraction and then copy the outcome
1995         ! only if it didn't borrow. Note that because we [have to] replace the
1996         ! subtraction with addition of the negated modulus, the carry/borrow
1997         ! logic is inverted: carry set means "no borrow".
1998
1999         addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
2000         not     $poly3,$poly3           ! restore 0x00000000FFFFFFFE
2001         addxccc $acc1,$poly1,$t1
2002         addxccc $acc2,$minus1,$t2
2003         addxccc $acc3,$poly3,$t3
2004         addxccc $acc4,$minus1,%g0       ! did it carry?
2005
2006         movcs   %xcc,$t0,$acc0
2007         movcs   %xcc,$t1,$acc1
2008         stx     $acc0,[$rp]
2009         movcs   %xcc,$t2,$acc2
2010         stx     $acc1,[$rp+8]
2011         movcs   %xcc,$t3,$acc3
2012         stx     $acc2,[$rp+16]
2013         retl
2014         stx     $acc3,[$rp+24]
2015 .size   __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2016 ___
2017
2018 ########################################################################
2019 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2020 #
2021 {
2022 my ($res_x,$res_y,$res_z,
2023     $in_x,$in_y,$in_z,
2024     $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2025 # above map() describes stack layout with 10 temporary
2026 # 256-bit vectors on top.
2027
2028 $code.=<<___;
2029 .align  32
2030 ecp_nistz256_point_double_vis3:
2031         save    %sp,-STACK64_FRAME-32*10,%sp
2032
2033         mov     $rp,$rp_real
2034 .Ldouble_shortcut_vis3:
2035         mov     -1,$minus1
2036         mov     -2,$poly3
2037         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2038         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2039
2040         ! convert input to uint64_t[4]
2041         ld      [$ap],$a0                       ! in_x
2042         ld      [$ap+4],$t0
2043         ld      [$ap+8],$a1
2044         ld      [$ap+12],$t1
2045         ld      [$ap+16],$a2
2046         ld      [$ap+20],$t2
2047         ld      [$ap+24],$a3
2048         ld      [$ap+28],$t3
2049         sllx    $t0,32,$t0
2050         sllx    $t1,32,$t1
2051         ld      [$ap+32],$acc0                  ! in_y
2052         or      $a0,$t0,$a0
2053         ld      [$ap+32+4],$t0
2054         sllx    $t2,32,$t2
2055         ld      [$ap+32+8],$acc1
2056         or      $a1,$t1,$a1
2057         ld      [$ap+32+12],$t1
2058         sllx    $t3,32,$t3
2059         ld      [$ap+32+16],$acc2
2060         or      $a2,$t2,$a2
2061         ld      [$ap+32+20],$t2
2062         or      $a3,$t3,$a3
2063         ld      [$ap+32+24],$acc3
2064         sllx    $t0,32,$t0
2065         ld      [$ap+32+28],$t3
2066         sllx    $t1,32,$t1
2067         stx     $a0,[%sp+LOCALS64+$in_x]
2068         sllx    $t2,32,$t2
2069         stx     $a1,[%sp+LOCALS64+$in_x+8]
2070         sllx    $t3,32,$t3
2071         stx     $a2,[%sp+LOCALS64+$in_x+16]
2072         or      $acc0,$t0,$acc0
2073         stx     $a3,[%sp+LOCALS64+$in_x+24]
2074         or      $acc1,$t1,$acc1
2075         stx     $acc0,[%sp+LOCALS64+$in_y]
2076         or      $acc2,$t2,$acc2
2077         stx     $acc1,[%sp+LOCALS64+$in_y+8]
2078         or      $acc3,$t3,$acc3
2079         stx     $acc2,[%sp+LOCALS64+$in_y+16]
2080         stx     $acc3,[%sp+LOCALS64+$in_y+24]
2081
2082         ld      [$ap+64],$a0                    ! in_z
2083         ld      [$ap+64+4],$t0
2084         ld      [$ap+64+8],$a1
2085         ld      [$ap+64+12],$t1
2086         ld      [$ap+64+16],$a2
2087         ld      [$ap+64+20],$t2
2088         ld      [$ap+64+24],$a3
2089         ld      [$ap+64+28],$t3
2090         sllx    $t0,32,$t0
2091         sllx    $t1,32,$t1
2092         or      $a0,$t0,$a0
2093         sllx    $t2,32,$t2
2094         or      $a1,$t1,$a1
2095         sllx    $t3,32,$t3
2096         or      $a2,$t2,$a2
2097         or      $a3,$t3,$a3
2098         sllx    $t0,32,$t0
2099         sllx    $t1,32,$t1
2100         stx     $a0,[%sp+LOCALS64+$in_z]
2101         sllx    $t2,32,$t2
2102         stx     $a1,[%sp+LOCALS64+$in_z+8]
2103         sllx    $t3,32,$t3
2104         stx     $a2,[%sp+LOCALS64+$in_z+16]
2105         stx     $a3,[%sp+LOCALS64+$in_z+24]
2106
2107         ! in_y is still in $acc0-$acc3
2108         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(S, in_y);
2109         add     %sp,LOCALS64+$S,$rp
2110
2111         ! in_z is still in $a0-$a3
2112         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Zsqr, in_z);
2113         add     %sp,LOCALS64+$Zsqr,$rp
2114
2115         mov     $acc0,$a0                       ! put Zsqr aside
2116         mov     $acc1,$a1
2117         mov     $acc2,$a2
2118         mov     $acc3,$a3
2119
2120         add     %sp,LOCALS64+$in_x,$bp
2121         call    __ecp_nistz256_add_vis3         ! p256_add(M, Zsqr, in_x);
2122         add     %sp,LOCALS64+$M,$rp
2123
2124         mov     $a0,$acc0                       ! restore Zsqr
2125         ldx     [%sp+LOCALS64+$S],$a0           ! forward load
2126         mov     $a1,$acc1
2127         ldx     [%sp+LOCALS64+$S+8],$a1
2128         mov     $a2,$acc2
2129         ldx     [%sp+LOCALS64+$S+16],$a2
2130         mov     $a3,$acc3
2131         ldx     [%sp+LOCALS64+$S+24],$a3
2132
2133         add     %sp,LOCALS64+$in_x,$bp
2134         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(Zsqr, in_x, Zsqr);
2135         add     %sp,LOCALS64+$Zsqr,$rp
2136
2137         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(S, S);
2138         add     %sp,LOCALS64+$S,$rp
2139
2140         ldx     [%sp+LOCALS64+$in_z],$bi
2141         ldx     [%sp+LOCALS64+$in_y],$a0
2142         ldx     [%sp+LOCALS64+$in_y+8],$a1
2143         ldx     [%sp+LOCALS64+$in_y+16],$a2
2144         ldx     [%sp+LOCALS64+$in_y+24],$a3
2145         add     %sp,LOCALS64+$in_z,$bp
2146         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(tmp0, in_z, in_y);
2147         add     %sp,LOCALS64+$tmp0,$rp
2148
2149         ldx     [%sp+LOCALS64+$M],$bi           ! forward load
2150         ldx     [%sp+LOCALS64+$Zsqr],$a0
2151         ldx     [%sp+LOCALS64+$Zsqr+8],$a1
2152         ldx     [%sp+LOCALS64+$Zsqr+16],$a2
2153         ldx     [%sp+LOCALS64+$Zsqr+24],$a3
2154
2155         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(res_z, tmp0);
2156         add     %sp,LOCALS64+$res_z,$rp
2157
2158         add     %sp,LOCALS64+$M,$bp
2159         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(M, M, Zsqr);
2160         add     %sp,LOCALS64+$M,$rp
2161
2162         mov     $acc0,$a0                       ! put aside M
2163         mov     $acc1,$a1
2164         mov     $acc2,$a2
2165         mov     $acc3,$a3
2166         call    __ecp_nistz256_mul_by_2_vis3
2167         add     %sp,LOCALS64+$M,$rp
2168         mov     $a0,$t0                         ! copy M
2169         ldx     [%sp+LOCALS64+$S],$a0           ! forward load
2170         mov     $a1,$t1
2171         ldx     [%sp+LOCALS64+$S+8],$a1
2172         mov     $a2,$t2
2173         ldx     [%sp+LOCALS64+$S+16],$a2
2174         mov     $a3,$t3
2175         ldx     [%sp+LOCALS64+$S+24],$a3
2176         call    __ecp_nistz256_add_noload_vis3  ! p256_mul_by_3(M, M);
2177         add     %sp,LOCALS64+$M,$rp
2178
2179         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(tmp0, S);
2180         add     %sp,LOCALS64+$tmp0,$rp
2181
2182         ldx     [%sp+LOCALS64+$S],$bi           ! forward load
2183         ldx     [%sp+LOCALS64+$in_x],$a0
2184         ldx     [%sp+LOCALS64+$in_x+8],$a1
2185         ldx     [%sp+LOCALS64+$in_x+16],$a2
2186         ldx     [%sp+LOCALS64+$in_x+24],$a3
2187
2188         call    __ecp_nistz256_div_by_2_vis3    ! p256_div_by_2(res_y, tmp0);
2189         add     %sp,LOCALS64+$res_y,$rp
2190
2191         add     %sp,LOCALS64+$S,$bp
2192         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, in_x);
2193         add     %sp,LOCALS64+$S,$rp
2194
2195         ldx     [%sp+LOCALS64+$M],$a0           ! forward load
2196         ldx     [%sp+LOCALS64+$M+8],$a1
2197         ldx     [%sp+LOCALS64+$M+16],$a2
2198         ldx     [%sp+LOCALS64+$M+24],$a3
2199
2200         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(tmp0, S);
2201         add     %sp,LOCALS64+$tmp0,$rp
2202
2203         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(res_x, M);
2204         add     %sp,LOCALS64+$res_x,$rp
2205
2206         add     %sp,LOCALS64+$tmp0,$bp
2207         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_x, res_x, tmp0);
2208         add     %sp,LOCALS64+$res_x,$rp
2209
2210         ldx     [%sp+LOCALS64+$M],$a0           ! forward load
2211         ldx     [%sp+LOCALS64+$M+8],$a1
2212         ldx     [%sp+LOCALS64+$M+16],$a2
2213         ldx     [%sp+LOCALS64+$M+24],$a3
2214
2215         add     %sp,LOCALS64+$S,$bp
2216         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(S, S, res_x);
2217         add     %sp,LOCALS64+$S,$rp
2218
2219         mov     $acc0,$bi
2220         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, M);
2221         add     %sp,LOCALS64+$S,$rp
2222
2223         ldx     [%sp+LOCALS64+$res_x],$a0       ! forward load
2224         ldx     [%sp+LOCALS64+$res_x+8],$a1
2225         ldx     [%sp+LOCALS64+$res_x+16],$a2
2226         ldx     [%sp+LOCALS64+$res_x+24],$a3
2227
2228         add     %sp,LOCALS64+$res_y,$bp
2229         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, S, res_y);
2230         add     %sp,LOCALS64+$res_y,$rp
2231
2232         ! convert output to uint32_t[8]
2233         srlx    $a0,32,$t0
2234         srlx    $a1,32,$t1
2235         st      $a0,[$rp_real]                  ! res_x
2236         srlx    $a2,32,$t2
2237         st      $t0,[$rp_real+4]
2238         srlx    $a3,32,$t3
2239         st      $a1,[$rp_real+8]
2240         st      $t1,[$rp_real+12]
2241         st      $a2,[$rp_real+16]
2242         st      $t2,[$rp_real+20]
2243         st      $a3,[$rp_real+24]
2244         st      $t3,[$rp_real+28]
2245
2246         ldx     [%sp+LOCALS64+$res_z],$a0       ! forward load
2247         srlx    $acc0,32,$t0
2248         ldx     [%sp+LOCALS64+$res_z+8],$a1
2249         srlx    $acc1,32,$t1
2250         ldx     [%sp+LOCALS64+$res_z+16],$a2
2251         srlx    $acc2,32,$t2
2252         ldx     [%sp+LOCALS64+$res_z+24],$a3
2253         srlx    $acc3,32,$t3
2254         st      $acc0,[$rp_real+32]             ! res_y
2255         st      $t0,  [$rp_real+32+4]
2256         st      $acc1,[$rp_real+32+8]
2257         st      $t1,  [$rp_real+32+12]
2258         st      $acc2,[$rp_real+32+16]
2259         st      $t2,  [$rp_real+32+20]
2260         st      $acc3,[$rp_real+32+24]
2261         st      $t3,  [$rp_real+32+28]
2262
2263         srlx    $a0,32,$t0
2264         srlx    $a1,32,$t1
2265         st      $a0,[$rp_real+64]               ! res_z
2266         srlx    $a2,32,$t2
2267         st      $t0,[$rp_real+64+4]
2268         srlx    $a3,32,$t3
2269         st      $a1,[$rp_real+64+8]
2270         st      $t1,[$rp_real+64+12]
2271         st      $a2,[$rp_real+64+16]
2272         st      $t2,[$rp_real+64+20]
2273         st      $a3,[$rp_real+64+24]
2274         st      $t3,[$rp_real+64+28]
2275
2276         ret
2277         restore
2278 .size   ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2279 ___
2280 }
2281 ########################################################################
2282 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2283 #                             const P256_POINT *in2);
2284 {
2285 my ($res_x,$res_y,$res_z,
2286     $in1_x,$in1_y,$in1_z,
2287     $in2_x,$in2_y,$in2_z,
2288     $H,$Hsqr,$R,$Rsqr,$Hcub,
2289     $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2290 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2291
2292 # above map() describes stack layout with 18 temporary
2293 # 256-bit vectors on top. Then we reserve some space for
2294 # !in1infty, !in2infty and result of check for zero.
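# The flags live in the 32-byte area at the top of the frame: !in1infty
# at [%fp+STACK_BIAS-16], !in2infty at [%fp+STACK_BIAS-8] and the zero
# check at [%fp+STACK_BIAS-24].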
2295
2296 $code.=<<___;
2297 .globl  ecp_nistz256_point_add_vis3
2298 .align  32
2299 ecp_nistz256_point_add_vis3:
2300         save    %sp,-STACK64_FRAME-32*18-32,%sp
2301
2302         mov     $rp,$rp_real
2303         mov     -1,$minus1
2304         mov     -2,$poly3
2305         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2306         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2307
2308         ! convert input to uint64_t[4]
2309         ld      [$bp],$a0                       ! in2_x
2310         ld      [$bp+4],$t0
2311         ld      [$bp+8],$a1
2312         ld      [$bp+12],$t1
2313         ld      [$bp+16],$a2
2314         ld      [$bp+20],$t2
2315         ld      [$bp+24],$a3
2316         ld      [$bp+28],$t3
2317         sllx    $t0,32,$t0
2318         sllx    $t1,32,$t1
2319         ld      [$bp+32],$acc0                  ! in2_y
2320         or      $a0,$t0,$a0
2321         ld      [$bp+32+4],$t0
2322         sllx    $t2,32,$t2
2323         ld      [$bp+32+8],$acc1
2324         or      $a1,$t1,$a1
2325         ld      [$bp+32+12],$t1
2326         sllx    $t3,32,$t3
2327         ld      [$bp+32+16],$acc2
2328         or      $a2,$t2,$a2
2329         ld      [$bp+32+20],$t2
2330         or      $a3,$t3,$a3
2331         ld      [$bp+32+24],$acc3
2332         sllx    $t0,32,$t0
2333         ld      [$bp+32+28],$t3
2334         sllx    $t1,32,$t1
2335         stx     $a0,[%sp+LOCALS64+$in2_x]
2336         sllx    $t2,32,$t2
2337         stx     $a1,[%sp+LOCALS64+$in2_x+8]
2338         sllx    $t3,32,$t3
2339         stx     $a2,[%sp+LOCALS64+$in2_x+16]
2340         or      $acc0,$t0,$acc0
2341         stx     $a3,[%sp+LOCALS64+$in2_x+24]
2342         or      $acc1,$t1,$acc1
2343         stx     $acc0,[%sp+LOCALS64+$in2_y]
2344         or      $acc2,$t2,$acc2
2345         stx     $acc1,[%sp+LOCALS64+$in2_y+8]
2346         or      $acc3,$t3,$acc3
2347         stx     $acc2,[%sp+LOCALS64+$in2_y+16]
2348         stx     $acc3,[%sp+LOCALS64+$in2_y+24]
2349
2350         or      $a1,$a0,$a0
2351         or      $a3,$a2,$a2
2352         or      $acc1,$acc0,$acc0
2353         or      $acc3,$acc2,$acc2
2354         or      $a2,$a0,$a0
2355         or      $acc2,$acc0,$acc0
2356         or      $acc0,$a0,$a0
2357         movrnz  $a0,-1,$a0                      ! !in2infty
2358         stx     $a0,[%fp+STACK_BIAS-8]
2359
2360         ld      [$bp+64],$acc0                  ! in2_z
2361         ld      [$bp+64+4],$t0
2362         ld      [$bp+64+8],$acc1
2363         ld      [$bp+64+12],$t1
2364         ld      [$bp+64+16],$acc2
2365         ld      [$bp+64+20],$t2
2366         ld      [$bp+64+24],$acc3
2367         ld      [$bp+64+28],$t3
2368         sllx    $t0,32,$t0
2369         sllx    $t1,32,$t1
2370         ld      [$ap],$a0                       ! in1_x
2371         or      $acc0,$t0,$acc0
2372         ld      [$ap+4],$t0
2373         sllx    $t2,32,$t2
2374         ld      [$ap+8],$a1
2375         or      $acc1,$t1,$acc1
2376         ld      [$ap+12],$t1
2377         sllx    $t3,32,$t3
2378         ld      [$ap+16],$a2
2379         or      $acc2,$t2,$acc2
2380         ld      [$ap+20],$t2
2381         or      $acc3,$t3,$acc3
2382         ld      [$ap+24],$a3
2383         sllx    $t0,32,$t0
2384         ld      [$ap+28],$t3
2385         sllx    $t1,32,$t1
2386         stx     $acc0,[%sp+LOCALS64+$in2_z]
2387         sllx    $t2,32,$t2
2388         stx     $acc1,[%sp+LOCALS64+$in2_z+8]
2389         sllx    $t3,32,$t3
2390         stx     $acc2,[%sp+LOCALS64+$in2_z+16]
2391         stx     $acc3,[%sp+LOCALS64+$in2_z+24]
2392
2393         or      $a0,$t0,$a0
2394         ld      [$ap+32],$acc0                  ! in1_y
2395         or      $a1,$t1,$a1
2396         ld      [$ap+32+4],$t0
2397         or      $a2,$t2,$a2
2398         ld      [$ap+32+8],$acc1
2399         or      $a3,$t3,$a3
2400         ld      [$ap+32+12],$t1
2401         ld      [$ap+32+16],$acc2
2402         ld      [$ap+32+20],$t2
2403         ld      [$ap+32+24],$acc3
2404         sllx    $t0,32,$t0
2405         ld      [$ap+32+28],$t3
2406         sllx    $t1,32,$t1
2407         stx     $a0,[%sp+LOCALS64+$in1_x]
2408         sllx    $t2,32,$t2
2409         stx     $a1,[%sp+LOCALS64+$in1_x+8]
2410         sllx    $t3,32,$t3
2411         stx     $a2,[%sp+LOCALS64+$in1_x+16]
2412         or      $acc0,$t0,$acc0
2413         stx     $a3,[%sp+LOCALS64+$in1_x+24]
2414         or      $acc1,$t1,$acc1
2415         stx     $acc0,[%sp+LOCALS64+$in1_y]
2416         or      $acc2,$t2,$acc2
2417         stx     $acc1,[%sp+LOCALS64+$in1_y+8]
2418         or      $acc3,$t3,$acc3
2419         stx     $acc2,[%sp+LOCALS64+$in1_y+16]
2420         stx     $acc3,[%sp+LOCALS64+$in1_y+24]
2421
2422         or      $a1,$a0,$a0
2423         or      $a3,$a2,$a2
2424         or      $acc1,$acc0,$acc0
2425         or      $acc3,$acc2,$acc2
2426         or      $a2,$a0,$a0
2427         or      $acc2,$acc0,$acc0
2428         or      $acc0,$a0,$a0
2429         movrnz  $a0,-1,$a0                      ! !in1infty
2430         stx     $a0,[%fp+STACK_BIAS-16]
2431
2432         ldx     [%sp+LOCALS64+$in2_z],$a0       ! forward load
2433         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2434         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2435         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2436
2437         ld      [$ap+64],$acc0                  ! in1_z
2438         ld      [$ap+64+4],$t0
2439         ld      [$ap+64+8],$acc1
2440         ld      [$ap+64+12],$t1
2441         ld      [$ap+64+16],$acc2
2442         ld      [$ap+64+20],$t2
2443         ld      [$ap+64+24],$acc3
2444         ld      [$ap+64+28],$t3
2445         sllx    $t0,32,$t0
2446         sllx    $t1,32,$t1
2447         or      $acc0,$t0,$acc0
2448         sllx    $t2,32,$t2
2449         or      $acc1,$t1,$acc1
2450         sllx    $t3,32,$t3
2451         stx     $acc0,[%sp+LOCALS64+$in1_z]
2452         or      $acc2,$t2,$acc2
2453         stx     $acc1,[%sp+LOCALS64+$in1_z+8]
2454         or      $acc3,$t3,$acc3
2455         stx     $acc2,[%sp+LOCALS64+$in1_z+16]
2456         stx     $acc3,[%sp+LOCALS64+$in1_z+24]
2457
2458         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z2sqr, in2_z);
2459         add     %sp,LOCALS64+$Z2sqr,$rp
2460
2461         ldx     [%sp+LOCALS64+$in1_z],$a0
2462         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2463         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2464         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2465         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
2466         add     %sp,LOCALS64+$Z1sqr,$rp
2467
2468         ldx     [%sp+LOCALS64+$Z2sqr],$bi
2469         ldx     [%sp+LOCALS64+$in2_z],$a0
2470         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2471         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2472         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2473         add     %sp,LOCALS64+$Z2sqr,$bp
2474         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, Z2sqr, in2_z);
2475         add     %sp,LOCALS64+$S1,$rp
2476
2477         ldx     [%sp+LOCALS64+$Z1sqr],$bi
2478         ldx     [%sp+LOCALS64+$in1_z],$a0
2479         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2480         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2481         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2482         add     %sp,LOCALS64+$Z1sqr,$bp
2483         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, Z1sqr, in1_z);
2484         add     %sp,LOCALS64+$S2,$rp
2485
2486         ldx     [%sp+LOCALS64+$S1],$bi
2487         ldx     [%sp+LOCALS64+$in1_y],$a0
2488         ldx     [%sp+LOCALS64+$in1_y+8],$a1
2489         ldx     [%sp+LOCALS64+$in1_y+16],$a2
2490         ldx     [%sp+LOCALS64+$in1_y+24],$a3
2491         add     %sp,LOCALS64+$S1,$bp
2492         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, S1, in1_y);
2493         add     %sp,LOCALS64+$S1,$rp
2494
2495         ldx     [%sp+LOCALS64+$S2],$bi
2496         ldx     [%sp+LOCALS64+$in2_y],$a0
2497         ldx     [%sp+LOCALS64+$in2_y+8],$a1
2498         ldx     [%sp+LOCALS64+$in2_y+16],$a2
2499         ldx     [%sp+LOCALS64+$in2_y+24],$a3
2500         add     %sp,LOCALS64+$S2,$bp
2501         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S2, in2_y);
2502         add     %sp,LOCALS64+$S2,$rp
2503
2504         ldx     [%sp+LOCALS64+$Z2sqr],$bi       ! forward load
2505         ldx     [%sp+LOCALS64+$in1_x],$a0
2506         ldx     [%sp+LOCALS64+$in1_x+8],$a1
2507         ldx     [%sp+LOCALS64+$in1_x+16],$a2
2508         ldx     [%sp+LOCALS64+$in1_x+24],$a3
2509
2510         add     %sp,LOCALS64+$S1,$bp
2511         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(R, S2, S1);
2512         add     %sp,LOCALS64+$R,$rp
2513
2514         or      $acc1,$acc0,$acc0               ! see if result is zero
2515         or      $acc3,$acc2,$acc2
2516         or      $acc2,$acc0,$acc0
2517         stx     $acc0,[%fp+STACK_BIAS-24]
2518
2519         add     %sp,LOCALS64+$Z2sqr,$bp
2520         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U1, in1_x, Z2sqr);
2521         add     %sp,LOCALS64+$U1,$rp
2522
2523         ldx     [%sp+LOCALS64+$Z1sqr],$bi
2524         ldx     [%sp+LOCALS64+$in2_x],$a0
2525         ldx     [%sp+LOCALS64+$in2_x+8],$a1
2526         ldx     [%sp+LOCALS64+$in2_x+16],$a2
2527         ldx     [%sp+LOCALS64+$in2_x+24],$a3
2528         add     %sp,LOCALS64+$Z1sqr,$bp
2529         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, in2_x, Z1sqr);
2530         add     %sp,LOCALS64+$U2,$rp
2531
2532         ldx     [%sp+LOCALS64+$R],$a0           ! forward load
2533         ldx     [%sp+LOCALS64+$R+8],$a1
2534         ldx     [%sp+LOCALS64+$R+16],$a2
2535         ldx     [%sp+LOCALS64+$R+24],$a3
2536
2537         add     %sp,LOCALS64+$U1,$bp
2538         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(H, U2, U1);
2539         add     %sp,LOCALS64+$H,$rp
2540
2541         or      $acc1,$acc0,$acc0               ! see if result is zero
2542         or      $acc3,$acc2,$acc2
2543         orcc    $acc2,$acc0,$acc0
2544
2545         bne,pt  %xcc,.Ladd_proceed_vis3         ! is_equal(U1,U2)?
2546         nop
2547
2548         ldx     [%fp+STACK_BIAS-8],$t0
2549         ldx     [%fp+STACK_BIAS-16],$t1
2550         ldx     [%fp+STACK_BIAS-24],$t2
2551         andcc   $t0,$t1,%g0
2552         be,pt   %xcc,.Ladd_proceed_vis3         ! (in1infty || in2infty)?
2553         nop
2554         andcc   $t2,$t2,%g0
2555         be,a,pt %xcc,.Ldouble_shortcut_vis3     ! is_equal(S1,S2)?
2556         add     %sp,32*(12-10)+32,%sp           ! difference in frame sizes
2557
2558         st      %g0,[$rp_real]
2559         st      %g0,[$rp_real+4]
2560         st      %g0,[$rp_real+8]
2561         st      %g0,[$rp_real+12]
2562         st      %g0,[$rp_real+16]
2563         st      %g0,[$rp_real+20]
2564         st      %g0,[$rp_real+24]
2565         st      %g0,[$rp_real+28]
2566         st      %g0,[$rp_real+32]
2567         st      %g0,[$rp_real+32+4]
2568         st      %g0,[$rp_real+32+8]
2569         st      %g0,[$rp_real+32+12]
2570         st      %g0,[$rp_real+32+16]
2571         st      %g0,[$rp_real+32+20]
2572         st      %g0,[$rp_real+32+24]
2573         st      %g0,[$rp_real+32+28]
2574         st      %g0,[$rp_real+64]
2575         st      %g0,[$rp_real+64+4]
2576         st      %g0,[$rp_real+64+8]
2577         st      %g0,[$rp_real+64+12]
2578         st      %g0,[$rp_real+64+16]
2579         st      %g0,[$rp_real+64+20]
2580         st      %g0,[$rp_real+64+24]
2581         st      %g0,[$rp_real+64+28]
2582         b       .Ladd_done_vis3
2583         nop
2584
2585 .align  16
2586 .Ladd_proceed_vis3:
2587         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Rsqr, R);
2588         add     %sp,LOCALS64+$Rsqr,$rp
2589
2590         ldx     [%sp+LOCALS64+$H],$bi
2591         ldx     [%sp+LOCALS64+$in1_z],$a0
2592         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2593         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2594         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2595         add     %sp,LOCALS64+$H,$bp
2596         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, H, in1_z);
2597         add     %sp,LOCALS64+$res_z,$rp
2598
2599         ldx     [%sp+LOCALS64+$H],$a0
2600         ldx     [%sp+LOCALS64+$H+8],$a1
2601         ldx     [%sp+LOCALS64+$H+16],$a2
2602         ldx     [%sp+LOCALS64+$H+24],$a3
2603         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Hsqr, H);
2604         add     %sp,LOCALS64+$Hsqr,$rp
2605
2606         ldx     [%sp+LOCALS64+$res_z],$bi
2607         ldx     [%sp+LOCALS64+$in2_z],$a0
2608         ldx     [%sp+LOCALS64+$in2_z+8],$a1
2609         ldx     [%sp+LOCALS64+$in2_z+16],$a2
2610         ldx     [%sp+LOCALS64+$in2_z+24],$a3
2611         add     %sp,LOCALS64+$res_z,$bp
2612         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, res_z, in2_z);
2613         add     %sp,LOCALS64+$res_z,$rp
2614
2615         ldx     [%sp+LOCALS64+$H],$bi
2616         ldx     [%sp+LOCALS64+$Hsqr],$a0
2617         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2618         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2619         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2620         add     %sp,LOCALS64+$H,$bp
2621         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(Hcub, Hsqr, H);
2622         add     %sp,LOCALS64+$Hcub,$rp
2623
2624         ldx     [%sp+LOCALS64+$U1],$bi
2625         ldx     [%sp+LOCALS64+$Hsqr],$a0
2626         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2627         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2628         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2629         add     %sp,LOCALS64+$U1,$bp
2630         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, U1, Hsqr);
2631         add     %sp,LOCALS64+$U2,$rp
2632
2633         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(Hsqr, U2);
2634         add     %sp,LOCALS64+$Hsqr,$rp
2635
2636         add     %sp,LOCALS64+$Rsqr,$bp
2637         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_x, Rsqr, Hsqr);
2638         add     %sp,LOCALS64+$res_x,$rp
2639
2640         add     %sp,LOCALS64+$Hcub,$bp
2641         call    __ecp_nistz256_sub_from_vis3    !  p256_sub(res_x, res_x, Hcub);
2642         add     %sp,LOCALS64+$res_x,$rp
2643
2644         ldx     [%sp+LOCALS64+$S1],$bi          ! forward load
2645         ldx     [%sp+LOCALS64+$Hcub],$a0
2646         ldx     [%sp+LOCALS64+$Hcub+8],$a1
2647         ldx     [%sp+LOCALS64+$Hcub+16],$a2
2648         ldx     [%sp+LOCALS64+$Hcub+24],$a3
2649
2650         add     %sp,LOCALS64+$U2,$bp
2651         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_y, U2, res_x);
2652         add     %sp,LOCALS64+$res_y,$rp
2653
2654         add     %sp,LOCALS64+$S1,$bp
2655         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S1, Hcub);
2656         add     %sp,LOCALS64+$S2,$rp
2657
2658         ldx     [%sp+LOCALS64+$R],$bi
2659         ldx     [%sp+LOCALS64+$res_y],$a0
2660         ldx     [%sp+LOCALS64+$res_y+8],$a1
2661         ldx     [%sp+LOCALS64+$res_y+16],$a2
2662         ldx     [%sp+LOCALS64+$res_y+24],$a3
2663         add     %sp,LOCALS64+$R,$bp
2664         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_y, res_y, R);
2665         add     %sp,LOCALS64+$res_y,$rp
2666
2667         add     %sp,LOCALS64+$S2,$bp
2668         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, res_y, S2);
2669         add     %sp,LOCALS64+$res_y,$rp
2670
2671         ldx     [%fp+STACK_BIAS-16],$t1         ! !in1infty
2672         ldx     [%fp+STACK_BIAS-8],$t2          ! !in2infty
2673 ___
2674 for($i=0;$i<96;$i+=16) {                        # conditional moves
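# The movrz pairs below select in2 if in1 was infinity and in1 if in2
# was infinity; the selected limbs are then split back into uint32_t[8]
# and stored to the output point.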
2675 $code.=<<___;
2676         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2677         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2678         ldx     [%sp+LOCALS64+$in2_x+$i],$acc2  ! in2
2679         ldx     [%sp+LOCALS64+$in2_x+$i+8],$acc3
2680         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2681         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2682         movrz   $t1,$acc2,$acc0
2683         movrz   $t1,$acc3,$acc1
2684         movrz   $t2,$acc4,$acc0
2685         movrz   $t2,$acc5,$acc1
2686         srlx    $acc0,32,$acc2
2687         srlx    $acc1,32,$acc3
2688         st      $acc0,[$rp_real+$i]
2689         st      $acc2,[$rp_real+$i+4]
2690         st      $acc1,[$rp_real+$i+8]
2691         st      $acc3,[$rp_real+$i+12]
2692 ___
2693 }
2694 $code.=<<___;
2695 .Ladd_done_vis3:
2696         ret
2697         restore
2698 .size   ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
2699 ___
2700 }
2701 ########################################################################
2702 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
2703 #                                    const P256_POINT_AFFINE *in2);
2704 {
2705 my ($res_x,$res_y,$res_z,
2706     $in1_x,$in1_y,$in1_z,
2707     $in2_x,$in2_y,
2708     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
2709 my $Z1sqr = $S2;
2710 # above map() describes stack layout with 15 temporary
2711 # 256-bit vectors on top. Then we reserve some space for
2712 # !in1infty and !in2infty.
2713
2714 $code.=<<___;
2715 .align  32
2716 ecp_nistz256_point_add_affine_vis3:
2717         save    %sp,-STACK64_FRAME-32*15-32,%sp
2718
2719         mov     $rp,$rp_real
2720         mov     -1,$minus1
2721         mov     -2,$poly3
2722         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2723         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2724
2725         ! convert input to uint64_t[4]
2726         ld      [$bp],$a0                       ! in2_x
2727         ld      [$bp+4],$t0
2728         ld      [$bp+8],$a1
2729         ld      [$bp+12],$t1
2730         ld      [$bp+16],$a2
2731         ld      [$bp+20],$t2
2732         ld      [$bp+24],$a3
2733         ld      [$bp+28],$t3
2734         sllx    $t0,32,$t0
2735         sllx    $t1,32,$t1
2736         ld      [$bp+32],$acc0                  ! in2_y
2737         or      $a0,$t0,$a0
2738         ld      [$bp+32+4],$t0
2739         sllx    $t2,32,$t2
2740         ld      [$bp+32+8],$acc1
2741         or      $a1,$t1,$a1
2742         ld      [$bp+32+12],$t1
2743         sllx    $t3,32,$t3
2744         ld      [$bp+32+16],$acc2
2745         or      $a2,$t2,$a2
2746         ld      [$bp+32+20],$t2
2747         or      $a3,$t3,$a3
2748         ld      [$bp+32+24],$acc3
2749         sllx    $t0,32,$t0
2750         ld      [$bp+32+28],$t3
2751         sllx    $t1,32,$t1
2752         stx     $a0,[%sp+LOCALS64+$in2_x]
2753         sllx    $t2,32,$t2
2754         stx     $a1,[%sp+LOCALS64+$in2_x+8]
2755         sllx    $t3,32,$t3
2756         stx     $a2,[%sp+LOCALS64+$in2_x+16]
2757         or      $acc0,$t0,$acc0
2758         stx     $a3,[%sp+LOCALS64+$in2_x+24]
2759         or      $acc1,$t1,$acc1
2760         stx     $acc0,[%sp+LOCALS64+$in2_y]
2761         or      $acc2,$t2,$acc2
2762         stx     $acc1,[%sp+LOCALS64+$in2_y+8]
2763         or      $acc3,$t3,$acc3
2764         stx     $acc2,[%sp+LOCALS64+$in2_y+16]
2765         stx     $acc3,[%sp+LOCALS64+$in2_y+24]
2766
2767         or      $a1,$a0,$a0
2768         or      $a3,$a2,$a2
2769         or      $acc1,$acc0,$acc0
2770         or      $acc3,$acc2,$acc2
2771         or      $a2,$a0,$a0
2772         or      $acc2,$acc0,$acc0
2773         or      $acc0,$a0,$a0
2774         movrnz  $a0,-1,$a0                      ! !in2infty
2775         stx     $a0,[%fp+STACK_BIAS-8]
2776
2777         ld      [$ap],$a0                       ! in1_x
2778         ld      [$ap+4],$t0
2779         ld      [$ap+8],$a1
2780         ld      [$ap+12],$t1
2781         ld      [$ap+16],$a2
2782         ld      [$ap+20],$t2
2783         ld      [$ap+24],$a3
2784         ld      [$ap+28],$t3
2785         sllx    $t0,32,$t0
2786         sllx    $t1,32,$t1
2787         ld      [$ap+32],$acc0                  ! in1_y
2788         or      $a0,$t0,$a0
2789         ld      [$ap+32+4],$t0
2790         sllx    $t2,32,$t2
2791         ld      [$ap+32+8],$acc1
2792         or      $a1,$t1,$a1
2793         ld      [$ap+32+12],$t1
2794         sllx    $t3,32,$t3
2795         ld      [$ap+32+16],$acc2
2796         or      $a2,$t2,$a2
2797         ld      [$ap+32+20],$t2
2798         or      $a3,$t3,$a3
2799         ld      [$ap+32+24],$acc3
2800         sllx    $t0,32,$t0
2801         ld      [$ap+32+28],$t3
2802         sllx    $t1,32,$t1
2803         stx     $a0,[%sp+LOCALS64+$in1_x]
2804         sllx    $t2,32,$t2
2805         stx     $a1,[%sp+LOCALS64+$in1_x+8]
2806         sllx    $t3,32,$t3
2807         stx     $a2,[%sp+LOCALS64+$in1_x+16]
2808         or      $acc0,$t0,$acc0
2809         stx     $a3,[%sp+LOCALS64+$in1_x+24]
2810         or      $acc1,$t1,$acc1
2811         stx     $acc0,[%sp+LOCALS64+$in1_y]
2812         or      $acc2,$t2,$acc2
2813         stx     $acc1,[%sp+LOCALS64+$in1_y+8]
2814         or      $acc3,$t3,$acc3
2815         stx     $acc2,[%sp+LOCALS64+$in1_y+16]
2816         stx     $acc3,[%sp+LOCALS64+$in1_y+24]
2817
2818         or      $a1,$a0,$a0
2819         or      $a3,$a2,$a2
2820         or      $acc1,$acc0,$acc0
2821         or      $acc3,$acc2,$acc2
2822         or      $a2,$a0,$a0
2823         or      $acc2,$acc0,$acc0
2824         or      $acc0,$a0,$a0
2825         movrnz  $a0,-1,$a0                      ! !in1infty
2826         stx     $a0,[%fp+STACK_BIAS-16]
2827
2828         ld      [$ap+64],$a0                    ! in1_z
2829         ld      [$ap+64+4],$t0
2830         ld      [$ap+64+8],$a1
2831         ld      [$ap+64+12],$t1
2832         ld      [$ap+64+16],$a2
2833         ld      [$ap+64+20],$t2
2834         ld      [$ap+64+24],$a3
2835         ld      [$ap+64+28],$t3
2836         sllx    $t0,32,$t0
2837         sllx    $t1,32,$t1
2838         or      $a0,$t0,$a0
2839         sllx    $t2,32,$t2
2840         or      $a1,$t1,$a1
2841         sllx    $t3,32,$t3
2842         stx     $a0,[%sp+LOCALS64+$in1_z]
2843         or      $a2,$t2,$a2
2844         stx     $a1,[%sp+LOCALS64+$in1_z+8]
2845         or      $a3,$t3,$a3
2846         stx     $a2,[%sp+LOCALS64+$in1_z+16]
2847         stx     $a3,[%sp+LOCALS64+$in1_z+24]
2848
2849         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
2850         add     %sp,LOCALS64+$Z1sqr,$rp
2851
2852         ldx     [%sp+LOCALS64+$in2_x],$bi
2853         mov     $acc0,$a0
2854         mov     $acc1,$a1
2855         mov     $acc2,$a2
2856         mov     $acc3,$a3
2857         add     %sp,LOCALS64+$in2_x,$bp
2858         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, Z1sqr, in2_x);
2859         add     %sp,LOCALS64+$U2,$rp
2860
2861         ldx     [%sp+LOCALS64+$Z1sqr],$bi       ! forward load
2862         ldx     [%sp+LOCALS64+$in1_z],$a0
2863         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2864         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2865         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2866
2867         add     %sp,LOCALS64+$in1_x,$bp
2868         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(H, U2, in1_x);
2869         add     %sp,LOCALS64+$H,$rp
2870
2871         add     %sp,LOCALS64+$Z1sqr,$bp
2872         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, Z1sqr, in1_z);
2873         add     %sp,LOCALS64+$S2,$rp
2874
2875         ldx     [%sp+LOCALS64+$H],$bi
2876         ldx     [%sp+LOCALS64+$in1_z],$a0
2877         ldx     [%sp+LOCALS64+$in1_z+8],$a1
2878         ldx     [%sp+LOCALS64+$in1_z+16],$a2
2879         ldx     [%sp+LOCALS64+$in1_z+24],$a3
2880         add     %sp,LOCALS64+$H,$bp
2881         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, H, in1_z);
2882         add     %sp,LOCALS64+$res_z,$rp
2883
2884         ldx     [%sp+LOCALS64+$S2],$bi
2885         ldx     [%sp+LOCALS64+$in2_y],$a0
2886         ldx     [%sp+LOCALS64+$in2_y+8],$a1
2887         ldx     [%sp+LOCALS64+$in2_y+16],$a2
2888         ldx     [%sp+LOCALS64+$in2_y+24],$a3
2889         add     %sp,LOCALS64+$S2,$bp
2890         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S2, in2_y);
2891         add     %sp,LOCALS64+$S2,$rp
2892
2893         ldx     [%sp+LOCALS64+$H],$a0           ! forward load
2894         ldx     [%sp+LOCALS64+$H+8],$a1
2895         ldx     [%sp+LOCALS64+$H+16],$a2
2896         ldx     [%sp+LOCALS64+$H+24],$a3
2897
2898         add     %sp,LOCALS64+$in1_y,$bp
2899         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(R, S2, in1_y);
2900         add     %sp,LOCALS64+$R,$rp
2901
2902         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Hsqr, H);
2903         add     %sp,LOCALS64+$Hsqr,$rp
2904
2905         ldx     [%sp+LOCALS64+$R],$a0
2906         ldx     [%sp+LOCALS64+$R+8],$a1
2907         ldx     [%sp+LOCALS64+$R+16],$a2
2908         ldx     [%sp+LOCALS64+$R+24],$a3
2909         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Rsqr, R);
2910         add     %sp,LOCALS64+$Rsqr,$rp
2911
2912         ldx     [%sp+LOCALS64+$H],$bi
2913         ldx     [%sp+LOCALS64+$Hsqr],$a0
2914         ldx     [%sp+LOCALS64+$Hsqr+8],$a1
2915         ldx     [%sp+LOCALS64+$Hsqr+16],$a2
2916         ldx     [%sp+LOCALS64+$Hsqr+24],$a3
2917         add     %sp,LOCALS64+$H,$bp
2918         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(Hcub, Hsqr, H);
2919         add     %sp,LOCALS64+$Hcub,$rp
2920
2921         ldx     [%sp+LOCALS64+$Hsqr],$bi
2922         ldx     [%sp+LOCALS64+$in1_x],$a0
2923         ldx     [%sp+LOCALS64+$in1_x+8],$a1
2924         ldx     [%sp+LOCALS64+$in1_x+16],$a2
2925         ldx     [%sp+LOCALS64+$in1_x+24],$a3
2926         add     %sp,LOCALS64+$Hsqr,$bp
2927         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, in1_x, Hsqr);
2928         add     %sp,LOCALS64+$U2,$rp
2929
2930         call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(Hsqr, U2);
2931         add     %sp,LOCALS64+$Hsqr,$rp
2932
2933         add     %sp,LOCALS64+$Rsqr,$bp
2934         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_x, Rsqr, Hsqr);
2935         add     %sp,LOCALS64+$res_x,$rp
2936
2937         add     %sp,LOCALS64+$Hcub,$bp
2938         call    __ecp_nistz256_sub_from_vis3    !  p256_sub(res_x, res_x, Hcub);
2939         add     %sp,LOCALS64+$res_x,$rp
2940
2941         ldx     [%sp+LOCALS64+$Hcub],$bi        ! forward load
2942         ldx     [%sp+LOCALS64+$in1_y],$a0
2943         ldx     [%sp+LOCALS64+$in1_y+8],$a1
2944         ldx     [%sp+LOCALS64+$in1_y+16],$a2
2945         ldx     [%sp+LOCALS64+$in1_y+24],$a3
2946
2947         add     %sp,LOCALS64+$U2,$bp
2948         call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_y, U2, res_x);
2949         add     %sp,LOCALS64+$res_y,$rp
2950
2951         add     %sp,LOCALS64+$Hcub,$bp
2952         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, in1_y, Hcub);
2953         add     %sp,LOCALS64+$S2,$rp
2954
2955         ldx     [%sp+LOCALS64+$R],$bi
2956         ldx     [%sp+LOCALS64+$res_y],$a0
2957         ldx     [%sp+LOCALS64+$res_y+8],$a1
2958         ldx     [%sp+LOCALS64+$res_y+16],$a2
2959         ldx     [%sp+LOCALS64+$res_y+24],$a3
2960         add     %sp,LOCALS64+$R,$bp
2961         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_y, res_y, R);
2962         add     %sp,LOCALS64+$res_y,$rp
2963
2964         add     %sp,LOCALS64+$S2,$bp
2965         call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, res_y, S2);
2966         add     %sp,LOCALS64+$res_y,$rp
2967
2968         ldx     [%fp+STACK_BIAS-16],$t1         ! !in1infty
2969         ldx     [%fp+STACK_BIAS-8],$t2          ! !in2infty
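	! pick up the address of .Lone_mont_vis3 PC-relatively: the call
	! stores its own address (label 1) in %o7, and the delay-slot add
	! applies the offset to .Lone_mont_vis3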
2970 1:      call    .+8
2971         add     %o7,.Lone_mont_vis3-1b,$bp
2972 ___
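# Select the output x and y coordinates: keep the computed result unless
# one of the inputs was the point at infinity, in which case the other
# input is returned instead.  $t1/$t2 hold !in1infty/!in2infty, and movrz
# moves its source operand when the tested register is zero.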
2973 for($i=0;$i<64;$i+=16) {                        # conditional moves
2974 $code.=<<___;
2975         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2976         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2977         ldx     [%sp+LOCALS64+$in2_x+$i],$acc2  ! in2
2978         ldx     [%sp+LOCALS64+$in2_x+$i+8],$acc3
2979         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2980         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2981         movrz   $t1,$acc2,$acc0
2982         movrz   $t1,$acc3,$acc1
2983         movrz   $t2,$acc4,$acc0
2984         movrz   $t2,$acc5,$acc1
2985         srlx    $acc0,32,$acc2
2986         srlx    $acc1,32,$acc3
2987         st      $acc0,[$rp_real+$i]
2988         st      $acc2,[$rp_real+$i+4]
2989         st      $acc1,[$rp_real+$i+8]
2990         st      $acc3,[$rp_real+$i+12]
2991 ___
2992 }
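# Same selection for the z coordinate, except that in2 is an affine point
# whose implicit z coordinate is 1 in the Montgomery domain, loaded from
# .Lone_mont_vis3 through $bp set up above.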
2993 for(;$i<96;$i+=16) {
2994 $code.=<<___;
2995         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2996         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2997         ldx     [$bp+$i-64],$acc2               ! "in2"
2998         ldx     [$bp+$i-64+8],$acc3
2999         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
3000         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
3001         movrz   $t1,$acc2,$acc0
3002         movrz   $t1,$acc3,$acc1
3003         movrz   $t2,$acc4,$acc0
3004         movrz   $t2,$acc5,$acc1
3005         srlx    $acc0,32,$acc2
3006         srlx    $acc1,32,$acc3
3007         st      $acc0,[$rp_real+$i]
3008         st      $acc2,[$rp_real+$i+4]
3009         st      $acc1,[$rp_real+$i+8]
3010         st      $acc3,[$rp_real+$i+12]
3011 ___
3012 }
3013 $code.=<<___;
3014         ret
3015         restore
3016 .size   ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
3017 .align  64
3018 .Lone_mont_vis3:
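	! 1 in the Montgomery domain, i.e. 2^256 mod P-256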
3019 .long   0x00000000,0x00000001, 0xffffffff,0x00000000
3020 .long   0xffffffff,0xffffffff, 0x00000000,0xfffffffe
3021 .align  64
3022 ___
3023 }                                                               }}}
3024 \f
3025 # The purpose of the subroutine below is to encode VIS instructions
3026 # explicitly, so that the module can be compiled without passing VIS
3027 # extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
3028 # This keeps open the option of producing a "universal" binary in which
3029 # the program detects at run-time whether the current CPU is VIS-capable.
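#
# As an illustrative sketch (not part of the original module), the
# post-processing pass at the bottom of this file would rewrite, for
# example,
#
#	umulxhi	%o1,%o2,%o3
#
# into the pre-encoded
#
#	.word	0x97b242ca !umulxhi	%o1,%o2,%o3
#
# i.e. 0x81b00000 | rd<<25 | rs1<<14 | opf<<5 | rs2, with rd=11 (%o3),
# rs1=9 (%o1), rs2=10 (%o2) and opf=0x016 for umulxhi, so that no
# VIS3-aware assembler is required.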
3030 sub unvis3 {
3031 my ($mnemonic,$rs1,$rs2,$rd)=@_;
3032 my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
3033 my ($ref,$opf);
3034 my %visopf = (  "addxc"         => 0x011,
3035                 "addxccc"       => 0x013,
3036                 "umulxhi"       => 0x016        );
3037
3038     $ref = "$mnemonic\t$rs1,$rs2,$rd";
3039
3040     if ($opf=$visopf{$mnemonic}) {
3041         foreach ($rs1,$rs2,$rd) {
3042             return $ref if (!/%([goli])([0-9])/);
3043             $_=$bias{$1}+$2;
3044         }
3045
3046         return  sprintf ".word\t0x%08x !%s",
3047                         0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
3048                         $ref;
3049     } else {
3050         return $ref;
3051     }
3052 }
3053
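# Final pass over the generated code: evaluate any `...` expressions and
# replace the VIS3 mnemonics umulxhi, addxc and addxccc with pre-encoded
# .word directives via unvis3().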
3054 foreach (split("\n",$code)) {
3055         s/\`([^\`]*)\`/eval $1/ge;
3056
3057         s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
3058                 &unvis3($1,$2,$3,$4)
3059          /ge;
3060
3061         print $_,"\n";
3062 }
3063
3064 close STDOUT;