#! /usr/bin/env perl
# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ECP_NISTZ256 module for SPARCv9.
#
# February 2015.
#
# The original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation the
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#
#			with/without -DECP_NISTZ256_ASM
# UltraSPARC III	+12-18%
# SPARC T4		+99-550% (+66-150% on 32-bit Solaris)
#
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark. Lower coefficients are for ECDSA sign, a server-side
# operation. Keep in mind that +200% means 3x improvement.

$output = pop;
open STDOUT,">$output" or die "can't open $output: $!";

$code.=<<___;
#include "sparc_arch.h"

#define LOCALS	(STACK_BIAS+STACK_FRAME)
#ifdef	__arch64__
.register	%g2,#scratch
.register	%g3,#scratch
# define STACK64_FRAME	STACK_FRAME
# define LOCALS64	LOCALS
#else
# define STACK64_FRAME	(2047+192)
# define LOCALS64	STACK64_FRAME
#endif

.section	".text",#alloc,#execinstr
___
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
#
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;

use integer;

foreach(<TABLE>) {
	s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
}
close TABLE;

# See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
# The comparison is against 64*16*37-1 because $#arr returns the last
# valid index of @arr, not the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);

$code.=<<___;
.globl	ecp_nistz256_precomputed
.align	4096
ecp_nistz256_precomputed:
___
########################################################################
# This conversion smashes P256_POINT_AFFINE by individual bytes with
# a 64-byte interval, similar to
#	1111222233334444
#	1234123412341234
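# Concretely, byte $i%4 of 32-bit word $i/4 of table entry $j ends up at
# row $i, column $j of a 64x64-byte block:
#
#	$byte = ($tbl[$j*16 + $i/4] >> (($i%4)*8)) & 0xff;
#
# which is exactly what the loop below computes; ecp_nistz256_gather_w7
# reverses it by collecting one byte per 64-byte row (see
# .Loop_gather_w7 further down).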
for(1..37) {
	@tbl = splice(@arr,0,64*16);
	for($i=0;$i<64;$i++) {
		undef @line;
		for($j=0;$j<64;$j++) {
			push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
		}
		$code.=".byte\t";
		$code.=join(',',map { sprintf "0x%02x",$_} @line);
		$code.="\n";
	}
}

{{{
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my @acc=map("%l$_",(0..7));
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");

$code.=<<___;
.type	ecp_nistz256_precomputed,#object
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
.align	64
.LRR:	! 2^512 mod P precomputed for the NIST P256 prime
.long	0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long	0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.Lone:
.long	1,0,0,0,0,0,0,0
.asciz	"ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"

! void	ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
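!
! A worked identity for the call below: mul_mont(r, a, .LRR) computes
! a*(2^512 mod P)*2^-256 = a*2^256 mod P, i.e. the Montgomery
! representation of a.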
.globl	ecp_nistz256_to_mont
.align	64
ecp_nistz256_to_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
1:	call	.+8
	add	%o7,.LRR-1b,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_to_mont,#function
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont

! void	ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
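!
! Here mul_mont(r, a, .Lone) computes a*1*2^-256 = a*2^-256 mod P,
! undoing the conversion performed by ecp_nistz256_to_mont above.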
.globl	ecp_nistz256_from_mont
.align	32
ecp_nistz256_from_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
1:	call	.+8
	add	%o7,.Lone-1b,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_from_mont,#function
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont

! void	ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					      const BN_ULONG %i2[8]);
.globl	ecp_nistz256_mul_mont
.align	32
ecp_nistz256_mul_mont:
	save	%sp,-STACK_FRAME,%sp
	nop
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_mul_mont,#function
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont

! void	ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_sqr_mont
.align	32
ecp_nistz256_sqr_mont:
	save	%sp,-STACK_FRAME,%sp
	mov	$ap,$bp
	call	__ecp_nistz256_mul_mont
	nop
	ret
	restore
.type	ecp_nistz256_sqr_mont,#function
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
___

########################################################################
# A special thing to keep in mind is that $t0-$t7 hold 64-bit values,
# while all the others are meant to hold 32-bit ones. "Meant to" means
# that additions to @acc[0-7] do "contaminate" the upper bits, but they
# are cleared before they can affect the outcome (follow 'and' with
# $mask). Also keep in mind that addition with carry is addition with
# a 32-bit carry, even though the CPU is 64-bit. [Addition with 64-bit
# carry was introduced in T3; see below for VIS3 code paths.]
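#
# For example, if @acc[1] ends up holding 0x1_00000002 after an addcc,
# the stray bit 32 is harmless: a following 'and @acc[1],$mask,@acc[1]'
# truncates the register back to 0x00000002 before the value is used
# as a 32-bit word again.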

$code.=<<___;
.align	32
__ecp_nistz256_mul_mont:
	ld	[$bp+0],$bi		! b[0]
	mov	-1,$mask
	ld	[$ap+0],$a0
	srl	$mask,0,$mask		! 0xffffffff
	ld	[$ap+4],$t1
	ld	[$ap+8],$t2
	ld	[$ap+12],$t3
	ld	[$ap+16],$t4
	ld	[$ap+20],$t5
	ld	[$ap+24],$t6
	ld	[$ap+28],$t7
	mulx	$a0,$bi,$t0		! a[0-7]*b[0], 64-bit results
	mulx	$t1,$bi,$t1
	mulx	$t2,$bi,$t2
	mulx	$t3,$bi,$t3
	mulx	$t4,$bi,$t4
	mulx	$t5,$bi,$t5
	mulx	$t6,$bi,$t6
	mulx	$t7,$bi,$t7
	srlx	$t0,32,@acc[1]		! extract high parts
	srlx	$t1,32,@acc[2]
	srlx	$t2,32,@acc[3]
	srlx	$t3,32,@acc[4]
	srlx	$t4,32,@acc[5]
	srlx	$t5,32,@acc[6]
	srlx	$t6,32,@acc[7]
	srlx	$t7,32,@acc[0]		! "@acc[8]"
	mov	0,$carry
___
for($i=1;$i<8;$i++) {
$code.=<<___;
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	ld	[$bp+4*$i],$bi		! b[$i]
	ld	[$ap+4],$t1		! re-load a[1-7]
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	ld	[$ap+8],$t2
	ld	[$ap+12],$t3
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	ld	[$ap+16],$t4
	ld	[$ap+20],$t5
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	ld	[$ap+24],$t6
	ld	[$ap+28],$t7
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"
	addc	%g0,%g0,$carry
___
	# A reduction iteration is normally performed by accumulating
	# the result of multiplication of the modulus by a "magic" digit
	# [and omitting the least significant word, which is guaranteed
	# to be 0], but thanks to the special form of the modulus and
	# the "magic" digit being equal to the least significant word,
	# it can be performed with additions and subtractions alone.
	# Indeed:
	#
	#        ffff.0001.0000.0000.0000.ffff.ffff.ffff
	# *                                         abcd
	# + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can
	# rewrite the above as:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	# + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
	# -      abcd.0000.0000.0000.0000.0000.0000.abcd
	#
	# or marking redundant operations:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
	# + abcd.0000.abcd.0000.0000.abcd.----.----.----
	# -      abcd.----.----.----.----.----.----.----

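	# A quick standalone check of the identity above (a minimal
	# sketch, not used by this module; assumes core Math::BigInt):
	#
	#   use Math::BigInt;
	#   my $p = Math::BigInt->from_hex("ffffffff00000001".("0" x 24).
	#                                  "ffffffff" x 3);
	#   my $d = Math::BigInt->from_hex("abcd");	# the "magic" digit
	#   my $add = ($d << 256) + ($d << 192) + ($d << 96);
	#   my $sub = ($d << 224) + $d;
	#   die "identity broken" unless $p * $d == $add - $sub;
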
$code.=<<___;
	! multiplication-less reduction
	addcc	@acc[3],$t0,@acc[3]	! r[3]+=r[0]
	addccc	@acc[4],%g0,@acc[4]	! r[4]+=0
	 and	@acc[1],$mask,@acc[1]
	 and	@acc[2],$mask,@acc[2]
	addccc	@acc[5],%g0,@acc[5]	! r[5]+=0
	addccc	@acc[6],$t0,@acc[6]	! r[6]+=r[0]
	 and	@acc[3],$mask,@acc[3]
	 and	@acc[4],$mask,@acc[4]
	addccc	@acc[7],%g0,@acc[7]	! r[7]+=0
	addccc	@acc[0],$t0,@acc[0]	! r[8]+=r[0]	"@acc[8]"
	 and	@acc[5],$mask,@acc[5]
	 and	@acc[6],$mask,@acc[6]
	addc	$carry,%g0,$carry	! top-most carry
	subcc	@acc[7],$t0,@acc[7]	! r[7]-=r[0]
	subccc	@acc[0],%g0,@acc[0]	! r[8]-=0	"@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry
	 and	@acc[7],$mask,@acc[7]
	 and	@acc[0],$mask,@acc[0]	! "@acc[8]"
___
	push(@acc,shift(@acc));		# rotate registers to "omit" acc[0]
$code.=<<___;
	mulx	$a0,$bi,$t0		! a[0-7]*b[$i], 64-bit results
	mulx	$t1,$bi,$t1
	mulx	$t2,$bi,$t2
	mulx	$t3,$bi,$t3
	mulx	$t4,$bi,$t4
	mulx	$t5,$bi,$t5
	mulx	$t6,$bi,$t6
	mulx	$t7,$bi,$t7
	add	@acc[0],$t0,$t0		! accumulate low parts, can't overflow
	add	@acc[1],$t1,$t1
	srlx	$t0,32,@acc[1]		! extract high parts
	add	@acc[2],$t2,$t2
	srlx	$t1,32,@acc[2]
	add	@acc[3],$t3,$t3
	srlx	$t2,32,@acc[3]
	add	@acc[4],$t4,$t4
	srlx	$t3,32,@acc[4]
	add	@acc[5],$t5,$t5
	srlx	$t4,32,@acc[5]
	add	@acc[6],$t6,$t6
	srlx	$t5,32,@acc[6]
	add	@acc[7],$t7,$t7
	srlx	$t6,32,@acc[7]
	srlx	$t7,32,@acc[0]		! "@acc[8]"
___
}
$code.=<<___;
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"
	addc	%g0,%g0,$carry

	addcc	@acc[3],$t0,@acc[3]	! multiplication-less reduction
	addccc	@acc[4],%g0,@acc[4]
	addccc	@acc[5],%g0,@acc[5]
	addccc	@acc[6],$t0,@acc[6]
	addccc	@acc[7],%g0,@acc[7]
	addccc	@acc[0],$t0,@acc[0]	! "@acc[8]"
	addc	$carry,%g0,$carry
	subcc	@acc[7],$t0,@acc[7]
	subccc	@acc[0],%g0,@acc[0]	! "@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry
___
	push(@acc,shift(@acc));		# rotate registers to omit acc[0]
$code.=<<___;
	! The final step is "if result >= mod, subtract mod", but we do
	! it "the other way around", namely subtract the modulus from
	! the result and, if that borrowed, add the modulus back.

	subcc	@acc[0],-1,@acc[0]	! subtract modulus
	subccc	@acc[1],-1,@acc[1]
	subccc	@acc[2],-1,@acc[2]
	subccc	@acc[3],0,@acc[3]
	subccc	@acc[4],0,@acc[4]
	subccc	@acc[5],0,@acc[5]
	subccc	@acc[6],1,@acc[6]
	subccc	@acc[7],-1,@acc[7]
	subc	$carry,0,$carry		! broadcast borrow bit

	! Note that because the modulus has a special form, i.e.
	! consists of 0xffffffff, 1 and 0s, it can be conditionally
	! synthesized from the broadcast borrow and the borrow bit
	! itself. To minimize the dependency chain we first broadcast
	! and then extract the bit by negating (follow $bi).

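	! With the borrow broadcast in $carry (0 or -1) and $bi = -$carry
	! (0 or 1), the per-word addends below spell out exactly the
	! modulus or zero, least significant word first:
	!
	!   modulus: ffffffff ffffffff ffffffff 00000000 00000000 00000000 00000001 ffffffff
	!   addend:  $carry   $carry   $carry   0        0        0        $bi      $carry
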
	addcc	@acc[0],$carry,@acc[0]	! add modulus or zero
	addccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	addccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	addccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	addccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	addccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	addccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	addc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_mul_mont,#function
.size	__ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont

! void	ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_add
.align	32
ecp_nistz256_add:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_add
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_add,#function
.size	ecp_nistz256_add,.-ecp_nistz256_add

.align	32
__ecp_nistz256_add:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	addcc	@acc[0],$t0,@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	addccc	@acc[1],$t1,@acc[1]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	subc	%g0,%g0,$carry		! broadcast carry bit

.Lreduce_by_sub:

	! if a+b carried, subtract the modulus.
	!
	! Note that because the modulus has a special form, i.e.
	! consists of 0xffffffff, 1 and 0s, it can be conditionally
	! synthesized from the broadcast carry and the carry bit
	! itself. To minimize the dependency chain we first broadcast
	! and then extract the bit by negating (follow $bi).

	subcc	@acc[0],$carry,@acc[0]	! subtract synthesized modulus
	subccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	subccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	subccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	subccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	subccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	subccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	subc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_add,#function
.size	__ecp_nistz256_add,.-__ecp_nistz256_add

! void	ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_2
.align	32
ecp_nistz256_mul_by_2:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_mul_by_2
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_mul_by_2,#function
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2

.align	32
__ecp_nistz256_mul_by_2:
	addcc	@acc[0],@acc[0],@acc[0]	! a+a=2*a
	addccc	@acc[1],@acc[1],@acc[1]
	addccc	@acc[2],@acc[2],@acc[2]
	addccc	@acc[3],@acc[3],@acc[3]
	addccc	@acc[4],@acc[4],@acc[4]
	addccc	@acc[5],@acc[5],@acc[5]
	addccc	@acc[6],@acc[6],@acc[6]
	addccc	@acc[7],@acc[7],@acc[7]
	b	.Lreduce_by_sub
	subc	%g0,%g0,$carry		! broadcast carry bit
.type	__ecp_nistz256_mul_by_2,#function
.size	__ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2

! void	ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_3
.align	32
ecp_nistz256_mul_by_3:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_mul_by_3
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_mul_by_3,#function
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3

.align	32
__ecp_nistz256_mul_by_3:
	addcc	@acc[0],@acc[0],$t0	! a+a=2*a
	addccc	@acc[1],@acc[1],$t1
	addccc	@acc[2],@acc[2],$t2
	addccc	@acc[3],@acc[3],$t3
	addccc	@acc[4],@acc[4],$t4
	addccc	@acc[5],@acc[5],$t5
	addccc	@acc[6],@acc[6],$t6
	addccc	@acc[7],@acc[7],$t7
	subc	%g0,%g0,$carry		! broadcast carry bit

	subcc	$t0,$carry,$t0		! .Lreduce_by_sub but without stores
	neg	$carry,$bi
	subccc	$t1,$carry,$t1
	subccc	$t2,$carry,$t2
	subccc	$t3,0,$t3
	subccc	$t4,0,$t4
	subccc	$t5,0,$t5
	subccc	$t6,$bi,$t6
	subc	$t7,$carry,$t7

	addcc	$t0,@acc[0],@acc[0]	! 2*a+a=3*a
	addccc	$t1,@acc[1],@acc[1]
	addccc	$t2,@acc[2],@acc[2]
	addccc	$t3,@acc[3],@acc[3]
	addccc	$t4,@acc[4],@acc[4]
	addccc	$t5,@acc[5],@acc[5]
	addccc	$t6,@acc[6],@acc[6]
	addccc	$t7,@acc[7],@acc[7]
	b	.Lreduce_by_sub
	subc	%g0,%g0,$carry		! broadcast carry bit
.type	__ecp_nistz256_mul_by_3,#function
.size	__ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3

! void	ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_sub
.align	32
ecp_nistz256_sub:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_sub_from
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_sub,#function
.size	ecp_nistz256_sub,.-ecp_nistz256_sub

! void	ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_neg
.align	32
ecp_nistz256_neg:
	save	%sp,-STACK_FRAME,%sp
	mov	$ap,$bp
	mov	0,@acc[0]
	mov	0,@acc[1]
	mov	0,@acc[2]
	mov	0,@acc[3]
	mov	0,@acc[4]
	mov	0,@acc[5]
	mov	0,@acc[6]
	call	__ecp_nistz256_sub_from
	mov	0,@acc[7]
	ret
	restore
.type	ecp_nistz256_neg,#function
.size	ecp_nistz256_neg,.-ecp_nistz256_neg

.align	32
__ecp_nistz256_sub_from:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	subcc	@acc[0],$t0,@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	subccc	@acc[1],$t1,@acc[1]
	subccc	@acc[2],$t2,@acc[2]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	subccc	@acc[3],$t3,@acc[3]
	subccc	@acc[4],$t4,@acc[4]
	subccc	@acc[5],$t5,@acc[5]
	subccc	@acc[6],$t6,@acc[6]
	subccc	@acc[7],$t7,@acc[7]
	subc	%g0,%g0,$carry		! broadcast borrow bit

.Lreduce_by_add:

	! if a-b borrowed, add the modulus.
	!
	! Note that because the modulus has a special form, i.e.
	! consists of 0xffffffff, 1 and 0s, it can be conditionally
	! synthesized from the broadcast borrow and the borrow bit
	! itself. To minimize the dependency chain we first broadcast
	! and then extract the bit by negating (follow $bi).

	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
	addccc	@acc[1],$carry,@acc[1]
	neg	$carry,$bi
	st	@acc[0],[$rp]
	addccc	@acc[2],$carry,@acc[2]
	st	@acc[1],[$rp+4]
	addccc	@acc[3],0,@acc[3]
	st	@acc[2],[$rp+8]
	addccc	@acc[4],0,@acc[4]
	st	@acc[3],[$rp+12]
	addccc	@acc[5],0,@acc[5]
	st	@acc[4],[$rp+16]
	addccc	@acc[6],$bi,@acc[6]
	st	@acc[5],[$rp+20]
	addc	@acc[7],$carry,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_sub_from,#function
.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from

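! "morf" is "from" spelled backwards: this variant computes b - a
! instead of a - b, sparing the caller an operand swap.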
.align	32
__ecp_nistz256_sub_morf:
	ld	[$bp+0],$t0		! b[0]
	ld	[$bp+4],$t1
	ld	[$bp+8],$t2
	ld	[$bp+12],$t3
	subcc	$t0,@acc[0],@acc[0]
	ld	[$bp+16],$t4
	ld	[$bp+20],$t5
	subccc	$t1,@acc[1],@acc[1]
	subccc	$t2,@acc[2],@acc[2]
	ld	[$bp+24],$t6
	ld	[$bp+28],$t7
	subccc	$t3,@acc[3],@acc[3]
	subccc	$t4,@acc[4],@acc[4]
	subccc	$t5,@acc[5],@acc[5]
	subccc	$t6,@acc[6],@acc[6]
	subccc	$t7,@acc[7],@acc[7]
	b	.Lreduce_by_add
	subc	%g0,%g0,$carry		! broadcast borrow bit
.type	__ecp_nistz256_sub_morf,#function
.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf

! void	ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_div_by_2
.align	32
ecp_nistz256_div_by_2:
	save	%sp,-STACK_FRAME,%sp
	ld	[$ap],@acc[0]
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	call	__ecp_nistz256_div_by_2
	ld	[$ap+28],@acc[7]
	ret
	restore
.type	ecp_nistz256_div_by_2,#function
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2

.align	32
__ecp_nistz256_div_by_2:
	! ret = (a is odd ? a+mod : a) >> 1
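	!
	! Since the modulus is odd, a+mod is even whenever a is odd, so
	! the shift below never discards a set bit; the carry out of the
	! addition supplies bit 255 of the result (follow $carry).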

	and	@acc[0],1,$bi
	neg	$bi,$carry
	addcc	@acc[0],$carry,@acc[0]
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addccc	@acc[7],$carry,@acc[7]
	addc	%g0,%g0,$carry

	! ret >>= 1

	srl	@acc[0],1,@acc[0]
	sll	@acc[1],31,$t0
	srl	@acc[1],1,@acc[1]
	or	@acc[0],$t0,@acc[0]
	sll	@acc[2],31,$t1
	srl	@acc[2],1,@acc[2]
	or	@acc[1],$t1,@acc[1]
	sll	@acc[3],31,$t2
	st	@acc[0],[$rp]
	srl	@acc[3],1,@acc[3]
	or	@acc[2],$t2,@acc[2]
	sll	@acc[4],31,$t3
	st	@acc[1],[$rp+4]
	srl	@acc[4],1,@acc[4]
	or	@acc[3],$t3,@acc[3]
	sll	@acc[5],31,$t4
	st	@acc[2],[$rp+8]
	srl	@acc[5],1,@acc[5]
	or	@acc[4],$t4,@acc[4]
	sll	@acc[6],31,$t5
	st	@acc[3],[$rp+12]
	srl	@acc[6],1,@acc[6]
	or	@acc[5],$t5,@acc[5]
	sll	@acc[7],31,$t6
	st	@acc[4],[$rp+16]
	srl	@acc[7],1,@acc[7]
	or	@acc[6],$t6,@acc[6]
	sll	$carry,31,$t7
	st	@acc[5],[$rp+20]
	or	@acc[7],$t7,@acc[7]
	st	@acc[6],[$rp+24]
	retl
	st	@acc[7],[$rp+28]
.type	__ecp_nistz256_div_by_2,#function
.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
___

########################################################################
# The following subroutines are "literal" implementations of those
# found in ecp_nistz256.c.
#
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
#
{
my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
# above map() describes the stack layout with 4 temporary
# 256-bit vectors on top.
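# ($S, $M, $Zsqr and $tmp0 evaluate to byte offsets 0, 32, 64 and 96
# respectively, so e.g. 'add %sp,LOCALS+$M,$rp' below points $rp at the
# second temporary vector.)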

$code.=<<___;
#ifdef __PIC__
SPARC_PIC_THUNK(%g1)
#endif

.globl	ecp_nistz256_point_double
.align	32
ecp_nistz256_point_double:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_double_vis3
	nop

	save	%sp,-STACK_FRAME-32*4,%sp

	mov	$rp,$rp_real
	mov	$ap,$ap_real

.Lpoint_double_shortcut:
	ld	[$ap+32],@acc[0]
	ld	[$ap+32+4],@acc[1]
	ld	[$ap+32+8],@acc[2]
	ld	[$ap+32+12],@acc[3]
	ld	[$ap+32+16],@acc[4]
	ld	[$ap+32+20],@acc[5]
	ld	[$ap+32+24],@acc[6]
	ld	[$ap+32+28],@acc[7]
	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(S, in_y);
	add	%sp,LOCALS+$S,$rp

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Zsqr, in_z);
	add	%sp,LOCALS+$Zsqr,$rp

	add	$ap_real,0,$bp
	call	__ecp_nistz256_add	! p256_add(M, Zsqr, in_x);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(S, S);
	add	%sp,LOCALS+$S,$rp

	ld	[$ap_real],@acc[0]
	add	%sp,LOCALS+$Zsqr,$bp
	ld	[$ap_real+4],@acc[1]
	ld	[$ap_real+8],@acc[2]
	ld	[$ap_real+12],@acc[3]
	ld	[$ap_real+16],@acc[4]
	ld	[$ap_real+20],@acc[5]
	ld	[$ap_real+24],@acc[6]
	ld	[$ap_real+28],@acc[7]
	call	__ecp_nistz256_sub_from	! p256_sub(Zsqr, in_x, Zsqr);
	add	%sp,LOCALS+$Zsqr,$rp

	add	$ap_real,32,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(tmp0, in_z, in_y);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(res_z, tmp0);
	add	$rp_real,64,$rp

	add	%sp,LOCALS+$Zsqr,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(M, M, Zsqr);
	add	%sp,LOCALS+$M,$rp

	call	__ecp_nistz256_mul_by_3	! p256_mul_by_3(M, M);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_div_by_2	! p256_div_by_2(res_y, tmp0);
	add	$rp_real,32,$rp

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, in_x);
	add	%sp,LOCALS+$S,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(res_x, M);
	add	$rp_real,0,$rp

	add	%sp,LOCALS+$tmp0,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, tmp0);
	add	$rp_real,0,$rp

	add	%sp,LOCALS+$S,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(S, S, res_x);
	add	%sp,LOCALS+$S,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, M);
	add	%sp,LOCALS+$S,$rp

	add	$rp_real,32,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, S, res_y);
	add	$rp_real,32,$rp

	ret
	restore
.type	ecp_nistz256_point_double,#function
.size	ecp_nistz256_point_double,.-ecp_nistz256_point_double
___
}

########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#                             const P256_POINT *in2);
{
my ($res_x,$res_y,$res_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);

# above map() describes the stack layout with 12 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, the result of the check for zero and
# the return pointer.

my $bp_real=$rp_real;

$code.=<<___;
.globl	ecp_nistz256_point_add
.align	32
ecp_nistz256_point_add:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_vis3
	nop

	save	%sp,-STACK_FRAME-32*12-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp
	mov	$ap,$ap_real
	mov	$bp,$bp_real

	ld	[$bp],@acc[0]		! in2_x
	ld	[$bp+4],@acc[1]
	ld	[$bp+8],@acc[2]
	ld	[$bp+12],@acc[3]
	ld	[$bp+16],@acc[4]
	ld	[$bp+20],@acc[5]
	ld	[$bp+24],@acc[6]
	ld	[$bp+28],@acc[7]
	ld	[$bp+32],$t0		! in2_y
	ld	[$bp+32+4],$t1
	ld	[$bp+32+8],$t2
	ld	[$bp+32+12],$t3
	ld	[$bp+32+16],$t4
	ld	[$bp+32+20],$t5
	ld	[$bp+32+24],$t6
	ld	[$bp+32+28],$t7
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0
	or	@acc[0],$t0,$t0		! !in2infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-12]

	ld	[$ap],@acc[0]		! in1_x
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	ld	[$ap+28],@acc[7]
	ld	[$ap+32],$t0		! in1_y
	ld	[$ap+32+4],$t1
	ld	[$ap+32+8],$t2
	ld	[$ap+32+12],$t3
	ld	[$ap+32+16],$t4
	ld	[$ap+32+20],$t5
	ld	[$ap+32+24],$t6
	ld	[$ap+32+28],$t7
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0
	or	@acc[0],$t0,$t0		! !in1infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-16]

	add	$bp_real,64,$bp
	add	$bp_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z2sqr, in2_z);
	add	%sp,LOCALS+$Z2sqr,$rp

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	$bp_real,64,$bp
	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, Z2sqr, in2_z);
	add	%sp,LOCALS+$S1,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,32,$bp
	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, S1, in1_y);
	add	%sp,LOCALS+$S1,$rp

	add	$bp_real,32,$bp
	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$S1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, S1);
	add	%sp,LOCALS+$R,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	st	@acc[0],[%fp+STACK_BIAS-20]

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U1, in1_x, Z2sqr);
	add	%sp,LOCALS+$U1,$rp

	add	$bp_real,0,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in2_x, Z1sqr);
	add	%sp,LOCALS+$U2,$rp

	add	%sp,LOCALS+$U1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, U1);
	add	%sp,LOCALS+$H,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	orcc	@acc[4],@acc[0],@acc[0]

	bne,pt	%icc,.Ladd_proceed	! is_equal(U1,U2)?
	nop

	ld	[%fp+STACK_BIAS-12],$t0
	ld	[%fp+STACK_BIAS-16],$t1
	ld	[%fp+STACK_BIAS-20],$t2
	andcc	$t0,$t1,%g0
	be,pt	%icc,.Ladd_proceed	! (in1infty || in2infty)?
	nop
	andcc	$t2,$t2,%g0
	be,pt	%icc,.Ladd_double	! is_equal(S1,S2)?
	nop

	ldx	[%fp+STACK_BIAS-8],$rp
	st	%g0,[$rp]
	st	%g0,[$rp+4]
	st	%g0,[$rp+8]
	st	%g0,[$rp+12]
	st	%g0,[$rp+16]
	st	%g0,[$rp+20]
	st	%g0,[$rp+24]
	st	%g0,[$rp+28]
	st	%g0,[$rp+32]
	st	%g0,[$rp+32+4]
	st	%g0,[$rp+32+8]
	st	%g0,[$rp+32+12]
	st	%g0,[$rp+32+16]
	st	%g0,[$rp+32+20]
	st	%g0,[$rp+32+24]
	st	%g0,[$rp+32+28]
	st	%g0,[$rp+64]
	st	%g0,[$rp+64+4]
	st	%g0,[$rp+64+8]
	st	%g0,[$rp+64+12]
	st	%g0,[$rp+64+16]
	st	%g0,[$rp+64+20]
	st	%g0,[$rp+64+24]
	st	%g0,[$rp+64+28]
	b	.Ladd_done
	nop

.align	16
.Ladd_double:
	ldx	[%fp+STACK_BIAS-8],$rp_real
	mov	$ap_real,$ap
	b	.Lpoint_double_shortcut
	add	%sp,32*(12-4)+32,%sp	! difference in frame sizes
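	! (point_add's frame reserves 12 temporary vectors plus 32 bytes
	! of scratch, point_double's only 4 vectors; the add above pops
	! the difference so the shortcut runs with point_double's layout.)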

.align	16
.Ladd_proceed:
	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	$bp_real,64,$bp
	add	%sp,LOCALS+$res_z,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, res_z, in2_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	%sp,LOCALS+$U1,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, U1, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$Hcub,$bp
	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S1, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp
___
for($i=0;$i<96;$i+=8) {			# conditional moves
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
$code.=<<___;
.Ladd_done:
	ret
	restore
.type	ecp_nistz256_point_add,#function
.size	ecp_nistz256_point_add,.-ecp_nistz256_point_add
___
}

########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#                                    const P256_POINT_AFFINE *in2);
{
my ($res_x,$res_y,$res_z,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
my $Z1sqr = $S2;
# above map() describes the stack layout with 10 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, the result of the check for zero and
# the return pointer.

my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
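# @ONE_mont is 1 in Montgomery form, i.e. 2^256 mod P, expressed as
# signed 32-bit words (least significant first). It substitutes for
# in2_z in the conditional moves below, since an affine in2 carries an
# implicit Z = 1.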
my $bp_real=$rp_real;

$code.=<<___;
.globl	ecp_nistz256_point_add_affine
.align	32
ecp_nistz256_point_add_affine:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_affine_vis3
	nop

	save	%sp,-STACK_FRAME-32*10-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp
	mov	$ap,$ap_real
	mov	$bp,$bp_real

	ld	[$ap],@acc[0]		! in1_x
	ld	[$ap+4],@acc[1]
	ld	[$ap+8],@acc[2]
	ld	[$ap+12],@acc[3]
	ld	[$ap+16],@acc[4]
	ld	[$ap+20],@acc[5]
	ld	[$ap+24],@acc[6]
	ld	[$ap+28],@acc[7]
	ld	[$ap+32],$t0		! in1_y
	ld	[$ap+32+4],$t1
	ld	[$ap+32+8],$t2
	ld	[$ap+32+12],$t3
	ld	[$ap+32+16],$t4
	ld	[$ap+32+20],$t5
	ld	[$ap+32+24],$t6
	ld	[$ap+32+28],$t7
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0
	or	@acc[0],$t0,$t0		! !in1infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-16]

	ld	[$bp],@acc[0]		! in2_x
	ld	[$bp+4],@acc[1]
	ld	[$bp+8],@acc[2]
	ld	[$bp+12],@acc[3]
	ld	[$bp+16],@acc[4]
	ld	[$bp+20],@acc[5]
	ld	[$bp+24],@acc[6]
	ld	[$bp+28],@acc[7]
	ld	[$bp+32],$t0		! in2_y
	ld	[$bp+32+4],$t1
	ld	[$bp+32+8],$t2
	ld	[$bp+32+12],$t3
	ld	[$bp+32+16],$t4
	ld	[$bp+32+20],$t5
	ld	[$bp+32+24],$t6
	ld	[$bp+32+28],$t7
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	$t1,$t0,$t0
	or	$t3,$t2,$t2
	or	$t5,$t4,$t4
	or	$t7,$t6,$t6
	or	$t2,$t0,$t0
	or	$t6,$t4,$t4
	or	$t4,$t0,$t0
	or	@acc[0],$t0,$t0		! !in2infty
	movrnz	$t0,-1,$t0
	st	$t0,[%fp+STACK_BIAS-12]

	add	$ap_real,64,$bp
	add	$ap_real,64,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	$bp_real,0,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS+$U2,$rp

	add	$ap_real,0,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS+$H,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,64,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	$bp_real,32,$bp
	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	add	$ap_real,32,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS+$R,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	$ap_real,0,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	$ap_real,32,$bp
	add	%sp,LOCALS+$Hcub,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp
___
for($i=0;$i<64;$i+=8) {			# conditional moves
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
for(;$i<96;$i+=8) {
my $j=($i-64)/4;
$code.=<<___;
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@ONE_mont[$j],@acc[0]
	movrz	$t1,@ONE_mont[$j+1],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[0],[$rp+$i]
	st	@acc[1],[$rp+$i+4]
___
}
$code.=<<___;
	ret
	restore
.type	ecp_nistz256_point_add_affine,#function
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
___
}								}}}
{{{
my ($out,$inp,$index)=map("%i$_",(0..2));
my $mask="%o0";

$code.=<<___;
! void	ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
!					  int %i2);
.globl	ecp_nistz256_scatter_w5
.align	32
ecp_nistz256_scatter_w5:
	save	%sp,-STACK_FRAME,%sp

	sll	$index,2,$index
	add	$out,$index,$out
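	! ($index is evidently expected in 1..16, matching gather_w5
	! below; combined with the -4 offsets, entry $index lands in
	! column $index-1 of each 64-byte row.)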

	ld	[$inp],%l0		! X
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	add	$inp,32,$inp
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]
	add	$out,64*8,$out

	ld	[$inp],%l0		! Y
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	add	$inp,32,$inp
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]
	add	$out,64*8,$out

	ld	[$inp],%l0		! Z
	ld	[$inp+4],%l1
	ld	[$inp+8],%l2
	ld	[$inp+12],%l3
	ld	[$inp+16],%l4
	ld	[$inp+20],%l5
	ld	[$inp+24],%l6
	ld	[$inp+28],%l7
	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]

	ret
	restore
.type	ecp_nistz256_scatter_w5,#function
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5

! void	ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
!					       int %i2);
.globl	ecp_nistz256_gather_w5
.align	32
ecp_nistz256_gather_w5:
	save	%sp,-STACK_FRAME,%sp

	neg	$index,$mask
	srax	$mask,63,$mask
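	! $mask = 0 if $index is zero, -1 otherwise (branchless, keeping
	! the gather constant-time). $index is then biased down by 1, so
	! that entry 1 reads column 0, while a zero $index yields an
	! all-zero (infinity) result through the 'and' masking below.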

	add	$index,$mask,$index
	sll	$index,2,$index
	add	$inp,$index,$inp

	ld	[$inp+64*0],%l0
	ld	[$inp+64*1],%l1
	ld	[$inp+64*2],%l2
	ld	[$inp+64*3],%l3
	ld	[$inp+64*4],%l4
	ld	[$inp+64*5],%l5
	ld	[$inp+64*6],%l6
	ld	[$inp+64*7],%l7
	add	$inp,64*8,$inp
	and	%l0,$mask,%l0
	and	%l1,$mask,%l1
	st	%l0,[$out]		! X
	and	%l2,$mask,%l2
	st	%l1,[$out+4]
	and	%l3,$mask,%l3
	st	%l2,[$out+8]
	and	%l4,$mask,%l4
	st	%l3,[$out+12]
	and	%l5,$mask,%l5
	st	%l4,[$out+16]
	and	%l6,$mask,%l6
	st	%l5,[$out+20]
	and	%l7,$mask,%l7
	st	%l6,[$out+24]
	st	%l7,[$out+28]
	add	$out,32,$out

	ld	[$inp+64*0],%l0
	ld	[$inp+64*1],%l1
	ld	[$inp+64*2],%l2
	ld	[$inp+64*3],%l3
	ld	[$inp+64*4],%l4
	ld	[$inp+64*5],%l5
	ld	[$inp+64*6],%l6
	ld	[$inp+64*7],%l7
	add	$inp,64*8,$inp
	and	%l0,$mask,%l0
	and	%l1,$mask,%l1
	st	%l0,[$out]		! Y
	and	%l2,$mask,%l2
	st	%l1,[$out+4]
	and	%l3,$mask,%l3
	st	%l2,[$out+8]
	and	%l4,$mask,%l4
	st	%l3,[$out+12]
	and	%l5,$mask,%l5
	st	%l4,[$out+16]
	and	%l6,$mask,%l6
	st	%l5,[$out+20]
	and	%l7,$mask,%l7
	st	%l6,[$out+24]
	st	%l7,[$out+28]
	add	$out,32,$out

	ld	[$inp+64*0],%l0
	ld	[$inp+64*1],%l1
	ld	[$inp+64*2],%l2
	ld	[$inp+64*3],%l3
	ld	[$inp+64*4],%l4
	ld	[$inp+64*5],%l5
	ld	[$inp+64*6],%l6
	ld	[$inp+64*7],%l7
	and	%l0,$mask,%l0
	and	%l1,$mask,%l1
	st	%l0,[$out]		! Z
	and	%l2,$mask,%l2
	st	%l1,[$out+4]
	and	%l3,$mask,%l3
	st	%l2,[$out+8]
	and	%l4,$mask,%l4
	st	%l3,[$out+12]
	and	%l5,$mask,%l5
	st	%l4,[$out+16]
	and	%l6,$mask,%l6
	st	%l5,[$out+20]
	and	%l7,$mask,%l7
	st	%l6,[$out+24]
	st	%l7,[$out+28]

	ret
	restore
.type	ecp_nistz256_gather_w5,#function
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5

! void	ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
!					  int %i2);
.globl	ecp_nistz256_scatter_w7
.align	32
ecp_nistz256_scatter_w7:
	save	%sp,-STACK_FRAME,%sp
	nop
	add	$out,$index,$out
	mov	64/4,$index
.Loop_scatter_w7:
	ld	[$inp],%l0
	add	$inp,4,$inp
	subcc	$index,1,$index
	stb	%l0,[$out+64*0-1]
	srl	%l0,8,%l1
	stb	%l1,[$out+64*1-1]
	srl	%l0,16,%l2
	stb	%l2,[$out+64*2-1]
	srl	%l0,24,%l3
	stb	%l3,[$out+64*3-1]
	bne	.Loop_scatter_w7
	add	$out,64*4,$out

	ret
	restore
.type	ecp_nistz256_scatter_w7,#function
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7

! void	ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
!						      int %i2);
.globl	ecp_nistz256_gather_w7
.align	32
ecp_nistz256_gather_w7:
	save	%sp,-STACK_FRAME,%sp

	neg	$index,$mask
	srax	$mask,63,$mask

	add	$index,$mask,$index
	add	$inp,$index,$inp
	mov	64/4,$index

.Loop_gather_w7:
	ldub	[$inp+64*0],%l0
	prefetch	[$inp+3840+64*0],1
	subcc	$index,1,$index
	ldub	[$inp+64*1],%l1
	prefetch	[$inp+3840+64*1],1
	ldub	[$inp+64*2],%l2
	prefetch	[$inp+3840+64*2],1
	ldub	[$inp+64*3],%l3
	prefetch	[$inp+3840+64*3],1
	add	$inp,64*4,$inp
	sll	%l1,8,%l1
	sll	%l2,16,%l2
	or	%l0,%l1,%l0
	sll	%l3,24,%l3
	or	%l0,%l2,%l0
	or	%l0,%l3,%l0
	and	%l0,$mask,%l0
	st	%l0,[$out]
	bne	.Loop_gather_w7
	add	$out,4,$out

	ret
	restore
.type	ecp_nistz256_gather_w7,#function
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
___
}}}
{{{
########################################################################
# Following subroutines are VIS3 counterparts of those above that
# implement ones found in ecp_nistz256.c. Key difference is that they
# use 128-bit multiplication and addition with 64-bit carry, and in
# order to do that they perform conversion from uint32_t[8] to
# uint64_t[4] upon entry and vice versa on return.
#
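# As a minimal illustration (not used by the generator), the entry
# conversion just packs pairs of adjacent 32-bit words, least
# significant word first, into 64-bit limbs, e.g. in Perl:
#
#       sub u32x8_to_u64x4 {
#           my @w = @_;         # eight 32-bit words, w[0] least significant
#           return map { ($w[2*$_+1]<<32)|$w[2*$_] } (0..3);
#       }
#
# which is what the sllx/or sequences on entry to the subroutines
# below implement; the srlx/st pairs on exit perform the inverse.
#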
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");
my ($acc6,$acc7)=($bp,$bi);     # used in squaring

$code.=<<___;
.align  32
__ecp_nistz256_mul_by_2_vis3:
        addcc   $acc0,$acc0,$acc0
        addxccc $acc1,$acc1,$acc1
        addxccc $acc2,$acc2,$acc2
        addxccc $acc3,$acc3,$acc3
        b       .Lreduce_by_sub_vis3
        addxc   %g0,%g0,$acc4           ! did it carry?
.type   __ecp_nistz256_mul_by_2_vis3,#function
.size   __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3

.align  32
__ecp_nistz256_add_vis3:
        ldx     [$bp+0],$t0
        ldx     [$bp+8],$t1
        ldx     [$bp+16],$t2
        ldx     [$bp+24],$t3

__ecp_nistz256_add_noload_vis3:

        addcc   $t0,$acc0,$acc0
        addxccc $t1,$acc1,$acc1
        addxccc $t2,$acc2,$acc2
        addxccc $t3,$acc3,$acc3
        addxc   %g0,%g0,$acc4           ! did it carry?

.Lreduce_by_sub_vis3:

        addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
        addxccc $acc1,$poly1,$t1
        addxccc $acc2,$minus1,$t2
        addxc   $acc3,$poly3,$t3

        movrnz  $acc4,$t0,$acc0         ! if a+b carried, ret = ret-mod
        movrnz  $acc4,$t1,$acc1
        stx     $acc0,[$rp]
        movrnz  $acc4,$t2,$acc2
        stx     $acc1,[$rp+8]
        movrnz  $acc4,$t3,$acc3
        stx     $acc2,[$rp+16]
        retl
        stx     $acc3,[$rp+24]
.type   __ecp_nistz256_add_vis3,#function
.size   __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3

! Trouble with subtraction is that there is no subtraction with 64-bit
! borrow, only with 32-bit one. For this reason we "decompose" 64-bit
! $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
! recall that SPARC is big-endian, which is why you'll observe that
! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
! "collect" the result back to 64-bit $acc0-$acc3.
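!
! For example, the 64-bit limb b[0] occupies bytes 0-7 in big-endian
! order, so its less significant half is the 32-bit word at offset 4
! and its more significant half the word at offset 0, which is why
! the loads below fetch offset 4 before offset 0.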
.align  32
__ecp_nistz256_sub_from_vis3:
        ld      [$bp+4],$t0
        ld      [$bp+0],$t1
        ld      [$bp+12],$t2
        ld      [$bp+8],$t3

        srlx    $acc0,32,$acc4
        not     $poly1,$poly1
        srlx    $acc1,32,$acc5
        subcc   $acc0,$t0,$acc0
        ld      [$bp+20],$t0
        subccc  $acc4,$t1,$acc4
        ld      [$bp+16],$t1
        subccc  $acc1,$t2,$acc1
        ld      [$bp+28],$t2
        and     $acc0,$poly1,$acc0
        subccc  $acc5,$t3,$acc5
        ld      [$bp+24],$t3
        sllx    $acc4,32,$acc4
        and     $acc1,$poly1,$acc1
        sllx    $acc5,32,$acc5
        or      $acc0,$acc4,$acc0
        srlx    $acc2,32,$acc4
        or      $acc1,$acc5,$acc1
        srlx    $acc3,32,$acc5
        subccc  $acc2,$t0,$acc2
        subccc  $acc4,$t1,$acc4
        subccc  $acc3,$t2,$acc3
        and     $acc2,$poly1,$acc2
        subccc  $acc5,$t3,$acc5
        sllx    $acc4,32,$acc4
        and     $acc3,$poly1,$acc3
        sllx    $acc5,32,$acc5
        or      $acc2,$acc4,$acc2
        subc    %g0,%g0,$acc4           ! did it borrow?
        b       .Lreduce_by_add_vis3
        or      $acc3,$acc5,$acc3
.type   __ecp_nistz256_sub_from_vis3,#function
.size   __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3

.align  32
__ecp_nistz256_sub_morf_vis3:
        ld      [$bp+4],$t0
        ld      [$bp+0],$t1
        ld      [$bp+12],$t2
        ld      [$bp+8],$t3

        srlx    $acc0,32,$acc4
        not     $poly1,$poly1
        srlx    $acc1,32,$acc5
        subcc   $t0,$acc0,$acc0
        ld      [$bp+20],$t0
        subccc  $t1,$acc4,$acc4
        ld      [$bp+16],$t1
        subccc  $t2,$acc1,$acc1
        ld      [$bp+28],$t2
        and     $acc0,$poly1,$acc0
        subccc  $t3,$acc5,$acc5
        ld      [$bp+24],$t3
        sllx    $acc4,32,$acc4
        and     $acc1,$poly1,$acc1
        sllx    $acc5,32,$acc5
        or      $acc0,$acc4,$acc0
        srlx    $acc2,32,$acc4
        or      $acc1,$acc5,$acc1
        srlx    $acc3,32,$acc5
        subccc  $t0,$acc2,$acc2
        subccc  $t1,$acc4,$acc4
        subccc  $t2,$acc3,$acc3
        and     $acc2,$poly1,$acc2
        subccc  $t3,$acc5,$acc5
        sllx    $acc4,32,$acc4
        and     $acc3,$poly1,$acc3
        sllx    $acc5,32,$acc5
        or      $acc2,$acc4,$acc2
        subc    %g0,%g0,$acc4           ! did it borrow?
        or      $acc3,$acc5,$acc3

.Lreduce_by_add_vis3:

        addcc   $acc0,-1,$t0            ! add modulus
        not     $poly3,$t3
        addxccc $acc1,$poly1,$t1
        not     $poly1,$poly1           ! restore $poly1
        addxccc $acc2,%g0,$t2
        addxc   $acc3,$t3,$t3

        movrnz  $acc4,$t0,$acc0         ! if a-b borrowed, ret = ret+mod
        movrnz  $acc4,$t1,$acc1
        stx     $acc0,[$rp]
        movrnz  $acc4,$t2,$acc2
        stx     $acc1,[$rp+8]
        movrnz  $acc4,$t3,$acc3
        stx     $acc2,[$rp+16]
        retl
        stx     $acc3,[$rp+24]
.type   __ecp_nistz256_sub_morf_vis3,#function
.size   __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3

.align  32
__ecp_nistz256_div_by_2_vis3:
        ! ret = (a is odd ? a+mod : a) >> 1
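        ! (the modulus is odd, so a+mod is even whenever a is odd,
        ! and (a+mod)/2 is congruent to a/2 modulo the modulus; the
        ! carry of the addition becomes the top bit after the shift)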

        not     $poly1,$t1
        not     $poly3,$t3
        and     $acc0,1,$acc5
        addcc   $acc0,-1,$t0            ! add modulus
        addxccc $acc1,$t1,$t1
        addxccc $acc2,%g0,$t2
        addxccc $acc3,$t3,$t3
        addxc   %g0,%g0,$acc4           ! carry bit

        movrnz  $acc5,$t0,$acc0
        movrnz  $acc5,$t1,$acc1
        movrnz  $acc5,$t2,$acc2
        movrnz  $acc5,$t3,$acc3
        movrz   $acc5,%g0,$acc4

        ! ret >>= 1

        srlx    $acc0,1,$acc0
        sllx    $acc1,63,$t0
        srlx    $acc1,1,$acc1
        or      $acc0,$t0,$acc0
        sllx    $acc2,63,$t1
        srlx    $acc2,1,$acc2
        or      $acc1,$t1,$acc1
        sllx    $acc3,63,$t2
        stx     $acc0,[$rp]
        srlx    $acc3,1,$acc3
        or      $acc2,$t2,$acc2
        sllx    $acc4,63,$t3            ! don't forget carry bit
        stx     $acc1,[$rp+8]
        or      $acc3,$t3,$acc3
        stx     $acc2,[$rp+16]
        retl
        stx     $acc3,[$rp+24]
.type   __ecp_nistz256_div_by_2_vis3,#function
.size   __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3

! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
! 4x faster [on T4]...
.align  32
__ecp_nistz256_mul_mont_vis3:
        mulx    $a0,$bi,$acc0
        not     $poly3,$poly3           ! 0xFFFFFFFF00000001
        umulxhi $a0,$bi,$t0
        mulx    $a1,$bi,$acc1
        umulxhi $a1,$bi,$t1
        mulx    $a2,$bi,$acc2
        umulxhi $a2,$bi,$t2
        mulx    $a3,$bi,$acc3
        umulxhi $a3,$bi,$t3
        ldx     [$bp+8],$bi             ! b[1]

        addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication
         sllx   $acc0,32,$t0
        addxccc $acc2,$t1,$acc2
         srlx   $acc0,32,$t1
        addxccc $acc3,$t2,$acc3
        addxc   %g0,$t3,$acc4
        mov     0,$acc5
___
for($i=1;$i<4;$i++) {
        # Reduction iteration is normally performed by accumulating
        # result of multiplication of modulus by "magic" digit [and
        # omitting least significant word, which is guaranteed to
        # be 0], but thanks to special form of modulus and "magic"
        # digit being equal to least significant word, it can be
        # performed with additions and subtractions alone. Indeed:
        #
        #            ffff0001.00000000.0000ffff.ffffffff
        # *                                     abcdefgh
        # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
        #
        # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
        # rewrite above as:
        #
        #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
        # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
        # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
        #
        # or marking redundant operations:
        #
        #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
        # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
        # - 0000abcd.efgh0000.--------.--------.--------
        #   ^^^^^^^^ but this word is calculated with umulxhi, because
        #            there is no subtract with 64-bit borrow:-(

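        # In other words, with d=acc[0] and p=2^256-2^224+2^192+2^96-1,
        #
        #   acc + d*p = (acc - d) + (d<<96) + (d*0xFFFFFFFF00000001)<<192
        #
        # where the least significant 64-bit word of the right-hand
        # side is zero by construction and is simply dropped. Below,
        # $t0/$t1 carry d<<96 split across two limbs, and $t2/$t3 the
        # low and high halves of d*0xFFFFFFFF00000001.
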
$code.=<<___;
        sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
        umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
        addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
        mulx    $a0,$bi,$t0
        addxccc $acc2,$t1,$acc1
        mulx    $a1,$bi,$t1
        addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
        mulx    $a2,$bi,$t2
        addxccc $acc4,$t3,$acc3
        mulx    $a3,$bi,$t3
        addxc   $acc5,%g0,$acc4

        addcc   $acc0,$t0,$acc0         ! accumulate low parts of multiplication
        umulxhi $a0,$bi,$t0
        addxccc $acc1,$t1,$acc1
        umulxhi $a1,$bi,$t1
        addxccc $acc2,$t2,$acc2
        umulxhi $a2,$bi,$t2
        addxccc $acc3,$t3,$acc3
        umulxhi $a3,$bi,$t3
        addxc   $acc4,%g0,$acc4
___
$code.=<<___    if ($i<3);
        ldx     [$bp+8*($i+1)],$bi      ! bp[$i+1]
___
$code.=<<___;
        addcc   $acc1,$t0,$acc1         ! accumulate high parts of multiplication
         sllx   $acc0,32,$t0
        addxccc $acc2,$t1,$acc2
         srlx   $acc0,32,$t1
        addxccc $acc3,$t2,$acc3
        addxccc $acc4,$t3,$acc4
        addxc   %g0,%g0,$acc5
___
}
$code.=<<___;
        sub     $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
        umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
        addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
        addxccc $acc2,$t1,$acc1
        addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
        addxccc $acc4,$t3,$acc3
        b       .Lmul_final_vis3        ! see below
        addxc   $acc5,%g0,$acc4
.type   __ecp_nistz256_mul_mont_vis3,#function
.size   __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3

! compared to above __ecp_nistz256_mul_mont_vis3 it's 21% less
! instructions, but only 14% faster [on T4]...
.align  32
__ecp_nistz256_sqr_mont_vis3:
        !  |  |  |  |  |  |a1*a0|  |
        !  |  |  |  |  |a2*a0|  |  |
        !  |  |a3*a2|a3*a0|  |  |  |
        !  |  |  |  |a2*a1|  |  |  |
        !  |  |  |a3*a1|  |  |  |  |
        ! *|  |  |  |  |  |  |  | 2|
        ! +|a3*a3|a2*a2|a1*a1|a0*a0|
        !  |--+--+--+--+--+--+--+--|
        !  |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
        !
        !  "can't overflow" remarks below mark carries into the high part
        !  of a multiplication result, which can't overflow because it
        !  can never be all ones.
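        !
        !  The doubling of the cross products is what makes the scheme
        !  work: (sum a[i]*2^(64*i))^2 = sum a[i]^2*2^(128*i)
        !  + 2*sum a[i]*a[j]*2^(64*(i+j)) over i>j, so acc[1-6] collect
        !  the cross products first, get doubled, and only then are the
        !  squares a[i]*a[i] added in.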

        mulx    $a1,$a0,$acc1           ! a[1]*a[0]
        umulxhi $a1,$a0,$t1
        mulx    $a2,$a0,$acc2           ! a[2]*a[0]
        umulxhi $a2,$a0,$t2
        mulx    $a3,$a0,$acc3           ! a[3]*a[0]
        umulxhi $a3,$a0,$acc4

        addcc   $acc2,$t1,$acc2         ! accumulate high parts of multiplication
        mulx    $a2,$a1,$t0             ! a[2]*a[1]
        umulxhi $a2,$a1,$t1
        addxccc $acc3,$t2,$acc3
        mulx    $a3,$a1,$t2             ! a[3]*a[1]
        umulxhi $a3,$a1,$t3
        addxc   $acc4,%g0,$acc4         ! can't overflow

        mulx    $a3,$a2,$acc5           ! a[3]*a[2]
        not     $poly3,$poly3           ! 0xFFFFFFFF00000001
        umulxhi $a3,$a2,$acc6

        addcc   $t2,$t1,$t1             ! accumulate high parts of multiplication
        mulx    $a0,$a0,$acc0           ! a[0]*a[0]
        addxc   $t3,%g0,$t2             ! can't overflow

        addcc   $acc3,$t0,$acc3         ! accumulate low parts of multiplication
        umulxhi $a0,$a0,$a0
        addxccc $acc4,$t1,$acc4
        mulx    $a1,$a1,$t1             ! a[1]*a[1]
        addxccc $acc5,$t2,$acc5
        umulxhi $a1,$a1,$a1
        addxc   $acc6,%g0,$acc6         ! can't overflow

        addcc   $acc1,$acc1,$acc1       ! acc[1-6]*=2
        mulx    $a2,$a2,$t2             ! a[2]*a[2]
        addxccc $acc2,$acc2,$acc2
        umulxhi $a2,$a2,$a2
        addxccc $acc3,$acc3,$acc3
        mulx    $a3,$a3,$t3             ! a[3]*a[3]
        addxccc $acc4,$acc4,$acc4
        umulxhi $a3,$a3,$a3
        addxccc $acc5,$acc5,$acc5
        addxccc $acc6,$acc6,$acc6
        addxc   %g0,%g0,$acc7

        addcc   $acc1,$a0,$acc1         ! +a[i]*a[i]
        addxccc $acc2,$t1,$acc2
        addxccc $acc3,$a1,$acc3
        addxccc $acc4,$t2,$acc4
         sllx   $acc0,32,$t0
        addxccc $acc5,$a2,$acc5
         srlx   $acc0,32,$t1
        addxccc $acc6,$t3,$acc6
         sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
        addxc   $acc7,$a3,$acc7
___
for($i=0;$i<3;$i++) {                   # reductions, see commentary
                                        # in multiplication for details
$code.=<<___;
        umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
        addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
         sllx   $acc0,32,$t0
        addxccc $acc2,$t1,$acc1
         srlx   $acc0,32,$t1
        addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
         sub    $acc0,$t0,$t2           ! acc0*0xFFFFFFFF00000001, low part
        addxc   %g0,$t3,$acc3           ! can't overflow
___
}
$code.=<<___;
        umulxhi $acc0,$poly3,$t3        ! acc0*0xFFFFFFFF00000001, high part
        addcc   $acc1,$t0,$acc0         ! +=acc[0]<<96 and omit acc[0]
        addxccc $acc2,$t1,$acc1
        addxccc $acc3,$t2,$acc2         ! +=acc[0]*0xFFFFFFFF00000001
        addxc   %g0,$t3,$acc3           ! can't overflow

        addcc   $acc0,$acc4,$acc0       ! accumulate upper half
        addxccc $acc1,$acc5,$acc1
        addxccc $acc2,$acc6,$acc2
        addxccc $acc3,$acc7,$acc3
        addxc   %g0,%g0,$acc4

.Lmul_final_vis3:

        ! Final step is "if result > mod, subtract mod", but since
        ! comparison means subtraction, we do the subtraction and then
        ! copy the outcome if it didn't borrow. Note that as we [have
        ! to] replace the subtraction with an addition of the negated
        ! modulus, the carry/borrow logic is inverted.
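        !
        ! Concretely, $t0-$t3 below receive acc plus -modulus, the last
        ! addxccc folds in $acc4 (the carry word), and so the carry
        ! condition code ends up set exactly when the unreduced result
        ! is not less than the modulus, in which case movcs keeps the
        ! subtracted value.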

        addcc   $acc0,1,$t0             ! add -modulus, i.e. subtract
        not     $poly3,$poly3           ! restore 0x00000000FFFFFFFE
        addxccc $acc1,$poly1,$t1
        addxccc $acc2,$minus1,$t2
        addxccc $acc3,$poly3,$t3
        addxccc $acc4,$minus1,%g0       ! did it carry?

        movcs   %xcc,$t0,$acc0
        movcs   %xcc,$t1,$acc1
        stx     $acc0,[$rp]
        movcs   %xcc,$t2,$acc2
        stx     $acc1,[$rp+8]
        movcs   %xcc,$t3,$acc3
        stx     $acc2,[$rp+16]
        retl
        stx     $acc3,[$rp+24]
.type   __ecp_nistz256_sqr_mont_vis3,#function
.size   __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
___

########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
#
{
my ($res_x,$res_y,$res_z,
    $in_x,$in_y,$in_z,
    $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
# above map() describes stack layout with 10 temporary
# 256-bit vectors on top.

$code.=<<___;
.align  32
ecp_nistz256_point_double_vis3:
        save    %sp,-STACK64_FRAME-32*10,%sp

        mov     $rp,$rp_real
.Ldouble_shortcut_vis3:
        mov     -1,$minus1
        mov     -2,$poly3
        sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
        srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE

        ! convert input to uint64_t[4]
        ld      [$ap],$a0                       ! in_x
        ld      [$ap+4],$t0
        ld      [$ap+8],$a1
        ld      [$ap+12],$t1
        ld      [$ap+16],$a2
        ld      [$ap+20],$t2
        ld      [$ap+24],$a3
        ld      [$ap+28],$t3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        ld      [$ap+32],$acc0                  ! in_y
        or      $a0,$t0,$a0
        ld      [$ap+32+4],$t0
        sllx    $t2,32,$t2
        ld      [$ap+32+8],$acc1
        or      $a1,$t1,$a1
        ld      [$ap+32+12],$t1
        sllx    $t3,32,$t3
        ld      [$ap+32+16],$acc2
        or      $a2,$t2,$a2
        ld      [$ap+32+20],$t2
        or      $a3,$t3,$a3
        ld      [$ap+32+24],$acc3
        sllx    $t0,32,$t0
        ld      [$ap+32+28],$t3
        sllx    $t1,32,$t1
        stx     $a0,[%sp+LOCALS64+$in_x]
        sllx    $t2,32,$t2
        stx     $a1,[%sp+LOCALS64+$in_x+8]
        sllx    $t3,32,$t3
        stx     $a2,[%sp+LOCALS64+$in_x+16]
        or      $acc0,$t0,$acc0
        stx     $a3,[%sp+LOCALS64+$in_x+24]
        or      $acc1,$t1,$acc1
        stx     $acc0,[%sp+LOCALS64+$in_y]
        or      $acc2,$t2,$acc2
        stx     $acc1,[%sp+LOCALS64+$in_y+8]
        or      $acc3,$t3,$acc3
        stx     $acc2,[%sp+LOCALS64+$in_y+16]
        stx     $acc3,[%sp+LOCALS64+$in_y+24]

        ld      [$ap+64],$a0                    ! in_z
        ld      [$ap+64+4],$t0
        ld      [$ap+64+8],$a1
        ld      [$ap+64+12],$t1
        ld      [$ap+64+16],$a2
        ld      [$ap+64+20],$t2
        ld      [$ap+64+24],$a3
        ld      [$ap+64+28],$t3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        or      $a0,$t0,$a0
        sllx    $t2,32,$t2
        or      $a1,$t1,$a1
        sllx    $t3,32,$t3
        or      $a2,$t2,$a2
        or      $a3,$t3,$a3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        stx     $a0,[%sp+LOCALS64+$in_z]
        sllx    $t2,32,$t2
        stx     $a1,[%sp+LOCALS64+$in_z+8]
        sllx    $t3,32,$t3
        stx     $a2,[%sp+LOCALS64+$in_z+16]
        stx     $a3,[%sp+LOCALS64+$in_z+24]

        ! in_y is still in $acc0-$acc3
        call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(S, in_y);
        add     %sp,LOCALS64+$S,$rp

        ! in_z is still in $a0-$a3
        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Zsqr, in_z);
        add     %sp,LOCALS64+$Zsqr,$rp

        mov     $acc0,$a0                       ! put Zsqr aside
        mov     $acc1,$a1
        mov     $acc2,$a2
        mov     $acc3,$a3

        add     %sp,LOCALS64+$in_x,$bp
        call    __ecp_nistz256_add_vis3         ! p256_add(M, Zsqr, in_x);
        add     %sp,LOCALS64+$M,$rp

        mov     $a0,$acc0                       ! restore Zsqr
        ldx     [%sp+LOCALS64+$S],$a0           ! forward load
        mov     $a1,$acc1
        ldx     [%sp+LOCALS64+$S+8],$a1
        mov     $a2,$acc2
        ldx     [%sp+LOCALS64+$S+16],$a2
        mov     $a3,$acc3
        ldx     [%sp+LOCALS64+$S+24],$a3

        add     %sp,LOCALS64+$in_x,$bp
        call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(Zsqr, in_x, Zsqr);
        add     %sp,LOCALS64+$Zsqr,$rp

        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(S, S);
        add     %sp,LOCALS64+$S,$rp

        ldx     [%sp+LOCALS64+$in_z],$bi
        ldx     [%sp+LOCALS64+$in_y],$a0
        ldx     [%sp+LOCALS64+$in_y+8],$a1
        ldx     [%sp+LOCALS64+$in_y+16],$a2
        ldx     [%sp+LOCALS64+$in_y+24],$a3
        add     %sp,LOCALS64+$in_z,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(tmp0, in_z, in_y);
        add     %sp,LOCALS64+$tmp0,$rp

        ldx     [%sp+LOCALS64+$M],$bi           ! forward load
        ldx     [%sp+LOCALS64+$Zsqr],$a0
        ldx     [%sp+LOCALS64+$Zsqr+8],$a1
        ldx     [%sp+LOCALS64+$Zsqr+16],$a2
        ldx     [%sp+LOCALS64+$Zsqr+24],$a3

        call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(res_z, tmp0);
        add     %sp,LOCALS64+$res_z,$rp

        add     %sp,LOCALS64+$M,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(M, M, Zsqr);
        add     %sp,LOCALS64+$M,$rp

        mov     $acc0,$a0                       ! put aside M
        mov     $acc1,$a1
        mov     $acc2,$a2
        mov     $acc3,$a3
        call    __ecp_nistz256_mul_by_2_vis3
        add     %sp,LOCALS64+$M,$rp
        mov     $a0,$t0                         ! copy M
        ldx     [%sp+LOCALS64+$S],$a0           ! forward load
        mov     $a1,$t1
        ldx     [%sp+LOCALS64+$S+8],$a1
        mov     $a2,$t2
        ldx     [%sp+LOCALS64+$S+16],$a2
        mov     $a3,$t3
        ldx     [%sp+LOCALS64+$S+24],$a3
        call    __ecp_nistz256_add_noload_vis3  ! p256_mul_by_3(M, M);
        add     %sp,LOCALS64+$M,$rp

        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(tmp0, S);
        add     %sp,LOCALS64+$tmp0,$rp

        ldx     [%sp+LOCALS64+$S],$bi           ! forward load
        ldx     [%sp+LOCALS64+$in_x],$a0
        ldx     [%sp+LOCALS64+$in_x+8],$a1
        ldx     [%sp+LOCALS64+$in_x+16],$a2
        ldx     [%sp+LOCALS64+$in_x+24],$a3

        call    __ecp_nistz256_div_by_2_vis3    ! p256_div_by_2(res_y, tmp0);
        add     %sp,LOCALS64+$res_y,$rp

        add     %sp,LOCALS64+$S,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, in_x);
        add     %sp,LOCALS64+$S,$rp

        ldx     [%sp+LOCALS64+$M],$a0           ! forward load
        ldx     [%sp+LOCALS64+$M+8],$a1
        ldx     [%sp+LOCALS64+$M+16],$a2
        ldx     [%sp+LOCALS64+$M+24],$a3

        call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(tmp0, S);
        add     %sp,LOCALS64+$tmp0,$rp

        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(res_x, M);
        add     %sp,LOCALS64+$res_x,$rp

        add     %sp,LOCALS64+$tmp0,$bp
        call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_x, res_x, tmp0);
        add     %sp,LOCALS64+$res_x,$rp

        ldx     [%sp+LOCALS64+$M],$a0           ! forward load
        ldx     [%sp+LOCALS64+$M+8],$a1
        ldx     [%sp+LOCALS64+$M+16],$a2
        ldx     [%sp+LOCALS64+$M+24],$a3

        add     %sp,LOCALS64+$S,$bp
        call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(S, S, res_x);
        add     %sp,LOCALS64+$S,$rp

        mov     $acc0,$bi
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S, S, M);
        add     %sp,LOCALS64+$S,$rp

        ldx     [%sp+LOCALS64+$res_x],$a0       ! forward load
        ldx     [%sp+LOCALS64+$res_x+8],$a1
        ldx     [%sp+LOCALS64+$res_x+16],$a2
        ldx     [%sp+LOCALS64+$res_x+24],$a3

        add     %sp,LOCALS64+$res_y,$bp
        call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, S, res_y);
        add     %sp,LOCALS64+$res_y,$rp

        ! convert output to uint32_t[8]
        srlx    $a0,32,$t0
        srlx    $a1,32,$t1
        st      $a0,[$rp_real]                  ! res_x
        srlx    $a2,32,$t2
        st      $t0,[$rp_real+4]
        srlx    $a3,32,$t3
        st      $a1,[$rp_real+8]
        st      $t1,[$rp_real+12]
        st      $a2,[$rp_real+16]
        st      $t2,[$rp_real+20]
        st      $a3,[$rp_real+24]
        st      $t3,[$rp_real+28]

        ldx     [%sp+LOCALS64+$res_z],$a0       ! forward load
        srlx    $acc0,32,$t0
        ldx     [%sp+LOCALS64+$res_z+8],$a1
        srlx    $acc1,32,$t1
        ldx     [%sp+LOCALS64+$res_z+16],$a2
        srlx    $acc2,32,$t2
        ldx     [%sp+LOCALS64+$res_z+24],$a3
        srlx    $acc3,32,$t3
        st      $acc0,[$rp_real+32]             ! res_y
        st      $t0,  [$rp_real+32+4]
        st      $acc1,[$rp_real+32+8]
        st      $t1,  [$rp_real+32+12]
        st      $acc2,[$rp_real+32+16]
        st      $t2,  [$rp_real+32+20]
        st      $acc3,[$rp_real+32+24]
        st      $t3,  [$rp_real+32+28]

        srlx    $a0,32,$t0
        srlx    $a1,32,$t1
        st      $a0,[$rp_real+64]               ! res_z
        srlx    $a2,32,$t2
        st      $t0,[$rp_real+64+4]
        srlx    $a3,32,$t3
        st      $a1,[$rp_real+64+8]
        st      $t1,[$rp_real+64+12]
        st      $a2,[$rp_real+64+16]
        st      $t2,[$rp_real+64+20]
        st      $a3,[$rp_real+64+24]
        st      $t3,[$rp_real+64+28]

        ret
        restore
.type   ecp_nistz256_point_double_vis3,#function
.size   ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
___
}
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#                             const P256_POINT *in2);
{
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);

# above map() describes stack layout with 18 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty and result of check for zero.
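# (!in2infty is kept at %fp+STACK_BIAS-8, !in1infty at
# %fp+STACK_BIAS-16, and the zero check on R=S2-S1 at
# %fp+STACK_BIAS-24, matching the loads further down.)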

$code.=<<___;
.align  32
ecp_nistz256_point_add_vis3:
        save    %sp,-STACK64_FRAME-32*18-32,%sp

        mov     $rp,$rp_real
        mov     -1,$minus1
        mov     -2,$poly3
        sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
        srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE

        ! convert input to uint64_t[4]
        ld      [$bp],$a0                       ! in2_x
        ld      [$bp+4],$t0
        ld      [$bp+8],$a1
        ld      [$bp+12],$t1
        ld      [$bp+16],$a2
        ld      [$bp+20],$t2
        ld      [$bp+24],$a3
        ld      [$bp+28],$t3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        ld      [$bp+32],$acc0                  ! in2_y
        or      $a0,$t0,$a0
        ld      [$bp+32+4],$t0
        sllx    $t2,32,$t2
        ld      [$bp+32+8],$acc1
        or      $a1,$t1,$a1
        ld      [$bp+32+12],$t1
        sllx    $t3,32,$t3
        ld      [$bp+32+16],$acc2
        or      $a2,$t2,$a2
        ld      [$bp+32+20],$t2
        or      $a3,$t3,$a3
        ld      [$bp+32+24],$acc3
        sllx    $t0,32,$t0
        ld      [$bp+32+28],$t3
        sllx    $t1,32,$t1
        stx     $a0,[%sp+LOCALS64+$in2_x]
        sllx    $t2,32,$t2
        stx     $a1,[%sp+LOCALS64+$in2_x+8]
        sllx    $t3,32,$t3
        stx     $a2,[%sp+LOCALS64+$in2_x+16]
        or      $acc0,$t0,$acc0
        stx     $a3,[%sp+LOCALS64+$in2_x+24]
        or      $acc1,$t1,$acc1
        stx     $acc0,[%sp+LOCALS64+$in2_y]
        or      $acc2,$t2,$acc2
        stx     $acc1,[%sp+LOCALS64+$in2_y+8]
        or      $acc3,$t3,$acc3
        stx     $acc2,[%sp+LOCALS64+$in2_y+16]
        stx     $acc3,[%sp+LOCALS64+$in2_y+24]

        or      $a1,$a0,$a0
        or      $a3,$a2,$a2
        or      $acc1,$acc0,$acc0
        or      $acc3,$acc2,$acc2
        or      $a2,$a0,$a0
        or      $acc2,$acc0,$acc0
        or      $acc0,$a0,$a0
        movrnz  $a0,-1,$a0                      ! !in2infty
        stx     $a0,[%fp+STACK_BIAS-8]

        ld      [$bp+64],$acc0                  ! in2_z
        ld      [$bp+64+4],$t0
        ld      [$bp+64+8],$acc1
        ld      [$bp+64+12],$t1
        ld      [$bp+64+16],$acc2
        ld      [$bp+64+20],$t2
        ld      [$bp+64+24],$acc3
        ld      [$bp+64+28],$t3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        ld      [$ap],$a0                       ! in1_x
        or      $acc0,$t0,$acc0
        ld      [$ap+4],$t0
        sllx    $t2,32,$t2
        ld      [$ap+8],$a1
        or      $acc1,$t1,$acc1
        ld      [$ap+12],$t1
        sllx    $t3,32,$t3
        ld      [$ap+16],$a2
        or      $acc2,$t2,$acc2
        ld      [$ap+20],$t2
        or      $acc3,$t3,$acc3
        ld      [$ap+24],$a3
        sllx    $t0,32,$t0
        ld      [$ap+28],$t3
        sllx    $t1,32,$t1
        stx     $acc0,[%sp+LOCALS64+$in2_z]
        sllx    $t2,32,$t2
        stx     $acc1,[%sp+LOCALS64+$in2_z+8]
        sllx    $t3,32,$t3
        stx     $acc2,[%sp+LOCALS64+$in2_z+16]
        stx     $acc3,[%sp+LOCALS64+$in2_z+24]

        or      $a0,$t0,$a0
        ld      [$ap+32],$acc0                  ! in1_y
        or      $a1,$t1,$a1
        ld      [$ap+32+4],$t0
        or      $a2,$t2,$a2
        ld      [$ap+32+8],$acc1
        or      $a3,$t3,$a3
        ld      [$ap+32+12],$t1
        ld      [$ap+32+16],$acc2
        ld      [$ap+32+20],$t2
        ld      [$ap+32+24],$acc3
        sllx    $t0,32,$t0
        ld      [$ap+32+28],$t3
        sllx    $t1,32,$t1
        stx     $a0,[%sp+LOCALS64+$in1_x]
        sllx    $t2,32,$t2
        stx     $a1,[%sp+LOCALS64+$in1_x+8]
        sllx    $t3,32,$t3
        stx     $a2,[%sp+LOCALS64+$in1_x+16]
        or      $acc0,$t0,$acc0
        stx     $a3,[%sp+LOCALS64+$in1_x+24]
        or      $acc1,$t1,$acc1
        stx     $acc0,[%sp+LOCALS64+$in1_y]
        or      $acc2,$t2,$acc2
        stx     $acc1,[%sp+LOCALS64+$in1_y+8]
        or      $acc3,$t3,$acc3
        stx     $acc2,[%sp+LOCALS64+$in1_y+16]
        stx     $acc3,[%sp+LOCALS64+$in1_y+24]

        or      $a1,$a0,$a0
        or      $a3,$a2,$a2
        or      $acc1,$acc0,$acc0
        or      $acc3,$acc2,$acc2
        or      $a2,$a0,$a0
        or      $acc2,$acc0,$acc0
        or      $acc0,$a0,$a0
        movrnz  $a0,-1,$a0                      ! !in1infty
        stx     $a0,[%fp+STACK_BIAS-16]

        ldx     [%sp+LOCALS64+$in2_z],$a0       ! forward load
        ldx     [%sp+LOCALS64+$in2_z+8],$a1
        ldx     [%sp+LOCALS64+$in2_z+16],$a2
        ldx     [%sp+LOCALS64+$in2_z+24],$a3

        ld      [$ap+64],$acc0                  ! in1_z
        ld      [$ap+64+4],$t0
        ld      [$ap+64+8],$acc1
        ld      [$ap+64+12],$t1
        ld      [$ap+64+16],$acc2
        ld      [$ap+64+20],$t2
        ld      [$ap+64+24],$acc3
        ld      [$ap+64+28],$t3
        sllx    $t0,32,$t0
        sllx    $t1,32,$t1
        or      $acc0,$t0,$acc0
        sllx    $t2,32,$t2
        or      $acc1,$t1,$acc1
        sllx    $t3,32,$t3
        stx     $acc0,[%sp+LOCALS64+$in1_z]
        or      $acc2,$t2,$acc2
        stx     $acc1,[%sp+LOCALS64+$in1_z+8]
        or      $acc3,$t3,$acc3
        stx     $acc2,[%sp+LOCALS64+$in1_z+16]
        stx     $acc3,[%sp+LOCALS64+$in1_z+24]

        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z2sqr, in2_z);
        add     %sp,LOCALS64+$Z2sqr,$rp

        ldx     [%sp+LOCALS64+$in1_z],$a0
        ldx     [%sp+LOCALS64+$in1_z+8],$a1
        ldx     [%sp+LOCALS64+$in1_z+16],$a2
        ldx     [%sp+LOCALS64+$in1_z+24],$a3
        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
        add     %sp,LOCALS64+$Z1sqr,$rp

        ldx     [%sp+LOCALS64+$Z2sqr],$bi
        ldx     [%sp+LOCALS64+$in2_z],$a0
        ldx     [%sp+LOCALS64+$in2_z+8],$a1
        ldx     [%sp+LOCALS64+$in2_z+16],$a2
        ldx     [%sp+LOCALS64+$in2_z+24],$a3
        add     %sp,LOCALS64+$Z2sqr,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, Z2sqr, in2_z);
        add     %sp,LOCALS64+$S1,$rp

        ldx     [%sp+LOCALS64+$Z1sqr],$bi
        ldx     [%sp+LOCALS64+$in1_z],$a0
        ldx     [%sp+LOCALS64+$in1_z+8],$a1
        ldx     [%sp+LOCALS64+$in1_z+16],$a2
        ldx     [%sp+LOCALS64+$in1_z+24],$a3
        add     %sp,LOCALS64+$Z1sqr,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, Z1sqr, in1_z);
        add     %sp,LOCALS64+$S2,$rp

        ldx     [%sp+LOCALS64+$S1],$bi
        ldx     [%sp+LOCALS64+$in1_y],$a0
        ldx     [%sp+LOCALS64+$in1_y+8],$a1
        ldx     [%sp+LOCALS64+$in1_y+16],$a2
        ldx     [%sp+LOCALS64+$in1_y+24],$a3
        add     %sp,LOCALS64+$S1,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S1, S1, in1_y);
        add     %sp,LOCALS64+$S1,$rp

        ldx     [%sp+LOCALS64+$S2],$bi
        ldx     [%sp+LOCALS64+$in2_y],$a0
        ldx     [%sp+LOCALS64+$in2_y+8],$a1
        ldx     [%sp+LOCALS64+$in2_y+16],$a2
        ldx     [%sp+LOCALS64+$in2_y+24],$a3
        add     %sp,LOCALS64+$S2,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S2, in2_y);
        add     %sp,LOCALS64+$S2,$rp

        ldx     [%sp+LOCALS64+$Z2sqr],$bi       ! forward load
        ldx     [%sp+LOCALS64+$in1_x],$a0
        ldx     [%sp+LOCALS64+$in1_x+8],$a1
        ldx     [%sp+LOCALS64+$in1_x+16],$a2
        ldx     [%sp+LOCALS64+$in1_x+24],$a3

        add     %sp,LOCALS64+$S1,$bp
        call    __ecp_nistz256_sub_from_vis3    ! p256_sub(R, S2, S1);
        add     %sp,LOCALS64+$R,$rp

        or      $acc1,$acc0,$acc0               ! see if result is zero
        or      $acc3,$acc2,$acc2
        or      $acc2,$acc0,$acc0
        stx     $acc0,[%fp+STACK_BIAS-24]

        add     %sp,LOCALS64+$Z2sqr,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U1, in1_x, Z2sqr);
        add     %sp,LOCALS64+$U1,$rp

        ldx     [%sp+LOCALS64+$Z1sqr],$bi
        ldx     [%sp+LOCALS64+$in2_x],$a0
        ldx     [%sp+LOCALS64+$in2_x+8],$a1
        ldx     [%sp+LOCALS64+$in2_x+16],$a2
        ldx     [%sp+LOCALS64+$in2_x+24],$a3
        add     %sp,LOCALS64+$Z1sqr,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, in2_x, Z1sqr);
        add     %sp,LOCALS64+$U2,$rp

        ldx     [%sp+LOCALS64+$R],$a0           ! forward load
        ldx     [%sp+LOCALS64+$R+8],$a1
        ldx     [%sp+LOCALS64+$R+16],$a2
        ldx     [%sp+LOCALS64+$R+24],$a3

        add     %sp,LOCALS64+$U1,$bp
        call    __ecp_nistz256_sub_from_vis3    ! p256_sub(H, U2, U1);
        add     %sp,LOCALS64+$H,$rp

        or      $acc1,$acc0,$acc0               ! see if result is zero
        or      $acc3,$acc2,$acc2
        orcc    $acc2,$acc0,$acc0

        bne,pt  %xcc,.Ladd_proceed_vis3         ! is_equal(U1,U2)?
        nop

        ldx     [%fp+STACK_BIAS-8],$t0
        ldx     [%fp+STACK_BIAS-16],$t1
        ldx     [%fp+STACK_BIAS-24],$t2
        andcc   $t0,$t1,%g0
        be,pt   %xcc,.Ladd_proceed_vis3         ! (in1infty || in2infty)?
        nop
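        ! U1==U2 with neither input at infinity: if additionally S1==S2,
        ! the inputs are the same point, so branch into the doubling
        ! code (the annulled delay-slot add shrinks this frame to
        ! ecp_nistz256_point_double_vis3's smaller one); otherwise the
        ! inputs negate each other and the sum, stored below, is the
        ! point at infinity.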
        andcc   $t2,$t2,%g0
        be,a,pt %xcc,.Ldouble_shortcut_vis3     ! is_equal(S1,S2)?
        add     %sp,32*(18-10)+32,%sp           ! difference in frame sizes

        st      %g0,[$rp_real]
        st      %g0,[$rp_real+4]
        st      %g0,[$rp_real+8]
        st      %g0,[$rp_real+12]
        st      %g0,[$rp_real+16]
        st      %g0,[$rp_real+20]
        st      %g0,[$rp_real+24]
        st      %g0,[$rp_real+28]
        st      %g0,[$rp_real+32]
        st      %g0,[$rp_real+32+4]
        st      %g0,[$rp_real+32+8]
        st      %g0,[$rp_real+32+12]
        st      %g0,[$rp_real+32+16]
        st      %g0,[$rp_real+32+20]
        st      %g0,[$rp_real+32+24]
        st      %g0,[$rp_real+32+28]
        st      %g0,[$rp_real+64]
        st      %g0,[$rp_real+64+4]
        st      %g0,[$rp_real+64+8]
        st      %g0,[$rp_real+64+12]
        st      %g0,[$rp_real+64+16]
        st      %g0,[$rp_real+64+20]
        st      %g0,[$rp_real+64+24]
        st      %g0,[$rp_real+64+28]
        b       .Ladd_done_vis3
        nop

.align  16
.Ladd_proceed_vis3:
        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Rsqr, R);
        add     %sp,LOCALS64+$Rsqr,$rp

        ldx     [%sp+LOCALS64+$H],$bi
        ldx     [%sp+LOCALS64+$in1_z],$a0
        ldx     [%sp+LOCALS64+$in1_z+8],$a1
        ldx     [%sp+LOCALS64+$in1_z+16],$a2
        ldx     [%sp+LOCALS64+$in1_z+24],$a3
        add     %sp,LOCALS64+$H,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, H, in1_z);
        add     %sp,LOCALS64+$res_z,$rp

        ldx     [%sp+LOCALS64+$H],$a0
        ldx     [%sp+LOCALS64+$H+8],$a1
        ldx     [%sp+LOCALS64+$H+16],$a2
        ldx     [%sp+LOCALS64+$H+24],$a3
        call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Hsqr, H);
        add     %sp,LOCALS64+$Hsqr,$rp

        ldx     [%sp+LOCALS64+$res_z],$bi
        ldx     [%sp+LOCALS64+$in2_z],$a0
        ldx     [%sp+LOCALS64+$in2_z+8],$a1
        ldx     [%sp+LOCALS64+$in2_z+16],$a2
        ldx     [%sp+LOCALS64+$in2_z+24],$a3
        add     %sp,LOCALS64+$res_z,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_z, res_z, in2_z);
        add     %sp,LOCALS64+$res_z,$rp

        ldx     [%sp+LOCALS64+$H],$bi
        ldx     [%sp+LOCALS64+$Hsqr],$a0
        ldx     [%sp+LOCALS64+$Hsqr+8],$a1
        ldx     [%sp+LOCALS64+$Hsqr+16],$a2
        ldx     [%sp+LOCALS64+$Hsqr+24],$a3
        add     %sp,LOCALS64+$H,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(Hcub, Hsqr, H);
        add     %sp,LOCALS64+$Hcub,$rp

        ldx     [%sp+LOCALS64+$U1],$bi
        ldx     [%sp+LOCALS64+$Hsqr],$a0
        ldx     [%sp+LOCALS64+$Hsqr+8],$a1
        ldx     [%sp+LOCALS64+$Hsqr+16],$a2
        ldx     [%sp+LOCALS64+$Hsqr+24],$a3
        add     %sp,LOCALS64+$U1,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, U1, Hsqr);
        add     %sp,LOCALS64+$U2,$rp

        call    __ecp_nistz256_mul_by_2_vis3    ! p256_mul_by_2(Hsqr, U2);
        add     %sp,LOCALS64+$Hsqr,$rp

        add     %sp,LOCALS64+$Rsqr,$bp
        call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_x, Rsqr, Hsqr);
        add     %sp,LOCALS64+$res_x,$rp

        add     %sp,LOCALS64+$Hcub,$bp
        call    __ecp_nistz256_sub_from_vis3    !  p256_sub(res_x, res_x, Hcub);
        add     %sp,LOCALS64+$res_x,$rp

        ldx     [%sp+LOCALS64+$S1],$bi          ! forward load
        ldx     [%sp+LOCALS64+$Hcub],$a0
        ldx     [%sp+LOCALS64+$Hcub+8],$a1
        ldx     [%sp+LOCALS64+$Hcub+16],$a2
        ldx     [%sp+LOCALS64+$Hcub+24],$a3

        add     %sp,LOCALS64+$U2,$bp
        call    __ecp_nistz256_sub_morf_vis3    ! p256_sub(res_y, U2, res_x);
        add     %sp,LOCALS64+$res_y,$rp

        add     %sp,LOCALS64+$S1,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(S2, S1, Hcub);
        add     %sp,LOCALS64+$S2,$rp

        ldx     [%sp+LOCALS64+$R],$bi
        ldx     [%sp+LOCALS64+$res_y],$a0
        ldx     [%sp+LOCALS64+$res_y+8],$a1
        ldx     [%sp+LOCALS64+$res_y+16],$a2
        ldx     [%sp+LOCALS64+$res_y+24],$a3
        add     %sp,LOCALS64+$R,$bp
        call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(res_y, res_y, R);
        add     %sp,LOCALS64+$res_y,$rp

        add     %sp,LOCALS64+$S2,$bp
        call    __ecp_nistz256_sub_from_vis3    ! p256_sub(res_y, res_y, S2);
        add     %sp,LOCALS64+$res_y,$rp

        ldx     [%fp+STACK_BIAS-16],$t1         ! !in1infty
        ldx     [%fp+STACK_BIAS-8],$t2          ! !in2infty
___
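# Each iteration below emits a constant-time three-way select,
# res = in2infty ? in1 : (in1infty ? in2 : res), for one 16-byte
# chunk, converting back to uint32_t[8] on the fly.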
2707 for($i=0;$i<96;$i+=16) {                        # conditional moves
2708 $code.=<<___;
2709         ldx     [%sp+LOCALS64+$res_x+$i],$acc0  ! res
2710         ldx     [%sp+LOCALS64+$res_x+$i+8],$acc1
2711         ldx     [%sp+LOCALS64+$in2_x+$i],$acc2  ! in2
2712         ldx     [%sp+LOCALS64+$in2_x+$i+8],$acc3
2713         ldx     [%sp+LOCALS64+$in1_x+$i],$acc4  ! in1
2714         ldx     [%sp+LOCALS64+$in1_x+$i+8],$acc5
2715         movrz   $t1,$acc2,$acc0
2716         movrz   $t1,$acc3,$acc1
2717         movrz   $t2,$acc4,$acc0
2718         movrz   $t2,$acc5,$acc1
2719         srlx    $acc0,32,$acc2
2720         srlx    $acc1,32,$acc3
2721         st      $acc0,[$rp_real+$i]
2722         st      $acc2,[$rp_real+$i+4]
2723         st      $acc1,[$rp_real+$i+8]
2724         st      $acc3,[$rp_real+$i+12]
2725 ___
2726 }
2727 $code.=<<___;
2728 .Ladd_done_vis3:
2729         ret
2730         restore
2731 .type   ecp_nistz256_point_add_vis3,#function
2732 .size   ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
2733 ___
2734 }
2735 ########################################################################
2736 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
2737 #                                    const P256_POINT_AFFINE *in2);
2738 {
2739 my ($res_x,$res_y,$res_z,
2740     $in1_x,$in1_y,$in1_z,
2741     $in2_x,$in2_y,
2742     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
2743 my $Z1sqr = $S2;
2744 # above map() describes stack layout with 15 temporary
2745 # 256-bit vectors on top. Then we reserve some space for
2746 # !in1infty and !in2infty.
2747
2748 $code.=<<___;
2749 .align  32
2750 ecp_nistz256_point_add_affine_vis3:
2751         save    %sp,-STACK64_FRAME-32*15-32,%sp
2752
2753         mov     $rp,$rp_real
2754         mov     -1,$minus1
2755         mov     -2,$poly3
2756         sllx    $minus1,32,$poly1               ! 0xFFFFFFFF00000000
2757         srl     $poly3,0,$poly3                 ! 0x00000000FFFFFFFE
2758
2759         ! convert input to uint64_t[4]
2760         ld      [$bp],$a0                       ! in2_x
2761         ld      [$bp+4],$t0
2762         ld      [$bp+8],$a1
2763         ld      [$bp+12],$t1
2764         ld      [$bp+16],$a2
2765         ld      [$bp+20],$t2
2766         ld      [$bp+24],$a3
2767         ld      [$bp+28],$t3
2768         sllx    $t0,32,$t0
2769         sllx    $t1,32,$t1
2770         ld      [$bp+32],$acc0                  ! in2_y
2771         or      $a0,$t0,$a0
2772         ld      [$bp+32+4],$t0
2773         sllx    $t2,32,$t2
2774         ld      [$bp+32+8],$acc1
2775         or      $a1,$t1,$a1
2776         ld      [$bp+32+12],$t1
2777         sllx    $t3,32,$t3
2778         ld      [$bp+32+16],$acc2
2779         or      $a2,$t2,$a2
2780         ld      [$bp+32+20],$t2
2781         or      $a3,$t3,$a3
2782         ld      [$bp+32+24],$acc3
2783         sllx    $t0,32,$t0
2784         ld      [$bp+32+28],$t3
2785         sllx    $t1,32,$t1
2786         stx     $a0,[%sp+LOCALS64+$in2_x]
2787         sllx    $t2,32,$t2
2788         stx     $a1,[%sp+LOCALS64+$in2_x+8]
2789         sllx    $t3,32,$t3
2790         stx     $a2,[%sp+LOCALS64+$in2_x+16]
2791         or      $acc0,$t0,$acc0
2792         stx     $a3,[%sp+LOCALS64+$in2_x+24]
2793         or      $acc1,$t1,$acc1
2794         stx     $acc0,[%sp+LOCALS64+$in2_y]
2795         or      $acc2,$t2,$acc2
2796         stx     $acc1,[%sp+LOCALS64+$in2_y+8]
2797         or      $acc3,$t3,$acc3
2798         stx     $acc2,[%sp+LOCALS64+$in2_y+16]
2799         stx     $acc3,[%sp+LOCALS64+$in2_y+24]
2800
2801         or      $a1,$a0,$a0
2802         or      $a3,$a2,$a2
2803         or      $acc1,$acc0,$acc0
2804         or      $acc3,$acc2,$acc2
2805         or      $a2,$a0,$a0
2806         or      $acc2,$acc0,$acc0
2807         or      $acc0,$a0,$a0
2808         movrnz  $a0,-1,$a0                      ! !in2infty
2809         stx     $a0,[%fp+STACK_BIAS-8]
2810
2811         ld      [$ap],$a0                       ! in1_x
2812         ld      [$ap+4],$t0
2813         ld      [$ap+8],$a1
2814         ld      [$ap+12],$t1
2815         ld      [$ap+16],$a2
2816         ld      [$ap+20],$t2
2817         ld      [$ap+24],$a3
2818         ld      [$ap+28],$t3
2819         sllx    $t0,32,$t0
2820         sllx    $t1,32,$t1
2821         ld      [$ap+32],$acc0                  ! in1_y
2822         or      $a0,$t0,$a0
2823         ld      [$ap+32+4],$t0
2824         sllx    $t2,32,$t2
2825         ld      [$ap+32+8],$acc1
2826         or      $a1,$t1,$a1
2827         ld      [$ap+32+12],$t1
2828         sllx    $t3,32,$t3
2829         ld      [$ap+32+16],$acc2
2830         or      $a2,$t2,$a2
2831         ld      [$ap+32+20],$t2
2832         or      $a3,$t3,$a3
2833         ld      [$ap+32+24],$acc3
2834         sllx    $t0,32,$t0
2835         ld      [$ap+32+28],$t3
2836         sllx    $t1,32,$t1
2837         stx     $a0,[%sp+LOCALS64+$in1_x]
2838         sllx    $t2,32,$t2
2839         stx     $a1,[%sp+LOCALS64+$in1_x+8]
2840         sllx    $t3,32,$t3
2841         stx     $a2,[%sp+LOCALS64+$in1_x+16]
2842         or      $acc0,$t0,$acc0
2843         stx     $a3,[%sp+LOCALS64+$in1_x+24]
2844         or      $acc1,$t1,$acc1
2845         stx     $acc0,[%sp+LOCALS64+$in1_y]
2846         or      $acc2,$t2,$acc2
2847         stx     $acc1,[%sp+LOCALS64+$in1_y+8]
2848         or      $acc3,$t3,$acc3
2849         stx     $acc2,[%sp+LOCALS64+$in1_y+16]
2850         stx     $acc3,[%sp+LOCALS64+$in1_y+24]
2851
2852         or      $a1,$a0,$a0
2853         or      $a3,$a2,$a2
2854         or      $acc1,$acc0,$acc0
2855         or      $acc3,$acc2,$acc2
2856         or      $a2,$a0,$a0
2857         or      $acc2,$acc0,$acc0
2858         or      $acc0,$a0,$a0
2859         movrnz  $a0,-1,$a0                      ! !in1infty
2860         stx     $a0,[%fp+STACK_BIAS-16]
2861
2862         ld      [$ap+64],$a0                    ! in1_z
2863         ld      [$ap+64+4],$t0
2864         ld      [$ap+64+8],$a1
2865         ld      [$ap+64+12],$t1
2866         ld      [$ap+64+16],$a2
2867         ld      [$ap+64+20],$t2
2868         ld      [$ap+64+24],$a3
2869         ld      [$ap+64+28],$t3
2870         sllx    $t0,32,$t0
2871         sllx    $t1,32,$t1
2872         or      $a0,$t0,$a0
2873         sllx    $t2,32,$t2
2874         or      $a1,$t1,$a1
2875         sllx    $t3,32,$t3
2876         stx     $a0,[%sp+LOCALS64+$in1_z]
2877         or      $a2,$t2,$a2
2878         stx     $a1,[%sp+LOCALS64+$in1_z+8]
2879         or      $a3,$t3,$a3
2880         stx     $a2,[%sp+LOCALS64+$in1_z+16]
2881         stx     $a3,[%sp+LOCALS64+$in1_z+24]
2882
2883         call    __ecp_nistz256_sqr_mont_vis3    ! p256_sqr_mont(Z1sqr, in1_z);
2884         add     %sp,LOCALS64+$Z1sqr,$rp
2885
2886         ldx     [%sp+LOCALS64+$in2_x],$bi
2887         mov     $acc0,$a0
2888         mov     $acc1,$a1
2889         mov     $acc2,$a2
2890         mov     $acc3,$a3
2891         add     %sp,LOCALS64+$in2_x,$bp
2892         call    __ecp_nistz256_mul_mont_vis3    ! p256_mul_mont(U2, Z1sqr, in2_x);
2893         add     %sp,LOCALS64+$U2,$rp
2894
	ldx	[%sp+LOCALS64+$Z1sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3

	add	%sp,LOCALS64+$in1_x,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS64+$H,$rp

	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$a0		! forward load
	ldx	[%sp+LOCALS64+$H+8],$a1
	ldx	[%sp+LOCALS64+$H+16],$a2
	ldx	[%sp+LOCALS64+$H+24],$a3

	add	%sp,LOCALS64+$in1_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS64+$R,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS64+$Hsqr,$rp

	ldx	[%sp+LOCALS64+$R],$a0
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS64+$Rsqr,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS64+$Hcub,$rp

	ldx	[%sp+LOCALS64+$Hsqr],$bi
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3
	add	%sp,LOCALS64+$Hsqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS64+$U2,$rp

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS64+$Hsqr,$rp

	add	%sp,LOCALS64+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$Hcub],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp

	ldx	[%fp+STACK_BIAS-16],$t1		! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2		! !in2infty
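	! materialize the address of .Lone_mont_vis3 position-independently:
	! "call .+8" deposits the address of the call instruction in %o7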
1:	call	.+8
	add	%o7,.Lone_mont_vis3-1b,$bp
___
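# Select the result: if in1 was the point at infinity return in2, if
# in2 was the point at infinity return in1, otherwise return the
# freshly computed sum. $t1/$t2 hold the !in1infty/!in2infty masks.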
for($i=0;$i<64;$i+=16) {			# conditional moves
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0	! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2	! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4	! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
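# z coordinate: in2 is an affine point with implicit z=1, so its "z"
# is taken from .Lone_mont_vis3 (one in Montgomery form) through $bp.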
for(;$i<96;$i+=16) {
$code.=<<___;
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0	! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[$bp+$i-64],$acc2		! "in2"
	ldx	[$bp+$i-64+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4	! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
___
}
$code.=<<___;
	ret
	restore
.type	ecp_nistz256_point_add_affine_vis3,#function
.size	ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
.align	64
.Lone_mont_vis3:
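! the value 1 in Montgomery representation, i.e. 2^256 mod P-256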
.long	0x00000000,0x00000001, 0xffffffff,0x00000000
.long	0xffffffff,0xffffffff, 0x00000000,0xfffffffe
.align	64
___
}						}}}

# The purpose of these subroutines is to explicitly encode VIS
# instructions, so that the module can be compiled without specifying
# VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
# -xarch=v9a. The idea is to keep open the option of producing a
# "universal" binary in which the program detects at run-time whether
# the current CPU is VIS-capable.
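#
# As a worked example (not part of the original code path): feeding
# "addxc %o1,%o2,%o3" to unvis3() below maps the registers through
# %bias (%o1->9, %o2->10, %o3->11), picks opf 0x011 from %visopf and
# emits
#
#	.word	0x97b2422a !addxc	%o1,%o2,%o3
#
# since 0x81b00000|11<<25|9<<14|0x011<<5|10 == 0x97b2422a.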
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}

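# Post-process $code: evaluate `...` expressions and rewrite the VIS3
# mnemonics (umulxhi/addxc/addxccc) as .word directives via unvis3().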
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unvis3($1,$2,$3,$4)
	 /ge;

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";