1 #! /usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # ECP_NISTZ256 module for PPC64.
11 #
12 # August 2016.
13 #
14 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
15 # http://eprint.iacr.org/2013/816.
16 #
17 #                       with/without -DECP_NISTZ256_ASM
18 # POWER7                +260-530%
19 # POWER8                +220-340%
20
21 $flavour = shift;
22 while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
23
24 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
25 ( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
26 ( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
27 die "can't locate ppc-xlate.pl";
28
29 open OUT,"| \"$^X\" $xlate $flavour $output";
30 *STDOUT=*OUT;
31
32 my $sp="r1";
33
34 {
35 my ($rp,$ap,$bp,$bi,$acc0,$acc1,$acc2,$acc3,$poly1,$poly3,
36     $acc4,$acc5,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3) =
37     map("r$_",(3..12,22..31));
38
39 my ($acc6,$acc7)=($bp,$bi);     # used in __ecp_nistz256_sqr_mont
40
41 $code.=<<___;
42 .machine        "any"
43 .text
44 ___
45 ########################################################################
46 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
47 #
48 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
49 open TABLE,"<ecp_nistz256_table.c"              or
50 open TABLE,"<${dir}../ecp_nistz256_table.c"     or
51 die "failed to open ecp_nistz256_table.c:",$!;
52
53 use integer;
54
55 foreach(<TABLE>) {
56         s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
57 }
58 close TABLE;
59
60 # See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
61 # 64*16*37-1 is used because $#arr returns the last valid index of
62 # @arr, not the number of elements.
63 die "insane number of elements" if ($#arr != 64*16*37-1);
64
65 $code.=<<___;
66 .type   ecp_nistz256_precomputed,\@object
67 .globl  ecp_nistz256_precomputed
68 .align  12
69 ecp_nistz256_precomputed:
70 ___
71 ########################################################################
72 # This conversion scatters each P256_POINT_AFFINE into individual bytes
73 # at 64-byte intervals, similar to
74 #       1111222233334444
75 #       1234123412341234
76 for(1..37) {
77         @tbl = splice(@arr,0,64*16);
78         for($i=0;$i<64;$i++) {
79                 undef @line;
80                 for($j=0;$j<64;$j++) {
81                         push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
82                 }
83                 $code.=".byte\t";
84                 $code.=join(',',map { sprintf "0x%02x",$_} @line);
85                 $code.="\n";
86         }
87 }
88
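# The loop above transposes each 64-entry sub-table: byte $i of every
# P256_POINT_AFFINE lands in output row $i, so ecp_nistz_gather_w7 can
# fetch one byte per entry at a fixed 64-byte stride. A commented-out,
# stand-alone toy sketch of the same per-byte scatter (illustrative data
# only, not part of the generator):
#
#     my @tbl = (0x03020100, 0x13121110);        # two 4-byte "entries"
#     for my $i (0..3) {                         # byte index within entry
#         my @row = map { ($tbl[$_] >> ($i*8)) & 0xff } 0..$#tbl;
#         print join(',', map { sprintf "0x%02x",$_ } @row), "\n";
#     }
#     # prints 0x00,0x10 / 0x01,0x11 / 0x02,0x12 / 0x03,0x13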
89 $code.=<<___;
90 .size   ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
91 .asciz  "ECP_NISTZ256 for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
92
93 # void  ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
94 #                                            const BN_ULONG x2[4]);
95 .globl  ecp_nistz256_mul_mont
96 .align  5
97 ecp_nistz256_mul_mont:
98         stdu    $sp,-128($sp)
99         mflr    r0
100         std     r22,48($sp)
101         std     r23,56($sp)
102         std     r24,64($sp)
103         std     r25,72($sp)
104         std     r26,80($sp)
105         std     r27,88($sp)
106         std     r28,96($sp)
107         std     r29,104($sp)
108         std     r30,112($sp)
109         std     r31,120($sp)
110
111         ld      $a0,0($ap)
112         ld      $bi,0($bp)
113         ld      $a1,8($ap)
114         ld      $a2,16($ap)
115         ld      $a3,24($ap)
116
117         li      $poly1,-1
118         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
119         li      $poly3,1
120         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
121
122         bl      __ecp_nistz256_mul_mont
123
124         mtlr    r0
125         ld      r22,48($sp)
126         ld      r23,56($sp)
127         ld      r24,64($sp)
128         ld      r25,72($sp)
129         ld      r26,80($sp)
130         ld      r27,88($sp)
131         ld      r28,96($sp)
132         ld      r29,104($sp)
133         ld      r30,112($sp)
134         ld      r31,120($sp)
135         addi    $sp,$sp,128
136         blr
137         .long   0
138         .byte   0,12,4,0,0x80,10,3,0
139         .long   0
140 .size   ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
141
142 # void  ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
143 .globl  ecp_nistz256_sqr_mont
144 .align  4
145 ecp_nistz256_sqr_mont:
146         stdu    $sp,-128($sp)
147         mflr    r0
148         std     r22,48($sp)
149         std     r23,56($sp)
150         std     r24,64($sp)
151         std     r25,72($sp)
152         std     r26,80($sp)
153         std     r27,88($sp)
154         std     r28,96($sp)
155         std     r29,104($sp)
156         std     r30,112($sp)
157         std     r31,120($sp)
158
159         ld      $a0,0($ap)
160         ld      $a1,8($ap)
161         ld      $a2,16($ap)
162         ld      $a3,24($ap)
163
164         li      $poly1,-1
165         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
166         li      $poly3,1
167         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
168
169         bl      __ecp_nistz256_sqr_mont
170
171         mtlr    r0
172         ld      r22,48($sp)
173         ld      r23,56($sp)
174         ld      r24,64($sp)
175         ld      r25,72($sp)
176         ld      r26,80($sp)
177         ld      r27,88($sp)
178         ld      r28,96($sp)
179         ld      r29,104($sp)
180         ld      r30,112($sp)
181         ld      r31,120($sp)
182         addi    $sp,$sp,128
183         blr
184         .long   0
185         .byte   0,12,4,0,0x80,10,2,0
186         .long   0
187 .size   ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
188
189 # void  ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
190 #                                       const BN_ULONG x2[4]);
191 .globl  ecp_nistz256_add
192 .align  4
193 ecp_nistz256_add:
194         stdu    $sp,-128($sp)
195         mflr    r0
196         std     r28,96($sp)
197         std     r29,104($sp)
198         std     r30,112($sp)
199         std     r31,120($sp)
200
201         ld      $acc0,0($ap)
202         ld      $t0,  0($bp)
203         ld      $acc1,8($ap)
204         ld      $t1,  8($bp)
205         ld      $acc2,16($ap)
206         ld      $t2,  16($bp)
207         ld      $acc3,24($ap)
208         ld      $t3,  24($bp)
209
210         li      $poly1,-1
211         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
212         li      $poly3,1
213         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
214
215         bl      __ecp_nistz256_add
216
217         mtlr    r0
218         ld      r28,96($sp)
219         ld      r29,104($sp)
220         ld      r30,112($sp)
221         ld      r31,120($sp)
222         addi    $sp,$sp,128
223         blr
224         .long   0
225         .byte   0,12,4,0,0x80,4,3,0
226         .long   0
227 .size   ecp_nistz256_add,.-ecp_nistz256_add
228
229 # void  ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
230 .globl  ecp_nistz256_div_by_2
231 .align  4
232 ecp_nistz256_div_by_2:
233         stdu    $sp,-128($sp)
234         mflr    r0
235         std     r28,96($sp)
236         std     r29,104($sp)
237         std     r30,112($sp)
238         std     r31,120($sp)
239
240         ld      $acc0,0($ap)
241         ld      $acc1,8($ap)
242         ld      $acc2,16($ap)
243         ld      $acc3,24($ap)
244
245         li      $poly1,-1
246         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
247         li      $poly3,1
248         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
249
250         bl      __ecp_nistz256_div_by_2
251
252         mtlr    r0
253         ld      r28,96($sp)
254         ld      r29,104($sp)
255         ld      r30,112($sp)
256         ld      r31,120($sp)
257         addi    $sp,$sp,128
258         blr
259         .long   0
260         .byte   0,12,4,0,0x80,4,2,0
261         .long   0
262 .size   ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
263
264 # void  ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
265 .globl  ecp_nistz256_mul_by_2
266 .align  4
267 ecp_nistz256_mul_by_2:
268         stdu    $sp,-128($sp)
269         mflr    r0
270         std     r28,96($sp)
271         std     r29,104($sp)
272         std     r30,112($sp)
273         std     r31,120($sp)
274
275         ld      $acc0,0($ap)
276         ld      $acc1,8($ap)
277         ld      $acc2,16($ap)
278         ld      $acc3,24($ap)
279
280         mr      $t0,$acc0
281         mr      $t1,$acc1
282         mr      $t2,$acc2
283         mr      $t3,$acc3
284
285         li      $poly1,-1
286         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
287         li      $poly3,1
288         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
289
290         bl      __ecp_nistz256_add      # ret = a+a     // 2*a
291
292         mtlr    r0
293         ld      r28,96($sp)
294         ld      r29,104($sp)
295         ld      r30,112($sp)
296         ld      r31,120($sp)
297         addi    $sp,$sp,128
298         blr
299         .long   0
300         .byte   0,12,4,0,0x80,4,3,0
301         .long   0
302 .size   ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
303
304 # void  ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
305 .globl  ecp_nistz256_mul_by_3
306 .align  4
307 ecp_nistz256_mul_by_3:
308         stdu    $sp,-128($sp)
309         mflr    r0
310         std     r28,96($sp)
311         std     r29,104($sp)
312         std     r30,112($sp)
313         std     r31,120($sp)
314
315         ld      $acc0,0($ap)
316         ld      $acc1,8($ap)
317         ld      $acc2,16($ap)
318         ld      $acc3,24($ap)
319
320         mr      $t0,$acc0
321         std     $acc0,64($sp)
322         mr      $t1,$acc1
323         std     $acc1,72($sp)
324         mr      $t2,$acc2
325         std     $acc2,80($sp)
326         mr      $t3,$acc3
327         std     $acc3,88($sp)
328
329         li      $poly1,-1
330         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
331         li      $poly3,1
332         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
333
334         bl      __ecp_nistz256_add      # ret = a+a     // 2*a
335
336         ld      $t0,64($sp)
337         ld      $t1,72($sp)
338         ld      $t2,80($sp)
339         ld      $t3,88($sp)
340
341         bl      __ecp_nistz256_add      # ret += a      // 2*a+a=3*a
342
343         mtlr    r0
344         ld      r28,96($sp)
345         ld      r29,104($sp)
346         ld      r30,112($sp)
347         ld      r31,120($sp)
348         addi    $sp,$sp,128
349         blr
350         .long   0
351         .byte   0,12,4,0,0x80,4,2,0
352         .long   0
353 .size   ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
354
355 # void  ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
356 #                                       const BN_ULONG x2[4]);
357 .globl  ecp_nistz256_sub
358 .align  4
359 ecp_nistz256_sub:
360         stdu    $sp,-128($sp)
361         mflr    r0
362         std     r28,96($sp)
363         std     r29,104($sp)
364         std     r30,112($sp)
365         std     r31,120($sp)
366
367         ld      $acc0,0($ap)
368         ld      $acc1,8($ap)
369         ld      $acc2,16($ap)
370         ld      $acc3,24($ap)
371
372         li      $poly1,-1
373         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
374         li      $poly3,1
375         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
376
377         bl      __ecp_nistz256_sub_from
378
379         mtlr    r0
380         ld      r28,96($sp)
381         ld      r29,104($sp)
382         ld      r30,112($sp)
383         ld      r31,120($sp)
384         addi    $sp,$sp,128
385         blr
386         .long   0
387         .byte   0,12,4,0,0x80,4,3,0
388         .long   0
389 .size   ecp_nistz256_sub,.-ecp_nistz256_sub
390
391 # void  ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
392 .globl  ecp_nistz256_neg
393 .align  4
394 ecp_nistz256_neg:
395         stdu    $sp,-128($sp)
396         mflr    r0
397         std     r28,96($sp)
398         std     r29,104($sp)
399         std     r30,112($sp)
400         std     r31,120($sp)
401
402         mr      $bp,$ap
403         li      $acc0,0
404         li      $acc1,0
405         li      $acc2,0
406         li      $acc3,0
407
408         li      $poly1,-1
409         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
410         li      $poly3,1
411         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
412
413         bl      __ecp_nistz256_sub_from
414
415         mtlr    r0
416         ld      r28,96($sp)
417         ld      r29,104($sp)
418         ld      r30,112($sp)
419         ld      r31,120($sp)
420         addi    $sp,$sp,128
421         blr
422         .long   0
423         .byte   0,12,4,0,0x80,4,2,0
424         .long   0
425 .size   ecp_nistz256_neg,.-ecp_nistz256_neg
426
427 # Note that __ecp_nistz256_mul_mont expects the a[0-3] input pre-loaded
428 # in $a0-$a3 and b[0] in $bi.
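# The routine below is a word-by-word Montgomery multiplication: each of
# the four iterations accumulates a[0-3]*b[i] and then folds in one
# reduction step. Because the least significant limb of the modulus is
# 0xffffffffffffffff, i.e. -1 mod 2^64, we have -p^-1 = 1 mod 2^64, so
# the Montgomery "magic" digit is simply the accumulator's low limb.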
429 .type   __ecp_nistz256_mul_mont,\@function
430 .align  4
431 __ecp_nistz256_mul_mont:
432         mulld   $acc0,$a0,$bi           # a[0]*b[0]
433         mulhdu  $t0,$a0,$bi
434
435         mulld   $acc1,$a1,$bi           # a[1]*b[0]
436         mulhdu  $t1,$a1,$bi
437
438         mulld   $acc2,$a2,$bi           # a[2]*b[0]
439         mulhdu  $t2,$a2,$bi
440
441         mulld   $acc3,$a3,$bi           # a[3]*b[0]
442         mulhdu  $t3,$a3,$bi
443         ld      $bi,8($bp)              # b[1]
444
445         addc    $acc1,$acc1,$t0         # accumulate high parts of multiplication
446          sldi   $t0,$acc0,32
447         adde    $acc2,$acc2,$t1
448          srdi   $t1,$acc0,32
449         adde    $acc3,$acc3,$t2
450         addze   $acc4,$t3
451         li      $acc5,0
452 ___
453 for($i=1;$i<4;$i++) {
454         ################################################################
455         # A reduction iteration is normally performed by accumulating
456         # the result of multiplying the modulus by the "magic" digit,
457         # omitting the least significant word, which is guaranteed to
458         # be 0. But thanks to the special form of the modulus, with the
459         # "magic" digit equal to the least significant word itself, it
460         # can be performed with additions and subtractions alone. Indeed:
461         #
462         #            ffff0001.00000000.0000ffff.ffffffff
463         # *                                     abcdefgh
464         # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
465         #
466         # Now, observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we can
467         # rewrite the above as:
468         #
469         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
470         # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
471         # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
472         #
473         # or marking redundant operations:
474         #
475         #   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
476         # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
477         # - 0000abcd.efgh0000.--------.--------.--------
478
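# Net effect: acc0*(p+1) >> 64 == (acc0<<32) + acc0*0xffffffff00000001*2^128,
# which is exactly what the limb additions below implement. A commented-out,
# stand-alone Math::BigInt sketch checking that identity (illustration only,
# not part of the generator):
#
#     use Math::BigInt;
#     my $p  = Math::BigInt->from_hex("ffffffff00000001"."0"x16 .
#                                     "00000000ffffffff"."f"x16);
#     my $l3 = Math::BigInt->from_hex("ffffffff00000001");  # top limb of p
#     my $x  = Math::BigInt->from_hex("abcdef0123456789");  # any 64-bit limb
#     my $lhs = ($x * ($p + 1)) >> 64;       # acc += acc0*p, drop acc[0]
#     my $rhs = ($x << 32) + (($l3 * $x) << 128);
#     die "reduction identity broken" if $lhs != $rhs;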
479 $code.=<<___;
480         subfc   $t2,$t0,$acc0           # "*0xffff0001"
481         subfe   $t3,$t1,$acc0
482         addc    $acc0,$acc1,$t0         # +=acc[0]<<96 and omit acc[0]
483         adde    $acc1,$acc2,$t1
484         adde    $acc2,$acc3,$t2         # +=acc[0]*0xffff0001
485         adde    $acc3,$acc4,$t3
486         addze   $acc4,$acc5
487
488         mulld   $t0,$a0,$bi             # lo(a[0]*b[i])
489         mulld   $t1,$a1,$bi             # lo(a[1]*b[i])
490         mulld   $t2,$a2,$bi             # lo(a[2]*b[i])
491         mulld   $t3,$a3,$bi             # lo(a[3]*b[i])
492         addc    $acc0,$acc0,$t0         # accumulate low parts of multiplication
493          mulhdu $t0,$a0,$bi             # hi(a[0]*b[i])
494         adde    $acc1,$acc1,$t1
495          mulhdu $t1,$a1,$bi             # hi(a[1]*b[i])
496         adde    $acc2,$acc2,$t2
497          mulhdu $t2,$a2,$bi             # hi(a[2]*b[i])
498         adde    $acc3,$acc3,$t3
499          mulhdu $t3,$a3,$bi             # hi(a[3]*b[i])
500         addze   $acc4,$acc4
501 ___
502 $code.=<<___    if ($i<3);
503         ld      $bi,8*($i+1)($bp)       # b[$i+1]
504 ___
505 $code.=<<___;
506         addc    $acc1,$acc1,$t0         # accumulate high parts of multiplication
507          sldi   $t0,$acc0,32
508         adde    $acc2,$acc2,$t1
509          srdi   $t1,$acc0,32
510         adde    $acc3,$acc3,$t2
511         adde    $acc4,$acc4,$t3
512         li      $acc5,0
513         addze   $acc5,$acc5
514 ___
515 }
516 $code.=<<___;
517         # last reduction
518         subfc   $t2,$t0,$acc0           # "*0xffff0001"
519         subfe   $t3,$t1,$acc0
520         addc    $acc0,$acc1,$t0         # +=acc[0]<<96 and omit acc[0]
521         adde    $acc1,$acc2,$t1
522         adde    $acc2,$acc3,$t2         # +=acc[0]*0xffff0001
523         adde    $acc3,$acc4,$t3
524         addze   $acc4,$acc5
525
526         li      $t2,0
527         addic   $acc0,$acc0,1           # ret -= modulus
528         subfe   $acc1,$poly1,$acc1
529         subfe   $acc2,$t2,$acc2
530         subfe   $acc3,$poly3,$acc3
531         subfe   $acc4,$t2,$acc4
532
533         addc    $acc0,$acc0,$acc4       # ret += modulus if borrow
534         and     $t1,$poly1,$acc4
535         and     $t3,$poly3,$acc4
536         adde    $acc1,$acc1,$t1
537         addze   $acc2,$acc2
538         adde    $acc3,$acc3,$t3
539
540         std     $acc0,0($rp)
541         std     $acc1,8($rp)
542         std     $acc2,16($rp)
543         std     $acc3,24($rp)
544
545         blr
546         .long   0
547         .byte   0,12,0x14,0,0,0,1,0
548         .long   0
549 .size   __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
550
551 # Note that __ecp_nistz256_sqr_mont expects the a[0-3] input pre-loaded
552 # in $a0-$a3.
553 .type   __ecp_nistz256_sqr_mont,\@function
554 .align  4
555 __ecp_nistz256_sqr_mont:
556         ################################################################
557         #  |  |  |  |  |  |a1*a0|  |
558         #  |  |  |  |  |a2*a0|  |  |
559         #  |  |a3*a2|a3*a0|  |  |  |
560         #  |  |  |  |a2*a1|  |  |  |
561         #  |  |  |a3*a1|  |  |  |  |
562         # *|  |  |  |  |  |  |  | 2|
563         # +|a3*a3|a2*a2|a1*a1|a0*a0|
564         #  |--+--+--+--+--+--+--+--|
565 #  |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax stands for $accx
566         #
567 #  The "can't overflow" comments below mark carries into the high
568 #  part of a multiplication result, which cannot overflow because
569 #  that high part can never be all ones.
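#  In other words, the routine computes
#      a^2 = 2*sum_{i<j} a[i]*a[j]*2^(64*(i+j)) + sum_i a[i]^2*2^(128*i):
#  the off-diagonal products are gathered once, doubled, and the diagonal
#  squares added in, before the same four reduction steps as in
#  multiplication.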
570
571         mulld   $acc1,$a1,$a0           # a[1]*a[0]
572         mulhdu  $t1,$a1,$a0
573         mulld   $acc2,$a2,$a0           # a[2]*a[0]
574         mulhdu  $t2,$a2,$a0
575         mulld   $acc3,$a3,$a0           # a[3]*a[0]
576         mulhdu  $acc4,$a3,$a0
577
578         addc    $acc2,$acc2,$t1         # accumulate high parts of multiplication
579          mulld  $t0,$a2,$a1             # a[2]*a[1]
580          mulhdu $t1,$a2,$a1
581         adde    $acc3,$acc3,$t2
582          mulld  $t2,$a3,$a1             # a[3]*a[1]
583          mulhdu $t3,$a3,$a1
584         addze   $acc4,$acc4             # can't overflow
585
586         mulld   $acc5,$a3,$a2           # a[3]*a[2]
587         mulhdu  $acc6,$a3,$a2
588
589         addc    $t1,$t1,$t2             # accumulate high parts of multiplication
590         addze   $t2,$t3                 # can't overflow
591
592         addc    $acc3,$acc3,$t0         # accumulate low parts of multiplication
593         adde    $acc4,$acc4,$t1
594         adde    $acc5,$acc5,$t2
595         addze   $acc6,$acc6             # can't overflow
596
597         addc    $acc1,$acc1,$acc1       # acc[1-6]*=2
598         adde    $acc2,$acc2,$acc2
599         adde    $acc3,$acc3,$acc3
600         adde    $acc4,$acc4,$acc4
601         adde    $acc5,$acc5,$acc5
602         adde    $acc6,$acc6,$acc6
603         li      $acc7,0
604         addze   $acc7,$acc7
605
606         mulld   $acc0,$a0,$a0           # a[0]*a[0]
607         mulhdu  $a0,$a0,$a0
608         mulld   $t1,$a1,$a1             # a[1]*a[1]
609         mulhdu  $a1,$a1,$a1
610         mulld   $t2,$a2,$a2             # a[2]*a[2]
611         mulhdu  $a2,$a2,$a2
612         mulld   $t3,$a3,$a3             # a[3]*a[3]
613         mulhdu  $a3,$a3,$a3
614         addc    $acc1,$acc1,$a0         # +a[i]*a[i]
615          sldi   $t0,$acc0,32
616         adde    $acc2,$acc2,$t1
617          srdi   $t1,$acc0,32
618         adde    $acc3,$acc3,$a1
619         adde    $acc4,$acc4,$t2
620         adde    $acc5,$acc5,$a2
621         adde    $acc6,$acc6,$t3
622         adde    $acc7,$acc7,$a3
623 ___
624 for($i=0;$i<3;$i++) {                   # reductions, see commentary in
625                                         # multiplication for details
626 $code.=<<___;
627         subfc   $t2,$t0,$acc0           # "*0xffff0001"
628         subfe   $t3,$t1,$acc0
629         addc    $acc0,$acc1,$t0         # +=acc[0]<<96 and omit acc[0]
630          sldi   $t0,$acc0,32
631         adde    $acc1,$acc2,$t1
632          srdi   $t1,$acc0,32
633         adde    $acc2,$acc3,$t2         # +=acc[0]*0xffff0001
634         addze   $acc3,$t3               # can't overflow
635 ___
636 }
637 $code.=<<___;
638         subfc   $t2,$t0,$acc0           # "*0xffff0001"
639         subfe   $t3,$t1,$acc0
640         addc    $acc0,$acc1,$t0         # +=acc[0]<<96 and omit acc[0]
641         adde    $acc1,$acc2,$t1
642         adde    $acc2,$acc3,$t2         # +=acc[0]*0xffff0001
643         addze   $acc3,$t3               # can't overflow
644
645         addc    $acc0,$acc0,$acc4       # accumulate upper half
646         adde    $acc1,$acc1,$acc5
647         adde    $acc2,$acc2,$acc6
648         adde    $acc3,$acc3,$acc7
649         li      $t2,0
650         addze   $acc4,$t2
651
652         addic   $acc0,$acc0,1           # ret -= modulus
653         subfe   $acc1,$poly1,$acc1
654         subfe   $acc2,$t2,$acc2
655         subfe   $acc3,$poly3,$acc3
656         subfe   $acc4,$t2,$acc4
657
658         addc    $acc0,$acc0,$acc4       # ret += modulus if borrow
659         and     $t1,$poly1,$acc4
660         and     $t3,$poly3,$acc4
661         adde    $acc1,$acc1,$t1
662         addze   $acc2,$acc2
663         adde    $acc3,$acc3,$t3
664
665         std     $acc0,0($rp)
666         std     $acc1,8($rp)
667         std     $acc2,16($rp)
668         std     $acc3,24($rp)
669
670         blr
671         .long   0
672         .byte   0,12,0x14,0,0,0,1,0
673         .long   0
674 .size   __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
675
676 # Note that __ecp_nistz256_add expects both input vectors pre-loaded in
677 # $acc0-$acc3 and $t0-$t3. This is because it is used in multiple
678 # contexts, e.g. in multiplication by 2 and 3.
679 .type   __ecp_nistz256_add,\@function
680 .align  4
681 __ecp_nistz256_add:
682         addc    $acc0,$acc0,$t0         # ret = a+b
683         adde    $acc1,$acc1,$t1
684         adde    $acc2,$acc2,$t2
685         li      $t2,0
686         adde    $acc3,$acc3,$t3
687         addze   $t0,$t2
688
689         # if a+b >= modulus, subtract modulus
690         #
691         # But since comparison implies subtraction, we subtract the
692         # modulus and then add it back if the subtraction borrowed.
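        # After the subtraction $t0 is 0 (no borrow) or -1 (borrow), so
        # the sequence below adds back "modulus & $t0": nothing in the
        # first case, the full modulus in the second; branch-free, hence
        # constant-time.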
693
694         subic   $acc0,$acc0,-1
695         subfe   $acc1,$poly1,$acc1
696         subfe   $acc2,$t2,$acc2
697         subfe   $acc3,$poly3,$acc3
698         subfe   $t0,$t2,$t0
699
700         addc    $acc0,$acc0,$t0
701         and     $t1,$poly1,$t0
702         and     $t3,$poly3,$t0
703         adde    $acc1,$acc1,$t1
704         addze   $acc2,$acc2
705         adde    $acc3,$acc3,$t3
706
707         std     $acc0,0($rp)
708         std     $acc1,8($rp)
709         std     $acc2,16($rp)
710         std     $acc3,24($rp)
711
712         blr
713         .long   0
714         .byte   0,12,0x14,0,0,0,3,0
715         .long   0
716 .size   __ecp_nistz256_add,.-__ecp_nistz256_add
717
718 .type   __ecp_nistz256_sub_from,\@function
719 .align  4
720 __ecp_nistz256_sub_from:
721         ld      $t0,0($bp)
722         ld      $t1,8($bp)
723         ld      $t2,16($bp)
724         ld      $t3,24($bp)
725         subfc   $acc0,$t0,$acc0         # ret = a-b
726         subfe   $acc1,$t1,$acc1
727         subfe   $acc2,$t2,$acc2
728         subfe   $acc3,$t3,$acc3
729         subfe   $t0,$t0,$t0             # t0 = borrow ? -1 : 0
730
731         # if a-b borrowed, add modulus
732
733         addc    $acc0,$acc0,$t0         # ret -= modulus & t0
734         and     $t1,$poly1,$t0
735         and     $t3,$poly3,$t0
736         adde    $acc1,$acc1,$t1
737         addze   $acc2,$acc2
738         adde    $acc3,$acc3,$t3
739
740         std     $acc0,0($rp)
741         std     $acc1,8($rp)
742         std     $acc2,16($rp)
743         std     $acc3,24($rp)
744
745         blr
746         .long   0
747         .byte   0,12,0x14,0,0,0,3,0
748         .long   0
749 .size   __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
750
751 .type   __ecp_nistz256_sub_morf,\@function
752 .align  4
753 __ecp_nistz256_sub_morf:
754         ld      $t0,0($bp)
755         ld      $t1,8($bp)
756         ld      $t2,16($bp)
757         ld      $t3,24($bp)
758         subfc   $acc0,$acc0,$t0         # ret = b-a
759         subfe   $acc1,$acc1,$t1
760         subfe   $acc2,$acc2,$t2
761         subfe   $acc3,$acc3,$t3
762         subfe   $t0,$t0,$t0             # t0 = borrow ? -1 : 0
763
764         # if b-a borrowed, add modulus
765
766         addc    $acc0,$acc0,$t0         # ret -= modulus & t0
767         and     $t1,$poly1,$t0
768         and     $t3,$poly3,$t0
769         adde    $acc1,$acc1,$t1
770         addze   $acc2,$acc2
771         adde    $acc3,$acc3,$t3
772
773         std     $acc0,0($rp)
774         std     $acc1,8($rp)
775         std     $acc2,16($rp)
776         std     $acc3,24($rp)
777
778         blr
779         .long   0
780         .byte   0,12,0x14,0,0,0,3,0
781         .long   0
782 .size   __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
783
784 .type   __ecp_nistz256_div_by_2,\@function
785 .align  4
786 __ecp_nistz256_div_by_2:
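        # Halving mod p: unconditionally add the (odd) modulus, making
        # the sum even when a is odd; if a was even, the masked
        # subtraction below takes the modulus out again. The 257-bit
        # result, with its carry kept in $ap, is then shifted right by
        # one bit.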
787         andi.   $t0,$acc0,1
788         addic   $acc0,$acc0,-1          # a += modulus
789          neg    $t0,$t0
790         adde    $acc1,$acc1,$poly1
791          not    $t0,$t0
792         addze   $acc2,$acc2
793          li     $t2,0
794         adde    $acc3,$acc3,$poly3
795          and    $t1,$poly1,$t0
796         addze   $ap,$t2                 # ap = carry
797          and    $t3,$poly3,$t0
798
799         subfc   $acc0,$t0,$acc0         # a -= modulus if a was even
800         subfe   $acc1,$t1,$acc1
801         subfe   $acc2,$t2,$acc2
802         subfe   $acc3,$t3,$acc3
803         subfe   $ap,  $t2,$ap
804
805         srdi    $acc0,$acc0,1
806         sldi    $t0,$acc1,63
807         srdi    $acc1,$acc1,1
808         sldi    $t1,$acc2,63
809         srdi    $acc2,$acc2,1
810         sldi    $t2,$acc3,63
811         srdi    $acc3,$acc3,1
812         sldi    $t3,$ap,63
813         or      $acc0,$acc0,$t0
814         or      $acc1,$acc1,$t1
815         or      $acc2,$acc2,$t2
816         or      $acc3,$acc3,$t3
817
818         std     $acc0,0($rp)
819         std     $acc1,8($rp)
820         std     $acc2,16($rp)
821         std     $acc3,24($rp)
822
823         blr
824         .long   0
825         .byte   0,12,0x14,0,0,0,1,0
826         .long   0
827 .size   __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
828 ___
829 ########################################################################
830 # The following subroutines are "literal" implementations of those
831 # found in ecp_nistz256.c
832 #
833 ########################################################################
834 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
835 #
836 if (1) {
837 my $FRAME=64+32*4+12*8;
838 my ($S,$M,$Zsqr,$tmp0)=map(64+32*$_,(0..3));
839 # above map() describes stack layout with 4 temporary
840 # 256-bit vectors on top.
841 my ($rp_real,$ap_real) = map("r$_",(20,21));
842
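# The call sequence below evaluates the usual Jacobian doubling formulas,
# mirroring ecp_nistz256_point_double in ecp_nistz256.c:
#
#       S  = 4*X*Y^2,           M  = 3*(X + Z^2)*(X - Z^2),
#       X' = M^2 - 2*S,         Y' = M*(S - X') - 8*Y^4,
#       Z' = 2*Y*Z,
#
# with S, M, Zsqr and tmp0 held in the four stack vectors mapped above.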
843 $code.=<<___;
844 .globl  ecp_nistz256_point_double
845 .align  5
846 ecp_nistz256_point_double:
847         stdu    $sp,-$FRAME($sp)
848         mflr    r0
849         std     r20,$FRAME-8*12($sp)
850         std     r21,$FRAME-8*11($sp)
851         std     r22,$FRAME-8*10($sp)
852         std     r23,$FRAME-8*9($sp)
853         std     r24,$FRAME-8*8($sp)
854         std     r25,$FRAME-8*7($sp)
855         std     r26,$FRAME-8*6($sp)
856         std     r27,$FRAME-8*5($sp)
857         std     r28,$FRAME-8*4($sp)
858         std     r29,$FRAME-8*3($sp)
859         std     r30,$FRAME-8*2($sp)
860         std     r31,$FRAME-8*1($sp)
861
862         li      $poly1,-1
863         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
864         li      $poly3,1
865         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
866 .Ldouble_shortcut:
867         ld      $acc0,32($ap)
868         ld      $acc1,40($ap)
869         ld      $acc2,48($ap)
870         ld      $acc3,56($ap)
871         mr      $t0,$acc0
872         mr      $t1,$acc1
873         mr      $t2,$acc2
874         mr      $t3,$acc3
875          ld     $a0,64($ap)             # forward load for p256_sqr_mont
876          ld     $a1,72($ap)
877          ld     $a2,80($ap)
878          ld     $a3,88($ap)
879          mr     $rp_real,$rp
880          mr     $ap_real,$ap
881         addi    $rp,$sp,$S
882         bl      __ecp_nistz256_add      # p256_mul_by_2(S, in_y);
883
884         addi    $rp,$sp,$Zsqr
885         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Zsqr, in_z);
886
887         ld      $t0,0($ap_real)
888         ld      $t1,8($ap_real)
889         ld      $t2,16($ap_real)
890         ld      $t3,24($ap_real)
891         mr      $a0,$acc0               # put Zsqr aside for p256_sub
892         mr      $a1,$acc1
893         mr      $a2,$acc2
894         mr      $a3,$acc3
895         addi    $rp,$sp,$M
896         bl      __ecp_nistz256_add      # p256_add(M, Zsqr, in_x);
897
898         addi    $bp,$ap_real,0
899         mr      $acc0,$a0               # restore Zsqr
900         mr      $acc1,$a1
901         mr      $acc2,$a2
902         mr      $acc3,$a3
903          ld     $a0,$S+0($sp)           # forward load for p256_sqr_mont
904          ld     $a1,$S+8($sp)
905          ld     $a2,$S+16($sp)
906          ld     $a3,$S+24($sp)
907         addi    $rp,$sp,$Zsqr
908         bl      __ecp_nistz256_sub_morf # p256_sub(Zsqr, in_x, Zsqr);
909
910         addi    $rp,$sp,$S
911         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(S, S);
912
913         ld      $bi,32($ap_real)
914         ld      $a0,64($ap_real)
915         ld      $a1,72($ap_real)
916         ld      $a2,80($ap_real)
917         ld      $a3,88($ap_real)
918         addi    $bp,$ap_real,32
919         addi    $rp,$sp,$tmp0
920         bl      __ecp_nistz256_mul_mont # p256_mul_mont(tmp0, in_z, in_y);
921
922         mr      $t0,$acc0
923         mr      $t1,$acc1
924         mr      $t2,$acc2
925         mr      $t3,$acc3
926          ld     $a0,$S+0($sp)           # forward load for p256_sqr_mont
927          ld     $a1,$S+8($sp)
928          ld     $a2,$S+16($sp)
929          ld     $a3,$S+24($sp)
930         addi    $rp,$rp_real,64
931         bl      __ecp_nistz256_add      # p256_mul_by_2(res_z, tmp0);
932
933         addi    $rp,$sp,$tmp0
934         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(tmp0, S);
935
936          ld     $bi,$Zsqr($sp)          # forward load for p256_mul_mont
937          ld     $a0,$M+0($sp)
938          ld     $a1,$M+8($sp)
939          ld     $a2,$M+16($sp)
940          ld     $a3,$M+24($sp)
941         addi    $rp,$rp_real,32
942         bl      __ecp_nistz256_div_by_2 # p256_div_by_2(res_y, tmp0);
943
944         addi    $bp,$sp,$Zsqr
945         addi    $rp,$sp,$M
946         bl      __ecp_nistz256_mul_mont # p256_mul_mont(M, M, Zsqr);
947
948         mr      $t0,$acc0               # duplicate M
949         mr      $t1,$acc1
950         mr      $t2,$acc2
951         mr      $t3,$acc3
952         mr      $a0,$acc0               # put M aside
953         mr      $a1,$acc1
954         mr      $a2,$acc2
955         mr      $a3,$acc3
956         addi    $rp,$sp,$M
957         bl      __ecp_nistz256_add
958         mr      $t0,$a0                 # restore M
959         mr      $t1,$a1
960         mr      $t2,$a2
961         mr      $t3,$a3
962          ld     $bi,0($ap_real)         # forward load for p256_mul_mont
963          ld     $a0,$S+0($sp)
964          ld     $a1,$S+8($sp)
965          ld     $a2,$S+16($sp)
966          ld     $a3,$S+24($sp)
967         bl      __ecp_nistz256_add      # p256_mul_by_3(M, M);
968
969         addi    $bp,$ap_real,0
970         addi    $rp,$sp,$S
971         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S, S, in_x);
972
973         mr      $t0,$acc0
974         mr      $t1,$acc1
975         mr      $t2,$acc2
976         mr      $t3,$acc3
977          ld     $a0,$M+0($sp)           # forward load for p256_sqr_mont
978          ld     $a1,$M+8($sp)
979          ld     $a2,$M+16($sp)
980          ld     $a3,$M+24($sp)
981         addi    $rp,$sp,$tmp0
982         bl      __ecp_nistz256_add      # p256_mul_by_2(tmp0, S);
983
984         addi    $rp,$rp_real,0
985         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(res_x, M);
986
987         addi    $bp,$sp,$tmp0
988         bl      __ecp_nistz256_sub_from # p256_sub(res_x, res_x, tmp0);
989
990         addi    $bp,$sp,$S
991         addi    $rp,$sp,$S
992         bl      __ecp_nistz256_sub_morf # p256_sub(S, S, res_x);
993
994         ld      $bi,$M($sp)
995         mr      $a0,$acc0               # copy S
996         mr      $a1,$acc1
997         mr      $a2,$acc2
998         mr      $a3,$acc3
999         addi    $bp,$sp,$M
1000         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S, S, M);
1001
1002         addi    $bp,$rp_real,32
1003         addi    $rp,$rp_real,32
1004         bl      __ecp_nistz256_sub_from # p256_sub(res_y, S, res_y);
1005
1006         mtlr    r0
1007         ld      r20,$FRAME-8*12($sp)
1008         ld      r21,$FRAME-8*11($sp)
1009         ld      r22,$FRAME-8*10($sp)
1010         ld      r23,$FRAME-8*9($sp)
1011         ld      r24,$FRAME-8*8($sp)
1012         ld      r25,$FRAME-8*7($sp)
1013         ld      r26,$FRAME-8*6($sp)
1014         ld      r27,$FRAME-8*5($sp)
1015         ld      r28,$FRAME-8*4($sp)
1016         ld      r29,$FRAME-8*3($sp)
1017         ld      r30,$FRAME-8*2($sp)
1018         ld      r31,$FRAME-8*1($sp)
1019         addi    $sp,$sp,$FRAME
1020         blr
1021         .long   0
1022         .byte   0,12,4,0,0x80,12,2,0
1023         .long   0
1024 .size   ecp_nistz256_point_double,.-ecp_nistz256_point_double
1025 ___
1026 }
1027
1028 ########################################################################
1029 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
1030 #                             const P256_POINT *in2);
1031 if (1) {
1032 my $FRAME = 64 + 32*12 + 16*8;
1033 my ($res_x,$res_y,$res_z,
1034     $H,$Hsqr,$R,$Rsqr,$Hcub,
1035     $U1,$U2,$S1,$S2)=map(64+32*$_,(0..11));
1036 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
1037 # above map() describes stack layout with 12 temporary
1038 # 256-bit vectors on top.
1039 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
1040
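# The call sequence below evaluates the usual Jacobian addition formulas,
# mirroring ecp_nistz256_point_add in ecp_nistz256.c:
#
#       U1 = X1*Z2^2,   U2 = X2*Z1^2,   S1 = Y1*Z2^3,   S2 = Y2*Z1^3,
#       H  = U2 - U1,   R  = S2 - S1,
#       X3 = R^2 - H^3 - 2*U1*H^2,
#       Y3 = R*(U1*H^2 - X3) - S1*H^3,
#       Z3 = H*Z1*Z2,
#
# with a branch to the doubling code when the two inputs turn out to be
# the same point, and branchless selection of in1/in2 when either input
# is the point at infinity.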
1041 $code.=<<___;
1042 .globl  ecp_nistz256_point_add
1043 .align  5
1044 ecp_nistz256_point_add:
1045         stdu    $sp,-$FRAME($sp)
1046         mflr    r0
1047         std     r16,$FRAME-8*16($sp)
1048         std     r17,$FRAME-8*15($sp)
1049         std     r18,$FRAME-8*14($sp)
1050         std     r19,$FRAME-8*13($sp)
1051         std     r20,$FRAME-8*12($sp)
1052         std     r21,$FRAME-8*11($sp)
1053         std     r22,$FRAME-8*10($sp)
1054         std     r23,$FRAME-8*9($sp)
1055         std     r24,$FRAME-8*8($sp)
1056         std     r25,$FRAME-8*7($sp)
1057         std     r26,$FRAME-8*6($sp)
1058         std     r27,$FRAME-8*5($sp)
1059         std     r28,$FRAME-8*4($sp)
1060         std     r29,$FRAME-8*3($sp)
1061         std     r30,$FRAME-8*2($sp)
1062         std     r31,$FRAME-8*1($sp)
1063
1064         li      $poly1,-1
1065         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
1066         li      $poly3,1
1067         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
1068
1069         ld      $a0,64($bp)             # in2_z
1070         ld      $a1,72($bp)
1071         ld      $a2,80($bp)
1072         ld      $a3,88($bp)
1073          mr     $rp_real,$rp
1074          mr     $ap_real,$ap
1075          mr     $bp_real,$bp
1076         or      $t0,$a0,$a1
1077         or      $t2,$a2,$a3
1078         or      $in2infty,$t0,$t2
1079         neg     $t0,$in2infty
1080         or      $in2infty,$in2infty,$t0
1081         sradi   $in2infty,$in2infty,63  # !in2infty
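        # ($in2infty is now 0 if in2 is the point at infinity, i.e. if
        #  in2_z == 0, and all ones otherwise; it serves as a select
        #  mask in the conditional moves at the end)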
1082         addi    $rp,$sp,$Z2sqr
1083         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Z2sqr, in2_z);
1084
1085         ld      $a0,64($ap_real)        # in1_z
1086         ld      $a1,72($ap_real)
1087         ld      $a2,80($ap_real)
1088         ld      $a3,88($ap_real)
1089         or      $t0,$a0,$a1
1090         or      $t2,$a2,$a3
1091         or      $in1infty,$t0,$t2
1092         neg     $t0,$in1infty
1093         or      $in1infty,$in1infty,$t0
1094         sradi   $in1infty,$in1infty,63  # !in1infty
1095         addi    $rp,$sp,$Z1sqr
1096         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
1097
1098         ld      $bi,64($bp_real)
1099         ld      $a0,$Z2sqr+0($sp)
1100         ld      $a1,$Z2sqr+8($sp)
1101         ld      $a2,$Z2sqr+16($sp)
1102         ld      $a3,$Z2sqr+24($sp)
1103         addi    $bp,$bp_real,64
1104         addi    $rp,$sp,$S1
1105         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S1, Z2sqr, in2_z);
1106
1107         ld      $bi,64($ap_real)
1108         ld      $a0,$Z1sqr+0($sp)
1109         ld      $a1,$Z1sqr+8($sp)
1110         ld      $a2,$Z1sqr+16($sp)
1111         ld      $a3,$Z1sqr+24($sp)
1112         addi    $bp,$ap_real,64
1113         addi    $rp,$sp,$S2
1114         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S2, Z1sqr, in1_z);
1115
1116         ld      $bi,32($ap_real)
1117         ld      $a0,$S1+0($sp)
1118         ld      $a1,$S1+8($sp)
1119         ld      $a2,$S1+16($sp)
1120         ld      $a3,$S1+24($sp)
1121         addi    $bp,$ap_real,32
1122         addi    $rp,$sp,$S1
1123         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S1, S1, in1_y);
1124
1125         ld      $bi,32($bp_real)
1126         ld      $a0,$S2+0($sp)
1127         ld      $a1,$S2+8($sp)
1128         ld      $a2,$S2+16($sp)
1129         ld      $a3,$S2+24($sp)
1130         addi    $bp,$bp_real,32
1131         addi    $rp,$sp,$S2
1132         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S2, S2, in2_y);
1133
1134         addi    $bp,$sp,$S1
1135          ld     $bi,$Z2sqr($sp)         # forward load for p256_mul_mont
1136          ld     $a0,0($ap_real)
1137          ld     $a1,8($ap_real)
1138          ld     $a2,16($ap_real)
1139          ld     $a3,24($ap_real)
1140         addi    $rp,$sp,$R
1141         bl      __ecp_nistz256_sub_from # p256_sub(R, S2, S1);
1142
1143         or      $acc0,$acc0,$acc1       # see if result is zero
1144         or      $acc2,$acc2,$acc3
1145         or      $temp,$acc0,$acc2
1146
1147         addi    $bp,$sp,$Z2sqr
1148         addi    $rp,$sp,$U1
1149         bl      __ecp_nistz256_mul_mont # p256_mul_mont(U1, in1_x, Z2sqr);
1150
1151         ld      $bi,$Z1sqr($sp)
1152         ld      $a0,0($bp_real)
1153         ld      $a1,8($bp_real)
1154         ld      $a2,16($bp_real)
1155         ld      $a3,24($bp_real)
1156         addi    $bp,$sp,$Z1sqr
1157         addi    $rp,$sp,$U2
1158         bl      __ecp_nistz256_mul_mont # p256_mul_mont(U2, in2_x, Z1sqr);
1159
1160         addi    $bp,$sp,$U1
1161          ld     $a0,$R+0($sp)           # forward load for p256_sqr_mont
1162          ld     $a1,$R+8($sp)
1163          ld     $a2,$R+16($sp)
1164          ld     $a3,$R+24($sp)
1165         addi    $rp,$sp,$H
1166         bl      __ecp_nistz256_sub_from # p256_sub(H, U2, U1);
1167
1168         or      $acc0,$acc0,$acc1       # see if result is zero
1169         or      $acc2,$acc2,$acc3
1170         or.     $acc0,$acc0,$acc2
1171         bne     .Ladd_proceed           # is_equal(U1,U2)?
1172
1173         and.    $t0,$in1infty,$in2infty
1174         beq     .Ladd_proceed           # (in1infty || in2infty)?
1175
1176         cmpldi  $temp,0
1177         beq     .Ladd_double            # is_equal(S1,S2)?
1178
1179         xor     $a0,$a0,$a0
1180         std     $a0,0($rp_real)
1181         std     $a0,8($rp_real)
1182         std     $a0,16($rp_real)
1183         std     $a0,24($rp_real)
1184         std     $a0,32($rp_real)
1185         std     $a0,40($rp_real)
1186         std     $a0,48($rp_real)
1187         std     $a0,56($rp_real)
1188         std     $a0,64($rp_real)
1189         std     $a0,72($rp_real)
1190         std     $a0,80($rp_real)
1191         std     $a0,88($rp_real)
1192         b       .Ladd_done
1193
1194 .align  4
1195 .Ladd_double:
1196         ld      $bp,0($sp)              # back-link
1197         mr      $ap,$ap_real
1198         mr      $rp,$rp_real
1199         ld      r16,$FRAME-8*16($sp)
1200         ld      r17,$FRAME-8*15($sp)
1201         ld      r18,$FRAME-8*14($sp)
1202         ld      r19,$FRAME-8*13($sp)
1203         stdu    $bp,$FRAME-288($sp)     # difference in stack frame sizes: re-link to point_double's 288-byte frame
1204         b       .Ldouble_shortcut
1205
1206 .align  4
1207 .Ladd_proceed:
1208         addi    $rp,$sp,$Rsqr
1209         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Rsqr, R);
1210
1211         ld      $bi,64($ap_real)
1212         ld      $a0,$H+0($sp)
1213         ld      $a1,$H+8($sp)
1214         ld      $a2,$H+16($sp)
1215         ld      $a3,$H+24($sp)
1216         addi    $bp,$ap_real,64
1217         addi    $rp,$sp,$res_z
1218         bl      __ecp_nistz256_mul_mont # p256_mul_mont(res_z, H, in1_z);
1219
1220         ld      $a0,$H+0($sp)
1221         ld      $a1,$H+8($sp)
1222         ld      $a2,$H+16($sp)
1223         ld      $a3,$H+24($sp)
1224         addi    $rp,$sp,$Hsqr
1225         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Hsqr, H);
1226
1227         ld      $bi,64($bp_real)
1228         ld      $a0,$res_z+0($sp)
1229         ld      $a1,$res_z+8($sp)
1230         ld      $a2,$res_z+16($sp)
1231         ld      $a3,$res_z+24($sp)
1232         addi    $bp,$bp_real,64
1233         addi    $rp,$sp,$res_z
1234         bl      __ecp_nistz256_mul_mont # p256_mul_mont(res_z, res_z, in2_z);
1235
1236         ld      $bi,$H($sp)
1237         ld      $a0,$Hsqr+0($sp)
1238         ld      $a1,$Hsqr+8($sp)
1239         ld      $a2,$Hsqr+16($sp)
1240         ld      $a3,$Hsqr+24($sp)
1241         addi    $bp,$sp,$H
1242         addi    $rp,$sp,$Hcub
1243         bl      __ecp_nistz256_mul_mont # p256_mul_mont(Hcub, Hsqr, H);
1244
1245         ld      $bi,$Hsqr($sp)
1246         ld      $a0,$U1+0($sp)
1247         ld      $a1,$U1+8($sp)
1248         ld      $a2,$U1+16($sp)
1249         ld      $a3,$U1+24($sp)
1250         addi    $bp,$sp,$Hsqr
1251         addi    $rp,$sp,$U2
1252         bl      __ecp_nistz256_mul_mont # p256_mul_mont(U2, U1, Hsqr);
1253
1254         mr      $t0,$acc0
1255         mr      $t1,$acc1
1256         mr      $t2,$acc2
1257         mr      $t3,$acc3
1258         addi    $rp,$sp,$Hsqr
1259         bl      __ecp_nistz256_add      # p256_mul_by_2(Hsqr, U2);
1260
1261         addi    $bp,$sp,$Rsqr
1262         addi    $rp,$sp,$res_x
1263         bl      __ecp_nistz256_sub_morf # p256_sub(res_x, Rsqr, Hsqr);
1264
1265         addi    $bp,$sp,$Hcub
1266         bl      __ecp_nistz256_sub_from # p256_sub(res_x, res_x, Hcub);
1267
1268         addi    $bp,$sp,$U2
1269          ld     $bi,$Hcub($sp)          # forward load for p256_mul_mont
1270          ld     $a0,$S1+0($sp)
1271          ld     $a1,$S1+8($sp)
1272          ld     $a2,$S1+16($sp)
1273          ld     $a3,$S1+24($sp)
1274         addi    $rp,$sp,$res_y
1275         bl      __ecp_nistz256_sub_morf # p256_sub(res_y, U2, res_x);
1276
1277         addi    $bp,$sp,$Hcub
1278         addi    $rp,$sp,$S2
1279         bl      __ecp_nistz256_mul_mont # p256_mul_mont(S2, S1, Hcub);
1280
1281         ld      $bi,$R($sp)
1282         ld      $a0,$res_y+0($sp)
1283         ld      $a1,$res_y+8($sp)
1284         ld      $a2,$res_y+16($sp)
1285         ld      $a3,$res_y+24($sp)
1286         addi    $bp,$sp,$R
1287         addi    $rp,$sp,$res_y
1288         bl      __ecp_nistz256_mul_mont # p256_mul_mont(res_y, res_y, R);
1289
1290         addi    $bp,$sp,$S2
1291         bl      __ecp_nistz256_sub_from # p256_sub(res_y, res_y, S2);
1292
1293         ld      $t0,0($bp_real)         # in2
1294         ld      $t1,8($bp_real)
1295         ld      $t2,16($bp_real)
1296         ld      $t3,24($bp_real)
1297         ld      $a0,$res_x+0($sp)       # res
1298         ld      $a1,$res_x+8($sp)
1299         ld      $a2,$res_x+16($sp)
1300         ld      $a3,$res_x+24($sp)
1301 ___
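# Branchless selection of the result: $in1infty/$in2infty are all-ones
# masks meaning "the respective input is finite" (see the sradi above),
# and each output word is assembled as
#
#       t   = in1_finite ? res : in2    # (res & mask1) | (in2 & ~mask1)
#       out = in2_finite ? t   : in1    # (t & mask2)   | (in1 & ~mask2)
#
# using only and/andc/or, i.e. without branching on secret data.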
1302 for($i=0;$i<64;$i+=32) {                # conditional moves
1303 $code.=<<___;
1304         ld      $acc0,$i+0($ap_real)    # in1
1305         ld      $acc1,$i+8($ap_real)
1306         ld      $acc2,$i+16($ap_real)
1307         ld      $acc3,$i+24($ap_real)
1308         andc    $t0,$t0,$in1infty
1309         andc    $t1,$t1,$in1infty
1310         andc    $t2,$t2,$in1infty
1311         andc    $t3,$t3,$in1infty
1312         and     $a0,$a0,$in1infty
1313         and     $a1,$a1,$in1infty
1314         and     $a2,$a2,$in1infty
1315         and     $a3,$a3,$in1infty
1316         or      $t0,$t0,$a0
1317         or      $t1,$t1,$a1
1318         or      $t2,$t2,$a2
1319         or      $t3,$t3,$a3
1320         andc    $acc0,$acc0,$in2infty
1321         andc    $acc1,$acc1,$in2infty
1322         andc    $acc2,$acc2,$in2infty
1323         andc    $acc3,$acc3,$in2infty
1324         and     $t0,$t0,$in2infty
1325         and     $t1,$t1,$in2infty
1326         and     $t2,$t2,$in2infty
1327         and     $t3,$t3,$in2infty
1328         or      $acc0,$acc0,$t0
1329         or      $acc1,$acc1,$t1
1330         or      $acc2,$acc2,$t2
1331         or      $acc3,$acc3,$t3
1332
1333         ld      $t0,$i+32($bp_real)     # in2
1334         ld      $t1,$i+40($bp_real)
1335         ld      $t2,$i+48($bp_real)
1336         ld      $t3,$i+56($bp_real)
1337         ld      $a0,$res_x+$i+32($sp)
1338         ld      $a1,$res_x+$i+40($sp)
1339         ld      $a2,$res_x+$i+48($sp)
1340         ld      $a3,$res_x+$i+56($sp)
1341         std     $acc0,$i+0($rp_real)
1342         std     $acc1,$i+8($rp_real)
1343         std     $acc2,$i+16($rp_real)
1344         std     $acc3,$i+24($rp_real)
1345 ___
1346 }
1347 $code.=<<___;
1348         ld      $acc0,$i+0($ap_real)    # in1
1349         ld      $acc1,$i+8($ap_real)
1350         ld      $acc2,$i+16($ap_real)
1351         ld      $acc3,$i+24($ap_real)
1352         andc    $t0,$t0,$in1infty
1353         andc    $t1,$t1,$in1infty
1354         andc    $t2,$t2,$in1infty
1355         andc    $t3,$t3,$in1infty
1356         and     $a0,$a0,$in1infty
1357         and     $a1,$a1,$in1infty
1358         and     $a2,$a2,$in1infty
1359         and     $a3,$a3,$in1infty
1360         or      $t0,$t0,$a0
1361         or      $t1,$t1,$a1
1362         or      $t2,$t2,$a2
1363         or      $t3,$t3,$a3
1364         andc    $acc0,$acc0,$in2infty
1365         andc    $acc1,$acc1,$in2infty
1366         andc    $acc2,$acc2,$in2infty
1367         andc    $acc3,$acc3,$in2infty
1368         and     $t0,$t0,$in2infty
1369         and     $t1,$t1,$in2infty
1370         and     $t2,$t2,$in2infty
1371         and     $t3,$t3,$in2infty
1372         or      $acc0,$acc0,$t0
1373         or      $acc1,$acc1,$t1
1374         or      $acc2,$acc2,$t2
1375         or      $acc3,$acc3,$t3
1376         std     $acc0,$i+0($rp_real)
1377         std     $acc1,$i+8($rp_real)
1378         std     $acc2,$i+16($rp_real)
1379         std     $acc3,$i+24($rp_real)
1380
1381 .Ladd_done:
1382         mtlr    r0
1383         ld      r16,$FRAME-8*16($sp)
1384         ld      r17,$FRAME-8*15($sp)
1385         ld      r18,$FRAME-8*14($sp)
1386         ld      r19,$FRAME-8*13($sp)
1387         ld      r20,$FRAME-8*12($sp)
1388         ld      r21,$FRAME-8*11($sp)
1389         ld      r22,$FRAME-8*10($sp)
1390         ld      r23,$FRAME-8*9($sp)
1391         ld      r24,$FRAME-8*8($sp)
1392         ld      r25,$FRAME-8*7($sp)
1393         ld      r26,$FRAME-8*6($sp)
1394         ld      r27,$FRAME-8*5($sp)
1395         ld      r28,$FRAME-8*4($sp)
1396         ld      r29,$FRAME-8*3($sp)
1397         ld      r30,$FRAME-8*2($sp)
1398         ld      r31,$FRAME-8*1($sp)
1399         addi    $sp,$sp,$FRAME
1400         blr
1401         .long   0
1402         .byte   0,12,4,0,0x80,16,3,0
1403         .long   0
1404 .size   ecp_nistz256_point_add,.-ecp_nistz256_point_add
1405 ___
1406 }
1407
1408 ########################################################################
1409 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1410 #                                    const P256_POINT_AFFINE *in2);
1411 if (1) {
1412 my $FRAME = 64 + 32*10 + 16*8;
1413 my ($res_x,$res_y,$res_z,
1414     $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(64+32*$_,(0..9));
1415 my $Z1sqr = $S2;
1416 # above map() describes stack layout with 10 temporary
1417 # 256-bit vectors on top.
1418 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
1419
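# Same as ecp_nistz256_point_add, but specialized for Z2 == 1 (in2 is an
# affine point), mirroring ecp_nistz256_point_add_affine in ecp_nistz256.c:
#
#       U2 = X2*Z1^2,   S2 = Y2*Z1^3,
#       H  = U2 - X1,   R  = S2 - Y1,
#       X3 = R^2 - H^3 - 2*X1*H^2,
#       Y3 = R*(X1*H^2 - X3) - Y1*H^3,
#       Z3 = H*Z1,
#
# and, when in1 is the point at infinity, the result's z-coordinate is
# set to 1 in Montgomery form, i.e. 2^256 mod p (the "Lone_mont"
# constant loaded in the epilogue below).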
1420 $code.=<<___;
1421 .globl  ecp_nistz256_point_add_affine
1422 .align  5
1423 ecp_nistz256_point_add_affine:
1424         stdu    $sp,-$FRAME($sp)
1425         mflr    r0
1426         std     r16,$FRAME-8*16($sp)
1427         std     r17,$FRAME-8*15($sp)
1428         std     r18,$FRAME-8*14($sp)
1429         std     r19,$FRAME-8*13($sp)
1430         std     r20,$FRAME-8*12($sp)
1431         std     r21,$FRAME-8*11($sp)
1432         std     r22,$FRAME-8*10($sp)
1433         std     r23,$FRAME-8*9($sp)
1434         std     r24,$FRAME-8*8($sp)
1435         std     r25,$FRAME-8*7($sp)
1436         std     r26,$FRAME-8*6($sp)
1437         std     r27,$FRAME-8*5($sp)
1438         std     r28,$FRAME-8*4($sp)
1439         std     r29,$FRAME-8*3($sp)
1440         std     r30,$FRAME-8*2($sp)
1441         std     r31,$FRAME-8*1($sp)
1442
1443         li      $poly1,-1
1444         srdi    $poly1,$poly1,32        # 0x00000000ffffffff
1445         li      $poly3,1
1446         orc     $poly3,$poly3,$poly1    # 0xffffffff00000001
1447
1448         mr      $rp_real,$rp
1449         mr      $ap_real,$ap
1450         mr      $bp_real,$bp
1451
1452         ld      $a0,64($ap)             # in1_z
1453         ld      $a1,72($ap)
1454         ld      $a2,80($ap)
1455         ld      $a3,88($ap)
1456         or      $t0,$a0,$a1
1457         or      $t2,$a2,$a3
1458         or      $in1infty,$t0,$t2
1459         neg     $t0,$in1infty
1460         or      $in1infty,$in1infty,$t0
1461         sradi   $in1infty,$in1infty,63  # !in1infty
1462
1463         ld      $acc0,0($bp)            # in2_x
1464         ld      $acc1,8($bp)
1465         ld      $acc2,16($bp)
1466         ld      $acc3,24($bp)
1467         ld      $t0,32($bp)             # in2_y
1468         ld      $t1,40($bp)
1469         ld      $t2,48($bp)
1470         ld      $t3,56($bp)
1471         or      $acc0,$acc0,$acc1
1472         or      $acc2,$acc2,$acc3
1473         or      $acc0,$acc0,$acc2
1474         or      $t0,$t0,$t1
1475         or      $t2,$t2,$t3
1476         or      $t0,$t0,$t2
1477         or      $in2infty,$acc0,$t0
1478         neg     $t0,$in2infty
1479         or      $in2infty,$in2infty,$t0
1480         sradi   $in2infty,$in2infty,63  # !in2infty
1481
1482         addi    $rp,$sp,$Z1sqr
1483         bl      __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
1484
1485         mr      $a0,$acc0
1486         mr      $a1,$acc1
1487         mr      $a2,$acc2
1488         mr      $a3,$acc3
1489         ld      $bi,0($bp_real)
1490         addi    $bp,$bp_real,0
1491         addi    $rp,$sp,$U2
1492         bl      __ecp_nistz256_mul_mont # p256_mul_mont(U2, Z1sqr, in2_x);
1493
1494         addi    $bp,$ap_real,0
1495          ld     $bi,64($ap_real)        # forward load for p256_mul_mont
	 ld	$a0,$Z1sqr+0($sp)
	 ld	$a1,$Z1sqr+8($sp)
	 ld	$a2,$Z1sqr+16($sp)
	 ld	$a3,$Z1sqr+24($sp)
	addi	$rp,$sp,$H
	bl	__ecp_nistz256_sub_from	# p256_sub(H, U2, in1_x);

	addi	$bp,$ap_real,64
	addi	$rp,$sp,$S2
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(S2, Z1sqr, in1_z);

	ld	$bi,64($ap_real)
	ld	$a0,$H+0($sp)
	ld	$a1,$H+8($sp)
	ld	$a2,$H+16($sp)
	ld	$a3,$H+24($sp)
	addi	$bp,$ap_real,64
	addi	$rp,$sp,$res_z
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(res_z, H, in1_z);

	ld	$bi,32($bp_real)
	ld	$a0,$S2+0($sp)
	ld	$a1,$S2+8($sp)
	ld	$a2,$S2+16($sp)
	ld	$a3,$S2+24($sp)
	addi	$bp,$bp_real,32
	addi	$rp,$sp,$S2
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(S2, S2, in2_y);

	addi	$bp,$ap_real,32
	 ld	$a0,$H+0($sp)		# forward load for p256_sqr_mont
	 ld	$a1,$H+8($sp)
	 ld	$a2,$H+16($sp)
	 ld	$a3,$H+24($sp)
	addi	$rp,$sp,$R
	bl	__ecp_nistz256_sub_from	# p256_sub(R, S2, in1_y);

	addi	$rp,$sp,$Hsqr
	bl	__ecp_nistz256_sqr_mont	# p256_sqr_mont(Hsqr, H);

	ld	$a0,$R+0($sp)
	ld	$a1,$R+8($sp)
	ld	$a2,$R+16($sp)
	ld	$a3,$R+24($sp)
	addi	$rp,$sp,$Rsqr
	bl	__ecp_nistz256_sqr_mont	# p256_sqr_mont(Rsqr, R);

	ld	$bi,$H($sp)
	ld	$a0,$Hsqr+0($sp)
	ld	$a1,$Hsqr+8($sp)
	ld	$a2,$Hsqr+16($sp)
	ld	$a3,$Hsqr+24($sp)
	addi	$bp,$sp,$H
	addi	$rp,$sp,$Hcub
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(Hcub, Hsqr, H);

	ld	$bi,0($ap_real)
	ld	$a0,$Hsqr+0($sp)
	ld	$a1,$Hsqr+8($sp)
	ld	$a2,$Hsqr+16($sp)
	ld	$a3,$Hsqr+24($sp)
	addi	$bp,$ap_real,0
	addi	$rp,$sp,$U2
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(U2, in1_x, Hsqr);

	mr	$t0,$acc0
	mr	$t1,$acc1
	mr	$t2,$acc2
	mr	$t3,$acc3
	addi	$rp,$sp,$Hsqr
	bl	__ecp_nistz256_add	# p256_mul_by_2(Hsqr, U2);

	addi	$bp,$sp,$Rsqr
	addi	$rp,$sp,$res_x
	bl	__ecp_nistz256_sub_morf	# p256_sub(res_x, Rsqr, Hsqr);

	addi	$bp,$sp,$Hcub
	bl	__ecp_nistz256_sub_from	# p256_sub(res_x, res_x, Hcub);

	addi	$bp,$sp,$U2
	 ld	$bi,32($ap_real)	# forward load for p256_mul_mont
	 ld	$a0,$Hcub+0($sp)
	 ld	$a1,$Hcub+8($sp)
	 ld	$a2,$Hcub+16($sp)
	 ld	$a3,$Hcub+24($sp)
	addi	$rp,$sp,$res_y
	bl	__ecp_nistz256_sub_morf	# p256_sub(res_y, U2, res_x);

	addi	$bp,$ap_real,32
	addi	$rp,$sp,$S2
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(S2, in1_y, Hcub);

	ld	$bi,$R($sp)
	ld	$a0,$res_y+0($sp)
	ld	$a1,$res_y+8($sp)
	ld	$a2,$res_y+16($sp)
	ld	$a3,$res_y+24($sp)
	addi	$bp,$sp,$R
	addi	$rp,$sp,$res_y
	bl	__ecp_nistz256_mul_mont	# p256_mul_mont(res_y, res_y, R);

	addi	$bp,$sp,$S2
	bl	__ecp_nistz256_sub_from	# p256_sub(res_y, res_y, S2);

	ld	$t0,0($bp_real)		# in2
	ld	$t1,8($bp_real)
	ld	$t2,16($bp_real)
	ld	$t3,24($bp_real)
	ld	$a0,$res_x+0($sp)	# res
	ld	$a1,$res_x+8($sp)
	ld	$a2,$res_x+16($sp)
	ld	$a3,$res_x+24($sp)
___
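########################################################################
# Constant-time selection of the result. $in1infty and $in2infty are
# all-zero/all-ones masks (all-ones when the corresponding input is
# finite, judging by the selection order below), so output limbs are
# picked with and/andc/or only, never with a branch. A rough C model
# of one limb of the selection (illustrative sketch only):
#
#	tmp = (res & in1infty) | (in2 & ~in1infty);	# in1==inf ? in2 : res
#	out = (tmp & in2infty) | (in1 & ~in2infty);	# in2==inf ? in1 : tmp
#
# For the Z coordinate ($i==32 below) the "in2" value is taken as 1 in
# Montgomery form (the Lone_mont constants), because the affine in2 has
# an implicit Z = 1.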
for($i=0;$i<64;$i+=32) {		# conditional moves
$code.=<<___;
	ld	$acc0,$i+0($ap_real)	# in1
	ld	$acc1,$i+8($ap_real)
	ld	$acc2,$i+16($ap_real)
	ld	$acc3,$i+24($ap_real)
	andc	$t0,$t0,$in1infty
	andc	$t1,$t1,$in1infty
	andc	$t2,$t2,$in1infty
	andc	$t3,$t3,$in1infty
	and	$a0,$a0,$in1infty
	and	$a1,$a1,$in1infty
	and	$a2,$a2,$in1infty
	and	$a3,$a3,$in1infty
	or	$t0,$t0,$a0
	or	$t1,$t1,$a1
	or	$t2,$t2,$a2
	or	$t3,$t3,$a3
	andc	$acc0,$acc0,$in2infty
	andc	$acc1,$acc1,$in2infty
	andc	$acc2,$acc2,$in2infty
	andc	$acc3,$acc3,$in2infty
	and	$t0,$t0,$in2infty
	and	$t1,$t1,$in2infty
	and	$t2,$t2,$in2infty
	and	$t3,$t3,$in2infty
	or	$acc0,$acc0,$t0
	or	$acc1,$acc1,$t1
	or	$acc2,$acc2,$t2
	or	$acc3,$acc3,$t3
___
$code.=<<___	if ($i==0);
	ld	$t0,32($bp_real)	# in2
	ld	$t1,40($bp_real)
	ld	$t2,48($bp_real)
	ld	$t3,56($bp_real)
___
$code.=<<___	if ($i==32);
	li	$t0,1			# Lone_mont
	not	$t1,$poly1
	li	$t2,-1
	not	$t3,$poly3
___
$code.=<<___;
	ld	$a0,$res_x+$i+32($sp)
	ld	$a1,$res_x+$i+40($sp)
	ld	$a2,$res_x+$i+48($sp)
	ld	$a3,$res_x+$i+56($sp)
	std	$acc0,$i+0($rp_real)
	std	$acc1,$i+8($rp_real)
	std	$acc2,$i+16($rp_real)
	std	$acc3,$i+24($rp_real)
___
}
$code.=<<___;
	ld	$acc0,$i+0($ap_real)	# in1
	ld	$acc1,$i+8($ap_real)
	ld	$acc2,$i+16($ap_real)
	ld	$acc3,$i+24($ap_real)
	andc	$t0,$t0,$in1infty
	andc	$t1,$t1,$in1infty
	andc	$t2,$t2,$in1infty
	andc	$t3,$t3,$in1infty
	and	$a0,$a0,$in1infty
	and	$a1,$a1,$in1infty
	and	$a2,$a2,$in1infty
	and	$a3,$a3,$in1infty
	or	$t0,$t0,$a0
	or	$t1,$t1,$a1
	or	$t2,$t2,$a2
	or	$t3,$t3,$a3
	andc	$acc0,$acc0,$in2infty
	andc	$acc1,$acc1,$in2infty
	andc	$acc2,$acc2,$in2infty
	andc	$acc3,$acc3,$in2infty
	and	$t0,$t0,$in2infty
	and	$t1,$t1,$in2infty
	and	$t2,$t2,$in2infty
	and	$t3,$t3,$in2infty
	or	$acc0,$acc0,$t0
	or	$acc1,$acc1,$t1
	or	$acc2,$acc2,$t2
	or	$acc3,$acc3,$t3
	std	$acc0,$i+0($rp_real)
	std	$acc1,$i+8($rp_real)
	std	$acc2,$i+16($rp_real)
	std	$acc3,$i+24($rp_real)

	mtlr	r0
	ld	r16,$FRAME-8*16($sp)
	ld	r17,$FRAME-8*15($sp)
	ld	r18,$FRAME-8*14($sp)
	ld	r19,$FRAME-8*13($sp)
	ld	r20,$FRAME-8*12($sp)
	ld	r21,$FRAME-8*11($sp)
	ld	r22,$FRAME-8*10($sp)
	ld	r23,$FRAME-8*9($sp)
	ld	r24,$FRAME-8*8($sp)
	ld	r25,$FRAME-8*7($sp)
	ld	r26,$FRAME-8*6($sp)
	ld	r27,$FRAME-8*5($sp)
	ld	r28,$FRAME-8*4($sp)
	ld	r29,$FRAME-8*3($sp)
	ld	r30,$FRAME-8*2($sp)
	ld	r31,$FRAME-8*1($sp)
	addi	$sp,$sp,$FRAME
	blr
	.long	0
	.byte	0,12,4,0,0x80,16,3,0
	.long	0
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
___
}
if (1) {
my ($ordk,$ord0,$ord1,$t4) = map("r$_",(18..21));
my ($ord2,$ord3,$zr) = ($poly1,$poly3,"r0");

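########################################################################
# The two routines below work modulo the P-256 group order,
# ord = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551,
# not modulo the field prime. As a C-level model (a sketch of the
# contract, not of the implementation), ecp_nistz256_ord_mul_mont
# computes
#
#	res = a * b * 2^-256 mod ord
#
# with $ordk holding the usual Montgomery "n0" constant,
# -ord^-1 mod 2^64 = 0xccd1c8aaee00bc4f, built below with lis/ori/oris
# like the other 64-bit constants.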
$code.=<<___;
########################################################################
# void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
#                                uint64_t b[4]);
.globl	ecp_nistz256_ord_mul_mont
.align	5
ecp_nistz256_ord_mul_mont:
	stdu	$sp,-160($sp)
	std	r18,48($sp)
	std	r19,56($sp)
	std	r20,64($sp)
	std	r21,72($sp)
	std	r22,80($sp)
	std	r23,88($sp)
	std	r24,96($sp)
	std	r25,104($sp)
	std	r26,112($sp)
	std	r27,120($sp)
	std	r28,128($sp)
	std	r29,136($sp)
	std	r30,144($sp)
	std	r31,152($sp)

	ld	$a0,0($ap)
	ld	$bi,0($bp)
	ld	$a1,8($ap)
	ld	$a2,16($ap)
	ld	$a3,24($ap)

	lis	$ordk,0xccd1
	lis	$ord0,0xf3b9
	lis	$ord1,0xbce6
	ori	$ordk,$ordk,0xc8aa
	ori	$ord0,$ord0,0xcac2
	ori	$ord1,$ord1,0xfaad
	sldi	$ordk,$ordk,32
	sldi	$ord0,$ord0,32
	sldi	$ord1,$ord1,32
	oris	$ordk,$ordk,0xee00
	oris	$ord0,$ord0,0xfc63
	oris	$ord1,$ord1,0xa717
	ori	$ordk,$ordk,0xbc4f	# 0xccd1c8aaee00bc4f
	ori	$ord0,$ord0,0x2551	# 0xf3b9cac2fc632551
	ori	$ord1,$ord1,0x9e84	# 0xbce6faada7179e84
	li	$ord2,-1		# 0xffffffffffffffff
	sldi	$ord3,$ord2,32		# 0xffffffff00000000
	li	$zr,0

	mulld	$acc0,$a0,$bi		# a[0]*b[0]
	mulhdu	$t0,$a0,$bi

	mulld	$acc1,$a1,$bi		# a[1]*b[0]
	mulhdu	$t1,$a1,$bi

	mulld	$acc2,$a2,$bi		# a[2]*b[0]
	mulhdu	$t2,$a2,$bi

	mulld	$acc3,$a3,$bi		# a[3]*b[0]
	mulhdu	$acc4,$a3,$bi

	mulld	$t4,$acc0,$ordk

	addc	$acc1,$acc1,$t0		# accumulate high parts of multiplication
	adde	$acc2,$acc2,$t1
	adde	$acc3,$acc3,$t2
	addze	$acc4,$acc4
	li	$acc5,0
___
for ($i=1;$i<4;$i++) {
	################################################################
	#            ffff0000.ffffffff.yyyyyyyy.zzzzzzzz
	# *                                     abcdefgh
	# + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
	# rewrite above as:
	#
	#   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
	# - 0000abcd.efgh0000.abcdefgh.00000000.00000000
	# + abcdefgh.abcdefgh.yzayzbyz.cyzdyzey.zfyzgyzh
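	#
	# The reduction factor $t4 = $acc0 * $ordk (mod 2^64), with
	# ordk = -ord^-1 mod 2^64, makes acc + t4*ord divisible by 2^64,
	# so the bottom limb can be retired on every iteration. Only
	# t4*ord0 and t4*ord1 take real multiplications: since
	# ord2 = 2^64-1 and ord3 = 2^64-2^32, the t4*ord[2..3]
	# contribution reduces to the sldi/srdi/subf block and the two
	# "+$t4" additions in the loop body below.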
$code.=<<___;
	ld	$bi,8*$i($bp)		# b[i]

	sldi	$t0,$t4,32
	subfc	$acc2,$t4,$acc2
	srdi	$t1,$t4,32
	subfe	$acc3,$t0,$acc3
	subfe	$acc4,$t1,$acc4
	subfe	$acc5,$zr,$acc5

	addic	$t0,$acc0,-1		# discarded
	mulhdu	$t1,$ord0,$t4
	mulld	$t2,$ord1,$t4
	mulhdu	$t3,$ord1,$t4

	adde	$t2,$t2,$t1
	 mulld	$t0,$a0,$bi
	addze	$t3,$t3
	 mulld	$t1,$a1,$bi

	addc	$acc0,$acc1,$t2
	 mulld	$t2,$a2,$bi
	adde	$acc1,$acc2,$t3
	 mulld	$t3,$a3,$bi
	adde	$acc2,$acc3,$t4
	adde	$acc3,$acc4,$t4
	addze	$acc4,$acc5

	addc	$acc0,$acc0,$t0		# accumulate low parts
	mulhdu	$t0,$a0,$bi
	adde	$acc1,$acc1,$t1
	mulhdu	$t1,$a1,$bi
	adde	$acc2,$acc2,$t2
	mulhdu	$t2,$a2,$bi
	adde	$acc3,$acc3,$t3
	mulhdu	$t3,$a3,$bi
	addze	$acc4,$acc4
	mulld	$t4,$acc0,$ordk
	addc	$acc1,$acc1,$t0		# accumulate high parts
	adde	$acc2,$acc2,$t1
	adde	$acc3,$acc3,$t2
	adde	$acc4,$acc4,$t3
	addze	$acc5,$zr
___
}
$code.=<<___;
	sldi	$t0,$t4,32		# last reduction
	subfc	$acc2,$t4,$acc2
	srdi	$t1,$t4,32
	subfe	$acc3,$t0,$acc3
	subfe	$acc4,$t1,$acc4
	subfe	$acc5,$zr,$acc5

	addic	$t0,$acc0,-1		# discarded
	mulhdu	$t1,$ord0,$t4
	mulld	$t2,$ord1,$t4
	mulhdu	$t3,$ord1,$t4

	adde	$t2,$t2,$t1
	addze	$t3,$t3

	addc	$acc0,$acc1,$t2
	adde	$acc1,$acc2,$t3
	adde	$acc2,$acc3,$t4
	adde	$acc3,$acc4,$t4
	addze	$acc4,$acc5

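	# Branchless final reduction: after the subtraction $acc4 is
	# either 0 or all-ones, i.e. a mask of "result went negative",
	# and the modulus is added back under that mask, roughly
	# ret += ord & (0 - borrow) in C terms. Note that $ord2 is
	# all-ones, so $ord2 & mask is the mask itself, which is why
	# $acc4 feeds the third limb directly.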
	subfc	$acc0,$ord0,$acc0	# ret -= modulus
	subfe	$acc1,$ord1,$acc1
	subfe	$acc2,$ord2,$acc2
	subfe	$acc3,$ord3,$acc3
	subfe	$acc4,$zr,$acc4

	and	$t0,$ord0,$acc4
	and	$t1,$ord1,$acc4
	addc	$acc0,$acc0,$t0		# ret += modulus if borrow
	and	$t3,$ord3,$acc4
	adde	$acc1,$acc1,$t1
	adde	$acc2,$acc2,$acc4
	adde	$acc3,$acc3,$t3

	std	$acc0,0($rp)
	std	$acc1,8($rp)
	std	$acc2,16($rp)
	std	$acc3,24($rp)

	ld	r18,48($sp)
	ld	r19,56($sp)
	ld	r20,64($sp)
	ld	r21,72($sp)
	ld	r22,80($sp)
	ld	r23,88($sp)
	ld	r24,96($sp)
	ld	r25,104($sp)
	ld	r26,112($sp)
	ld	r27,120($sp)
	ld	r28,128($sp)
	ld	r29,136($sp)
	ld	r30,144($sp)
	ld	r31,152($sp)
	addi	$sp,$sp,160
	blr
	.long	0
	.byte	0,12,4,0,0x80,14,3,0
	.long	0
.size	ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont

################################################################################
# void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
#                                int rep);
.globl	ecp_nistz256_ord_sqr_mont
.align	5
ecp_nistz256_ord_sqr_mont:
	stdu	$sp,-160($sp)
	std	r18,48($sp)
	std	r19,56($sp)
	std	r20,64($sp)
	std	r21,72($sp)
	std	r22,80($sp)
	std	r23,88($sp)
	std	r24,96($sp)
	std	r25,104($sp)
	std	r26,112($sp)
	std	r27,120($sp)
	std	r28,128($sp)
	std	r29,136($sp)
	std	r30,144($sp)
	std	r31,152($sp)

	mtctr	$bp			# 3rd argument, rep, is the loop count

	ld	$a0,0($ap)
	ld	$a1,8($ap)
	ld	$a2,16($ap)
	ld	$a3,24($ap)

	lis	$ordk,0xccd1
	lis	$ord0,0xf3b9
	lis	$ord1,0xbce6
	ori	$ordk,$ordk,0xc8aa
	ori	$ord0,$ord0,0xcac2
	ori	$ord1,$ord1,0xfaad
	sldi	$ordk,$ordk,32
	sldi	$ord0,$ord0,32
	sldi	$ord1,$ord1,32
	oris	$ordk,$ordk,0xee00
	oris	$ord0,$ord0,0xfc63
	oris	$ord1,$ord1,0xa717
	ori	$ordk,$ordk,0xbc4f	# 0xccd1c8aaee00bc4f
	ori	$ord0,$ord0,0x2551	# 0xf3b9cac2fc632551
	ori	$ord1,$ord1,0x9e84	# 0xbce6faada7179e84
	li	$ord2,-1		# 0xffffffffffffffff
	sldi	$ord3,$ord2,32		# 0xffffffff00000000
	li	$zr,0
	b	.Loop_ord_sqr

.align	5
.Loop_ord_sqr:
	################################################################
	#  |  |  |  |  |  |a1*a0|  |
	#  |  |  |  |  |a2*a0|  |  |
	#  |  |a3*a2|a3*a0|  |  |  |
	#  |  |  |  |a2*a1|  |  |  |
	#  |  |  |a3*a1|  |  |  |  |
	# *|  |  |  |  |  |  |  | 2|
	# +|a3*a3|a2*a2|a1*a1|a0*a0|
	#  |--+--+--+--+--+--+--+--|
	#  |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
	#
	#  "can't overflow" below marks carries into the high part of a
	#  multiplication result; these can't overflow, because a high
	#  part can never be all ones.

	mulld	$acc1,$a1,$a0		# a[1]*a[0]
	mulhdu	$t1,$a1,$a0
	mulld	$acc2,$a2,$a0		# a[2]*a[0]
	mulhdu	$t2,$a2,$a0
	mulld	$acc3,$a3,$a0		# a[3]*a[0]
	mulhdu	$acc4,$a3,$a0

	addc	$acc2,$acc2,$t1		# accumulate high parts of multiplication
	 mulld	$t0,$a2,$a1		# a[2]*a[1]
	 mulhdu	$t1,$a2,$a1
	adde	$acc3,$acc3,$t2
	 mulld	$t2,$a3,$a1		# a[3]*a[1]
	 mulhdu	$t3,$a3,$a1
	addze	$acc4,$acc4		# can't overflow

	mulld	$acc5,$a3,$a2		# a[3]*a[2]
	mulhdu	$acc6,$a3,$a2

	addc	$t1,$t1,$t2		# accumulate high parts of multiplication
	 mulld	$acc0,$a0,$a0		# a[0]*a[0]
	addze	$t2,$t3			# can't overflow

	addc	$acc3,$acc3,$t0		# accumulate low parts of multiplication
	 mulhdu	$a0,$a0,$a0
	adde	$acc4,$acc4,$t1
	 mulld	$t1,$a1,$a1		# a[1]*a[1]
	adde	$acc5,$acc5,$t2
	 mulhdu	$a1,$a1,$a1
	addze	$acc6,$acc6		# can't overflow

	addc	$acc1,$acc1,$acc1	# acc[1-6]*=2
	 mulld	$t2,$a2,$a2		# a[2]*a[2]
	adde	$acc2,$acc2,$acc2
	 mulhdu	$a2,$a2,$a2
	adde	$acc3,$acc3,$acc3
	 mulld	$t3,$a3,$a3		# a[3]*a[3]
	adde	$acc4,$acc4,$acc4
	 mulhdu	$a3,$a3,$a3
	adde	$acc5,$acc5,$acc5
	adde	$acc6,$acc6,$acc6
	addze	$acc7,$zr

	addc	$acc1,$acc1,$a0		# +a[i]*a[i]
	 mulld	$t4,$acc0,$ordk
	adde	$acc2,$acc2,$t1
	adde	$acc3,$acc3,$a1
	adde	$acc4,$acc4,$t2
	adde	$acc5,$acc5,$a2
	adde	$acc6,$acc6,$t3
	adde	$acc7,$acc7,$a3
___
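########################################################################
# Four Montgomery reduction steps, same scheme as in ord_mul_mont above:
# fold the bottom limb with t4 = acc0*ordk, then shift the accumulator
# right by one limb. A minimal C model of one step (sketch only, acc
# viewed as a multi-limb integer):
#
#	t4   = acc[0] * ordk;	/* mod 2^64 */
#	acc += t4 * ord;	/* bottom limb becomes zero */
#	acc >>= 64;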
for($i=0; $i<4; $i++) {			# reductions
$code.=<<___;
	addic	$t0,$acc0,-1		# discarded
	mulhdu	$t1,$ord0,$t4
	mulld	$t2,$ord1,$t4
	mulhdu	$t3,$ord1,$t4

	adde	$t2,$t2,$t1
	addze	$t3,$t3

	addc	$acc0,$acc1,$t2
	adde	$acc1,$acc2,$t3
	adde	$acc2,$acc3,$t4
	adde	$acc3,$zr,$t4		# can't overflow
___
$code.=<<___	if ($i<3);
	mulld	$t3,$acc0,$ordk
___
$code.=<<___;
	sldi	$t0,$t4,32
	subfc	$acc1,$t4,$acc1
	srdi	$t1,$t4,32
	subfe	$acc2,$t0,$acc2
	subfe	$acc3,$t1,$acc3		# can't borrow
___
	($t3,$t4) = ($t4,$t3);
}
$code.=<<___;
	addc	$acc0,$acc0,$acc4	# accumulate upper half
	adde	$acc1,$acc1,$acc5
	adde	$acc2,$acc2,$acc6
	adde	$acc3,$acc3,$acc7
	addze	$acc4,$zr

	subfc	$acc0,$ord0,$acc0	# ret -= modulus
	subfe	$acc1,$ord1,$acc1
	subfe	$acc2,$ord2,$acc2
	subfe	$acc3,$ord3,$acc3
	subfe	$acc4,$zr,$acc4

	and	$t0,$ord0,$acc4
	and	$t1,$ord1,$acc4
	addc	$a0,$acc0,$t0		# ret += modulus if borrow
	and	$t3,$ord3,$acc4
	adde	$a1,$acc1,$t1
	adde	$a2,$acc2,$acc4
	adde	$a3,$acc3,$t3

	bdnz	.Loop_ord_sqr

	std	$a0,0($rp)
	std	$a1,8($rp)
	std	$a2,16($rp)
	std	$a3,24($rp)

	ld	r18,48($sp)
	ld	r19,56($sp)
	ld	r20,64($sp)
	ld	r21,72($sp)
	ld	r22,80($sp)
	ld	r23,88($sp)
	ld	r24,96($sp)
	ld	r25,104($sp)
	ld	r26,112($sp)
	ld	r27,120($sp)
	ld	r28,128($sp)
	ld	r29,136($sp)
	ld	r30,144($sp)
	ld	r31,152($sp)
	addi	$sp,$sp,160
	blr
	.long	0
	.byte	0,12,4,0,0x80,14,3,0
	.long	0
.size	ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
___
}	}

########################################################################
# scatter-gather subroutines
{
my ($out,$inp,$index,$mask)=map("r$_",(3..7));
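########################################################################
# The w5 pattern interleaves 16 P256_POINT entries by 32-bit halves:
# within each coordinate the low halves of the four limbs occupy the
# first four 64-byte columns and the high halves the next four, with
# the 1-based $index selecting the 4-byte lane. A rough C model of the
# scatter (illustrative sketch only; out32 is out viewed as 32-bit
# words, limb[k][l] the l-th limb of coordinate k):
#
#	out32[128*k + 16*l     + index-1] = (u32)limb[k][l];
#	out32[128*k + 16*(l+4) + index-1] = (u32)(limb[k][l] >> 32);
#
# The gather performs the inverse walk, masking the result to zero when
# index is zero.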
$code.=<<___;
########################################################################
# void	ecp_nistz256_scatter_w5(void *out, const P256_POINT *inp,
#				int index);
.globl	ecp_nistz256_scatter_w5
.align	4
ecp_nistz256_scatter_w5:
	slwi	$index,$index,2
	add	$out,$out,$index

	ld	r8, 0($inp)		# X
	ld	r9, 8($inp)
	ld	r10,16($inp)
	ld	r11,24($inp)

	stw	r8, 64*0-4($out)
	srdi	r8, r8, 32
	stw	r9, 64*1-4($out)
	srdi	r9, r9, 32
	stw	r10,64*2-4($out)
	srdi	r10,r10,32
	stw	r11,64*3-4($out)
	srdi	r11,r11,32
	stw	r8, 64*4-4($out)
	stw	r9, 64*5-4($out)
	stw	r10,64*6-4($out)
	stw	r11,64*7-4($out)
	addi	$out,$out,64*8

	ld	r8, 32($inp)		# Y
	ld	r9, 40($inp)
	ld	r10,48($inp)
	ld	r11,56($inp)

	stw	r8, 64*0-4($out)
	srdi	r8, r8, 32
	stw	r9, 64*1-4($out)
	srdi	r9, r9, 32
	stw	r10,64*2-4($out)
	srdi	r10,r10,32
	stw	r11,64*3-4($out)
	srdi	r11,r11,32
	stw	r8, 64*4-4($out)
	stw	r9, 64*5-4($out)
	stw	r10,64*6-4($out)
	stw	r11,64*7-4($out)
	addi	$out,$out,64*8

	ld	r8, 64($inp)		# Z
	ld	r9, 72($inp)
	ld	r10,80($inp)
	ld	r11,88($inp)

	stw	r8, 64*0-4($out)
	srdi	r8, r8, 32
	stw	r9, 64*1-4($out)
	srdi	r9, r9, 32
	stw	r10,64*2-4($out)
	srdi	r10,r10,32
	stw	r11,64*3-4($out)
	srdi	r11,r11,32
	stw	r8, 64*4-4($out)
	stw	r9, 64*5-4($out)
	stw	r10,64*6-4($out)
	stw	r11,64*7-4($out)

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5

########################################################################
# void	ecp_nistz256_gather_w5(P256_POINT *out, const void *inp,
#				int index);
.globl	ecp_nistz256_gather_w5
.align	4
ecp_nistz256_gather_w5:
	neg	r0,$index
	sradi	r0,r0,63

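	# r0 is now zero for index==0 and all-ones otherwise; adding it
	# to $index converts the 1-based index to 0-based. Every limb
	# loaded below is ANDed with r0, so index 0 returns an all-zero
	# point no matter which table slot the loads touch.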
	add	$index,$index,r0
	slwi	$index,$index,2
	add	$inp,$inp,$index

	lwz	r5, 64*0($inp)
	lwz	r6, 64*1($inp)
	lwz	r7, 64*2($inp)
	lwz	r8, 64*3($inp)
	lwz	r9, 64*4($inp)
	lwz	r10,64*5($inp)
	lwz	r11,64*6($inp)
	lwz	r12,64*7($inp)
	addi	$inp,$inp,64*8
	sldi	r9, r9, 32
	sldi	r10,r10,32
	sldi	r11,r11,32
	sldi	r12,r12,32
	or	r5,r5,r9
	or	r6,r6,r10
	or	r7,r7,r11
	or	r8,r8,r12
	and	r5,r5,r0
	and	r6,r6,r0
	and	r7,r7,r0
	and	r8,r8,r0
	std	r5,0($out)		# X
	std	r6,8($out)
	std	r7,16($out)
	std	r8,24($out)

	lwz	r5, 64*0($inp)
	lwz	r6, 64*1($inp)
	lwz	r7, 64*2($inp)
	lwz	r8, 64*3($inp)
	lwz	r9, 64*4($inp)
	lwz	r10,64*5($inp)
	lwz	r11,64*6($inp)
	lwz	r12,64*7($inp)
	addi	$inp,$inp,64*8
	sldi	r9, r9, 32
	sldi	r10,r10,32
	sldi	r11,r11,32
	sldi	r12,r12,32
	or	r5,r5,r9
	or	r6,r6,r10
	or	r7,r7,r11
	or	r8,r8,r12
	and	r5,r5,r0
	and	r6,r6,r0
	and	r7,r7,r0
	and	r8,r8,r0
	std	r5,32($out)		# Y
	std	r6,40($out)
	std	r7,48($out)
	std	r8,56($out)

	lwz	r5, 64*0($inp)
	lwz	r6, 64*1($inp)
	lwz	r7, 64*2($inp)
	lwz	r8, 64*3($inp)
	lwz	r9, 64*4($inp)
	lwz	r10,64*5($inp)
	lwz	r11,64*6($inp)
	lwz	r12,64*7($inp)
	sldi	r9, r9, 32
	sldi	r10,r10,32
	sldi	r11,r11,32
	sldi	r12,r12,32
	or	r5,r5,r9
	or	r6,r6,r10
	or	r7,r7,r11
	or	r8,r8,r12
	and	r5,r5,r0
	and	r6,r6,r0
	and	r7,r7,r0
	and	r8,r8,r0
	std	r5,64($out)		# Z
	std	r6,72($out)
	std	r7,80($out)
	std	r8,88($out)

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5

########################################################################
# void	ecp_nistz256_scatter_w7(void *out, const P256_POINT_AFFINE *inp,
#				int index);
.globl	ecp_nistz256_scatter_w7
.align	4
ecp_nistz256_scatter_w7:
	li	r0,8
	mtctr	r0
	add	$out,$out,$index
	subi	$inp,$inp,8

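	# Each of the 8 limbs of the affine point is sliced into bytes
	# stored 64 bytes apart: byte b of limb l lands at offset
	# 64*(8*l + b) + index-1, the byte-sliced layout that
	# ecp_nistz256_gather_w7 below reassembles. In C terms (sketch):
	#	out8[64*(8*l + b) + index-1] = (u8)(limb[l] >> (8*b));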
.Loop_scatter_w7:
	ldu	r0,8($inp)
	stb	r0,64*0-1($out)
	srdi	r0,r0,8
	stb	r0,64*1-1($out)
	srdi	r0,r0,8
	stb	r0,64*2-1($out)
	srdi	r0,r0,8
	stb	r0,64*3-1($out)
	srdi	r0,r0,8
	stb	r0,64*4-1($out)
	srdi	r0,r0,8
	stb	r0,64*5-1($out)
	srdi	r0,r0,8
	stb	r0,64*6-1($out)
	srdi	r0,r0,8
	stb	r0,64*7-1($out)
	addi	$out,$out,64*8
	bdnz	.Loop_scatter_w7

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7

########################################################################
# void	ecp_nistz256_gather_w7(P256_POINT_AFFINE *out, const void *inp,
#				int index);
.globl	ecp_nistz256_gather_w7
.align	4
ecp_nistz256_gather_w7:
	li	r0,8
	mtctr	r0
	neg	r0,$index
	sradi	r0,r0,63

	add	$index,$index,r0
	add	$inp,$inp,$index
	subi	$out,$out,8

.Loop_gather_w7:
	lbz	r5, 64*0($inp)
	lbz	r6, 64*1($inp)
	lbz	r7, 64*2($inp)
	lbz	r8, 64*3($inp)
	lbz	r9, 64*4($inp)
	lbz	r10,64*5($inp)
	lbz	r11,64*6($inp)
	lbz	r12,64*7($inp)
	addi	$inp,$inp,64*8

	sldi	r6, r6, 8
	sldi	r7, r7, 16
	sldi	r8, r8, 24
	sldi	r9, r9, 32
	sldi	r10,r10,40
	sldi	r11,r11,48
	sldi	r12,r12,56

	or	r5,r5,r6
	or	r7,r7,r8
	or	r9,r9,r10
	or	r11,r11,r12
	or	r5,r5,r7
	or	r9,r9,r11
	or	r5,r5,r9
	and	r5,r5,r0
	stdu	r5,8($out)
	bdnz	.Loop_gather_w7

	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	print $_,"\n";
}
close STDOUT;	# enforce flush