# Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ECP_NISTZ256 module for ARMv4.
# The original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation the
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#
#			with/without -DECP_NISTZ256_ASM
# Cortex-A15		+100-316%
# Snapdragon S4		+66-187%
#
# Ranges denote minimum and maximum improvement coefficients depending
# on the benchmark. Lower coefficients are for ECDSA sign, a server-side
# operation. Keep in mind that +200% means 3x improvement.
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
#if defined(__thumb2__)
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz256_gather_w7
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c" or
open TABLE,"<${dir}../ecp_nistz256_table.c" or
die "failed to open ecp_nistz256_table.c:",$!;
s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
# See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
# 64*16*37-1 is because $#arr returns the last valid index of @arr, not
# the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);
.globl ecp_nistz256_precomputed
.type ecp_nistz256_precomputed,%object
ecp_nistz256_precomputed:
########################################################################
# this conversion smashes P256_POINT_AFFINE by individual bytes with a
# 64-byte interval, matching the layout ecp_nistz256_gather_w7 expects
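#
# For orientation, reassembling one entry from this byte-sliced layout
# could look like the following C sketch (illustrative only: the names
# are made up, "index" is the zero-based position of an entry within one
# 64-entry block, and the real ecp_nistz256_gather_w7 additionally scans
# all entries in constant time):
#
#	void gather_w7_ref(unsigned char val[64],
#	                   const unsigned char *block, size_t index)
#	{
#		/* byte i of entry "index" lives at offset i*64 + index */
#		for (size_t i = 0; i < 64; i++)
#			val[i] = block[i * 64 + index];
#	}
#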
@tbl = splice(@arr,0,64*16);
for($i=0;$i<64;$i++) {
for($j=0;$j<64;$j++) {
push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
}
$code.=join(',',map { sprintf "0x%02x",$_} @line);
.size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
.LRR: @ 2^512 mod P precomputed for NIST P256 polynomial
.long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.long 1,0,0,0,0,0,0,0
.asciz "ECP_NISTZ256 for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
########################################################################
# common register layout; note that $t2 is the link register, so if an
# internal subroutine uses $t2, it has to offload lr...
($r_ptr,$a_ptr,$b_ptr,$ff,$a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,$t1,$t2)=
map("r$_",(0..12,14));
($t0,$t3)=($ff,$a_ptr);
@ void ecp_nistz256_to_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_to_mont
.type ecp_nistz256_to_mont,%function
ecp_nistz256_to_mont:
b .Lecp_nistz256_mul_mont
.size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
@ void ecp_nistz256_from_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_from_mont
.type ecp_nistz256_from_mont,%function
ecp_nistz256_from_mont:
b .Lecp_nistz256_mul_mont
.size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
@ void ecp_nistz256_mul_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_mul_by_2
.type ecp_nistz256_mul_by_2,%function
ecp_nistz256_mul_by_2:
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_mul_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
.type __ecp_nistz256_mul_by_2,%function
__ecp_nistz256_mul_by_2:
adds $a0,$a0,$a0 @ a[0:7]+=a[0:7], i.e. add with itself
.size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
@ void ecp_nistz256_add(BN_ULONG r0[8],const BN_ULONG r1[8],
@ const BN_ULONG r2[8]);
.globl ecp_nistz256_add
.type ecp_nistz256_add,%function
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_add
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_add,.-ecp_nistz256_add
.type __ecp_nistz256_add,%function
str lr,[sp,#-4]! @ push lr
ldr lr,[sp],#4 @ pop lr
@ if a+b >= modulus, subtract modulus.
@ But since comparison implies subtraction, we subtract
@ modulus and then add it back if subtraction borrowed.
@ Note that because mod has special form, i.e. consists of
@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
@ using value of borrow as a whole or extracting single bit.
@ Follow $ff register...
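@
@ The same trick in C, as an illustrative sketch only (not the code
@ generated here): add, subtract the modulus, then add back a modulus
@ synthesized from a single all-ones/all-zeros mask word:
@
@	static void p256_add_ref(uint32_t r[8], const uint32_t a[8],
@	                         const uint32_t b[8])
@	{
@		static const uint32_t P[8] = { 0xffffffff, 0xffffffff, 0xffffffff,
@		                               0, 0, 0, 1, 0xffffffff };
@		uint32_t s[8], carry = 0, borrow = 0;
@		for (int i = 0; i < 8; i++) {
@			uint64_t t = (uint64_t)a[i] + b[i] + carry;
@			s[i] = (uint32_t)t; carry = (uint32_t)(t >> 32);
@		}
@		for (int i = 0; i < 8; i++) {
@			uint64_t t = (uint64_t)s[i] - P[i] - borrow;
@			r[i] = (uint32_t)t; borrow = (uint32_t)(t >> 32) & 1;
@		}
@		/* all-ones iff the subtraction borrowed overall (a+b < p) */
@		uint32_t mask = 0U - (borrow & (carry ^ 1));
@		uint32_t padd[8] = { mask, mask, mask, 0, 0, 0, mask & 1, mask };
@		carry = 0;
@		for (int i = 0; i < 8; i++) {
@			uint64_t t = (uint64_t)r[i] + padd[i] + carry;
@			r[i] = (uint32_t)t; carry = (uint32_t)(t >> 32);
@		}
@	}
@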
adds $a0,$a0,$ff @ add synthesized modulus
adcs $a6,$a6,$ff,lsr#31
.size __ecp_nistz256_add,.-__ecp_nistz256_add
@ void ecp_nistz256_mul_by_3(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_mul_by_3
.type ecp_nistz256_mul_by_3,%function
ecp_nistz256_mul_by_3:
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_mul_by_3
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
.type __ecp_nistz256_mul_by_3,%function
__ecp_nistz256_mul_by_3:
str lr,[sp,#-4]! @ push lr
@ As multiplication by 3 is performed as 2*n+n, below are inline
@ copies of __ecp_nistz256_mul_by_2 and __ecp_nistz256_add, see
@ corresponding subroutines for details.
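@
@ In C terms (a sketch only, using the p256_add_ref illustration above):
@ p256_mul_by_3(r, a) is p256_add_ref(t, a, a) followed by
@ p256_add_ref(r, t, a), with both additions and reductions inlined here.
@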
adds $a0,$a0,$a0 @ a[0:7]+=a[0:7]
subs $a0,$a0,#-1 @ .Lreduce_by_sub but without stores
adds $a0,$a0,$ff @ add synthesized modulus
ldr $b_ptr,[$a_ptr,#0]
adcs $a6,$a6,$ff,lsr#31
adds $a0,$a0,$b_ptr @ 2*a[0:7]+=a[0:7]
ldr $b_ptr,[$a_ptr,#16]
ldr lr,[sp],#4 @ pop lr
.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
@ void ecp_nistz256_div_by_2(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_div_by_2
.type ecp_nistz256_div_by_2,%function
ecp_nistz256_div_by_2:
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_div_by_2
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
.type __ecp_nistz256_div_by_2,%function
__ecp_nistz256_div_by_2:
@ ret = (a is odd ? a+mod : a) >> 1
mov $ff,$a0,lsl#31 @ place least significant bit in the most
@ significant position; arithmetic
@ right shift by 31 then produces -1 or
@ 0, while logical right shift gives 1 or 0.
@ This is how the modulus is conditionally
@ synthesized in this case...
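@
@ In C the same idea reads roughly as follows (illustrative sketch only,
@ not the generated code):
@
@	static void p256_div_by_2_ref(uint32_t r[8], const uint32_t a[8])
@	{
@		uint32_t m = 0U - (a[0] & 1);	/* all-ones iff a is odd */
@		uint32_t padd[8] = { m, m, m, 0, 0, 0, m & 1, m };
@		uint32_t t[8], carry = 0;
@		for (int i = 0; i < 8; i++) {
@			uint64_t s = (uint64_t)a[i] + padd[i] + carry;
@			t[i] = (uint32_t)s; carry = (uint32_t)(s >> 32);
@		}
@		/* carry is the 257th bit; shift the whole 257-bit value right */
@		for (int i = 0; i < 7; i++)
@			r[i] = (t[i] >> 1) | (t[i + 1] << 31);
@		r[7] = (t[7] >> 1) | (carry << 31);
@	}
@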
adds $a0,$a0,$ff,asr#31
adcs $a1,$a1,$ff,asr#31
adcs $a2,$a2,$ff,asr#31
mov $a0,$a0,lsr#1 @ a[0:7]>>=1, we can start early
@ because it doesn't affect flags
orr $a0,$a0,$a1,lsl#31
adcs $a6,$a6,$ff,lsr#31
adcs $a7,$a7,$ff,asr#31
adc $b_ptr,$b_ptr,#0 @ top-most carry bit from addition
orr $a1,$a1,$a2,lsl#31
orr $a2,$a2,$a3,lsl#31
orr $a3,$a3,$a4,lsl#31
orr $a4,$a4,$a5,lsl#31
orr $a5,$a5,$a6,lsl#31
orr $a6,$a6,$a7,lsl#31
orr $a7,$a7,$b_ptr,lsl#31 @ don't forget the top-most carry bit
.size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
@ void ecp_nistz256_sub(BN_ULONG r0[8],const BN_ULONG r1[8],
@ const BN_ULONG r2[8]);
.globl ecp_nistz256_sub
.type ecp_nistz256_sub,%function
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_sub
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_sub,.-ecp_nistz256_sub
.type __ecp_nistz256_sub,%function
str lr,[sp,#-4]! @ push lr
sbc $ff,$ff,$ff @ broadcast borrow bit
ldr lr,[sp],#4 @ pop lr
@ if a-b borrows, add modulus.
@ Note that because mod has special form, i.e. consists of
@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
@ broadcasting borrow bit to a register, $ff, and using it as
@ a whole or extracting single bit.
adds $a0,$a0,$ff @ add synthesized modulus
adcs $a6,$a6,$ff,lsr#31
.size __ecp_nistz256_sub,.-__ecp_nistz256_sub
@ void ecp_nistz256_neg(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_neg
.type ecp_nistz256_neg,%function
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_neg
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_neg,.-ecp_nistz256_neg
.type __ecp_nistz256_neg,%function
.size __ecp_nistz256_neg,.-__ecp_nistz256_neg
my @acc=map("r$_",(3..11));
my ($t0,$t1,$bj,$t2,$t3)=map("r$_",(0,1,2,12,14));
@ void ecp_nistz256_sqr_mont(BN_ULONG r0[8],const BN_ULONG r1[8]);
.globl ecp_nistz256_sqr_mont
.type ecp_nistz256_sqr_mont,%function
ecp_nistz256_sqr_mont:
b .Lecp_nistz256_mul_mont
.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
@ void ecp_nistz256_mul_mont(BN_ULONG r0[8],const BN_ULONG r1[8],
@ const BN_ULONG r2[8]);
.globl ecp_nistz256_mul_mont
.type ecp_nistz256_mul_mont,%function
ecp_nistz256_mul_mont:
.Lecp_nistz256_mul_mont:
stmdb sp!,{r4-r12,lr}
bl __ecp_nistz256_mul_mont
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
.type __ecp_nistz256_mul_mont,%function
__ecp_nistz256_mul_mont:
stmdb sp!,{r0-r2,lr} @ make a copy of arguments too
ldr $bj,[$b_ptr,#0] @ b[0]
ldmia $a_ptr,{@acc[1]-@acc[8]}
umull @acc[0],$t3,@acc[1],$bj @ r[0]=a[0]*b[0]
stmdb sp!,{$acc[1]-@acc[8]} @ copy a[0-7] to stack, so
@ that it can be addressed
@ without spending register
umull @acc[1],$t0,@acc[2],$bj @ r[1]=a[1]*b[0]
umull @acc[2],$t1,@acc[3],$bj
adds @acc[1],@acc[1],$t3 @ accumulate high part of mult
umull @acc[3],$t2,@acc[4],$bj
adcs @acc[2],@acc[2],$t0
umull @acc[4],$t3,@acc[5],$bj
adcs @acc[3],@acc[3],$t1
umull @acc[5],$t0,@acc[6],$bj
adcs @acc[4],@acc[4],$t2
umull @acc[6],$t1,@acc[7],$bj
adcs @acc[5],@acc[5],$t3
umull @acc[7],$t2,@acc[8],$bj
adcs @acc[6],@acc[6],$t0
adcs @acc[7],@acc[7],$t1
eor $t3,$t3,$t3 @ first overflow bit is zero
for(my $i=1;$i<8;$i++) {
# Reduction iteration is normally performed by accumulating
# result of multiplication of modulus by "magic" digit [and
# omitting least significant word, which is guaranteed to
# be 0], but thanks to special form of modulus and "magic"
# digit being equal to least significant word, it can be
# performed with additions and subtractions alone. Indeed:
#
#            ffff.0001.0000.0000.0000.ffff.ffff.ffff
#       *                                       abcd
#       + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
#
# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we rewrite
# the above as:
#
#         xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
#       + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
#       - abcd.0000.0000.0000.0000.0000.0000.abcd
#
# or marking redundant operations:
#
#         xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
#       + abcd.0000.abcd.0000.0000.abcd.----.----.----
#       - abcd.----.----.----.----.----.----.----
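#
# In C, one such reduction step could be written as follows (an
# illustrative sketch with made-up names, not the generated code, which
# interleaves it with the next multiply-accumulate column). t[] holds
# the nine live 32-bit words of the running product, least significant
# first, and "top" is the running overflow word:
#
#	static void p256_redc_step(uint32_t t[9], uint32_t *top)
#	{
#		uint32_t t0 = t[0];	/* the Montgomery "magic" digit */
#		uint64_t c;
#		c  = (uint64_t)t[3] + t0; t[3] = (uint32_t)c; c >>= 32;
#		c += t[4];                t[4] = (uint32_t)c; c >>= 32;
#		c += t[5];                t[5] = (uint32_t)c; c >>= 32;
#		c += (uint64_t)t[6] + t0; t[6] = (uint32_t)c; c >>= 32;
#		c += t[7];                t[7] = (uint32_t)c; c >>= 32;
#		c += (uint64_t)t[8] + t0; t[8] = (uint32_t)c; c >>= 32;
#		*top += (uint32_t)c;
#		/* the -t0*2^224 term; the borrow never passes the top word */
#		uint32_t b  = t[7] < t0;  t[7] -= t0;
#		uint32_t b2 = t[8] < b;   t[8] -= b;
#		*top -= b2;
#		/* adding t0*p also cancels t[0] exactly, so it is never
#		 * computed; the caller simply continues with t[1..8] and
#		 * the overflow word as the new, shorter value */
#	}
#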
@ multiplication-less reduction $i
adds @acc[3],@acc[3],@acc[0] @ r[3]+=r[0]
ldr $bj,[sp,#40] @ restore b_ptr
adcs @acc[4],@acc[4],#0 @ r[4]+=0
adcs @acc[5],@acc[5],#0 @ r[5]+=0
adcs @acc[6],@acc[6],@acc[0] @ r[6]+=r[0]
ldr $t1,[sp,#0] @ load a[0]
adcs @acc[7],@acc[7],#0 @ r[7]+=0
ldr $bj,[$bj,#4*$i] @ load b[i]
adcs @acc[8],@acc[8],@acc[0] @ r[8]+=r[0]
adc $t3,$t3,#0 @ overflow bit
subs @acc[7],@acc[7],@acc[0] @ r[7]-=r[0]
ldr $t2,[sp,#4] @ a[1]
sbcs @acc[8],@acc[8],#0 @ r[8]-=0
umlal @acc[1],$t0,$t1,$bj @ "r[0]"+=a[0]*b[i]
sbc @acc[0],$t3,#0 @ overflow bit, keep in mind
@ that the net result is
@ addition of a value which
@ makes underflow impossible
ldr $t3,[sp,#8] @ a[2]
umlal @acc[2],$t1,$t2,$bj @ "r[1]"+=a[1]*b[i]
str @acc[0],[sp,#36] @ temporarily offload overflow
ldr $t4,[sp,#12] @ a[3], $t4 is alias @acc[0]
umlal @acc[3],$t2,$t3,$bj @ "r[2]"+=a[2]*b[i]
adds @acc[2],@acc[2],$t0 @ accumulate high part of mult
ldr $t0,[sp,#16] @ a[4]
umlal @acc[4],$t3,$t4,$bj @ "r[3]"+=a[3]*b[i]
adcs @acc[3],@acc[3],$t1
ldr $t1,[sp,#20] @ a[5]
umlal @acc[5],$t4,$t0,$bj @ "r[4]"+=a[4]*b[i]
adcs @acc[4],@acc[4],$t2
ldr $t2,[sp,#24] @ a[6]
umlal @acc[6],$t0,$t1,$bj @ "r[5]"+=a[5]*b[i]
adcs @acc[5],@acc[5],$t3
ldr $t3,[sp,#28] @ a[7]
umlal @acc[7],$t1,$t2,$bj @ "r[6]"+=a[6]*b[i]
adcs @acc[6],@acc[6],$t4
ldr @acc[0],[sp,#36] @ restore overflow bit
umlal @acc[8],$t2,$t3,$bj @ "r[7]"+=a[7]*b[i]
adcs @acc[7],@acc[7],$t0
adcs @acc[8],@acc[8],$t1
adcs @acc[0],$acc[0],$t2
adc $t3,$t3,#0 @ new overflow bit
push(@acc,shift(@acc)); # rotate registers, so that
# "r[i]" becomes r[i]
@ last multiplication-less reduction
adds @acc[3],@acc[3],@acc[0]
ldr $r_ptr,[sp,#32] @ restore r_ptr
adcs @acc[4],@acc[4],#0
adcs @acc[5],@acc[5],#0
adcs @acc[6],@acc[6],@acc[0]
adcs @acc[7],@acc[7],#0
adcs @acc[8],@acc[8],@acc[0]
subs @acc[7],@acc[7],@acc[0]
sbcs @acc[8],@acc[8],#0
sbc @acc[0],$t3,#0 @ overflow bit
@ Final step is "if result > mod, subtract mod", but we do it
@ "other way around", namely subtract modulus from result
@ and if it borrowed, add modulus back.
adds @acc[1],@acc[1],#1 @ subs @acc[1],@acc[1],#-1
adcs @acc[2],@acc[2],#0 @ sbcs @acc[2],@acc[2],#-1
adcs @acc[3],@acc[3],#0 @ sbcs @acc[3],@acc[3],#-1
sbcs @acc[4],@acc[4],#0
sbcs @acc[5],@acc[5],#0
sbcs @acc[6],@acc[6],#0
sbcs @acc[7],@acc[7],#1
adcs @acc[8],@acc[8],#0 @ sbcs @acc[8],@acc[8],#-1
ldr lr,[sp,#44] @ restore lr
sbc @acc[0],@acc[0],#0 @ broadcast borrow bit
@ Note that because mod has special form, i.e. consists of
@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
@ broadcasting borrow bit to a register, @acc[0], and using it as
@ a whole or extracting single bit.
adds @acc[1],@acc[1],@acc[0] @ add modulus or zero
adcs @acc[2],@acc[2],@acc[0]
str @acc[1],[$r_ptr,#0]
adcs @acc[3],@acc[3],@acc[0]
str @acc[2],[$r_ptr,#4]
adcs @acc[4],@acc[4],#0
str @acc[3],[$r_ptr,#8]
adcs @acc[5],@acc[5],#0
str @acc[4],[$r_ptr,#12]
adcs @acc[6],@acc[6],#0
str @acc[5],[$r_ptr,#16]
adcs @acc[7],@acc[7],@acc[0],lsr#31
str @acc[6],[$r_ptr,#20]
adc @acc[8],@acc[8],@acc[0]
str @acc[7],[$r_ptr,#24]
str @acc[8],[$r_ptr,#28]
.size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
my ($out,$inp,$index,$mask)=map("r$_",(0..3));
@ void ecp_nistz256_scatter_w5(void *r0,const P256_POINT *r1,
.globl ecp_nistz256_scatter_w5
.type ecp_nistz256_scatter_w5,%function
ecp_nistz256_scatter_w5:
add $out,$out,$index,lsl#2
ldmia $inp!,{r4-r11} @ X
str r4,[$out,#64*0-4]
str r5,[$out,#64*1-4]
str r6,[$out,#64*2-4]
str r7,[$out,#64*3-4]
str r8,[$out,#64*4-4]
str r9,[$out,#64*5-4]
str r10,[$out,#64*6-4]
str r11,[$out,#64*7-4]
ldmia $inp!,{r4-r11} @ Y
str r4,[$out,#64*0-4]
str r5,[$out,#64*1-4]
str r6,[$out,#64*2-4]
str r7,[$out,#64*3-4]
str r8,[$out,#64*4-4]
str r9,[$out,#64*5-4]
str r10,[$out,#64*6-4]
str r11,[$out,#64*7-4]
ldmia $inp,{r4-r11} @ Z
str r4,[$out,#64*0-4]
str r5,[$out,#64*1-4]
str r6,[$out,#64*2-4]
str r7,[$out,#64*3-4]
str r8,[$out,#64*4-4]
str r9,[$out,#64*5-4]
str r10,[$out,#64*6-4]
str r11,[$out,#64*7-4]
#if __ARM_ARCH__>=5 || defined(__thumb__)
.size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
@ void ecp_nistz256_gather_w5(P256_POINT *r0,const void *r1,
.globl ecp_nistz256_gather_w5
.type ecp_nistz256_gather_w5,%function
ecp_nistz256_gather_w5:
subne $index,$index,#1
add $inp,$inp,$index,lsl#2
stmia $out!,{r4-r11} @ X
stmia $out!,{r4-r11} @ Y
stmia $out,{r4-r11} @ Z
#if __ARM_ARCH__>=5 || defined(__thumb__)
.size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
@ void ecp_nistz256_scatter_w7(void *r0,const P256_POINT_AFFINE *r1,
.globl ecp_nistz256_scatter_w7
.type ecp_nistz256_scatter_w7,%function
ecp_nistz256_scatter_w7:
subs $index,$index,#1
strb $mask,[$out,#64*0]
mov $mask,$mask,lsr#8
strb $mask,[$out,#64*1]
mov $mask,$mask,lsr#8
strb $mask,[$out,#64*2]
mov $mask,$mask,lsr#8
strb $mask,[$out,#64*3]
#if __ARM_ARCH__>=5 || defined(__thumb__)
.size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
@ void ecp_nistz256_gather_w7(P256_POINT_AFFINE *r0,const void *r1,
.globl ecp_nistz256_gather_w7
.type ecp_nistz256_gather_w7,%function
ecp_nistz256_gather_w7:
subne $index,$index,#1
subs $index,$index,#1
#if __ARM_ARCH__>=5 || defined(__thumb__)
.size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
# In comparison to the integer-only equivalent of the subroutine below:
# As not all time is spent in multiplication, the overall impact is
# deemed too low to care about.
my ($A0,$A1,$A2,$A3,$Bi,$zero,$temp)=map("d$_",(0..7));
my @AxB=map("q$_",(8..15));
my ($rptr,$aptr,$bptr,$toutptr)=map("r$_",(0..3));
.globl ecp_nistz256_mul_mont_neon
.type ecp_nistz256_mul_mont_neon,%function
ecp_nistz256_mul_mont_neon:
vstmdb sp!,{q4-q5} @ ABI specification says so
vld1.32 {${Bi}[0]},[$bptr,:32]!
veor $zero,$zero,$zero
vld1.32 {$A0-$A3}, [$aptr] @ can't specify :32 :-(
mov sp,$toutptr @ alloca
vmov.i64 $mask,#0xffff
vmull.u32 @AxB[0],$Bi,${A0}[0]
vmull.u32 @AxB[1],$Bi,${A0}[1]
vmull.u32 @AxB[2],$Bi,${A1}[0]
vmull.u32 @AxB[3],$Bi,${A1}[1]
vshr.u64 $temp,@AxB[0]#lo,#16
vmull.u32 @AxB[4],$Bi,${A2}[0]
vadd.u64 @AxB[0]#hi,@AxB[0]#hi,$temp
vmull.u32 @AxB[5],$Bi,${A2}[1]
vshr.u64 $temp,@AxB[0]#hi,#16 @ upper 32 bits of a[0]*b[0]
vmull.u32 @AxB[6],$Bi,${A3}[0]
vand.u64 @AxB[0],@AxB[0],$mask @ lower 32 bits of a[0]*b[0]
vmull.u32 @AxB[7],$Bi,${A3}[1]
for($i=1;$i<8;$i++) {
vld1.32 {${Bi}[0]},[$bptr,:32]!
veor $zero,$zero,$zero
vadd.u64 @AxB[1]#lo,@AxB[1]#lo,$temp @ reduction
vshl.u64 $mult,@AxB[0],#32
vadd.u64 @AxB[3],@AxB[3],@AxB[0]
vsub.u64 $mult,$mult,@AxB[0]
vadd.u64 @AxB[6],@AxB[6],@AxB[0]
vadd.u64 @AxB[7],@AxB[7],$mult
push(@AxB,shift(@AxB));
vmlal.u32 @AxB[0],$Bi,${A0}[0]
vmlal.u32 @AxB[1],$Bi,${A0}[1]
vmlal.u32 @AxB[2],$Bi,${A1}[0]
vmlal.u32 @AxB[3],$Bi,${A1}[1]
vshr.u64 $temp,@AxB[0]#lo,#16
vmlal.u32 @AxB[4],$Bi,${A2}[0]
vadd.u64 @AxB[0]#hi,@AxB[0]#hi,$temp
vmlal.u32 @AxB[5],$Bi,${A2}[1]
vshr.u64 $temp,@AxB[0]#hi,#16 @ upper 33 bits of a[0]*b[i]+t[0]
vmlal.u32 @AxB[6],$Bi,${A3}[0]
vand.u64 @AxB[0],@AxB[0],$mask @ lower 32 bits of a[0]*b[0]
vmull.u32 @AxB[7],$Bi,${A3}[1]
vadd.u64 @AxB[1]#lo,@AxB[1]#lo,$temp @ last reduction
vshl.u64 $mult,@AxB[0],#32
vadd.u64 @AxB[3],@AxB[3],@AxB[0]
vsub.u64 $mult,$mult,@AxB[0]
vadd.u64 @AxB[6],@AxB[6],@AxB[0]
vadd.u64 @AxB[7],@AxB[7],$mult
vshr.u64 $temp,@AxB[1]#lo,#16 @ convert
vadd.u64 @AxB[1]#hi,@AxB[1]#hi,$temp
vshr.u64 $temp,@AxB[1]#hi,#16
vzip.16 @AxB[1]#lo,@AxB[1]#hi
vadd.u64 @AxB[$_]#lo,@AxB[$_]#lo,$temp
vst1.32 {@AxB[$_-1]#lo[0]},[$toutptr,:32]!
vshr.u64 $temp,@AxB[$_]#lo,#16
vadd.u64 @AxB[$_]#hi,@AxB[$_]#hi,$temp
vshr.u64 $temp,@AxB[$_]#hi,#16
vzip.16 @AxB[$_]#lo,@AxB[$_]#hi
vst1.32 {@AxB[7]#lo[0]},[$toutptr,:32]!
vst1.32 {$temp},[$toutptr] @ upper 33 bits
ldr r9,[sp,#32] @ top-most bit
adcs r7,r7,r9,lsr#31
.size ecp_nistz256_mul_mont_neon,.-ecp_nistz256_mul_mont_neon
########################################################################
# The $aN assignment below matches the order in which the 256-bit result
# appears in the register bank on return from __ecp_nistz256_mul_mont,
# so that we can skip reloading it from memory. This means that the
# functions below use a custom calling sequence accepting 256-bit input
# in registers, the output pointer in r0, $r_ptr, and an optional
# pointer in r2, $b_ptr.
#
# See their "normal" counterparts for insights on calculations.
my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7,
$t0,$t1,$t2,$t3)=map("r$_",(11,3..10,12,14,1));
.type __ecp_nistz256_sub_from,%function
__ecp_nistz256_sub_from:
str lr,[sp,#-4]! @ push lr
ldr $t3,[$b_ptr,#12]
ldr $t0,[$b_ptr,#16]
ldr $t1,[$b_ptr,#20]
ldr $t2,[$b_ptr,#24]
ldr $t3,[$b_ptr,#28]
sbc $ff,$ff,$ff @ broadcast borrow bit
ldr lr,[sp],#4 @ pop lr
adds $a0,$a0,$ff @ add synthesized modulus
str $a3,[$r_ptr,#12]
str $a4,[$r_ptr,#16]
adcs $a6,$a6,$ff,lsr#31
str $a5,[$r_ptr,#20]
str $a6,[$r_ptr,#24]
str $a7,[$r_ptr,#28]
.size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
.type __ecp_nistz256_sub_morf,%function
__ecp_nistz256_sub_morf:
str lr,[sp,#-4]! @ push lr
ldr $t3,[$b_ptr,#12]
ldr $t0,[$b_ptr,#16]
ldr $t1,[$b_ptr,#20]
ldr $t2,[$b_ptr,#24]
ldr $t3,[$b_ptr,#28]
sbc $ff,$ff,$ff @ broadcast borrow bit
ldr lr,[sp],#4 @ pop lr
adds $a0,$a0,$ff @ add synthesized modulus
str $a3,[$r_ptr,#12]
str $a4,[$r_ptr,#16]
adcs $a6,$a6,$ff,lsr#31
str $a5,[$r_ptr,#20]
str $a6,[$r_ptr,#24]
str $a7,[$r_ptr,#28]
.size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
.type __ecp_nistz256_add_self,%function
__ecp_nistz256_add_self:
adds $a0,$a0,$a0 @ a[0:7]+=a[0:7]
@ if a+b >= modulus, subtract modulus.
@ But since comparison implies subtraction, we subtract
@ modulus and then add it back if subtraction borrowed.
@ Note that because mod has special form, i.e. consists of
@ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
@ using value of borrow as a whole or extracting single bit.
@ Follow $ff register...
adds $a0,$a0,$ff @ add synthesized modulus
str $a3,[$r_ptr,#12]
str $a4,[$r_ptr,#16]
adcs $a6,$a6,$ff,lsr#31
str $a5,[$r_ptr,#20]
str $a6,[$r_ptr,#24]
str $a7,[$r_ptr,#28]
.size __ecp_nistz256_add_self,.-__ecp_nistz256_add_self
########################################################################
# The following subroutines are "literal" implementations of those
# found in the original C module.
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
# above map() describes stack layout with 5 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have a copy of
# input arguments just below these temporary vectors.
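#
# A rough C picture of the resulting frame (an assumption for
# illustration only; offsets follow the map() above and the
# [sp,#32*5...] loads in the code below):
#
#	struct {
#		uint32_t S[8], M[8], Zsqr[8], in_x[8], tmp0[8]; /* sp+0 .. sp+159 */
#		uint32_t r0_r3[4];      /* copies of the arguments, sp+160..sp+175 */
#		uint32_t r4_r12_lr[10]; /* rest of the pushed registers            */
#	} frame;
#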
.globl ecp_nistz256_point_double
.type ecp_nistz256_point_double,%function
ecp_nistz256_point_double:
stmdb sp!,{r0-r12,lr} @ push from r0, unusual, but intentional
.Lpoint_double_shortcut:
ldmia $a_ptr!,{r4-r11} @ copy in_x
bl __ecp_nistz256_mul_by_2 @ p256_mul_by_2(S, in_y);
add $b_ptr,$a_ptr,#32
add $a_ptr,$a_ptr,#32
add $r_ptr,sp,#$Zsqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Zsqr, in_z);
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(S, S);
ldr $b_ptr,[sp,#32*5+4]
add $a_ptr,$b_ptr,#32
add $b_ptr,$b_ptr,#64
add $r_ptr,sp,#$tmp0
bl __ecp_nistz256_mul_mont @ p256_mul_mont(tmp0, in_z, in_y);
ldr $r_ptr,[sp,#32*5]
add $r_ptr,$r_ptr,#64
bl __ecp_nistz256_add_self @ p256_mul_by_2(res_z, tmp0);
add $a_ptr,sp,#$in_x
add $b_ptr,sp,#$Zsqr
bl __ecp_nistz256_add @ p256_add(M, in_x, Zsqr);
add $a_ptr,sp,#$in_x
add $b_ptr,sp,#$Zsqr
add $r_ptr,sp,#$Zsqr
bl __ecp_nistz256_sub @ p256_sub(Zsqr, in_x, Zsqr);
add $r_ptr,sp,#$tmp0
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(tmp0, S);
add $a_ptr,sp,#$Zsqr
bl __ecp_nistz256_mul_mont @ p256_mul_mont(M, M, Zsqr);
ldr $r_ptr,[sp,#32*5]
add $a_ptr,sp,#$tmp0
add $r_ptr,$r_ptr,#32
bl __ecp_nistz256_div_by_2 @ p256_div_by_2(res_y, tmp0);
bl __ecp_nistz256_mul_by_3 @ p256_mul_by_3(M, M);
add $a_ptr,sp,#$in_x
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S, S, in_x);
add $r_ptr,sp,#$tmp0
bl __ecp_nistz256_add_self @ p256_mul_by_2(tmp0, S);
ldr $r_ptr,[sp,#32*5]
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(res_x, M);
add $b_ptr,sp,#$tmp0
bl __ecp_nistz256_sub_from @ p256_sub(res_x, res_x, tmp0);
bl __ecp_nistz256_sub_morf @ p256_sub(S, S, res_x);
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S, S, M);
ldr $r_ptr,[sp,#32*5]
add $b_ptr,$r_ptr,#32
add $r_ptr,$r_ptr,#32
bl __ecp_nistz256_sub_from @ p256_sub(res_y, S, res_y);
add sp,sp,#32*5+16 @ +16 means "skip even over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_point_double,.-ecp_nistz256_point_double
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
# const P256_POINT *in2);
my ($res_x,$res_y,$res_z,
$in1_x,$in1_y,$in1_z,
$in2_x,$in2_y,$in2_z,
$H,$Hsqr,$R,$Rsqr,$Hcub,
$U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
# above map() describes stack layout with 18 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have a copy of
# input arguments just below these temporary vectors.
# We use three of them for !in1infty, !in2infty and
# result of check for zero.
.globl ecp_nistz256_point_add
.type ecp_nistz256_point_add,%function
ecp_nistz256_point_add:
stmdb sp!,{r0-r12,lr} @ push from r0, unusual, but intentional
ldmia $b_ptr!,{r4-r11} @ copy in2_x
ldmia $b_ptr!,{r4-r11} @ copy in2_y
ldmia $b_ptr,{r4-r11} @ copy in2_z
str r12,[sp,#32*18+8] @ !in2infty
ldmia $a_ptr!,{r4-r11} @ copy in1_x
ldmia $a_ptr!,{r4-r11} @ copy in1_y
ldmia $a_ptr,{r4-r11} @ copy in1_z
str r12,[sp,#32*18+4] @ !in1infty
add $a_ptr,sp,#$in2_z
add $b_ptr,sp,#$in2_z
add $r_ptr,sp,#$Z2sqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Z2sqr, in2_z);
add $a_ptr,sp,#$in1_z
add $b_ptr,sp,#$in1_z
add $r_ptr,sp,#$Z1sqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Z1sqr, in1_z);
add $a_ptr,sp,#$in2_z
add $b_ptr,sp,#$Z2sqr
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S1, Z2sqr, in2_z);
add $a_ptr,sp,#$in1_z
add $b_ptr,sp,#$Z1sqr
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, Z1sqr, in1_z);
add $a_ptr,sp,#$in1_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S1, S1, in1_y);
add $a_ptr,sp,#$in2_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, S2, in2_y);
bl __ecp_nistz256_sub_from @ p256_sub(R, S2, S1);
orr $a0,$a0,$a1 @ see if result is zero
add $a_ptr,sp,#$in1_x
add $b_ptr,sp,#$Z2sqr
str $a0,[sp,#32*18+12]
bl __ecp_nistz256_mul_mont @ p256_mul_mont(U1, in1_x, Z2sqr);
add $a_ptr,sp,#$in2_x
add $b_ptr,sp,#$Z1sqr
bl __ecp_nistz256_mul_mont @ p256_mul_mont(U2, in2_x, Z1sqr);
bl __ecp_nistz256_sub_from @ p256_sub(H, U2, U1);
orr $a0,$a0,$a1 @ see if result is zero
bne .Ladd_proceed @ is_equal(U1,U2)?
ldr $t0,[sp,#32*18+4]
ldr $t1,[sp,#32*18+8]
ldr $t2,[sp,#32*18+12]
beq .Ladd_proceed @ (in1infty || in2infty)?
beq .Ladd_double @ is_equal(S1,S2)?
ldr $r_ptr,[sp,#32*18+16]
stmia $r_ptr!,{r4-r11}
stmia $r_ptr!,{r4-r11}
stmia $r_ptr!,{r4-r11}
ldr $a_ptr,[sp,#32*18+20]
add sp,sp,#32*(18-5)+16 @ difference in frame sizes
b .Lpoint_double_shortcut
add $r_ptr,sp,#$Rsqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Rsqr, R);
add $b_ptr,sp,#$in1_z
add $r_ptr,sp,#$res_z
bl __ecp_nistz256_mul_mont @ p256_mul_mont(res_z, H, in1_z);
add $r_ptr,sp,#$Hsqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Hsqr, H);
add $a_ptr,sp,#$in2_z
add $b_ptr,sp,#$res_z
add $r_ptr,sp,#$res_z
bl __ecp_nistz256_mul_mont @ p256_mul_mont(res_z, res_z, in2_z);
add $b_ptr,sp,#$Hsqr
add $r_ptr,sp,#$Hcub
bl __ecp_nistz256_mul_mont @ p256_mul_mont(Hcub, Hsqr, H);
add $a_ptr,sp,#$Hsqr
bl __ecp_nistz256_mul_mont @ p256_mul_mont(U2, U1, Hsqr);
add $r_ptr,sp,#$Hsqr
bl __ecp_nistz256_add_self @ p256_mul_by_2(Hsqr, U2);
add $b_ptr,sp,#$Rsqr
add $r_ptr,sp,#$res_x
bl __ecp_nistz256_sub_morf @ p256_sub(res_x, Rsqr, Hsqr);
add $b_ptr,sp,#$Hcub
bl __ecp_nistz256_sub_from @ p256_sub(res_x, res_x, Hcub);
add $r_ptr,sp,#$res_y
bl __ecp_nistz256_sub_morf @ p256_sub(res_y, U2, res_x);
add $a_ptr,sp,#$Hcub
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, S1, Hcub);
add $b_ptr,sp,#$res_y
add $r_ptr,sp,#$res_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(res_y, res_y, R);
bl __ecp_nistz256_sub_from @ p256_sub(res_y, res_y, S2);
ldr r11,[sp,#32*18+4] @ !in1infty
ldr r12,[sp,#32*18+8] @ !in2infty
ldr $r_ptr,[sp,#32*18+16]
for($i=0;$i<96;$i+=8) { # conditional moves
ldmia r1!,{r4-r5} @ res_x
ldmia r2!,{r6-r7} @ in2_x
ldmia r3!,{r8-r9} @ in1_x
stmia $r_ptr!,{r4-r5}
add sp,sp,#32*18+16+16 @ +16 means "skip even over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_point_add,.-ecp_nistz256_point_add
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
# const P256_POINT_AFFINE *in2);
my ($res_x,$res_y,$res_z,
$in1_x,$in1_y,$in1_z,
$U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then note that we push
# starting from r0, which means that we have a copy of
# input arguments just below these temporary vectors.
# We use two of them for !in1infty, !in2infty.
my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
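# ONE_mont is 1 in Montgomery representation, i.e. 2^256 mod p256 =
# 0x00000000fffffffeffffffffffffffffffffffff000000000000000000000001,
# stored here as eight 32-bit words, least significant word first.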
.globl ecp_nistz256_point_add_affine
.type ecp_nistz256_point_add_affine,%function
ecp_nistz256_point_add_affine:
stmdb sp!,{r0-r12,lr} @ push from r0, unusual, but intentional
ldmia $a_ptr!,{r4-r11} @ copy in1_x
ldmia $a_ptr!,{r4-r11} @ copy in1_y
ldmia $a_ptr,{r4-r11} @ copy in1_z
str r12,[sp,#32*15+4] @ !in1infty
ldmia $b_ptr!,{r4-r11} @ copy in2_x
ldmia $b_ptr!,{r4-r11} @ copy in2_y
str r12,[sp,#32*15+8] @ !in2infty
add $a_ptr,sp,#$in1_z
add $b_ptr,sp,#$in1_z
add $r_ptr,sp,#$Z1sqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Z1sqr, in1_z);
add $a_ptr,sp,#$Z1sqr
add $b_ptr,sp,#$in2_x
bl __ecp_nistz256_mul_mont @ p256_mul_mont(U2, Z1sqr, in2_x);
add $b_ptr,sp,#$in1_x
bl __ecp_nistz256_sub_from @ p256_sub(H, U2, in1_x);
add $a_ptr,sp,#$Z1sqr
add $b_ptr,sp,#$in1_z
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, Z1sqr, in1_z);
add $b_ptr,sp,#$in1_z
add $r_ptr,sp,#$res_z
bl __ecp_nistz256_mul_mont @ p256_mul_mont(res_z, H, in1_z);
add $a_ptr,sp,#$in2_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, S2, in2_y);
add $b_ptr,sp,#$in1_y
bl __ecp_nistz256_sub_from @ p256_sub(R, S2, in1_y);
add $r_ptr,sp,#$Hsqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Hsqr, H);
add $r_ptr,sp,#$Rsqr
bl __ecp_nistz256_mul_mont @ p256_sqr_mont(Rsqr, R);
add $b_ptr,sp,#$Hsqr
add $r_ptr,sp,#$Hcub
bl __ecp_nistz256_mul_mont @ p256_mul_mont(Hcub, Hsqr, H);
add $a_ptr,sp,#$Hsqr
add $b_ptr,sp,#$in1_x
bl __ecp_nistz256_mul_mont @ p256_mul_mont(U2, in1_x, Hsqr);
add $r_ptr,sp,#$Hsqr
bl __ecp_nistz256_add_self @ p256_mul_by_2(Hsqr, U2);
add $b_ptr,sp,#$Rsqr
add $r_ptr,sp,#$res_x
bl __ecp_nistz256_sub_morf @ p256_sub(res_x, Rsqr, Hsqr);
add $b_ptr,sp,#$Hcub
bl __ecp_nistz256_sub_from @ p256_sub(res_x, res_x, Hcub);
add $r_ptr,sp,#$res_y
bl __ecp_nistz256_sub_morf @ p256_sub(res_y, U2, res_x);
add $a_ptr,sp,#$Hcub
add $b_ptr,sp,#$in1_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(S2, in1_y, Hcub);
add $b_ptr,sp,#$res_y
add $r_ptr,sp,#$res_y
bl __ecp_nistz256_mul_mont @ p256_mul_mont(res_y, res_y, R);
bl __ecp_nistz256_sub_from @ p256_sub(res_y, res_y, S2);
ldr r11,[sp,#32*15+4] @ !in1infty
ldr r12,[sp,#32*15+8] @ !in2infty
ldr $r_ptr,[sp,#32*15]
for($i=0;$i<64;$i+=8) { # conditional moves
ldmia r1!,{r4-r5} @ res_x
ldmia r2!,{r6-r7} @ in2_x
ldmia r3!,{r8-r9} @ in1_x
stmia $r_ptr!,{r4-r5}
ldmia r1!,{r4-r5} @ res_z
ldmia r3!,{r8-r9} @ in1_z
and r6,r11,#@ONE_mont[$j]
and r7,r11,#@ONE_mont[$j+1]
stmia $r_ptr!,{r4-r5}
add sp,sp,#32*15+16 @ +16 means "skip even over saved r0-r3"
#if __ARM_ARCH__>=5 || !defined(__thumb__)
ldmia sp!,{r4-r12,pc}
ldmia sp!,{r4-r12,lr}
bx lr @ interoperable with Thumb ISA:-)
.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
close STDOUT; # enforce flush