Update copyright year
diff --git a/crypto/ec/asm/ecp_nistz256-armv4.pl b/crypto/ec/asm/ecp_nistz256-armv4.pl
index aeeb190335f9ae2f8f57b56b57cd4fe88fa7ff80..83abbdd895780d157f6dd71c02712d6dc8d7a687 100755
--- a/crypto/ec/asm/ecp_nistz256-armv4.pl
+++ b/crypto/ec/asm/ecp_nistz256-armv4.pl
@@ -1,4 +1,11 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
 
 # ====================================================================
 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -27,8 +34,8 @@
 # operation. Keep in mind that +200% means 3x improvement.
 
 $flavour = shift;
-if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
-else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
 
 if ($flavour && $flavour ne "void") {
     $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
@@ -45,7 +52,7 @@ $code.=<<___;
 #include "arm_arch.h"
 
 .text
-#if defined(__thumb2__) && !defined(__APPLE__)
+#if defined(__thumb2__)
 .syntax        unified
 .thumb
 #else
@@ -167,10 +174,7 @@ __ecp_nistz256_mul_by_2:
        adcs    $a6,$a6,$a6
        mov     $ff,#0
        adcs    $a7,$a7,$a7
-#ifdef __thumb2__
-       it      cs
-#endif
-       movcs   $ff,#-1                 @ $ff = carry ? -1 : 0
+       adc     $ff,$ff,#0
 
        b       .Lreduce_by_sub
 .size  __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
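
The `adc $ff,$ff,#0` that replaces the predicated `movcs` records the carry out of the doubling as a plain 0/1 bit instead of broadcasting it to 0/-1 up front; the broadcast now happens later, inside .Lreduce_by_sub, where the borrow is folded in. A minimal C sketch of this first step, with illustrative names (not OpenSSL API) and eight little-endian 32-bit limbs assumed:

    #include <stdint.h>

    /* Double a 256-bit value held in eight 32-bit limbs and return the
     * carry out of the top limb -- the value "adc $ff,$ff,#0" leaves in
     * $ff before the code falls through to .Lreduce_by_sub. */
    static uint32_t mul_by_2_raw(uint32_t r[8], const uint32_t a[8])
    {
        uint64_t acc = 0;
        for (int i = 0; i < 8; i++) {
            acc += 2 * (uint64_t)a[i];   /* a[i] doubled plus incoming carry */
            r[i] = (uint32_t)acc;
            acc >>= 32;
        }
        return (uint32_t)acc;            /* 0 or 1 */
    }
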
@@ -221,35 +225,45 @@ __ecp_nistz256_add:
        adcs    $a6,$a6,$t2
        mov     $ff,#0
        adcs    $a7,$a7,$t3
-#ifdef __thumb2__
-       it      cs
-#endif
-       movcs   $ff,#-1                 @ $ff = carry ? -1 : 0, "broadcast" carry
+       adc     $ff,$ff,#0
        ldr     lr,[sp],#4              @ pop lr
 
 .Lreduce_by_sub:
 
-       @ if a+b carries, subtract modulus.
+       @ if a+b >= modulus, subtract modulus.
        @
+       @ But since comparison implies subtraction, we subtract
+       @ modulus and then add it back if subtraction borrowed.
+
+       subs    $a0,$a0,#-1
+       sbcs    $a1,$a1,#-1
+       sbcs    $a2,$a2,#-1
+       sbcs    $a3,$a3,#0
+       sbcs    $a4,$a4,#0
+       sbcs    $a5,$a5,#0
+       sbcs    $a6,$a6,#1
+       sbcs    $a7,$a7,#-1
+       sbc     $ff,$ff,#0
+
        @ Note that because mod has special form, i.e. consists of
        @ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
-       @ using value of broadcasted carry as a whole or extracting
-       @ single bit. Follow $ff register...
+       @ using value of borrow as a whole or extracting single bit.
+       @ Follow $ff register...
 
-       subs    $a0,$a0,$ff             @ subtract synthesized modulus
-       sbcs    $a1,$a1,$ff
+       adds    $a0,$a0,$ff             @ add synthesized modulus
+       adcs    $a1,$a1,$ff
        str     $a0,[$r_ptr,#0]
-       sbcs    $a2,$a2,$ff
+       adcs    $a2,$a2,$ff
        str     $a1,[$r_ptr,#4]
-       sbcs    $a3,$a3,#0
+       adcs    $a3,$a3,#0
        str     $a2,[$r_ptr,#8]
-       sbcs    $a4,$a4,#0
+       adcs    $a4,$a4,#0
        str     $a3,[$r_ptr,#12]
-       sbcs    $a5,$a5,#0
+       adcs    $a5,$a5,#0
        str     $a4,[$r_ptr,#16]
-       sbcs    $a6,$a6,$ff,lsr#31
+       adcs    $a6,$a6,$ff,lsr#31
        str     $a5,[$r_ptr,#20]
-       sbcs    $a7,$a7,$ff
+       adcs    $a7,$a7,$ff
        str     $a6,[$r_ptr,#24]
        str     $a7,[$r_ptr,#28]
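
The comment block above describes the whole trick: after an addition that may carry, the code subtracts the modulus unconditionally and adds it back if the subtraction borrowed past the carry. Because p = 2^256 - 2^224 + 2^192 + 2^96 - 1 has the limbs 0xffffffff, 0xffffffff, 0xffffffff, 0, 0, 0, 1, 0xffffffff (least significant first), every limb of the add-back can be synthesized from the borrow mask in $ff: $ff itself for the 0xffffffff limbs, $ff lsr #31 for the lone 1. A minimal C sketch of .Lreduce_by_sub under those assumptions, names illustrative:

    #include <stdint.h>

    /* P-256 modulus, least significant limb first. */
    static const uint32_t P256[8] = {
        0xffffffff, 0xffffffff, 0xffffffff, 0x00000000,
        0x00000000, 0x00000000, 0x00000001, 0xffffffff
    };

    /* Reduce a 257-bit sum (limbs a[], plus a carry bit) modulo P-256:
     * subtract p, then add it back when the subtraction borrowed more
     * than the carry provided. */
    static void reduce_by_sub(uint32_t r[8], const uint32_t a[8], uint32_t carry)
    {
        uint32_t t[8], borrow = 0;

        for (int i = 0; i < 8; i++) {
            uint64_t d = (uint64_t)a[i] - P256[i] - borrow;
            t[i] = (uint32_t)d;
            borrow = (uint32_t)(d >> 63);        /* 1 if this limb borrowed */
        }
        /* all-ones iff the 257-bit value was below p (borrow > carry) */
        uint32_t mask = (uint32_t)0 - (uint32_t)(borrow > carry);
        uint64_t c = 0;
        for (int i = 0; i < 8; i++) {
            c += (uint64_t)t[i] + (P256[i] & mask);
            r[i] = (uint32_t)c;
            c >>= 32;
        }
    }

Note how (P256[i] & mask) reproduces the per-limb operands in the assembly: $ff for limbs 0-2 and 7, #0 for limbs 3-5, and $ff,lsr#31 for limb 6.
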
 
@@ -297,26 +311,29 @@ __ecp_nistz256_mul_by_3:
        adcs    $a6,$a6,$a6
        mov     $ff,#0
        adcs    $a7,$a7,$a7
-#ifdef __thumb2__
-       it      cs
-#endif
-       movcs   $ff,#-1                 @ $ff = carry ? -1 : 0, "broadcast" carry
-
-       subs    $a0,$a0,$ff             @ subtract synthesized modulus, see
-                                       @ .Lreduce_by_sub for details, except
-                                       @ that we don't write anything to
-                                       @ memory, but keep intermediate
-                                       @ results in registers...
-       sbcs    $a1,$a1,$ff
-       sbcs    $a2,$a2,$ff
+       adc     $ff,$ff,#0
+
+       subs    $a0,$a0,#-1             @ .Lreduce_by_sub but without stores
+       sbcs    $a1,$a1,#-1
+       sbcs    $a2,$a2,#-1
        sbcs    $a3,$a3,#0
        sbcs    $a4,$a4,#0
-        ldr    $b_ptr,[$a_ptr,#0]
        sbcs    $a5,$a5,#0
+       sbcs    $a6,$a6,#1
+       sbcs    $a7,$a7,#-1
+       sbc     $ff,$ff,#0
+
+       adds    $a0,$a0,$ff             @ add synthesized modulus
+       adcs    $a1,$a1,$ff
+       adcs    $a2,$a2,$ff
+       adcs    $a3,$a3,#0
+       adcs    $a4,$a4,#0
+        ldr    $b_ptr,[$a_ptr,#0]
+       adcs    $a5,$a5,#0
         ldr    $t1,[$a_ptr,#4]
-       sbcs    $a6,$a6,$ff,lsr#31
+       adcs    $a6,$a6,$ff,lsr#31
         ldr    $t2,[$a_ptr,#8]
-       sbcs    $a7,$a7,$ff
+       adc     $a7,$a7,$ff
 
        ldr     $t0,[$a_ptr,#12]
        adds    $a0,$a0,$b_ptr          @ 2*a[0:7]+=a[0:7]
@@ -332,10 +349,7 @@ __ecp_nistz256_mul_by_3:
        adcs    $a6,$a6,$t2
        mov     $ff,#0
        adcs    $a7,$a7,$t3
-#ifdef __thumb2__
-       it      cs
-#endif
-       movcs   $ff,#-1                 @ $ff = carry ? -1 : 0, "broadcast" carry
+       adc     $ff,$ff,#0
        ldr     lr,[sp],#4              @ pop lr
 
        b       .Lreduce_by_sub
@@ -367,7 +381,7 @@ __ecp_nistz256_div_by_2:
        mov     $ff,$a0,lsl#31          @ place least significant bit to most
                                        @ significant position, now arithmetic
                                        @ right shift by 31 will produce -1 or
-                                       @ 0, while logical rigth shift 1 or 0,
+                                       @ 0, while logical right shift 1 or 0,
                                        @ this is how modulus is conditionally
                                        @ synthesized in this case...
        ldr     $a3,[$a_ptr,#12]
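
The comment spells out how a single bit becomes a modulus mask: the low bit of a[0] is moved to bit 31, where an arithmetic right shift yields 0/-1 and a logical one yields 0/1. Halving modulo p then reads: if a is odd, add p (making the sum even), and shift the 257-bit result right once. A minimal C sketch, same assumed limb layout as above:

    #include <stdint.h>

    static const uint32_t P256[8] = {
        0xffffffff, 0xffffffff, 0xffffffff, 0x00000000,
        0x00000000, 0x00000000, 0x00000001, 0xffffffff
    };

    /* r = a/2 mod P-256 for a < p: add p when a is odd, then shift the
     * 257-bit sum right by one bit. */
    static void div_by_2(uint32_t r[8], const uint32_t a[8])
    {
        uint32_t mask = (uint32_t)0 - (a[0] & 1);    /* -1 if odd, else 0 */
        uint32_t t[8];
        uint64_t acc = 0;

        for (int i = 0; i < 8; i++) {
            acc += (uint64_t)a[i] + (P256[i] & mask);
            t[i] = (uint32_t)acc;
            acc >>= 32;
        }
        for (int i = 0; i < 7; i++)
            r[i] = (t[i] >> 1) | (t[i + 1] << 31);
        r[7] = (t[7] >> 1) | ((uint32_t)acc << 31);  /* acc is the 257th bit */
    }
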
@@ -880,13 +894,13 @@ ecp_nistz256_scatter_w7:
 .Loop_scatter_w7:
        ldr     $mask,[$inp],#4
        subs    $index,$index,#1
-       strb    $mask,[$out,#64*0-1]
+       strb    $mask,[$out,#64*0]
        mov     $mask,$mask,lsr#8
-       strb    $mask,[$out,#64*1-1]
+       strb    $mask,[$out,#64*1]
        mov     $mask,$mask,lsr#8
-       strb    $mask,[$out,#64*2-1]
+       strb    $mask,[$out,#64*2]
        mov     $mask,$mask,lsr#8
-       strb    $mask,[$out,#64*3-1]
+       strb    $mask,[$out,#64*3]
        add     $out,$out,#64*4
        bne     .Loop_scatter_w7
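
Each 32-bit word is stored a byte at a time at stride-64 offsets, so byte k of a 64-byte table entry ends up 64*k bytes into the table, displaced by the entry index. Assuming the output pointer enters the loop already offset by that index, the result is a byte-transposed table whose matching gather touches every entry slot uniformly (the usual cache-timing defence). A sketch of the assumed layout, names illustrative:

    #include <stddef.h>

    /* Scatter a 64-byte value into a byte-transposed table of 64
     * entries: byte k of entry "index" lives at table[64*k + index].
     * Assumes the assembly's $out starts at table + index. */
    static void scatter_w7(unsigned char *table /* 64*64 bytes */,
                           const unsigned char val[64], size_t index)
    {
        for (size_t k = 0; k < 64; k++)
            table[64 * k + index] = val[k];
    }
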
 
@@ -1203,25 +1217,42 @@ __ecp_nistz256_add_self:
        adcs    $a6,$a6,$a6
        mov     $ff,#0
        adcs    $a7,$a7,$a7
-#ifdef __thumb2__
-       it      cs
-#endif
-       movcs   $ff,#-1                 @ $ff = carry ? -1 : 0
+       adc     $ff,$ff,#0
+
+       @ if a+b >= modulus, subtract modulus.
+       @
+       @ But since comparison implies subtraction, we subtract
+       @ modulus and then add it back if subtraction borrowed.
+
+       subs    $a0,$a0,#-1
+       sbcs    $a1,$a1,#-1
+       sbcs    $a2,$a2,#-1
+       sbcs    $a3,$a3,#0
+       sbcs    $a4,$a4,#0
+       sbcs    $a5,$a5,#0
+       sbcs    $a6,$a6,#1
+       sbcs    $a7,$a7,#-1
+       sbc     $ff,$ff,#0
+
+       @ Note that because mod has special form, i.e. consists of
+       @ 0xffffffff, 1 and 0s, we can conditionally synthesize it by
+       @ using value of borrow as a whole or extracting single bit.
+       @ Follow $ff register...
 
-       subs    $a0,$a0,$ff             @ subtract synthesized modulus
-       sbcs    $a1,$a1,$ff
+       adds    $a0,$a0,$ff             @ add synthesized modulus
+       adcs    $a1,$a1,$ff
        str     $a0,[$r_ptr,#0]
-       sbcs    $a2,$a2,$ff
+       adcs    $a2,$a2,$ff
        str     $a1,[$r_ptr,#4]
-       sbcs    $a3,$a3,#0
+       adcs    $a3,$a3,#0
        str     $a2,[$r_ptr,#8]
-       sbcs    $a4,$a4,#0
+       adcs    $a4,$a4,#0
        str     $a3,[$r_ptr,#12]
-       sbcs    $a5,$a5,#0
+       adcs    $a5,$a5,#0
        str     $a4,[$r_ptr,#16]
-       sbcs    $a6,$a6,$ff,lsr#31
+       adcs    $a6,$a6,$ff,lsr#31
        str     $a5,[$r_ptr,#20]
-       sbcs    $a7,$a7,$ff
+       adcs    $a7,$a7,$ff
        str     $a6,[$r_ptr,#24]
        str     $a7,[$r_ptr,#28]
 
@@ -1231,7 +1262,7 @@ __ecp_nistz256_add_self:
 ___
 
 ########################################################################
-# following subroutines are "literal" implemetation of those found in
+# following subroutines are "literal" implementation of those found in
 # ecp_nistz256.c
 #
 ########################################################################
@@ -1252,6 +1283,7 @@ ecp_nistz256_point_double:
        stmdb   sp!,{r0-r12,lr}         @ push from r0, unusual, but intentional
        sub     sp,sp,#32*5
 
+.Lpoint_double_shortcut:
        add     r3,sp,#$in_x
        ldmia   $a_ptr!,{r4-r11}        @ copy in_x
        stmia   r3,{r4-r11}
@@ -1371,29 +1403,21 @@ $code.=<<___;
 .align 5
 ecp_nistz256_point_add:
        stmdb   sp!,{r0-r12,lr}         @ push from r0, unusual, but intentional
-       sub     sp,sp,#32*18
+       sub     sp,sp,#32*18+16
 
-       ldmia   $b_ptr!,{r4-r11}        @ copy in2
+       ldmia   $b_ptr!,{r4-r11}        @ copy in2_x
        add     r3,sp,#$in2_x
-       orr     r12,r4,r5
-       orr     r12,r12,r6
-       orr     r12,r12,r7
-       orr     r12,r12,r8
-       orr     r12,r12,r9
-       orr     r12,r12,r10
-       orr     r12,r12,r11
        stmia   r3!,{r4-r11}
-       ldmia   $b_ptr!,{r4-r11}
-       orr     r12,r12,r4
-       orr     r12,r12,r5
+       ldmia   $b_ptr!,{r4-r11}        @ copy in2_y
+       stmia   r3!,{r4-r11}
+       ldmia   $b_ptr,{r4-r11}         @ copy in2_z
+       orr     r12,r4,r5
        orr     r12,r12,r6
        orr     r12,r12,r7
        orr     r12,r12,r8
        orr     r12,r12,r9
        orr     r12,r12,r10
        orr     r12,r12,r11
-       stmia   r3!,{r4-r11}
-       ldmia   $b_ptr,{r4-r11}
        cmp     r12,#0
 #ifdef __thumb2__
        it      ne
@@ -1402,27 +1426,19 @@ ecp_nistz256_point_add:
        stmia   r3,{r4-r11}
        str     r12,[sp,#32*18+8]       @ !in2infty
 
-       ldmia   $a_ptr!,{r4-r11}        @ copy in1
+       ldmia   $a_ptr!,{r4-r11}        @ copy in1_x
        add     r3,sp,#$in1_x
-       orr     r12,r4,r5
-       orr     r12,r12,r6
-       orr     r12,r12,r7
-       orr     r12,r12,r8
-       orr     r12,r12,r9
-       orr     r12,r12,r10
-       orr     r12,r12,r11
        stmia   r3!,{r4-r11}
-       ldmia   $a_ptr!,{r4-r11}
-       orr     r12,r12,r4
-       orr     r12,r12,r5
+       ldmia   $a_ptr!,{r4-r11}        @ copy in1_y
+       stmia   r3!,{r4-r11}
+       ldmia   $a_ptr,{r4-r11}         @ copy in1_z
+       orr     r12,r4,r5
        orr     r12,r12,r6
        orr     r12,r12,r7
        orr     r12,r12,r8
        orr     r12,r12,r9
        orr     r12,r12,r10
        orr     r12,r12,r11
-       stmia   r3!,{r4-r11}
-       ldmia   $a_ptr,{r4-r11}
        cmp     r12,#0
 #ifdef __thumb2__
        it      ne
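
Both copy blocks now accumulate the OR of only the eight Z limbs: in projective coordinates a point is at infinity exactly when Z == 0, so ORing X and Y as the removed lines did was unnecessary. The flag stored as !in1infty/!in2infty reduces to the following (sketch, illustrative name):

    #include <stdint.h>

    /* 1 when the projective point is finite (Z != 0), 0 at infinity;
     * the OR chain is what the "orr r12,..." instructions build before
     * "cmp r12,#0 / movne r12,#1". */
    static uint32_t not_infinity(const uint32_t z[8])
    {
        uint32_t acc = z[0] | z[1] | z[2] | z[3]
                     | z[4] | z[5] | z[6] | z[7];
        return acc != 0;
    }
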
@@ -1504,9 +1520,9 @@ ecp_nistz256_point_add:
        tst     $t0,$t1
        beq     .Ladd_proceed           @ (in1infty || in2infty)?
        tst     $t2,$t2
-       beq     .Ladd_proceed           @ is_equal(S1,S2)?
+       beq     .Ladd_double            @ is_equal(S1,S2)?
 
-       ldr     $r_ptr,[sp,#32*18]
+       ldr     $r_ptr,[sp,#32*18+16]
        eor     r4,r4,r4
        eor     r5,r5,r5
        eor     r6,r6,r6
@@ -1520,6 +1536,12 @@ ecp_nistz256_point_add:
        stmia   $r_ptr!,{r4-r11}
        b       .Ladd_done
 
+.align 4
+.Ladd_double:
+       ldr     $a_ptr,[sp,#32*18+20]
+       add     sp,sp,#32*(18-5)+16     @ difference in frame sizes
+       b       .Lpoint_double_shortcut
+
 .align 4
 .Ladd_proceed:
        add     $a_ptr,sp,#$R
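
Previously the is_equal(S1,S2) test fell through to the generic addition (`beq .Ladd_proceed` on the removed line), whose formulas degenerate when P == Q. The new .Ladd_double stub reloads the saved input pointer from [sp,#32*18+20], pops the difference between the two frames (32*18+16 bytes of locals here versus 32*5 in point_double) and enters the doubling code at .Lpoint_double_shortcut. In the ecp_nistz256.c reference, which this file describes itself as a literal implementation of, this dispatch sits under an is_equal(U1,U2) guard; a sketch over the already-computed flags, names illustrative:

    /* Which path the code around .Ladd_double takes, written over the
     * flags the assembly has already computed.  This point is reached
     * only after U1 == U2 (H == 0) has been established. */
    enum add_path { ADD_PROCEED, ADD_DOUBLE, ADD_INFINITY };

    static enum add_path dispatch(int in1infty, int in2infty, int s1_eq_s2)
    {
        if (in1infty || in2infty)
            return ADD_PROCEED;   /* beq .Ladd_proceed: final copies pick the result */
        if (s1_eq_s2)
            return ADD_DOUBLE;    /* P == Q: b .Lpoint_double_shortcut */
        return ADD_INFINITY;      /* P == -Q: store an all-zero point */
    }
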
@@ -1588,7 +1610,7 @@ ecp_nistz256_point_add:
        add     r3,sp,#$in1_x
        and     r11,r11,r12
        mvn     r12,r12
-       ldr     $r_ptr,[sp,#32*18]
+       ldr     $r_ptr,[sp,#32*18+16]
 ___
 for($i=0;$i<96;$i+=8) {                        # conditional moves
 $code.=<<___;
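
The loop this Perl block generates selects the final output without branches: the flags prepared just above (`and r11,r11,r12`, `mvn r12,r12`) act as all-ones/all-zero masks, so each result word becomes the computed value when both inputs were finite and otherwise falls back to whichever input was not at infinity. The primitive underneath is the usual constant-time select (minimal sketch, illustrative name):

    #include <stdint.h>

    /* mask is all-ones or all-zero; returns a when mask is set, else b.
     * Chaining two of these per word reproduces the generated
     * ldr/and/orr sequence. */
    static uint32_t ct_select(uint32_t mask, uint32_t a, uint32_t b)
    {
        return (a & mask) | (b & ~mask);
    }
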
@@ -1610,8 +1632,8 @@ ___
 }
 $code.=<<___;
 .Ladd_done:
-       add     sp,sp,#32*18+16         @ +16 means "skip even over saved r0-r3"
-#if __ARM_ARCH__>=5 || defined(__thumb__)
+       add     sp,sp,#32*18+16+16      @ +16 means "skip even over saved r0-r3"
+#if __ARM_ARCH__>=5 || !defined(__thumb__)
        ldmia   sp!,{r4-r12,pc}
 #else
        ldmia   sp!,{r4-r12,lr}
@@ -1646,27 +1668,19 @@ ecp_nistz256_point_add_affine:
        stmdb   sp!,{r0-r12,lr}         @ push from r0, unusual, but intentional
        sub     sp,sp,#32*15
 
-       ldmia   $a_ptr!,{r4-r11}        @ copy in1
+       ldmia   $a_ptr!,{r4-r11}        @ copy in1_x
        add     r3,sp,#$in1_x
-       orr     r12,r4,r5
-       orr     r12,r12,r6
-       orr     r12,r12,r7
-       orr     r12,r12,r8
-       orr     r12,r12,r9
-       orr     r12,r12,r10
-       orr     r12,r12,r11
        stmia   r3!,{r4-r11}
-       ldmia   $a_ptr!,{r4-r11}
-       orr     r12,r12,r4
-       orr     r12,r12,r5
+       ldmia   $a_ptr!,{r4-r11}        @ copy in1_y
+       stmia   r3!,{r4-r11}
+       ldmia   $a_ptr,{r4-r11}         @ copy in1_z
+       orr     r12,r4,r5
        orr     r12,r12,r6
        orr     r12,r12,r7
        orr     r12,r12,r8
        orr     r12,r12,r9
        orr     r12,r12,r10
        orr     r12,r12,r11
-       stmia   r3!,{r4-r11}
-       ldmia   $a_ptr,{r4-r11}
        cmp     r12,#0
 #ifdef __thumb2__
        it      ne
@@ -1675,7 +1689,7 @@ ecp_nistz256_point_add_affine:
        stmia   r3,{r4-r11}
        str     r12,[sp,#32*15+4]       @ !in1infty
 
-       ldmia   $b_ptr!,{r4-r11}        @ copy in2
+       ldmia   $b_ptr!,{r4-r11}        @ copy in2_x
        add     r3,sp,#$in2_x
        orr     r12,r4,r5
        orr     r12,r12,r6
@@ -1685,7 +1699,7 @@ ecp_nistz256_point_add_affine:
        orr     r12,r12,r10
        orr     r12,r12,r11
        stmia   r3!,{r4-r11}
-       ldmia   $b_ptr!,{r4-r11}
+       ldmia   $b_ptr!,{r4-r11}        @ copy in2_y
        orr     r12,r12,r4
        orr     r12,r12,r5
        orr     r12,r12,r6