#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
# base and compiler-generated code with in-lined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. A Windows CE port would be trivial, as it's exclusively
# about decorations; ABI and instruction syntax are identical.

# November 2013
#
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 improvement was measured to vary from ~70% to
# incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because the gain on
# others outweighs the marginal loss on Cortex-A9.

# September 2015
#
# Align Cortex-A9 performance with November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than integer-only one on this
# processor. But this optimization further improved performance even
# on other processors: NEON code path is ~45-180% faster than original
# integer-only on Cortex-A8, ~10-210% on Cortex-A15, ~70-450% on
# Snapdragon S4.

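# Both code paths below implement plain word-by-word Montgomery
# multiplication, A*B*R^-1 mod N with R=2^(32*num) and n0 = -N^-1 mod
# 2^32.  A minimal reference sketch, kept commented out so the
# generator itself is unaffected (Math::BigInt, the sub name and the
# argument layout are illustrative assumptions, not part of this
# module):
#
#	use Math::BigInt;
#	sub ref_mul_mont {
#	    my ($a,$b,$n,$n0,$num) = @_;	# $a,$b,$n are Math::BigInt, $a,$b < $n
#	    my $t = Math::BigInt->new(0);
#	    for my $i (0..$num-1) {
#		$t += $a * (($b >> (32*$i)) & 0xffffffff);	# t += a*b[i]
#		my $m = (($t & 0xffffffff) * $n0) & 0xffffffff;
#		$t = ($t + $n * $m) >> 32;	# low word cancels, shift it out
#	    }
#	    $t -= $n if $t >= $n;		# single final subtraction suffices
#	    return $t;				# A*B*R^-1 mod N
#	}
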
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
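
# The command line is expected to carry a perlasm "flavour" and/or an
# output file name: whichever argument looks like a file name becomes
# the output, anything else is treated as the flavour and handed to
# arm-xlate.pl; a flavour of "void" (or none at all) means the raw
# perlasm is written straight to the output file.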

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
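# These offsets follow from the prologue below: the caller's r0 (rp) and
# r2 (bp) are pushed first, then r4-r12,lr (10 words), then sp drops by
# 4*num+4 bytes for tp[0..num].  Measured from &tp[num-1] that places
# tp[num] at #1*4, the saved GPRs at #2*4-#11*4, rp at #12*4, bp at
# #13*4, and the caller's stacked arguments &n0 and num at #14*4 and
# #15*4.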

$code=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lbn_mul_mont
#endif

.global	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
.Lbn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	adr	r0,.Lbn_mul_mont
	ldr	r2,.LOPENSSL_armcap
	ldr	r0,[r0,r2]
#ifdef	__APPLE__
	ldr	r0,[r0]
#endif
	tst	r0,#ARMV7_NEON		@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
#ifdef	__thumb2__
	ittt	lt
#endif
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]

	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]

	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp

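	@ 1st pass: for j=1..num-1 accumulate ap[j]*bp[0] and np[j]*n0 in
	@ two separate 64-bit chains (alo:ahi and nlo:nhi), so no single
	@ umlal can overflow, and store the running low words as tp[j-1].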
.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st

	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	mov	$tj,sp
	str	$nhi,[$num,#4]		@ tp[num]=
\f
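	@ Outer loop: for each further bp[i] rewind ap and np, fold the
	@ previous tp[] back in (the "+=tp[j]" adds in .Linner), and
	@ repeat the two-chain accumulation of .L1st, with
	@ m = (tp[0]+ap[0]*bp[i])*n0 recomputed so that np[]*m cancels
	@ the low word of the running sum.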
.Louter:
	sub	$tj,$num,$tj		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp

.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adc	$ahi,$ahi,#0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.Linner

	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
#ifdef	__thumb2__
	itt	ne
#endif
	movne	$tj,sp
	bne	.Louter
\f
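	@ Final reduction: tp[0..num] now holds the Montgomery sum, which
	@ is less than 2*np[].  Subtract np[] once and, depending on the
	@ borrow, publish either the difference or tp[] itself through a
	@ branch-free pointer select (and/bic/orr), zapping tp[] while
	@ copying.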
	ldr	$rp,[$_rp]		@ pull rp
	mov	$aj,sp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,$aj		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ upmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

	and	$ap,$tp,$nhi
	bic	$np,$rp,$nhi
	orr	$ap,$ap,$np		@ ap=borrow?tp:rp

.Lcopy:	ldr	$tj,[$ap],#4		@ copy or in-place refresh
	str	sp,[$tp],#4		@ zap tp
	str	$tj,[$rp],#4
	cmp	$tp,$num
	bne	.Lcopy

	mov	sp,$num
	add	sp,sp,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:
#if __ARM_ARCH__>=5
	ret				@ bx lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_mul_mont,.-bn_mul_mont
___
{
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero="$Z#lo";
my $temp="$Temp#lo";

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));

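# NEON register map: d0-d3 hold eight words of ap[], d4-d7 eight words
# of np[], q6-q13 are the 64-bit accumulator lanes, d28-d30 carry the
# 16-bit-"smashed" b[i] and m digits plus the n0 constant, and q4/q5
# serve as zero/carry scratch.
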
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block
	mov	ip,sp

	cmp	$num,#8
	bhi	.LNEON_8n

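	@ Both NEON paths split b[i] and m into 16-bit halves (vzip.16
	@ against a zeroed register), so each vmlal.u32 adds at most a
	@ 48-bit product into a 64-bit lane; carry propagation is thereby
	@ deferred to the tail code, where carries are folded 16 bits at
	@ a time and the halves are zipped back into 32-bit words.
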
	@ special case for $num==8, everything is in register bank...

	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	sub		$toutptr,sp,$num,lsl#4
	vld1.32		{$A0-$A3},  [$aptr]!		@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr			@ alloca
	vzip.16		$Bi,$zero

	vmull.u32	@ACC[0],$Bi,${A0}[0]
	vmull.u32	@ACC[1],$Bi,${A0}[1]
	vmull.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmull.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$Ni,$M0

	vmull.u32	@ACC[4],$Bi,${A2}[0]
	 vld1.32	{$N0-$N3}, [$nptr]!
	vmull.u32	@ACC[5],$Bi,${A2}[1]
	vmull.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	sub		$outer,$num,#1
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$Ni,$M0

	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer8

	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	mov		$toutptr,sp
	vshr.u64	$temp,@ACC[0]#lo,#16
	mov		$inner,$num
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	add		$tinptr,sp,#96
	vshr.u64	$temp,@ACC[0]#hi,#16
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	b	.LNEON_tail_entry

.align	4
.LNEON_8n:
	veor		@ACC[0],@ACC[0],@ACC[0]
	 sub		$toutptr,sp,#128
	veor		@ACC[1],@ACC[1],@ACC[1]
	 sub		$toutptr,$toutptr,$num,lsl#4
	veor		@ACC[2],@ACC[2],@ACC[2]
	 and		$toutptr,$toutptr,#-64
	veor		@ACC[3],@ACC[3],@ACC[3]
	 mov		sp,$toutptr			@ alloca
	veor		@ACC[4],@ACC[4],@ACC[4]
	 add		$toutptr,$toutptr,#256
	veor		@ACC[5],@ACC[5],@ACC[5]
	 sub		$inner,$num,#8
	veor		@ACC[6],@ACC[6],@ACC[6]
	veor		@ACC[7],@ACC[7],@ACC[7]

.LNEON_8n_init:
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs		$inner,$inner,#8
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]-@ACC[7]},[$toutptr,:256]!
	bne		.LNEON_8n_init

	add		$tinptr,sp,#256
	vld1.32		{$A0-$A3},[$aptr]!
	add		$bnptr,sp,#8
	vld1.32		{${M0}[0]},[$n0,:32]
	mov		$outer,$num
	b		.LNEON_8n_outer

.align	4
.LNEON_8n_outer:
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	add		$toutptr,sp,#128
	vld1.32		{$N0-$N3},[$nptr]!

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	 veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	 vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	 vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	 vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[sp,:64]		@ put aside smashed b[8*i+0]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	 vzip.16	$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=0; $i<7;) {
$code.=<<___;
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	veor		$temp,$temp,$temp
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vzip.16		$Bi,$temp
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	 vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	 vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]!	@ put aside smashed m[8*i+$i]
___
	push(@ACC,shift(@ACC));	$i++;
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]!
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	 veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	 vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	 vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	 vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[$bnptr,:64]!	@ put aside smashed b[8*i+$i]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	 vzip.16	$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	 vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	 vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	 vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]	@ put aside smashed m[8*i+$i]
	add		$bnptr,sp,#8		@ rewind
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	sub		$inner,$num,#8
	b		.LNEON_8n_inner

.align	4
.LNEON_8n_inner:
	subs		$inner,$inner,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+0]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vld1.32		{$N0-$N3},[$nptr]!
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vld1.32		{$Bi},[$bnptr,:64]!	@ pull smashed b[8*i+$i]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+$i]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	it		eq
	subeq		$aptr,$aptr,$num,lsl#2	@ rewind
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	add		$bnptr,sp,#8		@ rewind
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	bne		.LNEON_8n_inner
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	add		$tinptr,sp,#128
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor		q2,q2,q2		@ $N0-$N1
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor		q3,q3,q3		@ $N2-$N3
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]},[$toutptr,:128]

	subs		$outer,$outer,#8
	vld1.64		{@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64		{@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64		{@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64		{@ACC[6]-@ACC[7]},[$tinptr,:256]!

	itt		ne
	subne		$nptr,$nptr,$num,lsl#2	@ rewind
	bne		.LNEON_8n_outer

	add		$toutptr,sp,#128
	vst1.64		{q2-q3}, [sp,:256]!	@ start wiping stack frame
	vshr.u64	$temp,@ACC[0]#lo,#16
	vst1.64		{q2-q3},[sp,:256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64		{q2-q3}, [sp,:256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vst1.64		{q2-q3}, [sp,:256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	mov		$inner,$num
	b		.LNEON_tail_entry

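	@ Tail: the 64-bit lanes still hold sums of 16-bit-digit partial
	@ products.  Propagate carries 16 bits at a time and vzip.16 the
	@ halves back together to recover ordinary 32-bit words of tp[].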
.align	4
.LNEON_tail:
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vld1.64		{@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64		{@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vld1.64		{@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

.LNEON_tail_entry:
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32		{@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64	$temp,@ACC[1]#lo,#16
	vadd.u64	@ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64	$temp,@ACC[1]#hi,#16
	vzip.16		@ACC[1]#lo,@ACC[1]#hi
___
	push(@ACC,shift(@ACC));
}
	push(@ACC,shift(@ACC));
$code.=<<___;
	vld1.64		{@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs		$inner,$inner,#8
	vst1.32		{@ACC[7]#lo[0]},   [$toutptr, :32]!
	bne	.LNEON_tail

	vst1.32	{${temp}[0]}, [$toutptr, :32]		@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2			@ rewind $nptr
	subs	$aptr,sp,#0				@ clear carry flag
	add	$bptr,sp,$num,lsl#2

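	@ Final reduction, same idea as the integer-only path: subtract
	@ np[] from the unpacked result, then copy either the difference
	@ or the original words back to rp[] under the borrow flag
	@ (movcc), wiping the temporary frame as we go.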
.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [$aptr]				@ load top-most bit
	mov	r11,sp
	veor	q0,q0,q0
	sub	r11,$bptr,r11				@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11				@ rewind $rptr
	mov	$nptr,$bptr				@ second 3/4th of frame
	sbcs	r10,r10,#0				@ result is carry flag

.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	ldmia	$aptr, {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

	mov	sp,ip
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	ret						@ bx lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}
$code.=<<___;
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bret\b/bx	lr/g						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT;