#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#                       IALU(*)/gcc-4.4         NEON
#
# ARM11xx(ARMv6)        7.78/+100%              -
# Cortex-A5             6.35/+130%              3.00
# Cortex-A8             6.25/+115%              2.36
# Cortex-A9             5.10/+95%               2.55
# Cortex-A15            3.85/+85%               1.25(**)
# Snapdragon S4         5.70/+100%              1.48(**)
#
# (*)   this is for -march=armv6, i.e. with a bunch of ldrb loading data;
# (**)  these are trade-off results; they can be improved by ~8%, but at
#       the cost of a 15/12% regression on Cortex-A5/A7. It's even possible
#       to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
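
# The script is normally driven through arm-xlate.pl; an illustrative
# invocation (the flavour name comes from the caller's perlasm scheme
# and is only an example here, not something this file mandates) is:
#
#   perl poly1305-armv4.pl linux32 poly1305-armv4.S
#
# Without a flavour (or with "void") the untranslated perlasm output
# is written directly to the named file.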

($ctx,$inp,$len,$padbit)=map("r$_",(0..3));

$code.=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code   32
#endif

.globl  poly1305_emit
.globl  poly1305_blocks
.globl  poly1305_init
.type   poly1305_init,%function
.align  5
poly1305_init:
.Lpoly1305_init:
        stmdb   sp!,{r4-r11}

        eor     r3,r3,r3
        cmp     $inp,#0
        str     r3,[$ctx,#0]            @ zero hash value
        str     r3,[$ctx,#4]
        str     r3,[$ctx,#8]
        str     r3,[$ctx,#12]
        str     r3,[$ctx,#16]
        str     r3,[$ctx,#36]           @ is_base2_26
        add     $ctx,$ctx,#20

#ifdef  __thumb2__
        it      eq
#endif
        moveq   r0,#0
        beq     .Lno_key

#if     __ARM_MAX_ARCH__>=7
        adr     r11,.Lpoly1305_init
        ldr     r12,.LOPENSSL_armcap
#endif
        ldrb    r4,[$inp,#0]
        mov     r10,#0x0fffffff
        ldrb    r5,[$inp,#1]
        and     r3,r10,#-4              @ 0x0ffffffc
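        @ Poly1305 key clamping (per the Poly1305 spec): the first
        @ 32-bit word of the key keeps only its low 28 bits (mask
        @ 0x0fffffff), while the other three words additionally drop
        @ their two lowest bits (mask 0x0ffffffc); r10 and r3 above
        @ hold exactly these two masks.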
        ldrb    r6,[$inp,#2]
        ldrb    r7,[$inp,#3]
        orr     r4,r4,r5,lsl#8
        ldrb    r5,[$inp,#4]
        orr     r4,r4,r6,lsl#16
        ldrb    r6,[$inp,#5]
        orr     r4,r4,r7,lsl#24
        ldrb    r7,[$inp,#6]
        and     r4,r4,r10

#if     __ARM_MAX_ARCH__>=7
        ldr     r12,[r11,r12]           @ OPENSSL_armcap_P
# ifdef __APPLE__
        ldr     r12,[r12]
# endif
#endif
        ldrb    r8,[$inp,#7]
        orr     r5,r5,r6,lsl#8
        ldrb    r6,[$inp,#8]
        orr     r5,r5,r7,lsl#16
        ldrb    r7,[$inp,#9]
        orr     r5,r5,r8,lsl#24
        ldrb    r8,[$inp,#10]
        and     r5,r5,r3

#if     __ARM_MAX_ARCH__>=7
        tst     r12,#ARMV7_NEON         @ check for NEON
# ifdef __APPLE__
        adr     r9,poly1305_blocks_neon
        adr     r11,poly1305_blocks
#  ifdef __thumb2__
        it      ne
#  endif
        movne   r11,r9
        adr     r12,poly1305_emit
        adr     r10,poly1305_emit_neon
#  ifdef __thumb2__
        it      ne
#  endif
        movne   r12,r10
# else
#  ifdef __thumb2__
        itete   eq
#  endif
        addeq   r12,r11,#(poly1305_emit-.Lpoly1305_init)
        addne   r12,r11,#(poly1305_emit_neon-.Lpoly1305_init)
        addeq   r11,r11,#(poly1305_blocks-.Lpoly1305_init)
        addne   r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init)
# endif
# ifdef __thumb2__
        orr     r12,r12,#1      @ thumb-ify address
        orr     r11,r11,#1
# endif
#endif
        ldrb    r9,[$inp,#11]
        orr     r6,r6,r7,lsl#8
        ldrb    r7,[$inp,#12]
        orr     r6,r6,r8,lsl#16
        ldrb    r8,[$inp,#13]
        orr     r6,r6,r9,lsl#24
        ldrb    r9,[$inp,#14]
        and     r6,r6,r3

        ldrb    r10,[$inp,#15]
        orr     r7,r7,r8,lsl#8
        str     r4,[$ctx,#0]
        orr     r7,r7,r9,lsl#16
        str     r5,[$ctx,#4]
        orr     r7,r7,r10,lsl#24
        str     r6,[$ctx,#8]
        and     r7,r7,r3
        str     r7,[$ctx,#12]
#if     __ARM_MAX_ARCH__>=7
        stmia   r2,{r11,r12}            @ fill functions table
        mov     r0,#1
#else
        mov     r0,#0
#endif
.Lno_key:
        ldmia   sp!,{r4-r11}
#if     __ARM_ARCH__>=5
        ret                             @ bx    lr
#else
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
#endif
.size   poly1305_init,.-poly1305_init
___
{
my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
my ($s1,$s2,$s3)=($r1,$r2,$r3);

$code.=<<___;
.type   poly1305_blocks,%function
.align  5
poly1305_blocks:
        stmdb   sp!,{r3-r11,lr}

        ands    $len,$len,#-16
        beq     .Lno_data

        cmp     $padbit,#0
        add     $len,$len,$inp          @ end pointer
        sub     sp,sp,#32

        ldmia   $ctx,{$h0-$r3}          @ load context

        str     $ctx,[sp,#12]           @ offload stuff
        mov     lr,$inp
        str     $len,[sp,#16]
        str     $r1,[sp,#20]
        str     $r2,[sp,#24]
        str     $r3,[sp,#28]
        b       .Loop

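        @ Each iteration of .Loop absorbs one 16-byte block: the block
        @ (plus the padbit contributing 2^128) is added into the 130-bit
        @ accumulator h, which is then multiplied by the clamped key r,
        @ i.e. h = (h + m + padbit*2^128) * r mod (2^130 - 5).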
.Loop:
#if __ARM_ARCH__<7
        ldrb    r0,[lr],#16             @ load input
# ifdef __thumb2__
        it      hi
# endif
        addhi   $h4,$h4,#1              @ 1<<128
        ldrb    r1,[lr,#-15]
        ldrb    r2,[lr,#-14]
        ldrb    r3,[lr,#-13]
        orr     r1,r0,r1,lsl#8
        ldrb    r0,[lr,#-12]
        orr     r2,r1,r2,lsl#16
        ldrb    r1,[lr,#-11]
        orr     r3,r2,r3,lsl#24
        ldrb    r2,[lr,#-10]
        adds    $h0,$h0,r3              @ accumulate input

        ldrb    r3,[lr,#-9]
        orr     r1,r0,r1,lsl#8
        ldrb    r0,[lr,#-8]
        orr     r2,r1,r2,lsl#16
        ldrb    r1,[lr,#-7]
        orr     r3,r2,r3,lsl#24
        ldrb    r2,[lr,#-6]
        adcs    $h1,$h1,r3

        ldrb    r3,[lr,#-5]
        orr     r1,r0,r1,lsl#8
        ldrb    r0,[lr,#-4]
        orr     r2,r1,r2,lsl#16
        ldrb    r1,[lr,#-3]
        orr     r3,r2,r3,lsl#24
        ldrb    r2,[lr,#-2]
        adcs    $h2,$h2,r3

        ldrb    r3,[lr,#-1]
        orr     r1,r0,r1,lsl#8
        str     lr,[sp,#8]              @ offload input pointer
        orr     r2,r1,r2,lsl#16
        add     $s1,$r1,$r1,lsr#2
        orr     r3,r2,r3,lsl#24
#else
        ldr     r0,[lr],#16             @ load input
# ifdef __thumb2__
        it      hi
# endif
        addhi   $h4,$h4,#1              @ padbit
        ldr     r1,[lr,#-12]
        ldr     r2,[lr,#-8]
        ldr     r3,[lr,#-4]
# ifdef __ARMEB__
        rev     r0,r0
        rev     r1,r1
        rev     r2,r2
        rev     r3,r3
# endif
        adds    $h0,$h0,r0              @ accumulate input
        str     lr,[sp,#8]              @ offload input pointer
        adcs    $h1,$h1,r1
        add     $s1,$r1,$r1,lsr#2
        adcs    $h2,$h2,r2
#endif
        add     $s2,$r2,$r2,lsr#2
        adcs    $h3,$h3,r3
        add     $s3,$r3,$r3,lsr#2

        umull   r2,r3,$h1,$r0
         adc    $h4,$h4,#0
        umull   r0,r1,$h0,$r0
        umlal   r2,r3,$h4,$s1
        umlal   r0,r1,$h3,$s1
        ldr     $r1,[sp,#20]            @ reload $r1
        umlal   r2,r3,$h2,$s3
        umlal   r0,r1,$h1,$s3
        umlal   r2,r3,$h3,$s2
        umlal   r0,r1,$h2,$s2
        umlal   r2,r3,$h0,$r1
        str     r0,[sp,#0]              @ future $h0
         mul    r0,$s2,$h4
        ldr     $r2,[sp,#24]            @ reload $r2
        adds    r2,r2,r1                @ d1+=d0>>32
         eor    r1,r1,r1
        adc     lr,r3,#0                @ future $h2
        str     r2,[sp,#4]              @ future $h1

        mul     r2,$s3,$h4
        eor     r3,r3,r3
        umlal   r0,r1,$h3,$s3
        ldr     $r3,[sp,#28]            @ reload $r3
        umlal   r2,r3,$h3,$r0
        umlal   r0,r1,$h2,$r0
        umlal   r2,r3,$h2,$r1
        umlal   r0,r1,$h1,$r1
        umlal   r2,r3,$h1,$r2
        umlal   r0,r1,$h0,$r2
        umlal   r2,r3,$h0,$r3
        ldr     $h0,[sp,#0]
        mul     $h4,$r0,$h4
        ldr     $h1,[sp,#4]

        adds    $h2,lr,r0               @ d2+=d1>>32
        ldr     lr,[sp,#8]              @ reload input pointer
        adc     r1,r1,#0
        adds    $h3,r2,r1               @ d3+=d2>>32
        ldr     r0,[sp,#16]             @ reload end pointer
        adc     r3,r3,#0
        add     $h4,$h4,r3              @ h4+=d3>>32

        and     r1,$h4,#-4
        and     $h4,$h4,#3
        add     r1,r1,r1,lsr#2          @ *=5
        adds    $h0,$h0,r1
        adcs    $h1,$h1,#0
        adcs    $h2,$h2,#0
        adcs    $h3,$h3,#0
        adc     $h4,$h4,#0
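        @ The fold above works because 2^130 = 5 (mod 2^130-5): with
        @ c = h4>>2, the masked value r1 = h4&~3 equals 4*c, so
        @ r1 + (r1>>2) = 4*c + c = 5*c, which is exactly c*2^130
        @ reduced into the low limbs.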

        cmp     r0,lr                   @ done yet?
        bhi     .Loop

        ldr     $ctx,[sp,#12]
        add     sp,sp,#32
        stmia   $ctx,{$h0-$h4}          @ store the result

.Lno_data:
#if     __ARM_ARCH__>=5
        ldmia   sp!,{r3-r11,pc}
#else
        ldmia   sp!,{r3-r11,lr}
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
#endif
.size   poly1305_blocks,.-poly1305_blocks
___
}
{
my ($ctx,$mac,$nonce)=map("r$_",(0..2));
my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
my $g4=$h4;

$code.=<<___;
.type   poly1305_emit,%function
.align  5
poly1305_emit:
        stmdb   sp!,{r4-r11}
.Lpoly1305_emit_enter:

        ldmia   $ctx,{$h0-$h4}
        adds    $g0,$h0,#5              @ compare to modulus
        adcs    $g1,$h1,#0
        adcs    $g2,$h2,#0
        adcs    $g3,$h3,#0
        adc     $g4,$h4,#0
        tst     $g4,#4                  @ did it carry/borrow?
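        @ Constant-time final subtraction: h is below 2*(2^130-5), so
        @ h >= 2^130-5 exactly when h+5 reaches 2^130, i.e. when bit 2
        @ of g4 is set; in that case the conditional moves below select
        @ g = h+5 (which is h-p mod 2^128) instead of h.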

#ifdef  __thumb2__
        it      ne
#endif
        movne   $h0,$g0
        ldr     $g0,[$nonce,#0]
#ifdef  __thumb2__
        it      ne
#endif
        movne   $h1,$g1
        ldr     $g1,[$nonce,#4]
#ifdef  __thumb2__
        it      ne
#endif
        movne   $h2,$g2
        ldr     $g2,[$nonce,#8]
#ifdef  __thumb2__
        it      ne
#endif
        movne   $h3,$g3
        ldr     $g3,[$nonce,#12]

        adds    $h0,$h0,$g0
        adcs    $h1,$h1,$g1
        adcs    $h2,$h2,$g2
        adc     $h3,$h3,$g3

#if __ARM_ARCH__>=7
# ifdef __ARMEB__
        rev     $h0,$h0
        rev     $h1,$h1
        rev     $h2,$h2
        rev     $h3,$h3
# endif
        str     $h0,[$mac,#0]
        str     $h1,[$mac,#4]
        str     $h2,[$mac,#8]
        str     $h3,[$mac,#12]
#else
        strb    $h0,[$mac,#0]
        mov     $h0,$h0,lsr#8
        strb    $h1,[$mac,#4]
        mov     $h1,$h1,lsr#8
        strb    $h2,[$mac,#8]
        mov     $h2,$h2,lsr#8
        strb    $h3,[$mac,#12]
        mov     $h3,$h3,lsr#8

        strb    $h0,[$mac,#1]
        mov     $h0,$h0,lsr#8
        strb    $h1,[$mac,#5]
        mov     $h1,$h1,lsr#8
        strb    $h2,[$mac,#9]
        mov     $h2,$h2,lsr#8
        strb    $h3,[$mac,#13]
        mov     $h3,$h3,lsr#8

        strb    $h0,[$mac,#2]
        mov     $h0,$h0,lsr#8
        strb    $h1,[$mac,#6]
        mov     $h1,$h1,lsr#8
        strb    $h2,[$mac,#10]
        mov     $h2,$h2,lsr#8
        strb    $h3,[$mac,#14]
        mov     $h3,$h3,lsr#8

        strb    $h0,[$mac,#3]
        strb    $h1,[$mac,#7]
        strb    $h2,[$mac,#11]
        strb    $h3,[$mac,#15]
#endif
        ldmia   sp!,{r4-r11}
#if     __ARM_ARCH__>=5
        ret                             @ bx    lr
#else
        tst     lr,#1
        moveq   pc,lr                   @ be binary compatible with V4, yet
        bx      lr                      @ interoperable with Thumb ISA:-)
#endif
.size   poly1305_emit,.-poly1305_emit
___
{
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
my ($T0,$T1,$MASK) = map("q$_",(15,4,0));

my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));

$code.=<<___;
#if     __ARM_MAX_ARCH__>=7
.fpu    neon

.type   poly1305_init_neon,%function
.align  5
poly1305_init_neon:
        ldr     r4,[$ctx,#20]           @ load key base 2^32
        ldr     r5,[$ctx,#24]
        ldr     r6,[$ctx,#28]
        ldr     r7,[$ctx,#32]

        and     r2,r4,#0x03ffffff       @ base 2^32 -> base 2^26
        mov     r3,r4,lsr#26
        mov     r4,r5,lsr#20
        orr     r3,r3,r5,lsl#6
        mov     r5,r6,lsr#14
        orr     r4,r4,r6,lsl#12
        mov     r6,r7,lsr#8
        orr     r5,r5,r7,lsl#18
        and     r3,r3,#0x03ffffff
        and     r4,r4,#0x03ffffff
        and     r5,r5,#0x03ffffff
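        @ After this split the 128-bit key r is held as five 26-bit
        @ limbs r0..r4 (in registers r2..r6), i.e.
        @ r = r0 + r1*2^26 + r2*2^52 + r3*2^78 + r4*2^104; 26-bit limbs
        @ leave the 64-bit vmlal accumulators enough headroom for the
        @ lazy-reduction scheme described below.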

        vdup.32 $R0,r2                  @ r^1 in both lanes
        add     r2,r3,r3,lsl#2          @ *5
        vdup.32 $R1,r3
        add     r3,r4,r4,lsl#2
        vdup.32 $S1,r2
        vdup.32 $R2,r4
        add     r4,r5,r5,lsl#2
        vdup.32 $S2,r3
        vdup.32 $R3,r5
        add     r5,r6,r6,lsl#2
        vdup.32 $S3,r4
        vdup.32 $R4,r6
        vdup.32 $S4,r5

        mov     $zeros,#2               @ counter

.Lsquare_neon:
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
        @ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
        @ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
        @ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
        @ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
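        @ The 5* factors implement the modular wrap: limb products whose
        @ weights reach 2^130 (e.g. h4*r1, weight 2^104 * 2^26 = 2^130)
        @ are reduced on the spot, since 2^130 = 5 (mod 2^130-5); that
        @ is why the s[i] = 5*r[i] values computed above enter these
        @ formulas.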

        vmull.u32       $D0,$R0,${R0}[1]
        vmull.u32       $D1,$R1,${R0}[1]
        vmull.u32       $D2,$R2,${R0}[1]
        vmull.u32       $D3,$R3,${R0}[1]
        vmull.u32       $D4,$R4,${R0}[1]

        vmlal.u32       $D0,$R4,${S1}[1]
        vmlal.u32       $D1,$R0,${R1}[1]
        vmlal.u32       $D2,$R1,${R1}[1]
        vmlal.u32       $D3,$R2,${R1}[1]
        vmlal.u32       $D4,$R3,${R1}[1]

        vmlal.u32       $D0,$R3,${S2}[1]
        vmlal.u32       $D1,$R4,${S2}[1]
        vmlal.u32       $D3,$R1,${R2}[1]
        vmlal.u32       $D2,$R0,${R2}[1]
        vmlal.u32       $D4,$R2,${R2}[1]

        vmlal.u32       $D0,$R2,${S3}[1]
        vmlal.u32       $D3,$R0,${R3}[1]
        vmlal.u32       $D1,$R3,${S3}[1]
        vmlal.u32       $D2,$R4,${S3}[1]
        vmlal.u32       $D4,$R1,${R3}[1]

        vmlal.u32       $D3,$R4,${S4}[1]
        vmlal.u32       $D0,$R1,${S4}[1]
        vmlal.u32       $D1,$R2,${S4}[1]
        vmlal.u32       $D2,$R3,${S4}[1]
        vmlal.u32       $D4,$R0,${R4}[1]

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
        @ and P. Schwabe
        @
        @ H0>>+H1>>+H2>>+H3>>+H4
        @ H3>>+H4>>*5+H0>>+H1
        @
        @ Trivia.
        @
        @ Result of multiplication of n-bit number by m-bit number is
        @ n+m bits wide. However! Even though 2^n is an (n+1)-bit number,
        @ an m-bit number multiplied by 2^n is still n+m bits wide.
        @
        @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
        @ and so is sum of four. Sum of 2^m (n-m)-bit numbers and one
        @ n-bit number is n+1 bits wide.
        @
        @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
        @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
        @ can be 27. However! In cases when their width exceeds 26 bits
        @ they are limited by 2^26+2^6. This in turn means that *sum*
        @ of the products with these values can still be viewed as sum
        @ of 52-bit numbers as long as the number of addends is not a
        @ power of 2. For example,
        @
        @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
        @
        @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
        @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
        @ 8 * (2^52) or 2^55. However, the value is then multiplied
        @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
        @ which is less than 32 * (2^52) or 2^57. And when processing
        @ data we are looking at three times as many addends...
        @
        @ In the key setup procedure pre-reduced H0 is limited by 5*4+1
        @ and 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing
        @ the input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
        @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? The vmlal.u32
        @ instruction accepts 2x32-bit input and writes 2x64-bit result.
        @ This means that the result of reduction has to be compressed upon
        @ loop wrap-around. This can be done in the process of reduction
        @ to minimize the number of instructions [as well as the number of
        @ 128-bit instructions, which benefits low-end processors], but
        @ one has to watch for H2 (which is narrower than H0) and 5*H4
        @ not being wider than 58 bits, so that the result of the right
        @ shift by 26 bits fits in 32 bits. This is also useful on x86,
        @ because it allows one to use paddd in place of paddq, which
        @ benefits Atom, where paddq is ridiculously slow.

        vshr.u64        $T0,$D3,#26
        vmovn.i64       $D3#lo,$D3
         vshr.u64       $T1,$D0,#26
         vmovn.i64      $D0#lo,$D0
        vadd.i64        $D4,$D4,$T0             @ h3 -> h4
        vbic.i32        $D3#lo,#0xfc000000      @ &=0x03ffffff
         vadd.i64       $D1,$D1,$T1             @ h0 -> h1
         vbic.i32       $D0#lo,#0xfc000000

        vshrn.u64       $T0#lo,$D4,#26
        vmovn.i64       $D4#lo,$D4
         vshr.u64       $T1,$D1,#26
         vmovn.i64      $D1#lo,$D1
         vadd.i64       $D2,$D2,$T1             @ h1 -> h2
        vbic.i32        $D4#lo,#0xfc000000
         vbic.i32       $D1#lo,#0xfc000000

        vadd.i32        $D0#lo,$D0#lo,$T0#lo
        vshl.u32        $T0#lo,$T0#lo,#2
         vshrn.u64      $T1#lo,$D2,#26
         vmovn.i64      $D2#lo,$D2
        vadd.i32        $D0#lo,$D0#lo,$T0#lo    @ h4 -> h0
         vadd.i32       $D3#lo,$D3#lo,$T1#lo    @ h2 -> h3
         vbic.i32       $D2#lo,#0xfc000000

        vshr.u32        $T0#lo,$D0#lo,#26
        vbic.i32        $D0#lo,#0xfc000000
         vshr.u32       $T1#lo,$D3#lo,#26
         vbic.i32       $D3#lo,#0xfc000000
        vadd.i32        $D1#lo,$D1#lo,$T0#lo    @ h0 -> h1
         vadd.i32       $D4#lo,$D4#lo,$T1#lo    @ h3 -> h4

        subs            $zeros,$zeros,#1
        beq             .Lsquare_break_neon

        add             $tbl0,$ctx,#(48+0*9*4)
        add             $tbl1,$ctx,#(48+1*9*4)

        vtrn.32         $R0,$D0#lo              @ r^2:r^1
        vtrn.32         $R2,$D2#lo
        vtrn.32         $R3,$D3#lo
        vtrn.32         $R1,$D1#lo
        vtrn.32         $R4,$D4#lo

        vshl.u32        $S2,$R2,#2              @ *5
        vshl.u32        $S3,$R3,#2
        vshl.u32        $S1,$R1,#2
        vshl.u32        $S4,$R4,#2
        vadd.i32        $S2,$S2,$R2
        vadd.i32        $S1,$S1,$R1
        vadd.i32        $S3,$S3,$R3
        vadd.i32        $S4,$S4,$R4

        vst4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
        vst4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
        vst4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
        vst4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
        vst1.32         {${S4}[0]},[$tbl0,:32]
        vst1.32         {${S4}[1]},[$tbl1,:32]

        b               .Lsquare_neon

.align  4
.Lsquare_break_neon:
        add             $tbl0,$ctx,#(48+2*4*9)
        add             $tbl1,$ctx,#(48+3*4*9)

        vmov            $R0,$D0#lo              @ r^4:r^3
        vshl.u32        $S1,$D1#lo,#2           @ *5
        vmov            $R1,$D1#lo
        vshl.u32        $S2,$D2#lo,#2
        vmov            $R2,$D2#lo
        vshl.u32        $S3,$D3#lo,#2
        vmov            $R3,$D3#lo
        vshl.u32        $S4,$D4#lo,#2
        vmov            $R4,$D4#lo
        vadd.i32        $S1,$S1,$D1#lo
        vadd.i32        $S2,$S2,$D2#lo
        vadd.i32        $S3,$S3,$D3#lo
        vadd.i32        $S4,$S4,$D4#lo

        vst4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
        vst4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
        vst4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
        vst4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
        vst1.32         {${S4}[0]},[$tbl0]
        vst1.32         {${S4}[1]},[$tbl1]

        ret                             @ bx    lr
.size   poly1305_init_neon,.-poly1305_init_neon

.type   poly1305_blocks_neon,%function
.align  5
poly1305_blocks_neon:
        ldr     ip,[$ctx,#36]           @ is_base2_26
        ands    $len,$len,#-16
        beq     .Lno_data_neon

        cmp     $len,#64
        bhs     .Lenter_neon
        tst     ip,ip                   @ is_base2_26?
        beq     poly1305_blocks

.Lenter_neon:
        stmdb   sp!,{r4-r7}
        vstmdb  sp!,{d8-d15}            @ ABI specification says so

        tst     ip,ip                   @ is_base2_26?
        bne     .Lbase2_26_neon

        stmdb   sp!,{r1-r3,lr}
        bl      poly1305_init_neon

        ldr     r4,[$ctx,#0]            @ load hash value base 2^32
        ldr     r5,[$ctx,#4]
        ldr     r6,[$ctx,#8]
        ldr     r7,[$ctx,#12]
        ldr     ip,[$ctx,#16]

        and     r2,r4,#0x03ffffff       @ base 2^32 -> base 2^26
        mov     r3,r4,lsr#26
         veor   $D0#lo,$D0#lo,$D0#lo
        mov     r4,r5,lsr#20
        orr     r3,r3,r5,lsl#6
         veor   $D1#lo,$D1#lo,$D1#lo
        mov     r5,r6,lsr#14
        orr     r4,r4,r6,lsl#12
         veor   $D2#lo,$D2#lo,$D2#lo
        mov     r6,r7,lsr#8
        orr     r5,r5,r7,lsl#18
         veor   $D3#lo,$D3#lo,$D3#lo
        and     r3,r3,#0x03ffffff
        orr     r6,r6,ip,lsl#24
         veor   $D4#lo,$D4#lo,$D4#lo
        and     r4,r4,#0x03ffffff
        mov     r1,#1
        and     r5,r5,#0x03ffffff
        str     r1,[$ctx,#36]           @ is_base2_26

        vmov.32 $D0#lo[0],r2
        vmov.32 $D1#lo[0],r3
        vmov.32 $D2#lo[0],r4
        vmov.32 $D3#lo[0],r5
        vmov.32 $D4#lo[0],r6
        adr     $zeros,.Lzeros

        ldmia   sp!,{r1-r3,lr}
        b       .Lbase2_32_neon

.align  4
.Lbase2_26_neon:
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ load hash value

        veor            $D0#lo,$D0#lo,$D0#lo
        veor            $D1#lo,$D1#lo,$D1#lo
        veor            $D2#lo,$D2#lo,$D2#lo
        veor            $D3#lo,$D3#lo,$D3#lo
        veor            $D4#lo,$D4#lo,$D4#lo
        vld4.32         {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
        adr             $zeros,.Lzeros
        vld1.32         {$D4#lo[0]},[$ctx]
        sub             $ctx,$ctx,#16           @ rewind

.Lbase2_32_neon:
        add             $in2,$inp,#32
        mov             $padbit,$padbit,lsl#24
        tst             $len,#31
        beq             .Leven

        vld4.32         {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
        vmov.32         $H4#lo[0],$padbit
        sub             $len,$len,#16
        add             $in2,$inp,#32

# ifdef __ARMEB__
        vrev32.8        $H0,$H0
        vrev32.8        $H3,$H3
        vrev32.8        $H1,$H1
        vrev32.8        $H2,$H2
# endif
        vsri.u32        $H4#lo,$H3#lo,#8        @ base 2^32 -> base 2^26
        vshl.u32        $H3#lo,$H3#lo,#18

        vsri.u32        $H3#lo,$H2#lo,#14
        vshl.u32        $H2#lo,$H2#lo,#12
        vadd.i32        $H4#hi,$H4#lo,$D4#lo    @ add hash value and move to #hi

        vbic.i32        $H3#lo,#0xfc000000
        vsri.u32        $H2#lo,$H1#lo,#20
        vshl.u32        $H1#lo,$H1#lo,#6

        vbic.i32        $H2#lo,#0xfc000000
        vsri.u32        $H1#lo,$H0#lo,#26
        vadd.i32        $H3#hi,$H3#lo,$D3#lo

        vbic.i32        $H0#lo,#0xfc000000
        vbic.i32        $H1#lo,#0xfc000000
        vadd.i32        $H2#hi,$H2#lo,$D2#lo

        vadd.i32        $H0#hi,$H0#lo,$D0#lo
        vadd.i32        $H1#hi,$H1#lo,$D1#lo

        mov             $tbl1,$zeros
        add             $tbl0,$ctx,#48

        cmp             $len,$len
        b               .Long_tail

.align  4
.Leven:
        subs            $len,$len,#64
        it              lo
        movlo           $in2,$zeros

        vmov.i32        $H4,#1<<24              @ padbit, yes, always
        vld4.32         {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]    @ inp[0:1]
        add             $inp,$inp,#64
        vld4.32         {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]    @ inp[2:3] (or 0)
        add             $in2,$in2,#64
        itt             hi
        addhi           $tbl1,$ctx,#(48+1*9*4)
        addhi           $tbl0,$ctx,#(48+3*9*4)

# ifdef __ARMEB__
        vrev32.8        $H0,$H0
        vrev32.8        $H3,$H3
        vrev32.8        $H1,$H1
        vrev32.8        $H2,$H2
# endif
        vsri.u32        $H4,$H3,#8              @ base 2^32 -> base 2^26
        vshl.u32        $H3,$H3,#18

        vsri.u32        $H3,$H2,#14
        vshl.u32        $H2,$H2,#12

        vbic.i32        $H3,#0xfc000000
        vsri.u32        $H2,$H1,#20
        vshl.u32        $H1,$H1,#6

        vbic.i32        $H2,#0xfc000000
        vsri.u32        $H1,$H0,#26

        vbic.i32        $H0,#0xfc000000
        vbic.i32        $H1,#0xfc000000

        bls             .Lskip_loop

        vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^2
        vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^4
        vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
        vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
        b               .Loop_neon

.align  5
.Loop_neon:
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
        @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
        @   \___________________/
        @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
        @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
        @   \___________________/ \____________________/
        @
        @ Note that we start with inp[2:3]*r^2. This is because it
        @ doesn't depend on the reduction in the previous iteration.
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
        @ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
        @ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
        @ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
        @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ inp[2:3]*r^2

        vadd.i32        $H2#lo,$H2#lo,$D2#lo    @ accumulate inp[0:1]
        vmull.u32       $D2,$H2#hi,${R0}[1]
        vadd.i32        $H0#lo,$H0#lo,$D0#lo
        vmull.u32       $D0,$H0#hi,${R0}[1]
        vadd.i32        $H3#lo,$H3#lo,$D3#lo
        vmull.u32       $D3,$H3#hi,${R0}[1]
        vmlal.u32       $D2,$H1#hi,${R1}[1]
        vadd.i32        $H1#lo,$H1#lo,$D1#lo
        vmull.u32       $D1,$H1#hi,${R0}[1]

        vadd.i32        $H4#lo,$H4#lo,$D4#lo
        vmull.u32       $D4,$H4#hi,${R0}[1]
        subs            $len,$len,#64
        vmlal.u32       $D0,$H4#hi,${S1}[1]
        it              lo
        movlo           $in2,$zeros
        vmlal.u32       $D3,$H2#hi,${R1}[1]
        vld1.32         ${S4}[1],[$tbl1,:32]
        vmlal.u32       $D1,$H0#hi,${R1}[1]
        vmlal.u32       $D4,$H3#hi,${R1}[1]

        vmlal.u32       $D0,$H3#hi,${S2}[1]
        vmlal.u32       $D3,$H1#hi,${R2}[1]
        vmlal.u32       $D4,$H2#hi,${R2}[1]
        vmlal.u32       $D1,$H4#hi,${S2}[1]
        vmlal.u32       $D2,$H0#hi,${R2}[1]

        vmlal.u32       $D3,$H0#hi,${R3}[1]
        vmlal.u32       $D0,$H2#hi,${S3}[1]
        vmlal.u32       $D4,$H1#hi,${R3}[1]
        vmlal.u32       $D1,$H3#hi,${S3}[1]
        vmlal.u32       $D2,$H4#hi,${S3}[1]

        vmlal.u32       $D3,$H4#hi,${S4}[1]
        vmlal.u32       $D0,$H1#hi,${S4}[1]
        vmlal.u32       $D4,$H0#hi,${R4}[1]
        vmlal.u32       $D1,$H2#hi,${S4}[1]
        vmlal.u32       $D2,$H3#hi,${S4}[1]

        vld4.32         {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]    @ inp[2:3] (or 0)
        add             $in2,$in2,#64

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ (hash+inp[0:1])*r^4 and accumulate

        vmlal.u32       $D3,$H3#lo,${R0}[0]
        vmlal.u32       $D0,$H0#lo,${R0}[0]
        vmlal.u32       $D4,$H4#lo,${R0}[0]
        vmlal.u32       $D1,$H1#lo,${R0}[0]
        vmlal.u32       $D2,$H2#lo,${R0}[0]
        vld1.32         ${S4}[0],[$tbl0,:32]

        vmlal.u32       $D3,$H2#lo,${R1}[0]
        vmlal.u32       $D0,$H4#lo,${S1}[0]
        vmlal.u32       $D4,$H3#lo,${R1}[0]
        vmlal.u32       $D1,$H0#lo,${R1}[0]
        vmlal.u32       $D2,$H1#lo,${R1}[0]

        vmlal.u32       $D3,$H1#lo,${R2}[0]
        vmlal.u32       $D0,$H3#lo,${S2}[0]
        vmlal.u32       $D4,$H2#lo,${R2}[0]
        vmlal.u32       $D1,$H4#lo,${S2}[0]
        vmlal.u32       $D2,$H0#lo,${R2}[0]

        vmlal.u32       $D3,$H0#lo,${R3}[0]
        vmlal.u32       $D0,$H2#lo,${S3}[0]
        vmlal.u32       $D4,$H1#lo,${R3}[0]
        vmlal.u32       $D1,$H3#lo,${S3}[0]
        vmlal.u32       $D3,$H4#lo,${S4}[0]

        vmlal.u32       $D2,$H4#lo,${S3}[0]
        vmlal.u32       $D0,$H1#lo,${S4}[0]
        vmlal.u32       $D4,$H0#lo,${R4}[0]
        vmov.i32        $H4,#1<<24              @ padbit, yes, always
        vmlal.u32       $D1,$H2#lo,${S4}[0]
        vmlal.u32       $D2,$H3#lo,${S4}[0]

        vld4.32         {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]    @ inp[0:1]
        add             $inp,$inp,#64
# ifdef __ARMEB__
        vrev32.8        $H0,$H0
        vrev32.8        $H1,$H1
        vrev32.8        $H2,$H2
        vrev32.8        $H3,$H3
# endif

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ lazy reduction interleaved with base 2^32 -> base 2^26 of
        @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.

        vshr.u64        $T0,$D3,#26
        vmovn.i64       $D3#lo,$D3
         vshr.u64       $T1,$D0,#26
         vmovn.i64      $D0#lo,$D0
        vadd.i64        $D4,$D4,$T0             @ h3 -> h4
        vbic.i32        $D3#lo,#0xfc000000
          vsri.u32      $H4,$H3,#8              @ base 2^32 -> base 2^26
         vadd.i64       $D1,$D1,$T1             @ h0 -> h1
          vshl.u32      $H3,$H3,#18
         vbic.i32       $D0#lo,#0xfc000000

        vshrn.u64       $T0#lo,$D4,#26
        vmovn.i64       $D4#lo,$D4
         vshr.u64       $T1,$D1,#26
         vmovn.i64      $D1#lo,$D1
         vadd.i64       $D2,$D2,$T1             @ h1 -> h2
          vsri.u32      $H3,$H2,#14
        vbic.i32        $D4#lo,#0xfc000000
          vshl.u32      $H2,$H2,#12
         vbic.i32       $D1#lo,#0xfc000000

        vadd.i32        $D0#lo,$D0#lo,$T0#lo
        vshl.u32        $T0#lo,$T0#lo,#2
          vbic.i32      $H3,#0xfc000000
         vshrn.u64      $T1#lo,$D2,#26
         vmovn.i64      $D2#lo,$D2
        vaddl.u32       $D0,$D0#lo,$T0#lo       @ h4 -> h0 [widen for a sec]
          vsri.u32      $H2,$H1,#20
         vadd.i32       $D3#lo,$D3#lo,$T1#lo    @ h2 -> h3
          vshl.u32      $H1,$H1,#6
         vbic.i32       $D2#lo,#0xfc000000
          vbic.i32      $H2,#0xfc000000

        vshrn.u64       $T0#lo,$D0,#26          @ re-narrow
        vmovn.i64       $D0#lo,$D0
          vsri.u32      $H1,$H0,#26
          vbic.i32      $H0,#0xfc000000
         vshr.u32       $T1#lo,$D3#lo,#26
         vbic.i32       $D3#lo,#0xfc000000
        vbic.i32        $D0#lo,#0xfc000000
        vadd.i32        $D1#lo,$D1#lo,$T0#lo    @ h0 -> h1
         vadd.i32       $D4#lo,$D4#lo,$T1#lo    @ h3 -> h4
          vbic.i32      $H1,#0xfc000000

        bhi             .Loop_neon

.Lskip_loop:
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

        add             $tbl1,$ctx,#(48+0*9*4)
        add             $tbl0,$ctx,#(48+1*9*4)
        adds            $len,$len,#32
        it              ne
        movne           $len,#0
        bne             .Long_tail

        vadd.i32        $H2#hi,$H2#lo,$D2#lo    @ add hash value and move to #hi
        vadd.i32        $H0#hi,$H0#lo,$D0#lo
        vadd.i32        $H3#hi,$H3#lo,$D3#lo
        vadd.i32        $H1#hi,$H1#lo,$D1#lo
        vadd.i32        $H4#hi,$H4#lo,$D4#lo

.Long_tail:
        vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^1
        vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^2

        vadd.i32        $H2#lo,$H2#lo,$D2#lo    @ can be redundant
        vmull.u32       $D2,$H2#hi,$R0
        vadd.i32        $H0#lo,$H0#lo,$D0#lo
        vmull.u32       $D0,$H0#hi,$R0
        vadd.i32        $H3#lo,$H3#lo,$D3#lo
        vmull.u32       $D3,$H3#hi,$R0
        vadd.i32        $H1#lo,$H1#lo,$D1#lo
        vmull.u32       $D1,$H1#hi,$R0
        vadd.i32        $H4#lo,$H4#lo,$D4#lo
        vmull.u32       $D4,$H4#hi,$R0

        vmlal.u32       $D0,$H4#hi,$S1
        vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
        vmlal.u32       $D3,$H2#hi,$R1
        vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
        vmlal.u32       $D1,$H0#hi,$R1
        vmlal.u32       $D4,$H3#hi,$R1
        vmlal.u32       $D2,$H1#hi,$R1

        vmlal.u32       $D3,$H1#hi,$R2
        vld1.32         ${S4}[1],[$tbl1,:32]
        vmlal.u32       $D0,$H3#hi,$S2
        vld1.32         ${S4}[0],[$tbl0,:32]
        vmlal.u32       $D4,$H2#hi,$R2
        vmlal.u32       $D1,$H4#hi,$S2
        vmlal.u32       $D2,$H0#hi,$R2

        vmlal.u32       $D3,$H0#hi,$R3
         it             ne
         addne          $tbl1,$ctx,#(48+2*9*4)
        vmlal.u32       $D0,$H2#hi,$S3
         it             ne
         addne          $tbl0,$ctx,#(48+3*9*4)
        vmlal.u32       $D4,$H1#hi,$R3
        vmlal.u32       $D1,$H3#hi,$S3
        vmlal.u32       $D2,$H4#hi,$S3

        vmlal.u32       $D3,$H4#hi,$S4
         vorn           $MASK,$MASK,$MASK       @ all-ones, can be redundant
        vmlal.u32       $D0,$H1#hi,$S4
         vshr.u64       $MASK,$MASK,#38
        vmlal.u32       $D4,$H0#hi,$R4
        vmlal.u32       $D1,$H2#hi,$S4
        vmlal.u32       $D2,$H3#hi,$S4

        beq             .Lshort_tail

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ (hash+inp[0:1])*r^4:r^3 and accumulate

        vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^3
        vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^4

        vmlal.u32       $D2,$H2#lo,$R0
        vmlal.u32       $D0,$H0#lo,$R0
        vmlal.u32       $D3,$H3#lo,$R0
        vmlal.u32       $D1,$H1#lo,$R0
        vmlal.u32       $D4,$H4#lo,$R0

        vmlal.u32       $D0,$H4#lo,$S1
        vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
        vmlal.u32       $D3,$H2#lo,$R1
        vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
        vmlal.u32       $D1,$H0#lo,$R1
        vmlal.u32       $D4,$H3#lo,$R1
        vmlal.u32       $D2,$H1#lo,$R1

        vmlal.u32       $D3,$H1#lo,$R2
        vld1.32         ${S4}[1],[$tbl1,:32]
        vmlal.u32       $D0,$H3#lo,$S2
        vld1.32         ${S4}[0],[$tbl0,:32]
        vmlal.u32       $D4,$H2#lo,$R2
        vmlal.u32       $D1,$H4#lo,$S2
        vmlal.u32       $D2,$H0#lo,$R2

        vmlal.u32       $D3,$H0#lo,$R3
        vmlal.u32       $D0,$H2#lo,$S3
        vmlal.u32       $D4,$H1#lo,$R3
        vmlal.u32       $D1,$H3#lo,$S3
        vmlal.u32       $D2,$H4#lo,$S3

        vmlal.u32       $D3,$H4#lo,$S4
         vorn           $MASK,$MASK,$MASK       @ all-ones
        vmlal.u32       $D0,$H1#lo,$S4
         vshr.u64       $MASK,$MASK,#38
        vmlal.u32       $D4,$H0#lo,$R4
        vmlal.u32       $D1,$H2#lo,$S4
        vmlal.u32       $D2,$H3#lo,$S4

.Lshort_tail:
        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ horizontal addition

        vadd.i64        $D3#lo,$D3#lo,$D3#hi
        vadd.i64        $D0#lo,$D0#lo,$D0#hi
        vadd.i64        $D4#lo,$D4#lo,$D4#hi
        vadd.i64        $D1#lo,$D1#lo,$D1#hi
        vadd.i64        $D2#lo,$D2#lo,$D2#hi

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ lazy reduction, but without narrowing

        vshr.u64        $T0,$D3,#26
        vand.i64        $D3,$D3,$MASK
         vshr.u64       $T1,$D0,#26
         vand.i64       $D0,$D0,$MASK
        vadd.i64        $D4,$D4,$T0             @ h3 -> h4
         vadd.i64       $D1,$D1,$T1             @ h0 -> h1

        vshr.u64        $T0,$D4,#26
        vand.i64        $D4,$D4,$MASK
         vshr.u64       $T1,$D1,#26
         vand.i64       $D1,$D1,$MASK
         vadd.i64       $D2,$D2,$T1             @ h1 -> h2

        vadd.i64        $D0,$D0,$T0
        vshl.u64        $T0,$T0,#2
         vshr.u64       $T1,$D2,#26
         vand.i64       $D2,$D2,$MASK
        vadd.i64        $D0,$D0,$T0             @ h4 -> h0
         vadd.i64       $D3,$D3,$T1             @ h2 -> h3

        vshr.u64        $T0,$D0,#26
        vand.i64        $D0,$D0,$MASK
         vshr.u64       $T1,$D3,#26
         vand.i64       $D3,$D3,$MASK
        vadd.i64        $D1,$D1,$T0             @ h0 -> h1
         vadd.i64       $D4,$D4,$T1             @ h3 -> h4

        cmp             $len,#0
        bne             .Leven

        @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        @ store hash value

        vst4.32         {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
        vst1.32         {$D4#lo[0]},[$ctx]

        vldmia  sp!,{d8-d15}                    @ epilogue
        ldmia   sp!,{r4-r7}
.Lno_data_neon:
        ret                                     @ bx    lr
.size   poly1305_blocks_neon,.-poly1305_blocks_neon

.type   poly1305_emit_neon,%function
.align  5
poly1305_emit_neon:
        ldr     ip,[$ctx,#36]           @ is_base2_26

        stmdb   sp!,{r4-r11}

        tst     ip,ip
        beq     .Lpoly1305_emit_enter

        ldmia   $ctx,{$h0-$h4}
        eor     $g0,$g0,$g0

        adds    $h0,$h0,$h1,lsl#26      @ base 2^26 -> base 2^32
        mov     $h1,$h1,lsr#6
        adcs    $h1,$h1,$h2,lsl#20
        mov     $h2,$h2,lsr#12
        adcs    $h2,$h2,$h3,lsl#14
        mov     $h3,$h3,lsr#18
        adcs    $h3,$h3,$h4,lsl#8
        adc     $h4,$g0,$h4,lsr#24      @ can be partially reduced ...

        and     $g0,$h4,#-4             @ ... so reduce
        and     $h4,$h4,#3
        add     $g0,$g0,$g0,lsr#2       @ *= 5
        adds    $h0,$h0,$g0
        adcs    $h1,$h1,#0
        adcs    $h2,$h2,#0
        adcs    $h3,$h3,#0
        adc     $h4,$h4,#0

        adds    $g0,$h0,#5              @ compare to modulus
        adcs    $g1,$h1,#0
        adcs    $g2,$h2,#0
        adcs    $g3,$h3,#0
        adc     $g4,$h4,#0
        tst     $g4,#4                  @ did it carry/borrow?

        it      ne
        movne   $h0,$g0
        ldr     $g0,[$nonce,#0]
        it      ne
        movne   $h1,$g1
        ldr     $g1,[$nonce,#4]
        it      ne
        movne   $h2,$g2
        ldr     $g2,[$nonce,#8]
        it      ne
        movne   $h3,$g3
        ldr     $g3,[$nonce,#12]

        adds    $h0,$h0,$g0             @ accumulate nonce
        adcs    $h1,$h1,$g1
        adcs    $h2,$h2,$g2
        adc     $h3,$h3,$g3

# ifdef __ARMEB__
        rev     $h0,$h0
        rev     $h1,$h1
        rev     $h2,$h2
        rev     $h3,$h3
# endif
        str     $h0,[$mac,#0]           @ store the result
        str     $h1,[$mac,#4]
        str     $h2,[$mac,#8]
        str     $h3,[$mac,#12]

        ldmia   sp!,{r4-r11}
        ret                             @ bx    lr
.size   poly1305_emit_neon,.-poly1305_emit_neon

.align  5
.Lzeros:
.long   0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.LOPENSSL_armcap:
.word   OPENSSL_armcap_P-.Lpoly1305_init
#endif
___
}       }
$code.=<<___;
.asciz  "Poly1305 for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
#if     __ARM_MAX_ARCH__>=7
.comm   OPENSSL_armcap_P,4,4
#endif
___

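# Post-processing of the generated code: "q<N>#lo"/"q<N>#hi" tokens
# are resolved to the 64-bit halves of the corresponding 128-bit
# register (e.g. "q8#lo" -> "d16", "q8#hi" -> "d17"); "ret" is spelled
# as "bx lr"; and a literal "bx lr" is emitted as its raw opcode so the
# module still assembles with -march=armv4. The substitutions are
# chained with "or", so at most one applies per line.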
foreach (split("\n",$code)) {
        s/\`([^\`]*)\`/eval $1/geo;

        s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo       or
        s/\bret\b/bx    lr/go                                           or
        s/\bbx\s+lr\b/.word\t0xe12fff1e/go;     # make it possible to compile with -march=armv4

        print $_,"\n";
}
close STDOUT; # enforce flush