crypto/poly1305: don't break carry chains.
crypto/poly1305/asm/poly1305-armv4.pl (openssl.git)
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 #                       IALU(*)/gcc-4.4         NEON
11 #
12 # ARM11xx(ARMv6)        7.78/+100%              -
13 # Cortex-A5             6.35/+130%              2.96
14 # Cortex-A8             6.25/+115%              2.36
15 # Cortex-A9             5.10/+95%               2.55
16 # Cortex-A15            3.85/+85%               1.25(**)
17 # Snapdragon S4         5.70/+100%              1.48(**)
18 #
19 # (*)   this is for -march=armv6, i.e. with a bunch of ldrb instructions loading data;
20 # (**)  these are trade-off results; they can be improved by ~8%, but at
21 #       the cost of a 15/12% regression on Cortex-A5/A7; it's even possible
22 #       to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
23
24 $flavour = shift;
25 if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
26 else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
27
28 if ($flavour && $flavour ne "void") {
29     $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
30     ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
31     ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
32     die "can't locate arm-xlate.pl";
33
34     open STDOUT,"| \"$^X\" $xlate $flavour $output";
35 } else {
36     open STDOUT,">$output";
37 }
38
39 ($ctx,$inp,$len,$padbit)=map("r$_",(0..3));
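# AAPCS argument registers; the names below match poly1305_blocks(ctx,inp,len,padbit),
# while poly1305_init and poly1305_emit reuse r0-r2 for their own arguments.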
40
41 $code.=<<___;
42 #include "arm_arch.h"
43
44 .text
45 #if defined(__thumb2__)
46 .syntax unified
47 .thumb
48 #else
49 .code   32
50 #endif
51
52 .globl  poly1305_emit
53 .globl  poly1305_blocks
54 .globl  poly1305_init
55 .type   poly1305_init,%function
56 .align  5
57 poly1305_init:
58 .Lpoly1305_init:
59         stmdb   sp!,{r4-r11}
60
61         eor     r3,r3,r3
62         cmp     $inp,#0
63         str     r3,[$ctx,#0]            @ zero hash value
64         str     r3,[$ctx,#4]
65         str     r3,[$ctx,#8]
66         str     r3,[$ctx,#12]
67         str     r3,[$ctx,#16]
68         str     r3,[$ctx,#36]           @ is_base2_26
69         add     $ctx,$ctx,#20
70
71 #ifdef  __thumb2__
72         it      eq
73 #endif
74         moveq   r0,#0
75         beq     .Lno_key
76
77 #if     __ARM_MAX_ARCH__>=7
78         adr     r11,.Lpoly1305_init
79         ldr     r12,.LOPENSSL_armcap
80 #endif
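@ load the 16-byte key bytewise and clamp r: the top 4 bits of every word
@ and the low 2 bits of words 1-3 are cleared (r10=0x0fffffff, r3=0x0ffffffc)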
81         ldrb    r4,[$inp,#0]
82         mov     r10,#0x0fffffff
83         ldrb    r5,[$inp,#1]
84         and     r3,r10,#-4              @ 0x0ffffffc
85         ldrb    r6,[$inp,#2]
86         ldrb    r7,[$inp,#3]
87         orr     r4,r4,r5,lsl#8
88         ldrb    r5,[$inp,#4]
89         orr     r4,r4,r6,lsl#16
90         ldrb    r6,[$inp,#5]
91         orr     r4,r4,r7,lsl#24
92         ldrb    r7,[$inp,#6]
93         and     r4,r4,r10
94
95 #if     __ARM_MAX_ARCH__>=7
96         ldr     r12,[r11,r12]           @ OPENSSL_armcap_P
97 # ifdef __APPLE__
98         ldr     r12,[r12]
99 # endif
100 #endif
101         ldrb    r8,[$inp,#7]
102         orr     r5,r5,r6,lsl#8
103         ldrb    r6,[$inp,#8]
104         orr     r5,r5,r7,lsl#16
105         ldrb    r7,[$inp,#9]
106         orr     r5,r5,r8,lsl#24
107         ldrb    r8,[$inp,#10]
108         and     r5,r5,r3
109
110 #if     __ARM_MAX_ARCH__>=7
111         tst     r12,#ARMV7_NEON         @ check for NEON
112 # ifdef __APPLE__
113         adr     r9,poly1305_blocks_neon
114         adr     r11,poly1305_blocks
115 #  ifdef __thumb2__
116         it      ne
117 #  endif
118         movne   r11,r9
119         adr     r12,poly1305_emit
120         adr     r10,poly1305_emit_neon
121 #  ifdef __thumb2__
122         it      ne
123 #  endif
124         movne   r12,r10
125 # else
126 #  ifdef __thumb2__
127         itete   eq
128 #  endif
129         addeq   r12,r11,#(poly1305_emit-.Lpoly1305_init)
130         addne   r12,r11,#(poly1305_emit_neon-.Lpoly1305_init)
131         addeq   r11,r11,#(poly1305_blocks-.Lpoly1305_init)
132         addne   r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init)
133 # endif
134 # ifdef __thumb2__
135         orr     r12,r12,#1      @ thumb-ify address
136         orr     r11,r11,#1
137 # endif
138 #endif
139         ldrb    r9,[$inp,#11]
140         orr     r6,r6,r7,lsl#8
141         ldrb    r7,[$inp,#12]
142         orr     r6,r6,r8,lsl#16
143         ldrb    r8,[$inp,#13]
144         orr     r6,r6,r9,lsl#24
145         ldrb    r9,[$inp,#14]
146         and     r6,r6,r3
147
148         ldrb    r10,[$inp,#15]
149         orr     r7,r7,r8,lsl#8
150         str     r4,[$ctx,#0]
151         orr     r7,r7,r9,lsl#16
152         str     r5,[$ctx,#4]
153         orr     r7,r7,r10,lsl#24
154         str     r6,[$ctx,#8]
155         and     r7,r7,r3
156         str     r7,[$ctx,#12]
157 #if     __ARM_MAX_ARCH__>=7
158         stmia   r2,{r11,r12}            @ fill functions table
159         mov     r0,#1
160 #else
161         mov     r0,#0
162 #endif
163 .Lno_key:
164         ldmia   sp!,{r4-r11}
165 #if     __ARM_ARCH__>=5
166         ret                             @ bx    lr
167 #else
168         tst     lr,#1
169         moveq   pc,lr                   @ be binary compatible with V4, yet
170         bx      lr                      @ interoperable with Thumb ISA:-)
171 #endif
172 .size   poly1305_init,.-poly1305_init
173 ___
174 {
175 my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
176 my ($s1,$s2,$s3)=($r1,$r2,$r3);
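# $h0-$h4 is the 130-bit accumulator in base 2^32 ($h4 holds the few top bits),
# $r0-$r3 the clamped key; $s1-$s3 alias $r1-$r3 and are overwritten below with
# 5*r/4, the factors used to fold high product limbs back modulo 2^130-5.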
177
178 $code.=<<___;
179 .type   poly1305_blocks,%function
180 .align  5
181 poly1305_blocks:
182         stmdb   sp!,{r3-r11,lr}
183
184         ands    $len,$len,#-16
185         beq     .Lno_data
186
187         cmp     $padbit,#0
188         add     $len,$len,$inp          @ end pointer
189         sub     sp,sp,#32
190
191         ldmia   $ctx,{$h0-$r3}          @ load context
192
193         str     $ctx,[sp,#12]           @ offload stuff
194         mov     lr,$inp
195         str     $len,[sp,#16]
196         str     $r1,[sp,#20]
197         str     $r2,[sp,#24]
198         str     $r3,[sp,#28]
199         b       .Loop
200
201 .Loop:
202 #if __ARM_ARCH__<7
203         ldrb    r0,[lr],#16             @ load input
204 # ifdef __thumb2__
205         it      hi
206 # endif
207         addhi   $h4,$h4,#1              @ 1<<128
208         ldrb    r1,[lr,#-15]
209         ldrb    r2,[lr,#-14]
210         ldrb    r3,[lr,#-13]
211         orr     r1,r0,r1,lsl#8
212         ldrb    r0,[lr,#-12]
213         orr     r2,r1,r2,lsl#16
214         ldrb    r1,[lr,#-11]
215         orr     r3,r2,r3,lsl#24
216         ldrb    r2,[lr,#-10]
217         adds    $h0,$h0,r3              @ accumulate input
218
219         ldrb    r3,[lr,#-9]
220         orr     r1,r0,r1,lsl#8
221         ldrb    r0,[lr,#-8]
222         orr     r2,r1,r2,lsl#16
223         ldrb    r1,[lr,#-7]
224         orr     r3,r2,r3,lsl#24
225         ldrb    r2,[lr,#-6]
226         adcs    $h1,$h1,r3
227
228         ldrb    r3,[lr,#-5]
229         orr     r1,r0,r1,lsl#8
230         ldrb    r0,[lr,#-4]
231         orr     r2,r1,r2,lsl#16
232         ldrb    r1,[lr,#-3]
233         orr     r3,r2,r3,lsl#24
234         ldrb    r2,[lr,#-2]
235         adcs    $h2,$h2,r3
236
237         ldrb    r3,[lr,#-1]
238         orr     r1,r0,r1,lsl#8
239         str     lr,[sp,#8]              @ offload input pointer
240         orr     r2,r1,r2,lsl#16
241         add     $s1,$r1,$r1,lsr#2
242         orr     r3,r2,r3,lsl#24
243 #else
244         ldr     r0,[lr],#16             @ load input
245 # ifdef __thumb2__
246         it      hi
247 # endif
248         addhi   $h4,$h4,#1              @ padbit
249         ldr     r1,[lr,#-12]
250         ldr     r2,[lr,#-8]
251         ldr     r3,[lr,#-4]
252 # ifdef __ARMEB__
253         rev     r0,r0
254         rev     r1,r1
255         rev     r2,r2
256         rev     r3,r3
257 # endif
258         adds    $h0,$h0,r0              @ accumulate input
259         str     lr,[sp,#8]              @ offload input pointer
260         adcs    $h1,$h1,r1
261         add     $s1,$r1,$r1,lsr#2
262         adcs    $h2,$h2,r2
263 #endif
264         add     $s2,$r2,$r2,lsr#2
265         adcs    $h3,$h3,r3
266         add     $s3,$r3,$r3,lsr#2
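@ s1..s3 = r + (r>>2) = 5*r/4 exactly, since the low 2 bits of r1..r3 are
@ clamped to zero; 2^128 is congruent to 5/4 mod 2^130-5, so product limbs
@ at that weight and above are folded back via these factors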
267
268         umull   r2,r3,$h1,$r0
269          adc    $h4,$h4,#0
270         umull   r0,r1,$h0,$r0
271         umlal   r2,r3,$h4,$s1
272         umlal   r0,r1,$h3,$s1
273         ldr     $r1,[sp,#20]            @ reload $r1
274         umlal   r2,r3,$h2,$s3
275         umlal   r0,r1,$h1,$s3
276         umlal   r2,r3,$h3,$s2
277         umlal   r0,r1,$h2,$s2
278         umlal   r2,r3,$h0,$r1
279         str     r0,[sp,#0]              @ future $h0
280          mul    r0,$s2,$h4
281         ldr     $r2,[sp,#24]            @ reload $r2
282         adds    r2,r2,r1                @ d1+=d0>>32
283          eor    r1,r1,r1
284         adc     lr,r3,#0                @ future $h2
285         str     r2,[sp,#4]              @ future $h1
286
287         mul     r2,$s3,$h4
288         eor     r3,r3,r3
289         umlal   r0,r1,$h3,$s3
290         ldr     $r3,[sp,#28]            @ reload $r3
291         umlal   r2,r3,$h3,$r0
292         umlal   r0,r1,$h2,$r0
293         umlal   r2,r3,$h2,$r1
294         umlal   r0,r1,$h1,$r1
295         umlal   r2,r3,$h1,$r2
296         umlal   r0,r1,$h0,$r2
297         umlal   r2,r3,$h0,$r3
298         ldr     $h0,[sp,#0]
299         mul     $h4,$r0,$h4
300         ldr     $h1,[sp,#4]
301
302         adds    $h2,lr,r0               @ d2+=d1>>32
303         ldr     lr,[sp,#8]              @ reload input pointer
304         adc     r1,r1,#0
305         adds    $h3,r2,r1               @ d3+=d2>>32
306         ldr     r0,[sp,#16]             @ reload end pointer
307         adc     r3,r3,#0
308         add     $h4,$h4,r3              @ h4+=d3>>32
309
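@ partial reduction: the bits of h4 above the low 2 are multiples of 2^130,
@ which is congruent to 5 mod p, so they are scaled by 5 and added back to h0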
310         and     r1,$h4,#-4
311         and     $h4,$h4,#3
312         add     r1,r1,r1,lsr#2          @ *=5
313         adds    $h0,$h0,r1
314         adcs    $h1,$h1,#0
315         adcs    $h2,$h2,#0
316         adcs    $h3,$h3,#0
317         adc     $h4,$h4,#0
318
319         cmp     r0,lr                   @ done yet?
320         bhi     .Loop
321
322         ldr     $ctx,[sp,#12]
323         add     sp,sp,#32
324         stmia   $ctx,{$h0-$h4}          @ store the result
325
326 .Lno_data:
327 #if     __ARM_ARCH__>=5
328         ldmia   sp!,{r3-r11,pc}
329 #else
330         ldmia   sp!,{r3-r11,lr}
331         tst     lr,#1
332         moveq   pc,lr                   @ be binary compatible with V4, yet
333         bx      lr                      @ interoperable with Thumb ISA:-)
334 #endif
335 .size   poly1305_blocks,.-poly1305_blocks
336 ___
337 }
338 {
339 my ($ctx,$mac,$nonce)=map("r$_",(0..2));
340 my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
341 my $g4=$h4;
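# $g0-$g4 hold the candidate value h+5; $g4 may alias $h4 because h4 itself is
# no longer needed once its overflow bit has been tested.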
342
343 $code.=<<___;
344 .type   poly1305_emit,%function
345 .align  5
346 poly1305_emit:
347         stmdb   sp!,{r4-r11}
348 .Lpoly1305_emit_enter:
349
350         ldmia   $ctx,{$h0-$h4}
351         adds    $g0,$h0,#5              @ compare to modulus
352         adcs    $g1,$h1,#0
353         adcs    $g2,$h2,#0
354         adcs    $g3,$h3,#0
355         adc     $g4,$h4,#0
356         tst     $g4,#4                  @ did it carry/borrow?
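@ bit 2 of $g4 set means h+5 carried past 2^130, i.e. h >= p,
@ in which case g = h-p (mod 2^130) is selected below instead of h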
357
358 #ifdef  __thumb2__
359         it      ne
360 #endif
361         movne   $h0,$g0
362         ldr     $g0,[$nonce,#0]
363 #ifdef  __thumb2__
364         it      ne
365 #endif
366         movne   $h1,$g1
367         ldr     $g1,[$nonce,#4]
368 #ifdef  __thumb2__
369         it      ne
370 #endif
371         movne   $h2,$g2
372         ldr     $g2,[$nonce,#8]
373 #ifdef  __thumb2__
374         it      ne
375 #endif
376         movne   $h3,$g3
377         ldr     $g3,[$nonce,#12]
378
379         adds    $h0,$h0,$g0
380         adcs    $h1,$h1,$g1
381         adcs    $h2,$h2,$g2
382         adc     $h3,$h3,$g3
383
384 #if __ARM_ARCH__>=7
385 # ifdef __ARMEB__
386         rev     $h0,$h0
387         rev     $h1,$h1
388         rev     $h2,$h2
389         rev     $h3,$h3
390 # endif
391         str     $h0,[$mac,#0]
392         str     $h1,[$mac,#4]
393         str     $h2,[$mac,#8]
394         str     $h3,[$mac,#12]
395 #else
396         strb    $h0,[$mac,#0]
397         mov     $h0,$h0,lsr#8
398         strb    $h1,[$mac,#4]
399         mov     $h1,$h1,lsr#8
400         strb    $h2,[$mac,#8]
401         mov     $h2,$h2,lsr#8
402         strb    $h3,[$mac,#12]
403         mov     $h3,$h3,lsr#8
404
405         strb    $h0,[$mac,#1]
406         mov     $h0,$h0,lsr#8
407         strb    $h1,[$mac,#5]
408         mov     $h1,$h1,lsr#8
409         strb    $h2,[$mac,#9]
410         mov     $h2,$h2,lsr#8
411         strb    $h3,[$mac,#13]
412         mov     $h3,$h3,lsr#8
413
414         strb    $h0,[$mac,#2]
415         mov     $h0,$h0,lsr#8
416         strb    $h1,[$mac,#6]
417         mov     $h1,$h1,lsr#8
418         strb    $h2,[$mac,#10]
419         mov     $h2,$h2,lsr#8
420         strb    $h3,[$mac,#14]
421         mov     $h3,$h3,lsr#8
422
423         strb    $h0,[$mac,#3]
424         strb    $h1,[$mac,#7]
425         strb    $h2,[$mac,#11]
426         strb    $h3,[$mac,#15]
427 #endif
428         ldmia   sp!,{r4-r11}
429 #if     __ARM_ARCH__>=5
430         ret                             @ bx    lr
431 #else
432         tst     lr,#1
433         moveq   pc,lr                   @ be binary compatible with V4, yet
434         bx      lr                      @ interoperable with Thumb ISA:-)
435 #endif
436 .size   poly1305_emit,.-poly1305_emit
437 ___
438 {
439 my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
440 my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
441 my ($T0,$T1,$MASK) = map("q$_",(15,4,0));
442
443 my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));
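# NEON layout: $R0-$R4/$S1-$S4 hold powers of r (and 5*r) in base 2^26, one power
# per 32-bit lane; $D0-$D4 are the 64-bit accumulators for the five result limbs;
# $H0-$H4 carry two interleaved input blocks, also split into 26-bit limbs.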
444
445 $code.=<<___;
446 #if     __ARM_MAX_ARCH__>=7
447 .fpu    neon
448
449 .type   poly1305_init_neon,%function
450 .align  5
451 poly1305_init_neon:
452         ldr     r4,[$ctx,#20]           @ load key base 2^32
453         ldr     r5,[$ctx,#24]
454         ldr     r6,[$ctx,#28]
455         ldr     r7,[$ctx,#32]
456
457         and     r2,r4,#0x03ffffff       @ base 2^32 -> base 2^26
458         mov     r3,r4,lsr#26
459         mov     r4,r5,lsr#20
460         orr     r3,r3,r5,lsl#6
461         mov     r5,r6,lsr#14
462         orr     r4,r4,r6,lsl#12
463         mov     r6,r7,lsr#8
464         orr     r5,r5,r7,lsl#18
465         and     r3,r3,#0x03ffffff
466         and     r4,r4,#0x03ffffff
467         and     r5,r5,#0x03ffffff
468
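@ with 26-bit limbs a sum of five ~2^26*~2^29 products stays far below 2^64,
@ so the 64-bit accumulators cannot overflow and carries can be handled lazily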
469         vdup.32 $R0,r2                  @ r^1 in both lanes
470         add     r2,r3,r3,lsl#2          @ *5
471         vdup.32 $R1,r3
472         add     r3,r4,r4,lsl#2
473         vdup.32 $S1,r2
474         vdup.32 $R2,r4
475         add     r4,r5,r5,lsl#2
476         vdup.32 $S2,r3
477         vdup.32 $R3,r5
478         add     r5,r6,r6,lsl#2
479         vdup.32 $S3,r4
480         vdup.32 $R4,r6
481         vdup.32 $S4,r5
482
483         mov     $zeros,#2               @ counter
484
485 .Lsquare_neon:
486         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
487         @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
488         @ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
489         @ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
490         @ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
491         @ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
492
493         vmull.u32       $D0,$R0,${R0}[1]
494         vmull.u32       $D1,$R1,${R0}[1]
495         vmull.u32       $D2,$R2,${R0}[1]
496         vmull.u32       $D3,$R3,${R0}[1]
497         vmull.u32       $D4,$R4,${R0}[1]
498
499         vmlal.u32       $D0,$R4,${S1}[1]
500         vmlal.u32       $D1,$R0,${R1}[1]
501         vmlal.u32       $D2,$R1,${R1}[1]
502         vmlal.u32       $D3,$R2,${R1}[1]
503         vmlal.u32       $D4,$R3,${R1}[1]
504
505         vmlal.u32       $D0,$R3,${S2}[1]
506         vmlal.u32       $D1,$R4,${S2}[1]
507         vmlal.u32       $D3,$R1,${R2}[1]
508         vmlal.u32       $D2,$R0,${R2}[1]
509         vmlal.u32       $D4,$R2,${R2}[1]
510
511         vmlal.u32       $D0,$R2,${S3}[1]
512         vmlal.u32       $D3,$R0,${R3}[1]
513         vmlal.u32       $D1,$R3,${S3}[1]
514         vmlal.u32       $D2,$R4,${S3}[1]
515         vmlal.u32       $D4,$R1,${R3}[1]
516
517         vmlal.u32       $D3,$R4,${S4}[1]
518         vmlal.u32       $D0,$R1,${S4}[1]
519         vmlal.u32       $D1,$R2,${S4}[1]
520         vmlal.u32       $D2,$R3,${S4}[1]
521         vmlal.u32       $D4,$R0,${R4}[1]
522
523         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
524         @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
525         @ and P. Schwabe
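        @ each pass propagates carries only one limb forward and masks limbs to
        @ 26 bits, so limbs may exceed 2^26 between iterations; full canonical
        @ reduction is deferred until poly1305_emit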
526
527         vshr.u64        $T0,$D3,#26
528         vmovn.i64       $D3#lo,$D3
529          vshr.u64       $T1,$D0,#26
530          vmovn.i64      $D0#lo,$D0
531         vadd.i64        $D4,$D4,$T0             @ h3 -> h4
532         vbic.i32        $D3#lo,#0xfc000000      @ &=0x03ffffff
533          vadd.i64       $D1,$D1,$T1             @ h0 -> h1
534          vbic.i32       $D0#lo,#0xfc000000
535
536         vshrn.u64       $T0#lo,$D4,#26
537         vmovn.i64       $D4#lo,$D4
538          vshr.u64       $T1,$D1,#26
539          vmovn.i64      $D1#lo,$D1
540          vadd.i64       $D2,$D2,$T1             @ h1 -> h2
541         vbic.i32        $D4#lo,#0xfc000000
542          vbic.i32       $D1#lo,#0xfc000000
543
544         vadd.i32        $D0#lo,$D0#lo,$T0#lo
545         vshl.u32        $T0#lo,$T0#lo,#2
546          vshrn.u64      $T1#lo,$D2,#26
547          vmovn.i64      $D2#lo,$D2
548         vadd.i32        $D0#lo,$D0#lo,$T0#lo    @ h4 -> h0
549          vadd.i32       $D3#lo,$D3#lo,$T1#lo    @ h2 -> h3
550          vbic.i32       $D2#lo,#0xfc000000
551
552         vshr.u32        $T0#lo,$D0#lo,#26
553         vbic.i32        $D0#lo,#0xfc000000
554          vshr.u32       $T1#lo,$D3#lo,#26
555          vbic.i32       $D3#lo,#0xfc000000
556         vadd.i32        $D1#lo,$D1#lo,$T0#lo    @ h0 -> h1
557          vadd.i32       $D4#lo,$D4#lo,$T1#lo    @ h3 -> h4
558
559         subs            $zeros,$zeros,#1
560         beq             .Lsquare_break_neon
561
562         add             $tbl0,$ctx,#(48+0*9*4)
563         add             $tbl1,$ctx,#(48+1*9*4)
564
565         vtrn.32         $R0,$D0#lo              @ r^2:r^1
566         vtrn.32         $R2,$D2#lo
567         vtrn.32         $R3,$D3#lo
568         vtrn.32         $R1,$D1#lo
569         vtrn.32         $R4,$D4#lo
570
571         vshl.u32        $S2,$R2,#2              @ *5
572         vshl.u32        $S3,$R3,#2
573         vshl.u32        $S1,$R1,#2
574         vshl.u32        $S4,$R4,#2
575         vadd.i32        $S2,$S2,$R2
576         vadd.i32        $S1,$S1,$R1
577         vadd.i32        $S3,$S3,$R3
578         vadd.i32        $S4,$S4,$R4
579
580         vst4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
581         vst4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
582         vst4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
583         vst4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
584         vst1.32         {${S4}[0]},[$tbl0,:32]
585         vst1.32         {${S4}[1]},[$tbl1,:32]
586
587         b               .Lsquare_neon
588
589 .align  4
590 .Lsquare_break_neon:
591         add             $tbl0,$ctx,#(48+2*4*9)
592         add             $tbl1,$ctx,#(48+3*4*9)
593
594         vmov            $R0,$D0#lo              @ r^4:r^3
595         vshl.u32        $S1,$D1#lo,#2           @ *5
596         vmov            $R1,$D1#lo
597         vshl.u32        $S2,$D2#lo,#2
598         vmov            $R2,$D2#lo
599         vshl.u32        $S3,$D3#lo,#2
600         vmov            $R3,$D3#lo
601         vshl.u32        $S4,$D4#lo,#2
602         vmov            $R4,$D4#lo
603         vadd.i32        $S1,$S1,$D1#lo
604         vadd.i32        $S2,$S2,$D2#lo
605         vadd.i32        $S3,$S3,$D3#lo
606         vadd.i32        $S4,$S4,$D4#lo
607
608         vst4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
609         vst4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
610         vst4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
611         vst4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
612         vst1.32         {${S4}[0]},[$tbl0]
613         vst1.32         {${S4}[1]},[$tbl1]
614
615         ret                             @ bx    lr
616 .size   poly1305_init_neon,.-poly1305_init_neon
617
618 .type   poly1305_blocks_neon,%function
619 .align  5
620 poly1305_blocks_neon:
621         ldr     ip,[$ctx,#36]           @ is_base2_26
622         ands    $len,$len,#-16
623         beq     .Lno_data_neon
624
625         cmp     $len,#64
626         bhs     .Lenter_neon
627         tst     ip,ip                   @ is_base2_26?
628         beq     poly1305_blocks
629
630 .Lenter_neon:
631         stmdb   sp!,{r4-r7}
632         vstmdb  sp!,{d8-d15}            @ ABI specification says so
633
634         tst     ip,ip                   @ is_base2_26?
635         bne     .Lbase2_26_neon
636
637         stmdb   sp!,{r1-r3,lr}
638         bl      poly1305_init_neon
639
640         ldr     r4,[$ctx,#0]            @ load hash value base 2^32
641         ldr     r5,[$ctx,#4]
642         ldr     r6,[$ctx,#8]
643         ldr     r7,[$ctx,#12]
644         ldr     ip,[$ctx,#16]
645
646         and     r2,r4,#0x03ffffff       @ base 2^32 -> base 2^26
647         mov     r3,r4,lsr#26
648          veor   $D0#lo,$D0#lo,$D0#lo
649         mov     r4,r5,lsr#20
650         orr     r3,r3,r5,lsl#6
651          veor   $D1#lo,$D1#lo,$D1#lo
652         mov     r5,r6,lsr#14
653         orr     r4,r4,r6,lsl#12
654          veor   $D2#lo,$D2#lo,$D2#lo
655         mov     r6,r7,lsr#8
656         orr     r5,r5,r7,lsl#18
657          veor   $D3#lo,$D3#lo,$D3#lo
658         and     r3,r3,#0x03ffffff
659         orr     r6,r6,ip,lsl#24
660          veor   $D4#lo,$D4#lo,$D4#lo
661         and     r4,r4,#0x03ffffff
662         mov     r1,#1
663         and     r5,r5,#0x03ffffff
664         str     r1,[$ctx,#36]           @ is_base2_26
665
666         vmov.32 $D0#lo[0],r2
667         vmov.32 $D1#lo[0],r3
668         vmov.32 $D2#lo[0],r4
669         vmov.32 $D3#lo[0],r5
670         vmov.32 $D4#lo[0],r6
671         adr     $zeros,.Lzeros
672
673         ldmia   sp!,{r1-r3,lr}
674         b       .Lbase2_32_neon
675
676 .align  4
677 .Lbase2_26_neon:
678         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
679         @ load hash value
680
681         veor            $D0#lo,$D0#lo,$D0#lo
682         veor            $D1#lo,$D1#lo,$D1#lo
683         veor            $D2#lo,$D2#lo,$D2#lo
684         veor            $D3#lo,$D3#lo,$D3#lo
685         veor            $D4#lo,$D4#lo,$D4#lo
686         vld4.32         {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
687         adr             $zeros,.Lzeros
688         vld1.32         {$D4#lo[0]},[$ctx]
689         sub             $ctx,$ctx,#16           @ rewind
690
691 .Lbase2_32_neon:
692         add             $in2,$inp,#32
693         mov             $padbit,$padbit,lsl#24
694         tst             $len,#31
695         beq             .Leven
696
697         vld4.32         {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
698         vmov.32         $H4#lo[0],$padbit
699         sub             $len,$len,#16
700         add             $in2,$inp,#32
701
702 # ifdef __ARMEB__
703         vrev32.8        $H0,$H0
704         vrev32.8        $H3,$H3
705         vrev32.8        $H1,$H1
706         vrev32.8        $H2,$H2
707 # endif
708         vsri.u32        $H4#lo,$H3#lo,#8        @ base 2^32 -> base 2^26
709         vshl.u32        $H3#lo,$H3#lo,#18
710
711         vsri.u32        $H3#lo,$H2#lo,#14
712         vshl.u32        $H2#lo,$H2#lo,#12
713         vadd.i32        $H4#hi,$H4#lo,$D4#lo    @ add hash value and move to #hi
714
715         vbic.i32        $H3#lo,#0xfc000000
716         vsri.u32        $H2#lo,$H1#lo,#20
717         vshl.u32        $H1#lo,$H1#lo,#6
718
719         vbic.i32        $H2#lo,#0xfc000000
720         vsri.u32        $H1#lo,$H0#lo,#26
721         vadd.i32        $H3#hi,$H3#lo,$D3#lo
722
723         vbic.i32        $H0#lo,#0xfc000000
724         vbic.i32        $H1#lo,#0xfc000000
725         vadd.i32        $H2#hi,$H2#lo,$D2#lo
726
727         vadd.i32        $H0#hi,$H0#lo,$D0#lo
728         vadd.i32        $H1#hi,$H1#lo,$D1#lo
729
730         mov             $tbl1,$zeros
731         add             $tbl0,$ctx,#48
732
733         cmp             $len,$len               @ force EQ so .Long_tail goes straight to .Lshort_tail
734         b               .Long_tail
735
736 .align  4
737 .Leven:
738         subs            $len,$len,#64
739         it              lo
740         movlo           $in2,$zeros
741
742         vmov.i32        $H4,#1<<24              @ padbit, yes, always
743         vld4.32         {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]    @ inp[0:1]
744         add             $inp,$inp,#64
745         vld4.32         {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]    @ inp[2:3] (or 0)
746         add             $in2,$in2,#64
747         itt             hi
748         addhi           $tbl1,$ctx,#(48+1*9*4)
749         addhi           $tbl0,$ctx,#(48+3*9*4)
750
751 # ifdef __ARMEB__
752         vrev32.8        $H0,$H0
753         vrev32.8        $H3,$H3
754         vrev32.8        $H1,$H1
755         vrev32.8        $H2,$H2
756 # endif
757         vsri.u32        $H4,$H3,#8              @ base 2^32 -> base 2^26
758         vshl.u32        $H3,$H3,#18
759
760         vsri.u32        $H3,$H2,#14
761         vshl.u32        $H2,$H2,#12
762
763         vbic.i32        $H3,#0xfc000000
764         vsri.u32        $H2,$H1,#20
765         vshl.u32        $H1,$H1,#6
766
767         vbic.i32        $H2,#0xfc000000
768         vsri.u32        $H1,$H0,#26
769
770         vbic.i32        $H0,#0xfc000000
771         vbic.i32        $H1,#0xfc000000
772
773         bls             .Lskip_loop
774
775         vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^2
776         vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^4
777         vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
778         vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
779         b               .Loop_neon
780
781 .align  5
782 .Loop_neon:
783         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
784         @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
785         @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
786         @   \___________________/
787         @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
788         @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
789         @   \___________________/ \____________________/
790         @
791         @ Note that we start with inp[2:3]*r^2. This is because it
792         @ doesn't depend on the reduction in the previous iteration.
793         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
794         @ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
795         @ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
796         @ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
797         @ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
798         @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
799
800         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
801         @ inp[2:3]*r^2
802
803         vadd.i32        $H2#lo,$H2#lo,$D2#lo    @ accumulate inp[0:1]
804         vmull.u32       $D2,$H2#hi,${R0}[1]
805         vadd.i32        $H0#lo,$H0#lo,$D0#lo
806         vmull.u32       $D0,$H0#hi,${R0}[1]
807         vadd.i32        $H3#lo,$H3#lo,$D3#lo
808         vmull.u32       $D3,$H3#hi,${R0}[1]
809         vmlal.u32       $D2,$H1#hi,${R1}[1]
810         vadd.i32        $H1#lo,$H1#lo,$D1#lo
811         vmull.u32       $D1,$H1#hi,${R0}[1]
812
813         vadd.i32        $H4#lo,$H4#lo,$D4#lo
814         vmull.u32       $D4,$H4#hi,${R0}[1]
815         subs            $len,$len,#64
816         vmlal.u32       $D0,$H4#hi,${S1}[1]
817         it              lo
818         movlo           $in2,$zeros
819         vmlal.u32       $D3,$H2#hi,${R1}[1]
820         vld1.32         ${S4}[1],[$tbl1,:32]
821         vmlal.u32       $D1,$H0#hi,${R1}[1]
822         vmlal.u32       $D4,$H3#hi,${R1}[1]
823
824         vmlal.u32       $D0,$H3#hi,${S2}[1]
825         vmlal.u32       $D3,$H1#hi,${R2}[1]
826         vmlal.u32       $D4,$H2#hi,${R2}[1]
827         vmlal.u32       $D1,$H4#hi,${S2}[1]
828         vmlal.u32       $D2,$H0#hi,${R2}[1]
829
830         vmlal.u32       $D3,$H0#hi,${R3}[1]
831         vmlal.u32       $D0,$H2#hi,${S3}[1]
832         vmlal.u32       $D4,$H1#hi,${R3}[1]
833         vmlal.u32       $D1,$H3#hi,${S3}[1]
834         vmlal.u32       $D2,$H4#hi,${S3}[1]
835
836         vmlal.u32       $D3,$H4#hi,${S4}[1]
837         vmlal.u32       $D0,$H1#hi,${S4}[1]
838         vmlal.u32       $D4,$H0#hi,${R4}[1]
839         vmlal.u32       $D1,$H2#hi,${S4}[1]
840         vmlal.u32       $D2,$H3#hi,${S4}[1]
841
842         vld4.32         {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]    @ inp[2:3] (or 0)
843         add             $in2,$in2,#64
844
845         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
846         @ (hash+inp[0:1])*r^4 and accumulate
847
848         vmlal.u32       $D3,$H3#lo,${R0}[0]
849         vmlal.u32       $D0,$H0#lo,${R0}[0]
850         vmlal.u32       $D4,$H4#lo,${R0}[0]
851         vmlal.u32       $D1,$H1#lo,${R0}[0]
852         vmlal.u32       $D2,$H2#lo,${R0}[0]
853         vld1.32         ${S4}[0],[$tbl0,:32]
854
855         vmlal.u32       $D3,$H2#lo,${R1}[0]
856         vmlal.u32       $D0,$H4#lo,${S1}[0]
857         vmlal.u32       $D4,$H3#lo,${R1}[0]
858         vmlal.u32       $D1,$H0#lo,${R1}[0]
859         vmlal.u32       $D2,$H1#lo,${R1}[0]
860
861         vmlal.u32       $D3,$H1#lo,${R2}[0]
862         vmlal.u32       $D0,$H3#lo,${S2}[0]
863         vmlal.u32       $D4,$H2#lo,${R2}[0]
864         vmlal.u32       $D1,$H4#lo,${S2}[0]
865         vmlal.u32       $D2,$H0#lo,${R2}[0]
866
867         vmlal.u32       $D3,$H0#lo,${R3}[0]
868         vmlal.u32       $D0,$H2#lo,${S3}[0]
869         vmlal.u32       $D4,$H1#lo,${R3}[0]
870         vmlal.u32       $D1,$H3#lo,${S3}[0]
871         vmlal.u32       $D3,$H4#lo,${S4}[0]
872
873         vmlal.u32       $D2,$H4#lo,${S3}[0]
874         vmlal.u32       $D0,$H1#lo,${S4}[0]
875         vmlal.u32       $D4,$H0#lo,${R4}[0]
876         vmov.i32        $H4,#1<<24              @ padbit, yes, always
877         vmlal.u32       $D1,$H2#lo,${S4}[0]
878         vmlal.u32       $D2,$H3#lo,${S4}[0]
879
880         vld4.32         {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]    @ inp[0:1]
881         add             $inp,$inp,#64
882 # ifdef __ARMEB__
883         vrev32.8        $H0,$H0
884         vrev32.8        $H1,$H1
885         vrev32.8        $H2,$H2
886         vrev32.8        $H3,$H3
887 # endif
888
889         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
890         @ lazy reduction interleaved with base 2^32 -> base 2^26
891
892         vshr.u64        $T0,$D3,#26
893         vmovn.i64       $D3#lo,$D3
894          vshr.u64       $T1,$D0,#26
895          vmovn.i64      $D0#lo,$D0
896         vadd.i64        $D4,$D4,$T0             @ h3 -> h4
897         vbic.i32        $D3#lo,#0xfc000000
898           vsri.u32      $H4,$H3,#8              @ base 2^32 -> base 2^26
899          vadd.i64       $D1,$D1,$T1             @ h0 -> h1
900           vshl.u32      $H3,$H3,#18
901          vbic.i32       $D0#lo,#0xfc000000
902
903         vshrn.u64       $T0#lo,$D4,#26
904         vmovn.i64       $D4#lo,$D4
905          vshr.u64       $T1,$D1,#26
906          vmovn.i64      $D1#lo,$D1
907          vadd.i64       $D2,$D2,$T1             @ h1 -> h2
908           vsri.u32      $H3,$H2,#14
909         vbic.i32        $D4#lo,#0xfc000000
910           vshl.u32      $H2,$H2,#12
911          vbic.i32       $D1#lo,#0xfc000000
912
913         vadd.i32        $D0#lo,$D0#lo,$T0#lo
914         vshl.u32        $T0#lo,$T0#lo,#2
915           vbic.i32      $H3,#0xfc000000
916          vshrn.u64      $T1#lo,$D2,#26
917          vmovn.i64      $D2#lo,$D2
918         vadd.i32        $D0#lo,$D0#lo,$T0#lo    @ h4 -> h0
919           vsri.u32      $H2,$H1,#20
920          vadd.i32       $D3#lo,$D3#lo,$T1#lo    @ h2 -> h3
921           vshl.u32      $H1,$H1,#6
922          vbic.i32       $D2#lo,#0xfc000000
923           vbic.i32      $H2,#0xfc000000
924
925         vshr.u32        $T0#lo,$D0#lo,#26
926         vbic.i32        $D0#lo,#0xfc000000
927           vsri.u32      $H1,$H0,#26
928           vbic.i32      $H0,#0xfc000000
929          vshr.u32       $T1#lo,$D3#lo,#26
930          vbic.i32       $D3#lo,#0xfc000000
931         vadd.i32        $D1#lo,$D1#lo,$T0#lo    @ h0 -> h1
932          vadd.i32       $D4#lo,$D4#lo,$T1#lo    @ h3 -> h4
933           vbic.i32      $H1,#0xfc000000
934
935         bhi             .Loop_neon
936
937 .Lskip_loop:
938         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
939         @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
940
941         add             $tbl1,$ctx,#(48+0*9*4)
942         add             $tbl0,$ctx,#(48+1*9*4)
943         adds            $len,$len,#32
944         it              ne
945         movne           $len,#0
946         bne             .Long_tail
947
948         vadd.i32        $H2#hi,$H2#lo,$D2#lo    @ add hash value and move to #hi
949         vadd.i32        $H0#hi,$H0#lo,$D0#lo
950         vadd.i32        $H3#hi,$H3#lo,$D3#lo
951         vadd.i32        $H1#hi,$H1#lo,$D1#lo
952         vadd.i32        $H4#hi,$H4#lo,$D4#lo
953
954 .Long_tail:
955         vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^1
956         vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^2
957
958         vadd.i32        $H2#lo,$H2#lo,$D2#lo    @ can be redundant
959         vmull.u32       $D2,$H2#hi,$R0
960         vadd.i32        $H0#lo,$H0#lo,$D0#lo
961         vmull.u32       $D0,$H0#hi,$R0
962         vadd.i32        $H3#lo,$H3#lo,$D3#lo
963         vmull.u32       $D3,$H3#hi,$R0
964         vadd.i32        $H1#lo,$H1#lo,$D1#lo
965         vmull.u32       $D1,$H1#hi,$R0
966         vadd.i32        $H4#lo,$H4#lo,$D4#lo
967         vmull.u32       $D4,$H4#hi,$R0
968
969         vmlal.u32       $D0,$H4#hi,$S1
970         vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
971         vmlal.u32       $D3,$H2#hi,$R1
972         vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
973         vmlal.u32       $D1,$H0#hi,$R1
974         vmlal.u32       $D4,$H3#hi,$R1
975         vmlal.u32       $D2,$H1#hi,$R1
976
977         vmlal.u32       $D3,$H1#hi,$R2
978         vld1.32         ${S4}[1],[$tbl1,:32]
979         vmlal.u32       $D0,$H3#hi,$S2
980         vld1.32         ${S4}[0],[$tbl0,:32]
981         vmlal.u32       $D4,$H2#hi,$R2
982         vmlal.u32       $D1,$H4#hi,$S2
983         vmlal.u32       $D2,$H0#hi,$R2
984
985         vmlal.u32       $D3,$H0#hi,$R3
986          it             ne
987          addne          $tbl1,$ctx,#(48+2*9*4)
988         vmlal.u32       $D0,$H2#hi,$S3
989          it             ne
990          addne          $tbl0,$ctx,#(48+3*9*4)
991         vmlal.u32       $D4,$H1#hi,$R3
992         vmlal.u32       $D1,$H3#hi,$S3
993         vmlal.u32       $D2,$H4#hi,$S3
994
995         vmlal.u32       $D3,$H4#hi,$S4
996          vorn           $MASK,$MASK,$MASK       @ all-ones, can be redundant
997         vmlal.u32       $D0,$H1#hi,$S4
998          vshr.u64       $MASK,$MASK,#38
999         vmlal.u32       $D4,$H0#hi,$R4
1000         vmlal.u32       $D1,$H2#hi,$S4
1001         vmlal.u32       $D2,$H3#hi,$S4
1002
1003         beq             .Lshort_tail
1004
1005         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
1006         @ (hash+inp[0:1])*r^4:r^3 and accumulate
1007
1008         vld4.32         {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!  @ load r^3
1009         vld4.32         {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!  @ load r^4
1010
1011         vmlal.u32       $D2,$H2#lo,$R0
1012         vmlal.u32       $D0,$H0#lo,$R0
1013         vmlal.u32       $D3,$H3#lo,$R0
1014         vmlal.u32       $D1,$H1#lo,$R0
1015         vmlal.u32       $D4,$H4#lo,$R0
1016
1017         vmlal.u32       $D0,$H4#lo,$S1
1018         vld4.32         {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
1019         vmlal.u32       $D3,$H2#lo,$R1
1020         vld4.32         {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
1021         vmlal.u32       $D1,$H0#lo,$R1
1022         vmlal.u32       $D4,$H3#lo,$R1
1023         vmlal.u32       $D2,$H1#lo,$R1
1024
1025         vmlal.u32       $D3,$H1#lo,$R2
1026         vld1.32         ${S4}[1],[$tbl1,:32]
1027         vmlal.u32       $D0,$H3#lo,$S2
1028         vld1.32         ${S4}[0],[$tbl0,:32]
1029         vmlal.u32       $D4,$H2#lo,$R2
1030         vmlal.u32       $D1,$H4#lo,$S2
1031         vmlal.u32       $D2,$H0#lo,$R2
1032
1033         vmlal.u32       $D3,$H0#lo,$R3
1034         vmlal.u32       $D0,$H2#lo,$S3
1035         vmlal.u32       $D4,$H1#lo,$R3
1036         vmlal.u32       $D1,$H3#lo,$S3
1037         vmlal.u32       $D2,$H4#lo,$S3
1038
1039         vmlal.u32       $D3,$H4#lo,$S4
1040          vorn           $MASK,$MASK,$MASK       @ all-ones
1041         vmlal.u32       $D0,$H1#lo,$S4
1042          vshr.u64       $MASK,$MASK,#38
1043         vmlal.u32       $D4,$H0#lo,$R4
1044         vmlal.u32       $D1,$H2#lo,$S4
1045         vmlal.u32       $D2,$H3#lo,$S4
1046
1047 .Lshort_tail:
1048         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
1049         @ horizontal addition
1050
1051         vadd.i64        $D3#lo,$D3#lo,$D3#hi
1052         vadd.i64        $D0#lo,$D0#lo,$D0#hi
1053         vadd.i64        $D4#lo,$D4#lo,$D4#hi
1054         vadd.i64        $D1#lo,$D1#lo,$D1#hi
1055         vadd.i64        $D2#lo,$D2#lo,$D2#hi
1056
1057         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
1058         @ lazy reduction, but without narrowing
1059
1060         vshr.u64        $T0,$D3,#26
1061         vand.i64        $D3,$D3,$MASK
1062          vshr.u64       $T1,$D0,#26
1063          vand.i64       $D0,$D0,$MASK
1064         vadd.i64        $D4,$D4,$T0             @ h3 -> h4
1065          vadd.i64       $D1,$D1,$T1             @ h0 -> h1
1066
1067         vshr.u64        $T0,$D4,#26
1068         vand.i64        $D4,$D4,$MASK
1069          vshr.u64       $T1,$D1,#26
1070          vand.i64       $D1,$D1,$MASK
1071          vadd.i64       $D2,$D2,$T1             @ h1 -> h2
1072
1073         vadd.i64        $D0,$D0,$T0
1074         vshl.u64        $T0,$T0,#2
1075          vshr.u64       $T1,$D2,#26
1076          vand.i64       $D2,$D2,$MASK
1077         vadd.i64        $D0,$D0,$T0             @ h4 -> h0
1078          vadd.i64       $D3,$D3,$T1             @ h2 -> h3
1079
1080         vshr.u64        $T0,$D0,#26
1081         vand.i64        $D0,$D0,$MASK
1082          vshr.u64       $T1,$D3,#26
1083          vand.i64       $D3,$D3,$MASK
1084         vadd.i64        $D1,$D1,$T0             @ h0 -> h1
1085          vadd.i64       $D4,$D4,$T1             @ h3 -> h4
1086
1087         cmp             $len,#0
1088         bne             .Leven
1089
1090         @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
1091         @ store hash value
1092
1093         vst4.32         {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
1094         vst1.32         {$D4#lo[0]},[$ctx]
1095
1096         vldmia  sp!,{d8-d15}                    @ epilogue
1097         ldmia   sp!,{r4-r7}
1098 .Lno_data_neon:
1099         ret                                     @ bx    lr
1100 .size   poly1305_blocks_neon,.-poly1305_blocks_neon
1101
1102 .type   poly1305_emit_neon,%function
1103 .align  5
1104 poly1305_emit_neon:
1105         ldr     ip,[$ctx,#36]           @ is_base2_26
1106
1107         stmdb   sp!,{r4-r11}
1108
1109         tst     ip,ip
1110         beq     .Lpoly1305_emit_enter
1111
1112         ldmia   $ctx,{$h0-$h4}
1113         eor     $g0,$g0,$g0
1114
1115         adds    $h0,$h0,$h1,lsl#26      @ base 2^26 -> base 2^32
1116         mov     $h1,$h1,lsr#6
1117         adcs    $h1,$h1,$h2,lsl#20
1118         mov     $h2,$h2,lsr#12
1119         adcs    $h2,$h2,$h3,lsl#14
1120         mov     $h3,$h3,lsr#18
1121         adcs    $h3,$h3,$h4,lsl#8
1122         adc     $h4,$g0,$h4,lsr#24      @ can be partially reduced ...
1123
1124         and     $g0,$h4,#-4             @ ... so reduce
1125         and     $h4,$h4,#3
1126         add     $g0,$g0,$g0,lsr#2       @ *= 5
1127         adds    $h0,$h0,$g0
1128         adcs    $h1,$h1,#0
1129         adcs    $h2,$h2,#0
1130         adcs    $h3,$h3,#0
1131         adc     $h4,$h4,#0
1132
1133         adds    $g0,$h0,#5              @ compare to modulus
1134         adcs    $g1,$h1,#0
1135         adcs    $g2,$h2,#0
1136         adcs    $g3,$h3,#0
1137         adc     $g4,$h4,#0
1138         tst     $g4,#4                  @ did it carry/borrow?
1139
1140         it      ne
1141         movne   $h0,$g0
1142         ldr     $g0,[$nonce,#0]
1143         it      ne
1144         movne   $h1,$g1
1145         ldr     $g1,[$nonce,#4]
1146         it      ne
1147         movne   $h2,$g2
1148         ldr     $g2,[$nonce,#8]
1149         it      ne
1150         movne   $h3,$g3
1151         ldr     $g3,[$nonce,#12]
1152
1153         adds    $h0,$h0,$g0             @ accumulate nonce
1154         adcs    $h1,$h1,$g1
1155         adcs    $h2,$h2,$g2
1156         adc     $h3,$h3,$g3
1157
1158 # ifdef __ARMEB__
1159         rev     $h0,$h0
1160         rev     $h1,$h1
1161         rev     $h2,$h2
1162         rev     $h3,$h3
1163 # endif
1164         str     $h0,[$mac,#0]           @ store the result
1165         str     $h1,[$mac,#4]
1166         str     $h2,[$mac,#8]
1167         str     $h3,[$mac,#12]
1168
1169         ldmia   sp!,{r4-r11}
1170         ret                             @ bx    lr
1171 .size   poly1305_emit_neon,.-poly1305_emit_neon
1172
1173 .align  5
1174 .Lzeros:
1175 .long   0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
1176 .LOPENSSL_armcap:
1177 .word   OPENSSL_armcap_P-.Lpoly1305_init
1178 #endif
1179 ___
1180 }       }
1181 $code.=<<___;
1182 .asciz  "Poly1305 for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
1183 .align  2
1184 #if     __ARM_MAX_ARCH__>=7
1185 .comm   OPENSSL_armcap_P,4,4
1186 #endif
1187 ___
1188
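# Post-process the generated code: expand the q<n>#lo/#hi shorthand into the
# matching d registers, turn ret into bx lr, and encode a literal bx lr as a
# .word so the output can still be assembled with -march=armv4.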
1189 foreach (split("\n",$code)) {
1190         s/\`([^\`]*)\`/eval $1/geo;
1191
1192         s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo       or
1193         s/\bret\b/bx    lr/go                                           or
1194         s/\bbx\s+lr\b/.word\t0xe12fff1e/go;     # make it possible to compile with -march=armv4
1195
1196         print $_,"\n";
1197 }
1198 close STDOUT; # enforce flush