#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# December 2014
#
# ChaCha20 for ARMv4.
#
# Performance in cycles per byte out of large buffer.
#
#                       IALU/gcc-4.4    1xNEON      3xNEON+1xIALU
#
# Cortex-A5             19.3(*)/+95%    21.8        14.1
# Cortex-A8             10.5(*)/+160%   13.9        6.35
# Cortex-A9             12.9(**)/+110%  14.3        6.50
# Cortex-A15            11.0/+40%       16.0        5.00
# Snapdragon S4         11.5/+125%      13.6        4.90
#
# (*)   most "favourable" result for aligned data on a little-endian
#       processor; results for misaligned data are 10-15% lower;
# (**)  this result is a trade-off: it can be improved by 20%, but
#       then the Snapdragon S4 and Cortex-A8 results get 20-25%
#       worse;

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}

sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}

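# For illustration only (a sketch, not executed): a call such as
#       &vadd_i32 ($a,$a,$b);
# is caught by AUTOLOAD above, which strips the package prefix, turns
# the first '_' into '.', and prefixes a bare numeric last argument
# with '#', appending e.g. "\tvadd.i32\tq0,q0,q1\n" to $code; likewise
#       &mov ('r8','r8','ror#16');
# appends "\tmov\tr8,r8,ror#16\n".
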
my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
my @t=map("r$_",(8..11));
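# State words 0-7 live permanently in r0-r7, and words 12 and 14 in
# r12 and r14; the remaining slots (words 8-11, 13 and 15) are mere
# placeholders: those words are kept on the stack and cycled through
# the temporaries in @t.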

sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my $odd = $d0&1;
my ($xc,$xc_) = (@t[0..1]);
my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
my @ret;

        # Consider the order in which the variables are addressed by
        # their index:
        #
        #       a   b   c   d
        #
        #       0   4   8  12 < even round
        #       1   5   9  13
        #       2   6  10  14
        #       3   7  11  15
        #       0   5  10  15 < odd round
        #       1   6  11  12
        #       2   7   8  13
        #       3   4   9  14
        #
        # 'a' and 'b' are permanently allocated in registers,
        # @x[0..7], while the 'c's and a pair of the 'd's are
        # maintained in memory. If you observe the 'c' column, you'll
        # notice that the pairing of the 'c's is invariant between
        # rounds. This means that we have to reload them once per
        # round, in the middle; this is why you'll see a bunch of 'c'
        # stores and loads in the middle, but none at the beginning
        # or end. If you observe the 'd' column, you'll notice that
        # 15 and 13 are reused in the next pair of rounds, which is
        # why these two are the ones offloaded to memory: it makes
        # the loads count more.
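        #
        # For reference (a sketch, not executed), each group below is
        # one ChaCha quarter round on a column (a,b,c,d):
        #
        #       a += b; d ^= a; d = rotl(d,16);
        #       c += d; b ^= c; b = rotl(b,12);
        #       a += b; d ^= a; d = rotl(d, 8);
        #       c += d; b ^= c; b = rotl(b, 7);
        #
        # The left-rotates by 16/12/8/7 appear as 'ror#16/20/24/25'
        # because values are kept in right-rotated form and the
        # rotation is folded into the shifter operand of the next
        # instruction that consumes them.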
                                                        push @ret,(
        "&add   (@x[$a0],@x[$a0],@x[$b0])",
        "&mov   ($xd,$xd,'ror#16')",
         "&add  (@x[$a1],@x[$a1],@x[$b1])",
         "&mov  ($xd_,$xd_,'ror#16')",
        "&eor   ($xd,$xd,@x[$a0],'ror#16')",
         "&eor  ($xd_,$xd_,@x[$a1],'ror#16')",

        "&add   ($xc,$xc,$xd)",
        "&mov   (@x[$b0],@x[$b0],'ror#20')",
         "&add  ($xc_,$xc_,$xd_)",
         "&mov  (@x[$b1],@x[$b1],'ror#20')",
        "&eor   (@x[$b0],@x[$b0],$xc,'ror#20')",
         "&eor  (@x[$b1],@x[$b1],$xc_,'ror#20')",

        "&add   (@x[$a0],@x[$a0],@x[$b0])",
        "&mov   ($xd,$xd,'ror#24')",
         "&add  (@x[$a1],@x[$a1],@x[$b1])",
         "&mov  ($xd_,$xd_,'ror#24')",
        "&eor   ($xd,$xd,@x[$a0],'ror#24')",
         "&eor  ($xd_,$xd_,@x[$a1],'ror#24')",

        "&add   ($xc,$xc,$xd)",
        "&mov   (@x[$b0],@x[$b0],'ror#25')"             );
                                                        push @ret,(
        "&str   ($xd,'[sp,#4*(16+$d0)]')",
        "&ldr   ($xd,'[sp,#4*(16+$d2)]')"               ) if ($odd);
                                                        push @ret,(
         "&add  ($xc_,$xc_,$xd_)",
         "&mov  (@x[$b1],@x[$b1],'ror#25')"             );
                                                        push @ret,(
         "&str  ($xd_,'[sp,#4*(16+$d1)]')",
         "&ldr  ($xd_,'[sp,#4*(16+$d3)]')"              ) if (!$odd);
                                                        push @ret,(
        "&eor   (@x[$b0],@x[$b0],$xc,'ror#25')",
         "&eor  (@x[$b1],@x[$b1],$xc_,'ror#25')"        );

        $xd=@x[$d2]                                     if (!$odd);
        $xd_=@x[$d3]                                    if ($odd);
                                                        push @ret,(
        "&str   ($xc,'[sp,#4*(16+$c0)]')",
        "&ldr   ($xc,'[sp,#4*(16+$c2)]')",
        "&add   (@x[$a2],@x[$a2],@x[$b2])",
        "&mov   ($xd,$xd,'ror#16')",
         "&str  ($xc_,'[sp,#4*(16+$c1)]')",
         "&ldr  ($xc_,'[sp,#4*(16+$c3)]')",
         "&add  (@x[$a3],@x[$a3],@x[$b3])",
         "&mov  ($xd_,$xd_,'ror#16')",
        "&eor   ($xd,$xd,@x[$a2],'ror#16')",
         "&eor  ($xd_,$xd_,@x[$a3],'ror#16')",

        "&add   ($xc,$xc,$xd)",
        "&mov   (@x[$b2],@x[$b2],'ror#20')",
         "&add  ($xc_,$xc_,$xd_)",
         "&mov  (@x[$b3],@x[$b3],'ror#20')",
        "&eor   (@x[$b2],@x[$b2],$xc,'ror#20')",
         "&eor  (@x[$b3],@x[$b3],$xc_,'ror#20')",

        "&add   (@x[$a2],@x[$a2],@x[$b2])",
        "&mov   ($xd,$xd,'ror#24')",
         "&add  (@x[$a3],@x[$a3],@x[$b3])",
         "&mov  ($xd_,$xd_,'ror#24')",
        "&eor   ($xd,$xd,@x[$a2],'ror#24')",
         "&eor  ($xd_,$xd_,@x[$a3],'ror#24')",

        "&add   ($xc,$xc,$xd)",
        "&mov   (@x[$b2],@x[$b2],'ror#25')",
         "&add  ($xc_,$xc_,$xd_)",
         "&mov  (@x[$b3],@x[$b3],'ror#25')",
        "&eor   (@x[$b2],@x[$b2],$xc,'ror#25')",
         "&eor  (@x[$b3],@x[$b3],$xc_,'ror#25')"        );

        @ret;
}

$code.=<<___;
#include "arm_arch.h"

#if defined(__thumb2__) || defined(__clang__)
.syntax unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code   32
#endif

#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb  ldrbhs
#endif

.text

.align  5
.Lsigma:
.long   0x61707865,0x3320646e,0x79622d32,0x6b206574     @ endian-neutral
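@ (the four sigma words spell the ASCII string "expand 32-byte k")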
.Lone:
.long   1,0,0,0
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
# ifdef _WIN32
.word   OPENSSL_armcap_P
# else
.word   OPENSSL_armcap_P-.LChaCha20_ctr32
# endif
#else
.word   -1
#endif

.globl  ChaCha20_ctr32
.type   ChaCha20_ctr32,%function
.align  5
ChaCha20_ctr32:
.LChaCha20_ctr32:
        ldr     r12,[sp,#0]             @ pull pointer to counter and nonce
        stmdb   sp!,{r0-r2,r4-r11,lr}
#if __ARM_ARCH__<7 && !defined(__thumb2__)
        sub     r14,pc,#16              @ ChaCha20_ctr32
#else
        adr     r14,.LChaCha20_ctr32
#endif
        cmp     r2,#0                   @ len==0?
#ifdef  __thumb2__
        itt     eq
#endif
        addeq   sp,sp,#4*3
        beq     .Lno_data
#if __ARM_MAX_ARCH__>=7
        cmp     r2,#192                 @ test len
        bls     .Lshort
        ldr     r4,[r14,#-32]
# if !defined(_WIN32)
        ldr     r4,[r14,r4]
# endif
# if defined(__APPLE__) || defined(_WIN32)
        ldr     r4,[r4]
# endif
        tst     r4,#ARMV7_NEON
        bne     .LChaCha20_neon
.Lshort:
#endif
        ldmia   r12,{r4-r7}             @ load counter and nonce
        sub     sp,sp,#4*(16)           @ off-load area
        sub     r14,r14,#64             @ .Lsigma
        stmdb   sp!,{r4-r7}             @ copy counter and nonce
        ldmia   r3,{r4-r11}             @ load key
        ldmia   r14,{r0-r3}             @ load sigma
        stmdb   sp!,{r4-r11}            @ copy key
        stmdb   sp!,{r0-r3}             @ copy sigma
        str     r10,[sp,#4*(16+10)]     @ off-load "@x[10]"
        str     r11,[sp,#4*(16+11)]     @ off-load "@x[11]"
        b       .Loop_outer_enter

.align  4
.Loop_outer:
        ldmia   sp,{r0-r9}              @ load key material
        str     @t[3],[sp,#4*(32+2)]    @ save len
        str     r12,  [sp,#4*(32+1)]    @ save inp
        str     r14,  [sp,#4*(32+0)]    @ save out
.Loop_outer_enter:
        ldr     @t[3], [sp,#4*(15)]
        ldr     @x[12],[sp,#4*(12)]     @ modulo-scheduled load
        ldr     @t[2], [sp,#4*(13)]
        ldr     @x[14],[sp,#4*(14)]
        str     @t[3], [sp,#4*(16+15)]
        mov     @t[3],#10
        b       .Loop

.align  4
.Loop:
        subs    @t[3],@t[3],#1
___
        foreach (&ROUND(0, 4, 8,12)) { eval; }
        foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
        bne     .Loop

        ldr     @t[3],[sp,#4*(32+2)]    @ load len

        str     @t[0], [sp,#4*(16+8)]   @ modulo-scheduled store
        str     @t[1], [sp,#4*(16+9)]
        str     @x[12],[sp,#4*(16+12)]
        str     @t[2], [sp,#4*(16+13)]
        str     @x[14],[sp,#4*(16+14)]

        @ at this point we have first half of 512-bit result in
        @ @x[0-7] and second half at sp+4*(16+8)

        cmp     @t[3],#64               @ done yet?
#ifdef  __thumb2__
        itete   lo
#endif
        addlo   r12,sp,#4*(0)           @ shortcut or ...
        ldrhs   r12,[sp,#4*(32+1)]      @ ... load inp
        addlo   r14,sp,#4*(0)           @ shortcut or ...
        ldrhs   r14,[sp,#4*(32+0)]      @ ... load out

        ldr     @t[0],[sp,#4*(0)]       @ load key material
        ldr     @t[1],[sp,#4*(1)]

#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
# if __ARM_ARCH__<7
        orr     @t[2],r12,r14
        tst     @t[2],#3                @ are input and output aligned?
        ldr     @t[2],[sp,#4*(2)]
        bne     .Lunaligned
        cmp     @t[3],#64               @ restore flags
# else
        ldr     @t[2],[sp,#4*(2)]
# endif
        ldr     @t[3],[sp,#4*(3)]

        add     @x[0],@x[0],@t[0]       @ accumulate key material
        add     @x[1],@x[1],@t[1]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[0],[r12],#16         @ load input
        ldrhs   @t[1],[r12,#-12]

        add     @x[2],@x[2],@t[2]
        add     @x[3],@x[3],@t[3]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[2],[r12,#-8]
        ldrhs   @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
        rev     @x[0],@x[0]
        rev     @x[1],@x[1]
        rev     @x[2],@x[2]
        rev     @x[3],@x[3]
# endif
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[0],@x[0],@t[0]       @ xor with input
        eorhs   @x[1],@x[1],@t[1]
         add    @t[0],sp,#4*(4)
        str     @x[0],[r14],#16         @ store output
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[2],@x[2],@t[2]
        eorhs   @x[3],@x[3],@t[3]
         ldmia  @t[0],{@t[0]-@t[3]}     @ load key material
        str     @x[1],[r14,#-12]
        str     @x[2],[r14,#-8]
        str     @x[3],[r14,#-4]

        add     @x[4],@x[4],@t[0]       @ accumulate key material
        add     @x[5],@x[5],@t[1]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[0],[r12],#16         @ load input
        ldrhs   @t[1],[r12,#-12]
        add     @x[6],@x[6],@t[2]
        add     @x[7],@x[7],@t[3]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[2],[r12,#-8]
        ldrhs   @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
        rev     @x[4],@x[4]
        rev     @x[5],@x[5]
        rev     @x[6],@x[6]
        rev     @x[7],@x[7]
# endif
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[4],@x[4],@t[0]
        eorhs   @x[5],@x[5],@t[1]
         add    @t[0],sp,#4*(8)
        str     @x[4],[r14],#16         @ store output
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[6],@x[6],@t[2]
        eorhs   @x[7],@x[7],@t[3]
        str     @x[5],[r14,#-12]
         ldmia  @t[0],{@t[0]-@t[3]}     @ load key material
        str     @x[6],[r14,#-8]
         add    @x[0],sp,#4*(16+8)
        str     @x[7],[r14,#-4]

        ldmia   @x[0],{@x[0]-@x[7]}     @ load second half

        add     @x[0],@x[0],@t[0]       @ accumulate key material
        add     @x[1],@x[1],@t[1]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[0],[r12],#16         @ load input
        ldrhs   @t[1],[r12,#-12]
# ifdef __thumb2__
        itt     hi
# endif
         strhi  @t[2],[sp,#4*(16+10)]   @ copy "@x[10]" while at it
         strhi  @t[3],[sp,#4*(16+11)]   @ copy "@x[11]" while at it
        add     @x[2],@x[2],@t[2]
        add     @x[3],@x[3],@t[3]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[2],[r12,#-8]
        ldrhs   @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
        rev     @x[0],@x[0]
        rev     @x[1],@x[1]
        rev     @x[2],@x[2]
        rev     @x[3],@x[3]
# endif
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[0],@x[0],@t[0]
        eorhs   @x[1],@x[1],@t[1]
         add    @t[0],sp,#4*(12)
        str     @x[0],[r14],#16         @ store output
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[2],@x[2],@t[2]
        eorhs   @x[3],@x[3],@t[3]
        str     @x[1],[r14,#-12]
         ldmia  @t[0],{@t[0]-@t[3]}     @ load key material
        str     @x[2],[r14,#-8]
        str     @x[3],[r14,#-4]

        add     @x[4],@x[4],@t[0]       @ accumulate key material
        add     @x[5],@x[5],@t[1]
# ifdef __thumb2__
        itt     hi
# endif
         addhi  @t[0],@t[0],#1          @ next counter value
         strhi  @t[0],[sp,#4*(12)]      @ save next counter value
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[0],[r12],#16         @ load input
        ldrhs   @t[1],[r12,#-12]
        add     @x[6],@x[6],@t[2]
        add     @x[7],@x[7],@t[3]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhs   @t[2],[r12,#-8]
        ldrhs   @t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
        rev     @x[4],@x[4]
        rev     @x[5],@x[5]
        rev     @x[6],@x[6]
        rev     @x[7],@x[7]
# endif
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[4],@x[4],@t[0]
        eorhs   @x[5],@x[5],@t[1]
# ifdef __thumb2__
         it     ne
# endif
         ldrne  @t[0],[sp,#4*(32+2)]    @ re-load len
# ifdef __thumb2__
        itt     hs
# endif
        eorhs   @x[6],@x[6],@t[2]
        eorhs   @x[7],@x[7],@t[3]
        str     @x[4],[r14],#16         @ store output
        str     @x[5],[r14,#-12]
# ifdef __thumb2__
        it      hs
# endif
         subhs  @t[3],@t[0],#64         @ len-=64
        str     @x[6],[r14,#-8]
        str     @x[7],[r14,#-4]
        bhi     .Loop_outer

        beq     .Ldone
# if __ARM_ARCH__<7
        b       .Ltail

.align  4
.Lunaligned:                            @ unaligned endian-neutral path
        cmp     @t[3],#64               @ restore flags
# endif
#endif
#if __ARM_ARCH__<7
        ldr     @t[3],[sp,#4*(3)]
___
for ($i=0;$i<16;$i+=4) {
my $j=$i&0x7;

$code.=<<___    if ($i==4);
        add     @x[0],sp,#4*(16+8)
___
$code.=<<___    if ($i==8);
        ldmia   @x[0],{@x[0]-@x[7]}             @ load second half
# ifdef __thumb2__
        itt     hi
# endif
        strhi   @t[2],[sp,#4*(16+10)]           @ copy "@x[10]"
        strhi   @t[3],[sp,#4*(16+11)]           @ copy "@x[11]"
___
$code.=<<___;
        add     @x[$j+0],@x[$j+0],@t[0]         @ accumulate key material
___
$code.=<<___    if ($i==12);
# ifdef __thumb2__
        itt     hi
# endif
        addhi   @t[0],@t[0],#1                  @ next counter value
        strhi   @t[0],[sp,#4*(12)]              @ save next counter value
___
$code.=<<___;
        add     @x[$j+1],@x[$j+1],@t[1]
        add     @x[$j+2],@x[$j+2],@t[2]
# ifdef __thumb2__
        itete   lo
# endif
        eorlo   @t[0],@t[0],@t[0]               @ zero or ...
        ldrhsb  @t[0],[r12],#16                 @ ... load input
        eorlo   @t[1],@t[1],@t[1]
        ldrhsb  @t[1],[r12,#-12]

        add     @x[$j+3],@x[$j+3],@t[3]
# ifdef __thumb2__
        itete   lo
# endif
        eorlo   @t[2],@t[2],@t[2]
        ldrhsb  @t[2],[r12,#-8]
        eorlo   @t[3],@t[3],@t[3]
        ldrhsb  @t[3],[r12,#-4]

        eor     @x[$j+0],@t[0],@x[$j+0]         @ xor with input (or zero)
        eor     @x[$j+1],@t[1],@x[$j+1]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[0],[r12,#-15]                @ load more input
        ldrhsb  @t[1],[r12,#-11]
        eor     @x[$j+2],@t[2],@x[$j+2]
         strb   @x[$j+0],[r14],#16              @ store output
        eor     @x[$j+3],@t[3],@x[$j+3]
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[2],[r12,#-7]
        ldrhsb  @t[3],[r12,#-3]
         strb   @x[$j+1],[r14,#-12]
        eor     @x[$j+0],@t[0],@x[$j+0],lsr#8
         strb   @x[$j+2],[r14,#-8]
        eor     @x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[0],[r12,#-14]                @ load more input
        ldrhsb  @t[1],[r12,#-10]
         strb   @x[$j+3],[r14,#-4]
        eor     @x[$j+2],@t[2],@x[$j+2],lsr#8
         strb   @x[$j+0],[r14,#-15]
        eor     @x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[2],[r12,#-6]
        ldrhsb  @t[3],[r12,#-2]
         strb   @x[$j+1],[r14,#-11]
        eor     @x[$j+0],@t[0],@x[$j+0],lsr#8
         strb   @x[$j+2],[r14,#-7]
        eor     @x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[0],[r12,#-13]                @ load more input
        ldrhsb  @t[1],[r12,#-9]
         strb   @x[$j+3],[r14,#-3]
        eor     @x[$j+2],@t[2],@x[$j+2],lsr#8
         strb   @x[$j+0],[r14,#-14]
        eor     @x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef __thumb2__
        itt     hs
# endif
        ldrhsb  @t[2],[r12,#-5]
        ldrhsb  @t[3],[r12,#-1]
         strb   @x[$j+1],[r14,#-10]
         strb   @x[$j+2],[r14,#-6]
        eor     @x[$j+0],@t[0],@x[$j+0],lsr#8
         strb   @x[$j+3],[r14,#-2]
        eor     @x[$j+1],@t[1],@x[$j+1],lsr#8
         strb   @x[$j+0],[r14,#-13]
        eor     @x[$j+2],@t[2],@x[$j+2],lsr#8
         strb   @x[$j+1],[r14,#-9]
        eor     @x[$j+3],@t[3],@x[$j+3],lsr#8
         strb   @x[$j+2],[r14,#-5]
         strb   @x[$j+3],[r14,#-1]
___
$code.=<<___    if ($i<12);
        add     @t[0],sp,#4*(4+$i)
        ldmia   @t[0],{@t[0]-@t[3]}             @ load key material
___
}
$code.=<<___;
# ifdef __thumb2__
        it      ne
# endif
        ldrne   @t[0],[sp,#4*(32+2)]            @ re-load len
# ifdef __thumb2__
        it      hs
# endif
        subhs   @t[3],@t[0],#64                 @ len-=64
        bhi     .Loop_outer

        beq     .Ldone
#endif

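        @ XOR the final, partial block byte by byte against the last
        @ keystream block, which was stored to the bottom of the
        @ stack frame by the "shortcut" path above.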
.Ltail:
        ldr     r12,[sp,#4*(32+1)]      @ load inp
        add     @t[1],sp,#4*(0)
        ldr     r14,[sp,#4*(32+0)]      @ load out

.Loop_tail:
        ldrb    @t[2],[@t[1]],#1        @ read buffer on stack
        ldrb    @t[3],[r12],#1          @ read input
        subs    @t[0],@t[0],#1
        eor     @t[3],@t[3],@t[2]
        strb    @t[3],[r14],#1          @ store output
        bne     .Loop_tail

.Ldone:
        add     sp,sp,#4*(32+3)
.Lno_data:
        ldmia   sp!,{r4-r11,pc}
.size   ChaCha20_ctr32,.-ChaCha20_ctr32
___

{{{
my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
    map("q$_",(0..15));

sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

        (
        "&vadd_i32      ($a,$a,$b)",
        "&veor          ($d,$d,$a)",
        "&vrev32_16     ($d,$d)",       # vrot ($d,16)

        "&vadd_i32      ($c,$c,$d)",
        "&veor          ($t,$b,$c)",
        "&vshr_u32      ($b,$t,20)",
        "&vsli_32       ($b,$t,12)",

        "&vadd_i32      ($a,$a,$b)",
        "&veor          ($t,$d,$a)",
        "&vshr_u32      ($d,$t,24)",
        "&vsli_32       ($d,$t,8)",

        "&vadd_i32      ($c,$c,$d)",
        "&veor          ($t,$b,$c)",
        "&vshr_u32      ($b,$t,25)",
        "&vsli_32       ($b,$t,7)",

        "&vext_8        ($c,$c,$c,8)",
        "&vext_8        ($b,$b,$b,$odd?12:4)",
        "&vext_8        ($d,$d,$d,$odd?4:12)"
        );
}

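# NEON has no vector rotate, so each rotate above is synthesized from
# a shift pair: e.g. "vshr.u32 b,t,#20" followed by "vsli.32 b,t,#12"
# yields rotl(t,12); the rotate by 16 is a single vrev32.16. The
# trailing vext.8 instructions realign the b/c/d rows between column
# ($odd==0) and diagonal ($odd==1) rounds.
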
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch   armv7-a
.fpu    neon

.type   ChaCha20_neon,%function
.align  5
ChaCha20_neon:
        ldr             r12,[sp,#0]             @ pull pointer to counter and nonce
        stmdb           sp!,{r0-r2,r4-r11,lr}
.LChaCha20_neon:
        adr             r14,.Lsigma
        vstmdb          sp!,{d8-d15}            @ ABI spec says so
        stmdb           sp!,{r0-r3}

        vld1.32         {$b0-$c0},[r3]          @ load key
        ldmia           r3,{r4-r11}             @ load key

        sub             sp,sp,#4*(16+16)
        vld1.32         {$d0},[r12]             @ load counter and nonce
        add             r12,sp,#4*8
        ldmia           r14,{r0-r3}             @ load sigma
        vld1.32         {$a0},[r14]!            @ load sigma
        vld1.32         {$t0},[r14]             @ one
        vst1.32         {$c0-$d0},[r12]         @ copy 1/2key|counter|nonce
        vst1.32         {$a0-$b0},[sp]          @ copy sigma|1/2key

        str             r10,[sp,#4*(16+10)]     @ off-load "@x[10]"
        str             r11,[sp,#4*(16+11)]     @ off-load "@x[11]"
        vshl.i32        $t1#lo,$t0#lo,#1        @ two
        vstr            $t0#lo,[sp,#4*(16+0)]
        vshl.i32        $t2#lo,$t0#lo,#2        @ four
        vstr            $t1#lo,[sp,#4*(16+2)]
        vmov            $a1,$a0
        vstr            $t2#lo,[sp,#4*(16+4)]
        vmov            $a2,$a0
        vmov            $b1,$b0
        vmov            $b2,$b0
        b               .Loop_neon_enter

.align  4
.Loop_neon_outer:
        ldmia           sp,{r0-r9}              @ load key material
        cmp             @t[3],#64*2             @ if len<=64*2
        bls             .Lbreak_neon            @ switch to integer-only
        vmov            $a1,$a0
        str             @t[3],[sp,#4*(32+2)]    @ save len
        vmov            $a2,$a0
        str             r12,  [sp,#4*(32+1)]    @ save inp
        vmov            $b1,$b0
        str             r14,  [sp,#4*(32+0)]    @ save out
        vmov            $b2,$b0
.Loop_neon_enter:
        ldr             @t[3], [sp,#4*(15)]
        vadd.i32        $d1,$d0,$t0             @ counter+1
        ldr             @x[12],[sp,#4*(12)]     @ modulo-scheduled load
        vmov            $c1,$c0
        ldr             @t[2], [sp,#4*(13)]
        vmov            $c2,$c0
        ldr             @x[14],[sp,#4*(14)]
        vadd.i32        $d2,$d1,$t0             @ counter+2
        str             @t[3], [sp,#4*(16+15)]
        mov             @t[3],#10
        add             @x[12],@x[12],#3        @ counter+3
        b               .Loop_neon

.align  4
.Loop_neon:
        subs            @t[3],@t[3],#1
___
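        # Interleave three NEON "threads" with one scalar thread (the
        # 3xNEON+1xIALU mode from the performance table): each NEON
        # instruction is paired with a scalar instruction from an
        # integer-register round, so four blocks are computed per
        # iteration while both issue pipes stay busy.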
        my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
        my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
        my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
        my @thread3=&ROUND(0,4,8,12);

        foreach (@thread0) {
                eval;                   eval(shift(@thread3));
                eval(shift(@thread1));  eval(shift(@thread3));
                eval(shift(@thread2));  eval(shift(@thread3));
        }

        @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
        @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
        @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
        @thread3=&ROUND(0,5,10,15);

        foreach (@thread0) {
                eval;                   eval(shift(@thread3));
                eval(shift(@thread1));  eval(shift(@thread3));
                eval(shift(@thread2));  eval(shift(@thread3));
        }
$code.=<<___;
        bne             .Loop_neon

        add             @t[3],sp,#32
        vld1.32         {$t0-$t1},[sp]          @ load key material
        vld1.32         {$t2-$t3},[@t[3]]

        ldr             @t[3],[sp,#4*(32+2)]    @ load len

        str             @t[0], [sp,#4*(16+8)]   @ modulo-scheduled store
        str             @t[1], [sp,#4*(16+9)]
        str             @x[12],[sp,#4*(16+12)]
        str             @t[2], [sp,#4*(16+13)]
        str             @x[14],[sp,#4*(16+14)]

        @ at this point we have first half of 512-bit result in
        @ @x[0-7] and second half at sp+4*(16+8)

        ldr             r12,[sp,#4*(32+1)]      @ load inp
        ldr             r14,[sp,#4*(32+0)]      @ load out

        vadd.i32        $a0,$a0,$t0             @ accumulate key material
        vadd.i32        $a1,$a1,$t0
        vadd.i32        $a2,$a2,$t0
        vldr            $t0#lo,[sp,#4*(16+0)]   @ one

        vadd.i32        $b0,$b0,$t1
        vadd.i32        $b1,$b1,$t1
        vadd.i32        $b2,$b2,$t1
        vldr            $t1#lo,[sp,#4*(16+2)]   @ two

        vadd.i32        $c0,$c0,$t2
        vadd.i32        $c1,$c1,$t2
        vadd.i32        $c2,$c2,$t2
        vadd.i32        $d1#lo,$d1#lo,$t0#lo    @ counter+1
        vadd.i32        $d2#lo,$d2#lo,$t1#lo    @ counter+2

        vadd.i32        $d0,$d0,$t3
        vadd.i32        $d1,$d1,$t3
        vadd.i32        $d2,$d2,$t3

        cmp             @t[3],#64*4
        blo             .Ltail_neon

        vld1.8          {$t0-$t1},[r12]!        @ load input
         mov            @t[3],sp
        vld1.8          {$t2-$t3},[r12]!
        veor            $a0,$a0,$t0             @ xor with input
        veor            $b0,$b0,$t1
        vld1.8          {$t0-$t1},[r12]!
        veor            $c0,$c0,$t2
        veor            $d0,$d0,$t3
        vld1.8          {$t2-$t3},[r12]!

        veor            $a1,$a1,$t0
         vst1.8         {$a0-$b0},[r14]!        @ store output
        veor            $b1,$b1,$t1
        vld1.8          {$t0-$t1},[r12]!
        veor            $c1,$c1,$t2
         vst1.8         {$c0-$d0},[r14]!
        veor            $d1,$d1,$t3
        vld1.8          {$t2-$t3},[r12]!

        veor            $a2,$a2,$t0
         vld1.32        {$a0-$b0},[@t[3]]!      @ load for next iteration
         veor           $t0#hi,$t0#hi,$t0#hi
         vldr           $t0#lo,[sp,#4*(16+4)]   @ four
        veor            $b2,$b2,$t1
         vld1.32        {$c0-$d0},[@t[3]]
        veor            $c2,$c2,$t2
         vst1.8         {$a1-$b1},[r14]!
        veor            $d2,$d2,$t3
         vst1.8         {$c1-$d1},[r14]!

        vadd.i32        $d0#lo,$d0#lo,$t0#lo    @ next counter value
        vldr            $t0#lo,[sp,#4*(16+0)]   @ one

        ldmia           sp,{@t[0]-@t[3]}        @ load key material
        add             @x[0],@x[0],@t[0]       @ accumulate key material
        ldr             @t[0],[r12],#16         @ load input
         vst1.8         {$a2-$b2},[r14]!
        add             @x[1],@x[1],@t[1]
        ldr             @t[1],[r12,#-12]
         vst1.8         {$c2-$d2},[r14]!
        add             @x[2],@x[2],@t[2]
        ldr             @t[2],[r12,#-8]
        add             @x[3],@x[3],@t[3]
        ldr             @t[3],[r12,#-4]
# ifdef __ARMEB__
        rev             @x[0],@x[0]
        rev             @x[1],@x[1]
        rev             @x[2],@x[2]
        rev             @x[3],@x[3]
# endif
        eor             @x[0],@x[0],@t[0]       @ xor with input
         add            @t[0],sp,#4*(4)
        eor             @x[1],@x[1],@t[1]
        str             @x[0],[r14],#16         @ store output
        eor             @x[2],@x[2],@t[2]
        str             @x[1],[r14,#-12]
        eor             @x[3],@x[3],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
        str             @x[2],[r14,#-8]
        str             @x[3],[r14,#-4]

        add             @x[4],@x[4],@t[0]       @ accumulate key material
        ldr             @t[0],[r12],#16         @ load input
        add             @x[5],@x[5],@t[1]
        ldr             @t[1],[r12,#-12]
        add             @x[6],@x[6],@t[2]
        ldr             @t[2],[r12,#-8]
        add             @x[7],@x[7],@t[3]
        ldr             @t[3],[r12,#-4]
# ifdef __ARMEB__
        rev             @x[4],@x[4]
        rev             @x[5],@x[5]
        rev             @x[6],@x[6]
        rev             @x[7],@x[7]
# endif
        eor             @x[4],@x[4],@t[0]
         add            @t[0],sp,#4*(8)
        eor             @x[5],@x[5],@t[1]
        str             @x[4],[r14],#16         @ store output
        eor             @x[6],@x[6],@t[2]
        str             @x[5],[r14,#-12]
        eor             @x[7],@x[7],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
        str             @x[6],[r14,#-8]
         add            @x[0],sp,#4*(16+8)
        str             @x[7],[r14,#-4]

        ldmia           @x[0],{@x[0]-@x[7]}     @ load second half

        add             @x[0],@x[0],@t[0]       @ accumulate key material
        ldr             @t[0],[r12],#16         @ load input
        add             @x[1],@x[1],@t[1]
        ldr             @t[1],[r12,#-12]
# ifdef __thumb2__
        it      hi
# endif
         strhi          @t[2],[sp,#4*(16+10)]   @ copy "@x[10]" while at it
        add             @x[2],@x[2],@t[2]
        ldr             @t[2],[r12,#-8]
# ifdef __thumb2__
        it      hi
# endif
         strhi          @t[3],[sp,#4*(16+11)]   @ copy "@x[11]" while at it
        add             @x[3],@x[3],@t[3]
        ldr             @t[3],[r12,#-4]
# ifdef __ARMEB__
        rev             @x[0],@x[0]
        rev             @x[1],@x[1]
        rev             @x[2],@x[2]
        rev             @x[3],@x[3]
# endif
        eor             @x[0],@x[0],@t[0]
         add            @t[0],sp,#4*(12)
        eor             @x[1],@x[1],@t[1]
        str             @x[0],[r14],#16         @ store output
        eor             @x[2],@x[2],@t[2]
        str             @x[1],[r14,#-12]
        eor             @x[3],@x[3],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
        str             @x[2],[r14,#-8]
        str             @x[3],[r14,#-4]

        add             @x[4],@x[4],@t[0]       @ accumulate key material
         add            @t[0],@t[0],#4          @ next counter value
        add             @x[5],@x[5],@t[1]
         str            @t[0],[sp,#4*(12)]      @ save next counter value
        ldr             @t[0],[r12],#16         @ load input
        add             @x[6],@x[6],@t[2]
         add            @x[4],@x[4],#3          @ counter+3
        ldr             @t[1],[r12,#-12]
        add             @x[7],@x[7],@t[3]
        ldr             @t[2],[r12,#-8]
        ldr             @t[3],[r12,#-4]
# ifdef __ARMEB__
        rev             @x[4],@x[4]
        rev             @x[5],@x[5]
        rev             @x[6],@x[6]
        rev             @x[7],@x[7]
# endif
        eor             @x[4],@x[4],@t[0]
# ifdef __thumb2__
        it      hi
# endif
         ldrhi          @t[0],[sp,#4*(32+2)]    @ re-load len
        eor             @x[5],@x[5],@t[1]
        eor             @x[6],@x[6],@t[2]
        str             @x[4],[r14],#16         @ store output
        eor             @x[7],@x[7],@t[3]
        str             @x[5],[r14,#-12]
         sub            @t[3],@t[0],#64*4       @ len-=64*4
        str             @x[6],[r14,#-8]
        str             @x[7],[r14,#-4]
        bhi             .Loop_neon_outer

        b               .Ldone_neon

.align  4
.Lbreak_neon:
        @ harmonize NEON and integer-only stack frames: load data
        @ from NEON frame, but save to integer-only one; distance
        @ between the two is 4*(32+4+16-32)=4*(20).
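        @ (NEON frame: 32 words of scratch plus 4 saved argument
        @ words plus 16 words of d8-d15; integer frame: 32 words
        @ of scratch.)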

        str             @t[3], [sp,#4*(20+32+2)]        @ save len
         add            @t[3],sp,#4*(32+4)
        str             r12,   [sp,#4*(20+32+1)]        @ save inp
        str             r14,   [sp,#4*(20+32+0)]        @ save out

        ldr             @x[12],[sp,#4*(16+10)]
        ldr             @x[14],[sp,#4*(16+11)]
         vldmia         @t[3],{d8-d15}                  @ fulfill ABI requirement
        str             @x[12],[sp,#4*(20+16+10)]       @ copy "@x[10]"
        str             @x[14],[sp,#4*(20+16+11)]       @ copy "@x[11]"

        ldr             @t[3], [sp,#4*(15)]
        ldr             @x[12],[sp,#4*(12)]             @ modulo-scheduled load
        ldr             @t[2], [sp,#4*(13)]
        ldr             @x[14],[sp,#4*(14)]
        str             @t[3], [sp,#4*(20+16+15)]
        add             @t[3],sp,#4*(20)
        vst1.32         {$a0-$b0},[@t[3]]!              @ copy key
        add             sp,sp,#4*(20)                   @ switch frame
        vst1.32         {$c0-$d0},[@t[3]]
        mov             @t[3],#10
        b               .Loop                           @ go integer-only

.align  4
.Ltail_neon:
        cmp             @t[3],#64*3
        bhs             .L192_or_more_neon
        cmp             @t[3],#64*2
        bhs             .L128_or_more_neon
        cmp             @t[3],#64*1
        bhs             .L64_or_more_neon

        add             @t[0],sp,#4*(8)
        vst1.8          {$a0-$b0},[sp]
        add             @t[2],sp,#4*(0)
        vst1.8          {$c0-$d0},[@t[0]]
        b               .Loop_tail_neon

.align  4
.L64_or_more_neon:
        vld1.8          {$t0-$t1},[r12]!
        vld1.8          {$t2-$t3},[r12]!
        veor            $a0,$a0,$t0
        veor            $b0,$b0,$t1
        veor            $c0,$c0,$t2
        veor            $d0,$d0,$t3
        vst1.8          {$a0-$b0},[r14]!
        vst1.8          {$c0-$d0},[r14]!

        beq             .Ldone_neon

        add             @t[0],sp,#4*(8)
        vst1.8          {$a1-$b1},[sp]
        add             @t[2],sp,#4*(0)
        vst1.8          {$c1-$d1},[@t[0]]
        sub             @t[3],@t[3],#64*1       @ len-=64*1
        b               .Loop_tail_neon

.align  4
.L128_or_more_neon:
        vld1.8          {$t0-$t1},[r12]!
        vld1.8          {$t2-$t3},[r12]!
        veor            $a0,$a0,$t0
        veor            $b0,$b0,$t1
        vld1.8          {$t0-$t1},[r12]!
        veor            $c0,$c0,$t2
        veor            $d0,$d0,$t3
        vld1.8          {$t2-$t3},[r12]!

        veor            $a1,$a1,$t0
        veor            $b1,$b1,$t1
         vst1.8         {$a0-$b0},[r14]!
        veor            $c1,$c1,$t2
         vst1.8         {$c0-$d0},[r14]!
        veor            $d1,$d1,$t3
        vst1.8          {$a1-$b1},[r14]!
        vst1.8          {$c1-$d1},[r14]!

        beq             .Ldone_neon

        add             @t[0],sp,#4*(8)
        vst1.8          {$a2-$b2},[sp]
        add             @t[2],sp,#4*(0)
        vst1.8          {$c2-$d2},[@t[0]]
        sub             @t[3],@t[3],#64*2       @ len-=64*2
        b               .Loop_tail_neon

.align  4
.L192_or_more_neon:
        vld1.8          {$t0-$t1},[r12]!
        vld1.8          {$t2-$t3},[r12]!
        veor            $a0,$a0,$t0
        veor            $b0,$b0,$t1
        vld1.8          {$t0-$t1},[r12]!
        veor            $c0,$c0,$t2
        veor            $d0,$d0,$t3
        vld1.8          {$t2-$t3},[r12]!

        veor            $a1,$a1,$t0
        veor            $b1,$b1,$t1
        vld1.8          {$t0-$t1},[r12]!
        veor            $c1,$c1,$t2
         vst1.8         {$a0-$b0},[r14]!
        veor            $d1,$d1,$t3
        vld1.8          {$t2-$t3},[r12]!

        veor            $a2,$a2,$t0
         vst1.8         {$c0-$d0},[r14]!
        veor            $b2,$b2,$t1
         vst1.8         {$a1-$b1},[r14]!
        veor            $c2,$c2,$t2
         vst1.8         {$c1-$d1},[r14]!
        veor            $d2,$d2,$t3
        vst1.8          {$a2-$b2},[r14]!
        vst1.8          {$c2-$d2},[r14]!

        beq             .Ldone_neon

        ldmia           sp,{@t[0]-@t[3]}        @ load key material
        add             @x[0],@x[0],@t[0]       @ accumulate key material
         add            @t[0],sp,#4*(4)
        add             @x[1],@x[1],@t[1]
        add             @x[2],@x[2],@t[2]
        add             @x[3],@x[3],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material

        add             @x[4],@x[4],@t[0]       @ accumulate key material
         add            @t[0],sp,#4*(8)
        add             @x[5],@x[5],@t[1]
        add             @x[6],@x[6],@t[2]
        add             @x[7],@x[7],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
# ifdef __ARMEB__
        rev             @x[0],@x[0]
        rev             @x[1],@x[1]
        rev             @x[2],@x[2]
        rev             @x[3],@x[3]
        rev             @x[4],@x[4]
        rev             @x[5],@x[5]
        rev             @x[6],@x[6]
        rev             @x[7],@x[7]
# endif
        stmia           sp,{@x[0]-@x[7]}
         add            @x[0],sp,#4*(16+8)

        ldmia           @x[0],{@x[0]-@x[7]}     @ load second half

        add             @x[0],@x[0],@t[0]       @ accumulate key material
         add            @t[0],sp,#4*(12)
        add             @x[1],@x[1],@t[1]
        add             @x[2],@x[2],@t[2]
        add             @x[3],@x[3],@t[3]
         ldmia          @t[0],{@t[0]-@t[3]}     @ load key material

        add             @x[4],@x[4],@t[0]       @ accumulate key material
         add            @t[0],sp,#4*(8)
        add             @x[5],@x[5],@t[1]
         add            @x[4],@x[4],#3          @ counter+3
        add             @x[6],@x[6],@t[2]
        add             @x[7],@x[7],@t[3]
         ldr            @t[3],[sp,#4*(32+2)]    @ re-load len
# ifdef __ARMEB__
        rev             @x[0],@x[0]
        rev             @x[1],@x[1]
        rev             @x[2],@x[2]
        rev             @x[3],@x[3]
        rev             @x[4],@x[4]
        rev             @x[5],@x[5]
        rev             @x[6],@x[6]
        rev             @x[7],@x[7]
# endif
        stmia           @t[0],{@x[0]-@x[7]}
         add            @t[2],sp,#4*(0)
         sub            @t[3],@t[3],#64*3       @ len-=64*3

.Loop_tail_neon:
        ldrb            @t[0],[@t[2]],#1        @ read buffer on stack
        ldrb            @t[1],[r12],#1          @ read input
        subs            @t[3],@t[3],#1
        eor             @t[0],@t[0],@t[1]
        strb            @t[0],[r14],#1          @ store output
        bne             .Loop_tail_neon

.Ldone_neon:
        add             sp,sp,#4*(32+4)
        vldmia          sp,{d8-d15}
        add             sp,sp,#4*(16+3)
        ldmia           sp!,{r4-r11,pc}
.size   ChaCha20_neon,.-ChaCha20_neon
.comm   OPENSSL_armcap_P,4,4
#endif
___
}}}

foreach (split("\n",$code)) {
        s/\`([^\`]*)\`/eval $1/geo;

        s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

        print $_,"\n";
}
close STDOUT;