# ARM64 assembly pack: make it Windows-friendly.
# [openssl.git] / crypto / chacha / asm / chacha-armv8.pl
#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# June 2015
#
# ChaCha20 for ARMv8.
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.9	3xNEON+1xIALU	6xNEON+2xIALU
#
# Apple A7		5.50/+49%	3.33		1.70
# Cortex-A53		8.40/+80%	4.72		4.72(*)
# Cortex-A57		8.06/+43%	4.90		4.43(**)
# Denver		4.50/+82%	2.63		2.67(*)
# X-Gene		9.50/+46%	8.82		8.89(*)
# Mongoose		8.00/+44%	3.64		3.25
# Kryo			8.17/+50%	4.83		4.65
#
# (*)	it's expected that doubling interleave factor doesn't help
#	all processors, only those with higher NEON latency and
#	higher instruction issue rate;
# (**)	expected improvement was actually higher;
# Command-line protocol shared by the perlasm scripts: the "flavour"
# selects the target assembler dialect, the optional second argument
# names the output file (both are forwarded to the translator).
$flavour=shift;
$output=shift;

# Locate the arm-xlate.pl translator either next to this script or in
# the shared perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

# Pipe everything we print through the translator.  Fail loudly if the
# pipe cannot be spawned instead of silently emitting no output.
open OUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
*STDOUT=*OUT;
48
# Catch-all thunk, x86-perlasm style: any call to an undefined sub such
# as &add_32(...) or &ror_32(...) lands here and is appended to $code as
# one line of assembly text.  The first underscore of the sub name
# becomes a dot (add_32 -> add.32), and a purely numeric last operand is
# given an immediate "#" prefix.
sub AUTOLOAD()
{ my $mnemonic = $AUTOLOAD;
  $mnemonic =~ s/.*:://;			# strip package qualifier
  $mnemonic =~ s/_/\./;				# add_32 -> add.32 etc.
  my $last = pop;
  $last = "#$last" if ($last*1 eq $last);	# numeric => immediate
  $code .= "\t$mnemonic\t".join(',',@_,$last)."\n";
}
55
# Argument registers of ChaCha20_ctr32(out, inp, len, key, counter).
my ($out,$inp,$len,$key,$ctr) = map { "x$_" } (0..4);

# The sixteen 32-bit state words live in the low halves of x5-x17 and
# x19-x21 (x18 is deliberately skipped — presumably because it is a
# platform-reserved register on some ABIs; confirm against arm-xlate).
my @x = map { "x$_" } (5..17, 19..21);
# The input key block is kept packed, two 32-bit words per register.
my @d = map { "x$_" } (22..28, 30);
60
# Generate the text of four parallel scalar ChaCha20 quarter-rounds.
# Only the indices of the first column ($a0,$b0,$c0,$d0) are passed in;
# the remaining three columns are derived by stepping each index to the
# next element of the same row of the 4x4 state.  The returned strings
# are eval'ed by the caller and routed through the AUTOLOAD thunk
# above, so interleaving them with another instruction stream is just
# list shuffling.
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
# Step an index to its right-hand neighbour within the same 4-element
# row: keep the row bits, bump the column modulo 4.
my $next = sub { map { ($_&~3)+(($_+1)&3) } @_ };
my ($a1,$b1,$c1,$d1)=$next->($a0,$b0,$c0,$d0);
my ($a2,$b2,$c2,$d2)=$next->($a1,$b1,$c1,$d1);
my ($a3,$b3,$c3,$d3)=$next->($a2,$b2,$c2,$d2);

    (
	# a += b; d ^= a; d >>>= 16
	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	"&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	"&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	"&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	"&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	"&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	"&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],16)",
	"&ror_32	(@x[$d1],@x[$d1],16)",
	"&ror_32	(@x[$d2],@x[$d2],16)",
	"&ror_32	(@x[$d3],@x[$d3],16)",

	# c += d; b ^= c; b >>>= 20
	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	"&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	"&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	"&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	"&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	"&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	"&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],20)",
	"&ror_32	(@x[$b1],@x[$b1],20)",
	"&ror_32	(@x[$b2],@x[$b2],20)",
	"&ror_32	(@x[$b3],@x[$b3],20)",

	# a += b; d ^= a; d >>>= 24
	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	"&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	"&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	"&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	"&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	"&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	"&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],24)",
	"&ror_32	(@x[$d1],@x[$d1],24)",
	"&ror_32	(@x[$d2],@x[$d2],24)",
	"&ror_32	(@x[$d3],@x[$d3],24)",

	# c += d; b ^= c; b >>>= 25
	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	"&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	"&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	"&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	"&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	"&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	"&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],25)",
	"&ror_32	(@x[$b1],@x[$b1],25)",
	"&ror_32	(@x[$b2],@x[$b2],25)",
	"&ror_32	(@x[$b3],@x[$b3],25)"
    );
}
121
# Scalar (integer-only) code path.  ChaCha20_ctr32 is the public entry
# point: zero-length input returns immediately, buffers of 192 bytes or
# more are dispatched to the NEON path when OPENSSL_armcap_P advertises
# NEON, everything else is processed here 64 bytes per outer iteration.
$code.=<<___;
#include "arm_arch.h"

.text

.extern	OPENSSL_armcap_P

.align	5
.Lsigma:
.quad	0x3320646e61707865,0x6b20657479622d32		// endian-neutral
.Lone:
.long	1,0,0,0
.asciz	"ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"

.globl	ChaCha20_ctr32
.type	ChaCha20_ctr32,%function
.align	5
ChaCha20_ctr32:
	cbz	$len,.Labort
	cmp	$len,#192
	b.lo	.Lshort

	adrp	x17,OPENSSL_armcap_P
	ldr	w17,[x17,#:lo12:OPENSSL_armcap_P]
	tst	w17,#ARMV7_NEON
	b.ne	.LChaCha20_neon

.Lshort:
	.inst	0xd503233f			// paciasp
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adr	@x[0],.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ldp	@d[6],@d[7],[$ctr]		// load counter
#ifdef	__ARMEB__
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif

.Loop_outer:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#64
.Loop:
	sub	$ctr,$ctr,#1
___
	# 10 iterations of the double round: one column round followed by
	# one diagonal round, emitted via the AUTOLOAD thunk.
	foreach (&ROUND(0, 4, 8,12)) { eval; }
	foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	cbnz	$ctr,.Loop

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	b.lo	.Ltail

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#1			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64

	b.hi	.Loop_outer

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	.inst	0xd50323bf			// autiasp
.Labort:
	ret

.align	4
.Ltail:
	add	$len,$len,#64
.Less_than_64:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	stp	@x[0],@x[2],[sp,#0]
	stp	@x[4],@x[6],[sp,#16]
	stp	@x[8],@x[10],[sp,#32]
	stp	@x[12],@x[14],[sp,#48]

.Loop_tail:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	.inst	0xd50323bf			// autiasp
	ret
.size	ChaCha20_ctr32,.-ChaCha20_ctr32
___
331
{{{
# NEON register map for the 3xNEON+1xIALU loop: three independent 4x32
# state copies (A/B/C/D rows) plus four scratch vectors in v0-v7 and
# v16-v23.
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
    map("v$_.4s",(0..7,16..23));
# Invariant key-block rows and pre-incremented counter rows.
my (@K)=map("v$_.4s",(24..30));
my $ONE="v31.4s";
# Generate the text of one NEON quarter-round acting on whole 128-bit
# rows of a state copy.  The trailing flag selects the row realignment:
# 0 prepares the diagonal ("odd") round, 1 undoes it.  Strings are
# eval'ed by the caller through the AUTOLOAD thunk.
sub NEONROUND {
my $second_half = pop;
my ($va,$vb,$vc,$vd,$vt)=@_;

	(
	# a += b; d ^= a; d <<<= 16
	"&add		('$va','$va','$vb')",
	"&eor		('$vd','$vd','$va')",
	"&rev32_16	('$vd','$vd')",		# vrot ($vd,16)

	# c += d; b ^= c; b <<<= 12 via shift + shift-insert pair
	"&add		('$vc','$vc','$vd')",
	"&eor		('$vt','$vb','$vc')",
	"&ushr		('$vb','$vt',20)",
	"&sli		('$vb','$vt',12)",

	# a += b; d ^= a; d <<<= 8
	"&add		('$va','$va','$vb')",
	"&eor		('$vt','$vd','$va')",
	"&ushr		('$vd','$vt',24)",
	"&sli		('$vd','$vt',8)",

	# c += d; b ^= c; b <<<= 7
	"&add		('$vc','$vc','$vd')",
	"&eor		('$vt','$vb','$vc')",
	"&ushr		('$vb','$vt',25)",
	"&sli		('$vb','$vt',7)",

	# rotate rows so the next round works on columns again
	"&ext		('$vc','$vc','$vc',8)",
	"&ext		('$vd','$vd','$vd',$second_half?4:12)",
	"&ext		('$vb','$vb','$vb',$second_half?12:4)"
	);
}
367
# NEON code path: three 64-byte blocks in vector registers interleaved
# with a fourth block in scalar registers, i.e. 256 bytes per outer
# iteration.  Inputs of 512 bytes or more branch to the wider
# .L512_or_more_neon loop (generated further below).
$code.=<<___;

.type	ChaCha20_neon,%function
.align	5
ChaCha20_neon:
.LChaCha20_neon:
	.inst	0xd503233f			// paciasp
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adr	@x[0],.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	cmp	$len,#512
	b.hs	.L512_or_more_neon

	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE},[@x[0]]
#ifdef	__ARMEB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

.Loop_outer_neon:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	 mov	$A0,@K[0]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	 mov	$A1,@K[0]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	 mov	$A2,@K[0]
	mov.32	@x[6],@d[3]
	 mov	$B0,@K[1]
	lsr	@x[7],@d[3],#32
	 mov	$B1,@K[1]
	mov.32	@x[8],@d[4]
	 mov	$B2,@K[1]
	lsr	@x[9],@d[4],#32
	 mov	$D0,@K[3]
	mov.32	@x[10],@d[5]
	 mov	$D1,@K[4]
	lsr	@x[11],@d[5],#32
	 mov	$D2,@K[5]
	mov.32	@x[12],@d[6]
	 mov	$C0,@K[2]
	lsr	@x[13],@d[6],#32
	 mov	$C1,@K[2]
	mov.32	@x[14],@d[7]
	 mov	$C2,@K[2]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#256
.Loop_neon:
	sub	$ctr,$ctr,#1
___
	# Interleave three NEON quarter-round streams with the scalar
	# round, column ("even") half of the double round.
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&ROUND(0,4,8,12);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}

	# Same interleave for the diagonal ("odd") half.
	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&ROUND(0,5,10,15);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	 add	$A0,$A0,@K[0]
	add	@x[1],@x[1],@d[0],lsr#32
	 add	$A1,$A1,@K[0]
	add.32	@x[2],@x[2],@d[1]
	 add	$A2,$A2,@K[0]
	add	@x[3],@x[3],@d[1],lsr#32
	 add	$C0,$C0,@K[2]
	add.32	@x[4],@x[4],@d[2]
	 add	$C1,$C1,@K[2]
	add	@x[5],@x[5],@d[2],lsr#32
	 add	$C2,$C2,@K[2]
	add.32	@x[6],@x[6],@d[3]
	 add	$D0,$D0,@K[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	 add	$D1,$D1,@K[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	 add	$D2,$D2,@K[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	 add	$B0,$B0,@K[1]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	 add	$B1,$B1,@K[1]
	add	@x[15],@x[15],@d[7],lsr#32
	 add	$B2,$B2,@K[1]

	b.lo	.Ltail_neon

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	 eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	 eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	 eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	 eor	$D0,$D0,$T3
	 ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	 add	@K[3],@K[3],$ONE		// += 4
	stp	@x[8],@x[10],[$out,#32]
	 add	@K[4],@K[4],$ONE
	stp	@x[12],@x[14],[$out,#48]
	 add	@K[5],@K[5],$ONE
	add	$out,$out,#64

	st1.8	{$A0-$D0},[$out],#64
	ld1.8	{$A0-$D0},[$inp],#64

	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	eor	$A2,$A2,$A0
	eor	$B2,$B2,$B0
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	b.hi	.Loop_outer_neon

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	.inst	0xd50323bf			// autiasp
	ret

.Ltail_neon:
	add	$len,$len,#256
	cmp	$len,#64
	b.lo	.Less_than_64

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_128

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A0,$A0,$T0
	eor	$B0,$B0,$T1
	eor	$C0,$C0,$T2
	eor	$D0,$D0,$T3
	st1.8	{$A0-$D0},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_192

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64

	st1.8	{$A2-$D2},[sp]
	b	.Last_neon

.Less_than_128:
	st1.8	{$A0-$D0},[sp]
	b	.Last_neon
.Less_than_192:
	st1.8	{$A1-$D1},[sp]
	b	.Last_neon

.align	4
.Last_neon:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

.Loop_tail_neon:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail_neon

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

.Ldone_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	.inst	0xd50323bf			// autiasp
	ret
.size	ChaCha20_neon,.-ChaCha20_neon
___
687 {
688 my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
689 my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
690     $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));
691
692 $code.=<<___;
693 .type   ChaCha20_512_neon,%function
694 .align  5
695 ChaCha20_512_neon:
696         .inst   0xd503233f                      // paciasp
697         stp     x29,x30,[sp,#-96]!
698         add     x29,sp,#0
699
700         adr     @x[0],.Lsigma
701         stp     x19,x20,[sp,#16]
702         stp     x21,x22,[sp,#32]
703         stp     x23,x24,[sp,#48]
704         stp     x25,x26,[sp,#64]
705         stp     x27,x28,[sp,#80]
706
707 .L512_or_more_neon:
708         sub     sp,sp,#128+64
709
710         ldp     @d[0],@d[1],[@x[0]]             // load sigma
711         ld1     {@K[0]},[@x[0]],#16
712         ldp     @d[2],@d[3],[$key]              // load key
713         ldp     @d[4],@d[5],[$key,#16]
714         ld1     {@K[1],@K[2]},[$key]
715         ldp     @d[6],@d[7],[$ctr]              // load counter
716         ld1     {@K[3]},[$ctr]
717         ld1     {$ONE},[@x[0]]
718 #ifdef  __ARMEB__
719         rev64   @K[0],@K[0]
720         ror     @d[2],@d[2],#32
721         ror     @d[3],@d[3],#32
722         ror     @d[4],@d[4],#32
723         ror     @d[5],@d[5],#32
724         ror     @d[6],@d[6],#32
725         ror     @d[7],@d[7],#32
726 #endif
727         add     @K[3],@K[3],$ONE                // += 1
728         stp     @K[0],@K[1],[sp,#0]             // off-load key block, invariant part
729         add     @K[3],@K[3],$ONE                // not typo
730         str     @K[2],[sp,#32]
731         add     @K[4],@K[3],$ONE
732         add     @K[5],@K[4],$ONE
733         add     @K[6],@K[5],$ONE
734         shl     $ONE,$ONE,#2                    // 1 -> 4
735
736         stp     d8,d9,[sp,#128+0]               // meet ABI requirements
737         stp     d10,d11,[sp,#128+16]
738         stp     d12,d13,[sp,#128+32]
739         stp     d14,d15,[sp,#128+48]
740
741         sub     $len,$len,#512                  // not typo
742
743 .Loop_outer_512_neon:
744          mov    $A0,@K[0]
745          mov    $A1,@K[0]
746          mov    $A2,@K[0]
747          mov    $A3,@K[0]
748          mov    $A4,@K[0]
749          mov    $A5,@K[0]
750          mov    $B0,@K[1]
751         mov.32  @x[0],@d[0]                     // unpack key block
752          mov    $B1,@K[1]
753         lsr     @x[1],@d[0],#32
754          mov    $B2,@K[1]
755         mov.32  @x[2],@d[1]
756          mov    $B3,@K[1]
757         lsr     @x[3],@d[1],#32
758          mov    $B4,@K[1]
759         mov.32  @x[4],@d[2]
760          mov    $B5,@K[1]
761         lsr     @x[5],@d[2],#32
762          mov    $D0,@K[3]
763         mov.32  @x[6],@d[3]
764          mov    $D1,@K[4]
765         lsr     @x[7],@d[3],#32
766          mov    $D2,@K[5]
767         mov.32  @x[8],@d[4]
768          mov    $D3,@K[6]
769         lsr     @x[9],@d[4],#32
770          mov    $C0,@K[2]
771         mov.32  @x[10],@d[5]
772          mov    $C1,@K[2]
773         lsr     @x[11],@d[5],#32
774          add    $D4,$D0,$ONE                    // +4
775         mov.32  @x[12],@d[6]
776          add    $D5,$D1,$ONE                    // +4
777         lsr     @x[13],@d[6],#32
778          mov    $C2,@K[2]
779         mov.32  @x[14],@d[7]
780          mov    $C3,@K[2]
781         lsr     @x[15],@d[7],#32
782          mov    $C4,@K[2]
783          stp    @K[3],@K[4],[sp,#48]            // off-load key block, variable part
784          mov    $C5,@K[2]
785          str    @K[5],[sp,#80]
786
787         mov     $ctr,#5
788         subs    $len,$len,#512
789 .Loop_upper_neon:
790         sub     $ctr,$ctr,#1
791 ___
792         my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
793         my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
794         my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
795         my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
796         my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
797         my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
798         my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
799         my $diff = ($#thread0+1)*6 - $#thread67 - 1;
800         my $i = 0;
801
802         foreach (@thread0) {
803                 eval;                   eval(shift(@thread67));
804                 eval(shift(@thread1));  eval(shift(@thread67));
805                 eval(shift(@thread2));  eval(shift(@thread67));
806                 eval(shift(@thread3));  eval(shift(@thread67));
807                 eval(shift(@thread4));  eval(shift(@thread67));
808                 eval(shift(@thread5));  eval(shift(@thread67));
809         }
810
811         @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
812         @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
813         @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
814         @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
815         @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
816         @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
817         @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
818
819         foreach (@thread0) {
820                 eval;                   eval(shift(@thread67));
821                 eval(shift(@thread1));  eval(shift(@thread67));
822                 eval(shift(@thread2));  eval(shift(@thread67));
823                 eval(shift(@thread3));  eval(shift(@thread67));
824                 eval(shift(@thread4));  eval(shift(@thread67));
825                 eval(shift(@thread5));  eval(shift(@thread67));
826         }
# NOTE(review): emits the tail of the 512-byte path's first ("upper")
# half.  When $ctr hits 0: the scalar state words @x[] get the original
# key block @d[] added back (lo/hi 32-bit halves separately), are packed
# into 64-bit pairs, byte-reversed on big-endian, XORed with 64 bytes of
# input and stored; the counter half of @d[6] is bumped by 1, the key
# block is unpacked into @x[] again, and $ctr is reloaded with 5 for the
# second half (".Loop_lower_neon").  Leading-space instructions are the
# interleaved "other" stream, per this file's convention.
827 $code.=<<___;
828         cbnz    $ctr,.Loop_upper_neon
829
830         add.32  @x[0],@x[0],@d[0]               // accumulate key block
831         add     @x[1],@x[1],@d[0],lsr#32
832         add.32  @x[2],@x[2],@d[1]
833         add     @x[3],@x[3],@d[1],lsr#32
834         add.32  @x[4],@x[4],@d[2]
835         add     @x[5],@x[5],@d[2],lsr#32
836         add.32  @x[6],@x[6],@d[3]
837         add     @x[7],@x[7],@d[3],lsr#32
838         add.32  @x[8],@x[8],@d[4]
839         add     @x[9],@x[9],@d[4],lsr#32
840         add.32  @x[10],@x[10],@d[5]
841         add     @x[11],@x[11],@d[5],lsr#32
842         add.32  @x[12],@x[12],@d[6]
843         add     @x[13],@x[13],@d[6],lsr#32
844         add.32  @x[14],@x[14],@d[7]
845         add     @x[15],@x[15],@d[7],lsr#32
846
847         add     @x[0],@x[0],@x[1],lsl#32        // pack
848         add     @x[2],@x[2],@x[3],lsl#32
849         ldp     @x[1],@x[3],[$inp,#0]           // load input
850         add     @x[4],@x[4],@x[5],lsl#32
851         add     @x[6],@x[6],@x[7],lsl#32
852         ldp     @x[5],@x[7],[$inp,#16]
853         add     @x[8],@x[8],@x[9],lsl#32
854         add     @x[10],@x[10],@x[11],lsl#32
855         ldp     @x[9],@x[11],[$inp,#32]
856         add     @x[12],@x[12],@x[13],lsl#32
857         add     @x[14],@x[14],@x[15],lsl#32
858         ldp     @x[13],@x[15],[$inp,#48]
859         add     $inp,$inp,#64
860 #ifdef  __ARMEB__
861         rev     @x[0],@x[0]
862         rev     @x[2],@x[2]
863         rev     @x[4],@x[4]
864         rev     @x[6],@x[6]
865         rev     @x[8],@x[8]
866         rev     @x[10],@x[10]
867         rev     @x[12],@x[12]
868         rev     @x[14],@x[14]
869 #endif
870         eor     @x[0],@x[0],@x[1]
871         eor     @x[2],@x[2],@x[3]
872         eor     @x[4],@x[4],@x[5]
873         eor     @x[6],@x[6],@x[7]
874         eor     @x[8],@x[8],@x[9]
875         eor     @x[10],@x[10],@x[11]
876         eor     @x[12],@x[12],@x[13]
877         eor     @x[14],@x[14],@x[15]
878
879          stp    @x[0],@x[2],[$out,#0]           // store output
880          add    @d[6],@d[6],#1                  // increment counter
881         mov.32  @x[0],@d[0]                     // unpack key block
882         lsr     @x[1],@d[0],#32
883          stp    @x[4],@x[6],[$out,#16]
884         mov.32  @x[2],@d[1]
885         lsr     @x[3],@d[1],#32
886          stp    @x[8],@x[10],[$out,#32]
887         mov.32  @x[4],@d[2]
888         lsr     @x[5],@d[2],#32
889          stp    @x[12],@x[14],[$out,#48]
890          add    $out,$out,#64
891         mov.32  @x[6],@d[3]
892         lsr     @x[7],@d[3],#32
893         mov.32  @x[8],@d[4]
894         lsr     @x[9],@d[4],#32
895         mov.32  @x[10],@d[5]
896         lsr     @x[11],@d[5],#32
897         mov.32  @x[12],@d[6]
898         lsr     @x[13],@d[6],#32
899         mov.32  @x[14],@d[7]
900         lsr     @x[15],@d[7],#32
901
902         mov     $ctr,#5
903 .Loop_lower_neon:
904         sub     $ctr,$ctr,#1
905 ___
906         @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
907         @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
908         @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
909         @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
910         @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
911         @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
912         @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
913
914         foreach (@thread0) {
915                 eval;                   eval(shift(@thread67));
916                 eval(shift(@thread1));  eval(shift(@thread67));
917                 eval(shift(@thread2));  eval(shift(@thread67));
918                 eval(shift(@thread3));  eval(shift(@thread67));
919                 eval(shift(@thread4));  eval(shift(@thread67));
920                 eval(shift(@thread5));  eval(shift(@thread67));
921         }
922
923         @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
924         @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
925         @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
926         @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
927         @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
928         @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
929         @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
930
931         foreach (@thread0) {
932                 eval;                   eval(shift(@thread67));
933                 eval(shift(@thread1));  eval(shift(@thread67));
934                 eval(shift(@thread2));  eval(shift(@thread67));
935                 eval(shift(@thread3));  eval(shift(@thread67));
936                 eval(shift(@thread4));  eval(shift(@thread67));
937                 eval(shift(@thread5));  eval(shift(@thread67));
938         }
# NOTE(review): finale of the 512-byte outer loop.  The scalar block is
# finished as in the upper half (re-add @d[], pack, XOR with the first
# 64 input bytes, store), while the six NEON blocks get the off-loaded
# key rows reloaded from the stack and added in ($D4/$D5 first receive
# +4 via $ONE, per their comments, before @K[3]/@K[4]).  The six NEON
# blocks are then XORed with the next 6x64 input bytes and stored,
# loads and stores pipelined so each ld1 prefetches the next block's
# input.  Counter @d[6] advances by 7 (seven 64-byte blocks total) and
# @K[3..6] by 8 ($ONE shifted left: "4 -> 8").  b.hs re-enters
# .Loop_outer_512_neon (flags come from the length update earlier in
# the function, outside this excerpt).  Otherwise: restore d8-d15 (ABI
# callee-saved area at sp+128), wipe the on-stack key off-load area,
# and either branch to the smaller-block loops for a <512-byte tail or
# restore x19-x30 and return; ".inst 0xd50323bf" is the pointer-auth
# epilogue ("autiasp" per its comment, encoded as a raw word —
# presumably for assemblers without PAC support; verify).
939 $code.=<<___;
940         cbnz    $ctr,.Loop_lower_neon
941
942         add.32  @x[0],@x[0],@d[0]               // accumulate key block
943          ldp    @K[0],@K[1],[sp,#0]
944         add     @x[1],@x[1],@d[0],lsr#32
945          ldp    @K[2],@K[3],[sp,#32]
946         add.32  @x[2],@x[2],@d[1]
947          ldp    @K[4],@K[5],[sp,#64]
948         add     @x[3],@x[3],@d[1],lsr#32
949          add    $A0,$A0,@K[0]
950         add.32  @x[4],@x[4],@d[2]
951          add    $A1,$A1,@K[0]
952         add     @x[5],@x[5],@d[2],lsr#32
953          add    $A2,$A2,@K[0]
954         add.32  @x[6],@x[6],@d[3]
955          add    $A3,$A3,@K[0]
956         add     @x[7],@x[7],@d[3],lsr#32
957          add    $A4,$A4,@K[0]
958         add.32  @x[8],@x[8],@d[4]
959          add    $A5,$A5,@K[0]
960         add     @x[9],@x[9],@d[4],lsr#32
961          add    $C0,$C0,@K[2]
962         add.32  @x[10],@x[10],@d[5]
963          add    $C1,$C1,@K[2]
964         add     @x[11],@x[11],@d[5],lsr#32
965          add    $C2,$C2,@K[2]
966         add.32  @x[12],@x[12],@d[6]
967          add    $C3,$C3,@K[2]
968         add     @x[13],@x[13],@d[6],lsr#32
969          add    $C4,$C4,@K[2]
970         add.32  @x[14],@x[14],@d[7]
971          add    $C5,$C5,@K[2]
972         add     @x[15],@x[15],@d[7],lsr#32
973          add    $D4,$D4,$ONE                    // +4
974         add     @x[0],@x[0],@x[1],lsl#32        // pack
975          add    $D5,$D5,$ONE                    // +4
976         add     @x[2],@x[2],@x[3],lsl#32
977          add    $D0,$D0,@K[3]
978         ldp     @x[1],@x[3],[$inp,#0]           // load input
979          add    $D1,$D1,@K[4]
980         add     @x[4],@x[4],@x[5],lsl#32
981          add    $D2,$D2,@K[5]
982         add     @x[6],@x[6],@x[7],lsl#32
983          add    $D3,$D3,@K[6]
984         ldp     @x[5],@x[7],[$inp,#16]
985          add    $D4,$D4,@K[3]
986         add     @x[8],@x[8],@x[9],lsl#32
987          add    $D5,$D5,@K[4]
988         add     @x[10],@x[10],@x[11],lsl#32
989          add    $B0,$B0,@K[1]
990         ldp     @x[9],@x[11],[$inp,#32]
991          add    $B1,$B1,@K[1]
992         add     @x[12],@x[12],@x[13],lsl#32
993          add    $B2,$B2,@K[1]
994         add     @x[14],@x[14],@x[15],lsl#32
995          add    $B3,$B3,@K[1]
996         ldp     @x[13],@x[15],[$inp,#48]
997          add    $B4,$B4,@K[1]
998         add     $inp,$inp,#64
999          add    $B5,$B5,@K[1]
1000
1001 #ifdef  __ARMEB__
1002         rev     @x[0],@x[0]
1003         rev     @x[2],@x[2]
1004         rev     @x[4],@x[4]
1005         rev     @x[6],@x[6]
1006         rev     @x[8],@x[8]
1007         rev     @x[10],@x[10]
1008         rev     @x[12],@x[12]
1009         rev     @x[14],@x[14]
1010 #endif
1011         ld1.8   {$T0-$T3},[$inp],#64
1012         eor     @x[0],@x[0],@x[1]
1013         eor     @x[2],@x[2],@x[3]
1014         eor     @x[4],@x[4],@x[5]
1015         eor     @x[6],@x[6],@x[7]
1016         eor     @x[8],@x[8],@x[9]
1017          eor    $A0,$A0,$T0
1018         eor     @x[10],@x[10],@x[11]
1019          eor    $B0,$B0,$T1
1020         eor     @x[12],@x[12],@x[13]
1021          eor    $C0,$C0,$T2
1022         eor     @x[14],@x[14],@x[15]
1023          eor    $D0,$D0,$T3
1024          ld1.8  {$T0-$T3},[$inp],#64
1025
1026         stp     @x[0],@x[2],[$out,#0]           // store output
1027          add    @d[6],@d[6],#7                  // increment counter
1028         stp     @x[4],@x[6],[$out,#16]
1029         stp     @x[8],@x[10],[$out,#32]
1030         stp     @x[12],@x[14],[$out,#48]
1031         add     $out,$out,#64
1032         st1.8   {$A0-$D0},[$out],#64
1033
1034         ld1.8   {$A0-$D0},[$inp],#64
1035         eor     $A1,$A1,$T0
1036         eor     $B1,$B1,$T1
1037         eor     $C1,$C1,$T2
1038         eor     $D1,$D1,$T3
1039         st1.8   {$A1-$D1},[$out],#64
1040
1041         ld1.8   {$A1-$D1},[$inp],#64
1042         eor     $A2,$A2,$A0
1043          ldp    @K[0],@K[1],[sp,#0]
1044         eor     $B2,$B2,$B0
1045          ldp    @K[2],@K[3],[sp,#32]
1046         eor     $C2,$C2,$C0
1047         eor     $D2,$D2,$D0
1048         st1.8   {$A2-$D2},[$out],#64
1049
1050         ld1.8   {$A2-$D2},[$inp],#64
1051         eor     $A3,$A3,$A1
1052         eor     $B3,$B3,$B1
1053         eor     $C3,$C3,$C1
1054         eor     $D3,$D3,$D1
1055         st1.8   {$A3-$D3},[$out],#64
1056
1057         ld1.8   {$A3-$D3},[$inp],#64
1058         eor     $A4,$A4,$A2
1059         eor     $B4,$B4,$B2
1060         eor     $C4,$C4,$C2
1061         eor     $D4,$D4,$D2
1062         st1.8   {$A4-$D4},[$out],#64
1063
1064         shl     $A0,$ONE,#1                     // 4 -> 8
1065         eor     $A5,$A5,$A3
1066         eor     $B5,$B5,$B3
1067         eor     $C5,$C5,$C3
1068         eor     $D5,$D5,$D3
1069         st1.8   {$A5-$D5},[$out],#64
1070
1071         add     @K[3],@K[3],$A0                 // += 8
1072         add     @K[4],@K[4],$A0
1073         add     @K[5],@K[5],$A0
1074         add     @K[6],@K[6],$A0
1075
1076         b.hs    .Loop_outer_512_neon
1077
1078         adds    $len,$len,#512
1079         ushr    $A0,$ONE,#2                     // 4 -> 1
1080
1081         ldp     d8,d9,[sp,#128+0]               // meet ABI requirements
1082         ldp     d10,d11,[sp,#128+16]
1083         ldp     d12,d13,[sp,#128+32]
1084         ldp     d14,d15,[sp,#128+48]
1085
1086         stp     @K[0],$ONE,[sp,#0]              // wipe off-load area
1087         stp     @K[0],$ONE,[sp,#32]
1088         stp     @K[0],$ONE,[sp,#64]
1089
1090         b.eq    .Ldone_512_neon
1091
1092         cmp     $len,#192
1093         sub     @K[3],@K[3],$A0                 // -= 1
1094         sub     @K[4],@K[4],$A0
1095         sub     @K[5],@K[5],$A0
1096         add     sp,sp,#128
1097         b.hs    .Loop_outer_neon
1098
1099         eor     @K[1],@K[1],@K[1]
1100         eor     @K[2],@K[2],@K[2]
1101         eor     @K[3],@K[3],@K[3]
1102         eor     @K[4],@K[4],@K[4]
1103         eor     @K[5],@K[5],@K[5]
1104         eor     @K[6],@K[6],@K[6]
1105         b       .Loop_outer
1106
1107 .Ldone_512_neon:
1108         ldp     x19,x20,[x29,#16]
1109         add     sp,sp,#128+64
1110         ldp     x21,x22,[x29,#32]
1111         ldp     x23,x24,[x29,#48]
1112         ldp     x25,x26,[x29,#64]
1113         ldp     x27,x28,[x29,#80]
1114         ldp     x29,x30,[sp],#96
1115         .inst   0xd50323bf                      // autiasp
1116         ret
1117 .size   ChaCha20_512_neon,.-ChaCha20_512_neon
1118 ___
1119 }
1120 }}}
1121
# Post-process the accumulated assembly and emit it on STDOUT:
# resolve `...` back-tick expressions, then translate this file's
# pseudo-mnemonics into real AArch64 syntax, one line at a time.
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;	# evaluate `...` as Perl

	# Exactly one of the following rewrites applies per line:
	# "op.32" scalar ops drop the suffix and switch xN -> wN;
	# eor/ext/mov are bitwise, so .4s lanes become .16b;
	# ld1.8/st1.8 drop the suffix and also use .16b;
	# ldr/str/ldp/stp address whole q registers, not vN.4s;
	# "rev32.16" becomes rev32 on .8h halfword lanes.
	(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1))	or
	(m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1))	or
	(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1))	or
	(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1))	or
	(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));

	#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
# Flush and check: buffered-write errors (full disk, broken pipe) only
# surface at close time; dying here prevents a silently truncated
# assembly file from being accepted by the build.
close STDOUT or die "error closing STDOUT: $!";