7868389f7166f23bee710dc57404a6cc8999c6ca
[openssl.git] / crypto / chacha / asm / chacha-armv8.pl
1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the Apache License 2.0 (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # June 2015
18 #
19 # ChaCha20 for ARMv8.
20 #
21 # April 2019
22 #
23 # Replace 3xNEON+1xIALU code path with 4+1. 4+1 is actually fastest
24 # option on most(*), but not all, processors, yet 6+2 is retained.
25 # This is because penalties are considered tolerable in comparison to
26 # improvement on processors where 6+2 helps. Most notably +37% on
27 # ThunderX2. It's server-oriented processor which will have to serve
28 # as many requests as possible. While others are mostly clients, when
29 # performance doesn't have to be absolute top-notch, just fast enough,
30 # as majority of time is spent "entertaining" relatively slow human.
31 #
32 # Performance in cycles per byte out of large buffer.
33 #
34 #                       IALU/gcc-4.9    4xNEON+1xIALU   6xNEON+2xIALU
35 #
36 # Apple A7              5.50/+49%       2.72            1.60
37 # Cortex-A53            8.40/+80%       4.06            4.45(*)
38 # Cortex-A57            8.06/+43%       4.15            4.40(*)
39 # Denver                4.50/+82%       2.30            2.70(*)
40 # X-Gene                9.50/+46%       8.20            8.90(*)
41 # Mongoose              8.00/+44%       2.74            3.12(*)
42 # Kryo                  8.17/+50%       4.47            4.65(*)
43 # ThunderX2             7.22/+48%       5.64            4.10
44 #
45 # (*)   slower than 4+1:-(
46
47 # $output is the last argument if it looks like a file (it has an extension)
48 # $flavour is the first argument if it doesn't look like a file
49 $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
50 $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
51
# Locate the arm-xlate.pl perlasm translator relative to this script's own
# directory: first alongside the script, then two levels up in perlasm/.
52 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
53 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
54 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
55 die "can't locate arm-xlate.pl";
56
# Pipe our STDOUT through the translator ($^X is the running perl binary)
# so everything this script prints is post-processed into the requested
# assembler flavour before reaching $output.
57 open OUT,"| \"$^X\" $xlate $flavour \"$output\""
58     or die "can't call $xlate: $!";
59 *STDOUT=*OUT;
60
# Catch-all for otherwise-undefined sub calls such as &add_32(...) emitted by
# ROUND below: the called name (minus the package prefix) becomes the
# mnemonic, with '_' mapped to '.' (add_32 -> add.32).  If the last argument
# is numeric it is an immediate operand and gets a '#' prefix.  The finished
# instruction line is appended to the global $code accumulator.
61 sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
62 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
63   my $arg = pop;
# "$arg*1 eq $arg" is true only when $arg is a plain number, i.e. an immediate.
64     $arg = "#$arg" if ($arg*1 eq $arg);
65     $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
66 }
67
# Function arguments arrive in x0-x4 per the AArch64 procedure call standard.
68 my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));
69
# @x: the sixteen 32-bit ChaCha state words, one register each; x18 is
# skipped (platform register).  @d: eight registers holding the original
# key block packed as 64-bit pairs; x29 (frame pointer) is skipped.
70 my @x=map("x$_",(5..17,19..21));
71 my @d=map("x$_",(22..28,30));
72
# Emit four interleaved scalar quarter-rounds over the 4x4 ChaCha state.
# Given the first column's indices, the other three columns are derived by
# rotating the position within each row (($_&~3)+(($_+1)&3)), so the same
# code serves the "column" round ROUND(0,4,8,12) and the "diagonal" round
# ROUND(0,5,10,15).  Returns a list of strings which the caller evals one by
# one; each eval lands in AUTOLOAD and appends one instruction to $code.
# Note the rotations use ror: ror #16/#20/#24/#25 realize the ChaCha
# rotate-lefts by 16/12/8/7 on 32-bit values.
73 sub ROUND {
74 my ($a0,$b0,$c0,$d0)=@_;
75 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
76 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
77 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
78
79     (
	# a += b; d ^= a; d >>>= 16  (== rotate-left 16)
80         "&add_32        (@x[$a0],@x[$a0],@x[$b0])",
81          "&add_32       (@x[$a1],@x[$a1],@x[$b1])",
82           "&add_32      (@x[$a2],@x[$a2],@x[$b2])",
83            "&add_32     (@x[$a3],@x[$a3],@x[$b3])",
84         "&eor_32        (@x[$d0],@x[$d0],@x[$a0])",
85          "&eor_32       (@x[$d1],@x[$d1],@x[$a1])",
86           "&eor_32      (@x[$d2],@x[$d2],@x[$a2])",
87            "&eor_32     (@x[$d3],@x[$d3],@x[$a3])",
88         "&ror_32        (@x[$d0],@x[$d0],16)",
89          "&ror_32       (@x[$d1],@x[$d1],16)",
90           "&ror_32      (@x[$d2],@x[$d2],16)",
91            "&ror_32     (@x[$d3],@x[$d3],16)",
92
	# c += d; b ^= c; b >>>= 20  (== rotate-left 12)
93         "&add_32        (@x[$c0],@x[$c0],@x[$d0])",
94          "&add_32       (@x[$c1],@x[$c1],@x[$d1])",
95           "&add_32      (@x[$c2],@x[$c2],@x[$d2])",
96            "&add_32     (@x[$c3],@x[$c3],@x[$d3])",
97         "&eor_32        (@x[$b0],@x[$b0],@x[$c0])",
98          "&eor_32       (@x[$b1],@x[$b1],@x[$c1])",
99           "&eor_32      (@x[$b2],@x[$b2],@x[$c2])",
100            "&eor_32     (@x[$b3],@x[$b3],@x[$c3])",
101         "&ror_32        (@x[$b0],@x[$b0],20)",
102          "&ror_32       (@x[$b1],@x[$b1],20)",
103           "&ror_32      (@x[$b2],@x[$b2],20)",
104            "&ror_32     (@x[$b3],@x[$b3],20)",
105
	# a += b; d ^= a; d >>>= 24  (== rotate-left 8)
106         "&add_32        (@x[$a0],@x[$a0],@x[$b0])",
107          "&add_32       (@x[$a1],@x[$a1],@x[$b1])",
108           "&add_32      (@x[$a2],@x[$a2],@x[$b2])",
109            "&add_32     (@x[$a3],@x[$a3],@x[$b3])",
110         "&eor_32        (@x[$d0],@x[$d0],@x[$a0])",
111          "&eor_32       (@x[$d1],@x[$d1],@x[$a1])",
112           "&eor_32      (@x[$d2],@x[$d2],@x[$a2])",
113            "&eor_32     (@x[$d3],@x[$d3],@x[$a3])",
114         "&ror_32        (@x[$d0],@x[$d0],24)",
115          "&ror_32       (@x[$d1],@x[$d1],24)",
116           "&ror_32      (@x[$d2],@x[$d2],24)",
117            "&ror_32     (@x[$d3],@x[$d3],24)",
118
	# c += d; b ^= c; b >>>= 25  (== rotate-left 7)
119         "&add_32        (@x[$c0],@x[$c0],@x[$d0])",
120          "&add_32       (@x[$c1],@x[$c1],@x[$d1])",
121           "&add_32      (@x[$c2],@x[$c2],@x[$d2])",
122            "&add_32     (@x[$c3],@x[$c3],@x[$d3])",
123         "&eor_32        (@x[$b0],@x[$b0],@x[$c0])",
124          "&eor_32       (@x[$b1],@x[$b1],@x[$c1])",
125           "&eor_32      (@x[$b2],@x[$b2],@x[$c2])",
126            "&eor_32     (@x[$b3],@x[$b3],@x[$c3])",
127         "&ror_32        (@x[$b0],@x[$b0],25)",
128          "&ror_32       (@x[$b1],@x[$b1],25)",
129           "&ror_32      (@x[$b2],@x[$b2],25)",
130            "&ror_32     (@x[$b3],@x[$b3],25)"
131     );
132 }
133
# Constants and the scalar ChaCha20_ctr32 entry point.  .Lsigma is the
# "expand 32-byte k" constant stored endian-neutrally; .Lone doubles as the
# NEON per-lane counter offsets {1,2,3,4}; .Lrot24 is a TBL byte permutation
# realizing a rotate-left-by-24 of each 32-bit lane.  Buffers shorter than
# 192 bytes always take the scalar .Lshort path; larger ones divert to the
# NEON code when OPENSSL_armcap_P reports NEON.  (No comments are added
# inside the heredoc: its text is the emitted assembly.)
134 $code.=<<___;
135 #ifndef __KERNEL__
136 # include "arm_arch.h"
137 .extern OPENSSL_armcap_P
138 #endif
139
140 .text
141
142 .align  5
143 .Lsigma:
144 .quad   0x3320646e61707865,0x6b20657479622d32           // endian-neutral
145 .Lone:
146 .long   1,2,3,4
147 .Lrot24:
148 .long   0x02010003,0x06050407,0x0a09080b,0x0e0d0c0f
149 .asciz  "ChaCha20 for ARMv8, CRYPTOGAMS by \@dot-asm"
150
151 .globl  ChaCha20_ctr32
152 .type   ChaCha20_ctr32,%function
153 .align  5
154 ChaCha20_ctr32:
155         cbz     $len,.Labort
156         cmp     $len,#192
157         b.lo    .Lshort
158
159 #ifndef __KERNEL__
160         adrp    x17,OPENSSL_armcap_P
161         ldr     w17,[x17,#:lo12:OPENSSL_armcap_P]
162         tst     w17,#ARMV7_NEON
163         b.ne    .LChaCha20_neon
164 #endif
165
166 .Lshort:
167         .inst   0xd503233f                      // paciasp
168         stp     x29,x30,[sp,#-96]!
169         add     x29,sp,#0
170
171         adr     @x[0],.Lsigma
172         stp     x19,x20,[sp,#16]
173         stp     x21,x22,[sp,#32]
174         stp     x23,x24,[sp,#48]
175         stp     x25,x26,[sp,#64]
176         stp     x27,x28,[sp,#80]
177         sub     sp,sp,#64
178
179         ldp     @d[0],@d[1],[@x[0]]             // load sigma
180         ldp     @d[2],@d[3],[$key]              // load key
181         ldp     @d[4],@d[5],[$key,#16]
182         ldp     @d[6],@d[7],[$ctr]              // load counter
183 #ifdef  __AARCH64EB__
184         ror     @d[2],@d[2],#32
185         ror     @d[3],@d[3],#32
186         ror     @d[4],@d[4],#32
187         ror     @d[5],@d[5],#32
188         ror     @d[6],@d[6],#32
189         ror     @d[7],@d[7],#32
190 #endif
191
192 .Loop_outer:
193         mov.32  @x[0],@d[0]                     // unpack key block
194         lsr     @x[1],@d[0],#32
195         mov.32  @x[2],@d[1]
196         lsr     @x[3],@d[1],#32
197         mov.32  @x[4],@d[2]
198         lsr     @x[5],@d[2],#32
199         mov.32  @x[6],@d[3]
200         lsr     @x[7],@d[3],#32
201         mov.32  @x[8],@d[4]
202         lsr     @x[9],@d[4],#32
203         mov.32  @x[10],@d[5]
204         lsr     @x[11],@d[5],#32
205         mov.32  @x[12],@d[6]
206         lsr     @x[13],@d[6],#32
207         mov.32  @x[14],@d[7]
208         lsr     @x[15],@d[7],#32
209
210         mov     $ctr,#10
211         subs    $len,$len,#64
212 .Loop:
213         sub     $ctr,$ctr,#1
214 ___
# One .Loop iteration is a column round followed by a diagonal round; with
# $ctr counting down from 10 this yields ChaCha20's 20 rounds.  Each eval'd
# string lands in AUTOLOAD, which appends the instruction to $code.
215         foreach (&ROUND(0, 4, 8,12)) { eval; }
216         foreach (&ROUND(0, 5,10,15)) { eval; }
# Post-round processing for the scalar path: re-accumulate the original key
# block, XOR whole 64-byte blocks with input, and handle the final partial
# block in .Ltail/.Less_than_64 byte-by-byte, staging the keystream on the
# stack and wiping it with zeros before returning.  (No comments are added
# inside the heredoc: its text is the emitted assembly.)
217 $code.=<<___;
218         cbnz    $ctr,.Loop
219
220         add.32  @x[0],@x[0],@d[0]               // accumulate key block
221         add     @x[1],@x[1],@d[0],lsr#32
222         add.32  @x[2],@x[2],@d[1]
223         add     @x[3],@x[3],@d[1],lsr#32
224         add.32  @x[4],@x[4],@d[2]
225         add     @x[5],@x[5],@d[2],lsr#32
226         add.32  @x[6],@x[6],@d[3]
227         add     @x[7],@x[7],@d[3],lsr#32
228         add.32  @x[8],@x[8],@d[4]
229         add     @x[9],@x[9],@d[4],lsr#32
230         add.32  @x[10],@x[10],@d[5]
231         add     @x[11],@x[11],@d[5],lsr#32
232         add.32  @x[12],@x[12],@d[6]
233         add     @x[13],@x[13],@d[6],lsr#32
234         add.32  @x[14],@x[14],@d[7]
235         add     @x[15],@x[15],@d[7],lsr#32
236
237         b.lo    .Ltail
238
239         add     @x[0],@x[0],@x[1],lsl#32        // pack
240         add     @x[2],@x[2],@x[3],lsl#32
241         ldp     @x[1],@x[3],[$inp,#0]           // load input
242         add     @x[4],@x[4],@x[5],lsl#32
243         add     @x[6],@x[6],@x[7],lsl#32
244         ldp     @x[5],@x[7],[$inp,#16]
245         add     @x[8],@x[8],@x[9],lsl#32
246         add     @x[10],@x[10],@x[11],lsl#32
247         ldp     @x[9],@x[11],[$inp,#32]
248         add     @x[12],@x[12],@x[13],lsl#32
249         add     @x[14],@x[14],@x[15],lsl#32
250         ldp     @x[13],@x[15],[$inp,#48]
251         add     $inp,$inp,#64
252 #ifdef  __AARCH64EB__
253         rev     @x[0],@x[0]
254         rev     @x[2],@x[2]
255         rev     @x[4],@x[4]
256         rev     @x[6],@x[6]
257         rev     @x[8],@x[8]
258         rev     @x[10],@x[10]
259         rev     @x[12],@x[12]
260         rev     @x[14],@x[14]
261 #endif
262         eor     @x[0],@x[0],@x[1]
263         eor     @x[2],@x[2],@x[3]
264         eor     @x[4],@x[4],@x[5]
265         eor     @x[6],@x[6],@x[7]
266         eor     @x[8],@x[8],@x[9]
267         eor     @x[10],@x[10],@x[11]
268         eor     @x[12],@x[12],@x[13]
269         eor     @x[14],@x[14],@x[15]
270
271         stp     @x[0],@x[2],[$out,#0]           // store output
272          add    @d[6],@d[6],#1                  // increment counter
273         stp     @x[4],@x[6],[$out,#16]
274         stp     @x[8],@x[10],[$out,#32]
275         stp     @x[12],@x[14],[$out,#48]
276         add     $out,$out,#64
277
278         b.hi    .Loop_outer
279
280         ldp     x19,x20,[x29,#16]
281         add     sp,sp,#64
282         ldp     x21,x22,[x29,#32]
283         ldp     x23,x24,[x29,#48]
284         ldp     x25,x26,[x29,#64]
285         ldp     x27,x28,[x29,#80]
286         ldp     x29,x30,[sp],#96
287         .inst   0xd50323bf                      // autiasp
288 .Labort:
289         ret
290
291 .align  4
292 .Ltail:
293         add     $len,$len,#64
294 .Less_than_64:
295         sub     $out,$out,#1
296         add     $inp,$inp,$len
297         add     $out,$out,$len
298         add     $ctr,sp,$len
299         neg     $len,$len
300
301         add     @x[0],@x[0],@x[1],lsl#32        // pack
302         add     @x[2],@x[2],@x[3],lsl#32
303         add     @x[4],@x[4],@x[5],lsl#32
304         add     @x[6],@x[6],@x[7],lsl#32
305         add     @x[8],@x[8],@x[9],lsl#32
306         add     @x[10],@x[10],@x[11],lsl#32
307         add     @x[12],@x[12],@x[13],lsl#32
308         add     @x[14],@x[14],@x[15],lsl#32
309 #ifdef  __AARCH64EB__
310         rev     @x[0],@x[0]
311         rev     @x[2],@x[2]
312         rev     @x[4],@x[4]
313         rev     @x[6],@x[6]
314         rev     @x[8],@x[8]
315         rev     @x[10],@x[10]
316         rev     @x[12],@x[12]
317         rev     @x[14],@x[14]
318 #endif
319         stp     @x[0],@x[2],[sp,#0]
320         stp     @x[4],@x[6],[sp,#16]
321         stp     @x[8],@x[10],[sp,#32]
322         stp     @x[12],@x[14],[sp,#48]
323
324 .Loop_tail:
325         ldrb    w10,[$inp,$len]
326         ldrb    w11,[$ctr,$len]
327         add     $len,$len,#1
328         eor     w10,w10,w11
329         strb    w10,[$out,$len]
330         cbnz    $len,.Loop_tail
331
332         stp     xzr,xzr,[sp,#0]
333         stp     xzr,xzr,[sp,#16]
334         stp     xzr,xzr,[sp,#32]
335         stp     xzr,xzr,[sp,#48]
336
337         ldp     x19,x20,[x29,#16]
338         add     sp,sp,#64
339         ldp     x21,x22,[x29,#32]
340         ldp     x23,x24,[x29,#48]
341         ldp     x25,x26,[x29,#64]
342         ldp     x27,x28,[x29,#80]
343         ldp     x29,x30,[sp],#96
344         .inst   0xd50323bf                      // autiasp
345         ret
346 .size   ChaCha20_ctr32,.-ChaCha20_ctr32
347 ___
348
349 {{{
# NEON register assignment for the 4+1 code path: @K holds the four rows of
# the key block (sigma/key/key/counter); $xt0-$xt3 are scratch, $CTR the
# per-lane counter offsets, $ROT24 the TBL rotate-by-24 constant.  @X maps
# the sixteen state words to v16-v31 so that each column of four words lands
# in same-numbered register groups.
350 my @K = map("v$_.4s",(0..3));
351 my ($xt0,$xt1,$xt2,$xt3, $CTR,$ROT24) = map("v$_.4s",(4..9));
352 my @X = map("v$_.4s",(16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31));
353 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
354     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = @X;
355
# NEON counterpart of ROUND: four quarter-rounds across rows, each vector
# register carrying the same state word of four parallel blocks.  Rotates
# are synthesized as rev32 (by 16), ushr+sli pairs (by 12 and 7) and tbl
# with the .Lrot24 byte permutation (by 24).  Column/diagonal selection
# uses the same rotate-within-row index trick as ROUND.  Returns strings
# for the caller to eval; AUTOLOAD turns each into an instruction in $code.
356 sub NEON_lane_ROUND {
357 my ($a0,$b0,$c0,$d0)=@_;
358 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
359 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
360 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
# Pre-quote the register names so they survive the caller's eval.
361 my @x=map("'$_'",@X);
362
363         (
	# a += b; d ^= a; d <<<= 16 (rev32 swaps 16-bit halves)
364         "&add           (@x[$a0],@x[$a0],@x[$b0])",     # Q1
365          "&add          (@x[$a1],@x[$a1],@x[$b1])",     # Q2
366           "&add         (@x[$a2],@x[$a2],@x[$b2])",     # Q3
367            "&add        (@x[$a3],@x[$a3],@x[$b3])",     # Q4
368         "&eor           (@x[$d0],@x[$d0],@x[$a0])",
369          "&eor          (@x[$d1],@x[$d1],@x[$a1])",
370           "&eor         (@x[$d2],@x[$d2],@x[$a2])",
371            "&eor        (@x[$d3],@x[$d3],@x[$a3])",
372         "&rev32_16      (@x[$d0],@x[$d0])",
373          "&rev32_16     (@x[$d1],@x[$d1])",
374           "&rev32_16    (@x[$d2],@x[$d2])",
375            "&rev32_16   (@x[$d3],@x[$d3])",
376
	# c += d; b ^= c; b <<<= 12 (ushr #20 then sli #12)
377         "&add           (@x[$c0],@x[$c0],@x[$d0])",
378          "&add          (@x[$c1],@x[$c1],@x[$d1])",
379           "&add         (@x[$c2],@x[$c2],@x[$d2])",
380            "&add        (@x[$c3],@x[$c3],@x[$d3])",
381         "&eor           ('$xt0',@x[$b0],@x[$c0])",
382          "&eor          ('$xt1',@x[$b1],@x[$c1])",
383           "&eor         ('$xt2',@x[$b2],@x[$c2])",
384            "&eor        ('$xt3',@x[$b3],@x[$c3])",
385         "&ushr          (@x[$b0],'$xt0',20)",
386          "&ushr         (@x[$b1],'$xt1',20)",
387           "&ushr        (@x[$b2],'$xt2',20)",
388            "&ushr       (@x[$b3],'$xt3',20)",
389         "&sli           (@x[$b0],'$xt0',12)",
390          "&sli          (@x[$b1],'$xt1',12)",
391           "&sli         (@x[$b2],'$xt2',12)",
392            "&sli        (@x[$b3],'$xt3',12)",
393
	# a += b; d ^= a; d <<<= 24 (tbl with .Lrot24 byte shuffle)
394         "&add           (@x[$a0],@x[$a0],@x[$b0])",
395          "&add          (@x[$a1],@x[$a1],@x[$b1])",
396           "&add         (@x[$a2],@x[$a2],@x[$b2])",
397            "&add        (@x[$a3],@x[$a3],@x[$b3])",
398         "&eor           ('$xt0',@x[$d0],@x[$a0])",
399          "&eor          ('$xt1',@x[$d1],@x[$a1])",
400           "&eor         ('$xt2',@x[$d2],@x[$a2])",
401            "&eor        ('$xt3',@x[$d3],@x[$a3])",
402         "&tbl           (@x[$d0],'{$xt0}','$ROT24')",
403          "&tbl          (@x[$d1],'{$xt1}','$ROT24')",
404           "&tbl         (@x[$d2],'{$xt2}','$ROT24')",
405            "&tbl        (@x[$d3],'{$xt3}','$ROT24')",
406
	# c += d; b ^= c; b <<<= 7 (ushr #25 then sli #7)
407         "&add           (@x[$c0],@x[$c0],@x[$d0])",
408          "&add          (@x[$c1],@x[$c1],@x[$d1])",
409           "&add         (@x[$c2],@x[$c2],@x[$d2])",
410            "&add        (@x[$c3],@x[$c3],@x[$d3])",
411         "&eor           ('$xt0',@x[$b0],@x[$c0])",
412          "&eor          ('$xt1',@x[$b1],@x[$c1])",
413           "&eor         ('$xt2',@x[$b2],@x[$c2])",
414            "&eor        ('$xt3',@x[$b3],@x[$c3])",
415         "&ushr          (@x[$b0],'$xt0',25)",
416          "&ushr         (@x[$b1],'$xt1',25)",
417           "&ushr        (@x[$b2],'$xt2',25)",
418            "&ushr       (@x[$b3],'$xt3',25)",
419         "&sli           (@x[$b0],'$xt0',7)",
420          "&sli          (@x[$b1],'$xt1',7)",
421           "&sli         (@x[$b2],'$xt2',7)",
422            "&sli        (@x[$b3],'$xt3',7)"
423         );
424 }
425
# ChaCha20_neon entry and per-iteration setup: four blocks are processed in
# NEON registers while a fifth is computed in the scalar registers (the 4+1
# scheme, 320 bytes per outer iteration).  Buffers of 512 bytes or more are
# diverted to ChaCha20_512_neon.  (No comments are added inside the heredoc:
# its text is the emitted assembly.)
426 $code.=<<___;
427
428 #ifdef  __KERNEL__
429 .globl  ChaCha20_neon
430 #endif
431 .type   ChaCha20_neon,%function
432 .align  5
433 ChaCha20_neon:
434 .LChaCha20_neon:
435         .inst   0xd503233f                      // paciasp
436         stp     x29,x30,[sp,#-96]!
437         add     x29,sp,#0
438
439         adr     @x[0],.Lsigma
440         stp     x19,x20,[sp,#16]
441         stp     x21,x22,[sp,#32]
442         stp     x23,x24,[sp,#48]
443         stp     x25,x26,[sp,#64]
444         stp     x27,x28,[sp,#80]
445         cmp     $len,#512
446         b.hs    .L512_or_more_neon
447
448         sub     sp,sp,#64
449
450         ldp     @d[0],@d[1],[@x[0]]             // load sigma
451         ld1     {@K[0]},[@x[0]],#16
452         ldp     @d[2],@d[3],[$key]              // load key
453         ldp     @d[4],@d[5],[$key,#16]
454         ld1     {@K[1],@K[2]},[$key]
455         ldp     @d[6],@d[7],[$ctr]              // load counter
456         ld1     {@K[3]},[$ctr]
457         stp     d8,d9,[sp]                      // meet ABI requirements
458         ld1     {$CTR,$ROT24},[@x[0]]
459 #ifdef  __AARCH64EB__
460         rev64   @K[0],@K[0]
461         ror     @d[2],@d[2],#32
462         ror     @d[3],@d[3],#32
463         ror     @d[4],@d[4],#32
464         ror     @d[5],@d[5],#32
465         ror     @d[6],@d[6],#32
466         ror     @d[7],@d[7],#32
467 #endif
468
469 .Loop_outer_neon:
470         dup     $xa0,@{K[0]}[0]                 // unpack key block
471          mov.32 @x[0],@d[0]
472         dup     $xa1,@{K[0]}[1]
473          lsr    @x[1],@d[0],#32
474         dup     $xa2,@{K[0]}[2]
475          mov.32 @x[2],@d[1]
476         dup     $xa3,@{K[0]}[3]
477          lsr    @x[3],@d[1],#32
478         dup     $xb0,@{K[1]}[0]
479          mov.32 @x[4],@d[2]
480         dup     $xb1,@{K[1]}[1]
481          lsr    @x[5],@d[2],#32
482         dup     $xb2,@{K[1]}[2]
483          mov.32 @x[6],@d[3]
484         dup     $xb3,@{K[1]}[3]
485          lsr    @x[7],@d[3],#32
486         dup     $xd0,@{K[3]}[0]
487          mov.32 @x[8],@d[4]
488         dup     $xd1,@{K[3]}[1]
489          lsr    @x[9],@d[4],#32
490         dup     $xd2,@{K[3]}[2]
491          mov.32 @x[10],@d[5]
492         dup     $xd3,@{K[3]}[3]
493          lsr    @x[11],@d[5],#32
494         add     $xd0,$xd0,$CTR
495          mov.32 @x[12],@d[6]
496         dup     $xc0,@{K[2]}[0]
497          lsr    @x[13],@d[6],#32
498         dup     $xc1,@{K[2]}[1]
499          mov.32 @x[14],@d[7]
500         dup     $xc2,@{K[2]}[2]
501          lsr    @x[15],@d[7],#32
502         dup     $xc3,@{K[2]}[3]
503
504         mov     $ctr,#10
505         subs    $len,$len,#320
506 .Loop_neon:
507         sub     $ctr,$ctr,#1
508 ___
# Interleave the NEON rounds with the scalar rounds for the fifth block:
# after every NEON instruction one scalar instruction from @plus_one is
# emitted, keeping both pipelines busy.  (NOTE(review): the NEON list is
# longer than the scalar one, so the trailing shifts yield undef, which
# eval simply ignores.)
509         my @plus_one=&ROUND(0,4,8,12);
510         foreach (&NEON_lane_ROUND(0,4,8,12))  { eval; eval(shift(@plus_one)); }
511
512         @plus_one=&ROUND(0,5,10,15);
513         foreach (&NEON_lane_ROUND(0,5,10,15)) { eval; eval(shift(@plus_one)); }
# Remainder of ChaCha20_neon: zip1/zip2 transpose the four NEON blocks back
# into byte order, 320 bytes (4 NEON + 1 scalar block) are XORed per outer
# iteration, and leftovers below 320 bytes unwind progressively through
# .Ltail_neon/.Last_neon, wiping the on-stack keystream before returning
# via .Ldone_neon.  (No comments are added inside the heredoc: its text is
# the emitted assembly.)
514 $code.=<<___;
515         cbnz    $ctr,.Loop_neon
516
517         add     $xd0,$xd0,$CTR
518
519         zip1    $xt0,$xa0,$xa1                  // transpose data
520         zip1    $xt1,$xa2,$xa3
521         zip2    $xt2,$xa0,$xa1
522         zip2    $xt3,$xa2,$xa3
523         zip1.64 $xa0,$xt0,$xt1
524         zip2.64 $xa1,$xt0,$xt1
525         zip1.64 $xa2,$xt2,$xt3
526         zip2.64 $xa3,$xt2,$xt3
527
528         zip1    $xt0,$xb0,$xb1
529         zip1    $xt1,$xb2,$xb3
530         zip2    $xt2,$xb0,$xb1
531         zip2    $xt3,$xb2,$xb3
532         zip1.64 $xb0,$xt0,$xt1
533         zip2.64 $xb1,$xt0,$xt1
534         zip1.64 $xb2,$xt2,$xt3
535         zip2.64 $xb3,$xt2,$xt3
536
537         zip1    $xt0,$xc0,$xc1
538          add.32 @x[0],@x[0],@d[0]               // accumulate key block
539         zip1    $xt1,$xc2,$xc3
540          add    @x[1],@x[1],@d[0],lsr#32
541         zip2    $xt2,$xc0,$xc1
542          add.32 @x[2],@x[2],@d[1]
543         zip2    $xt3,$xc2,$xc3
544          add    @x[3],@x[3],@d[1],lsr#32
545         zip1.64 $xc0,$xt0,$xt1
546          add.32 @x[4],@x[4],@d[2]
547         zip2.64 $xc1,$xt0,$xt1
548          add    @x[5],@x[5],@d[2],lsr#32
549         zip1.64 $xc2,$xt2,$xt3
550          add.32 @x[6],@x[6],@d[3]
551         zip2.64 $xc3,$xt2,$xt3
552          add    @x[7],@x[7],@d[3],lsr#32
553
554         zip1    $xt0,$xd0,$xd1
555          add.32 @x[8],@x[8],@d[4]
556         zip1    $xt1,$xd2,$xd3
557          add    @x[9],@x[9],@d[4],lsr#32
558         zip2    $xt2,$xd0,$xd1
559          add.32 @x[10],@x[10],@d[5]
560         zip2    $xt3,$xd2,$xd3
561          add    @x[11],@x[11],@d[5],lsr#32
562         zip1.64 $xd0,$xt0,$xt1
563          add.32 @x[12],@x[12],@d[6]
564         zip2.64 $xd1,$xt0,$xt1
565          add    @x[13],@x[13],@d[6],lsr#32
566         zip1.64 $xd2,$xt2,$xt3
567          add.32 @x[14],@x[14],@d[7]
568         zip2.64 $xd3,$xt2,$xt3
569          add    @x[15],@x[15],@d[7],lsr#32
570
571         b.lo    .Ltail_neon
572
573         add     @x[0],@x[0],@x[1],lsl#32        // pack
574         add     @x[2],@x[2],@x[3],lsl#32
575         ldp     @x[1],@x[3],[$inp,#0]           // load input
576          add    $xa0,$xa0,@K[0]                 // accumulate key block
577         add     @x[4],@x[4],@x[5],lsl#32
578         add     @x[6],@x[6],@x[7],lsl#32
579         ldp     @x[5],@x[7],[$inp,#16]
580          add    $xb0,$xb0,@K[1]
581         add     @x[8],@x[8],@x[9],lsl#32
582         add     @x[10],@x[10],@x[11],lsl#32
583         ldp     @x[9],@x[11],[$inp,#32]
584          add    $xc0,$xc0,@K[2]
585         add     @x[12],@x[12],@x[13],lsl#32
586         add     @x[14],@x[14],@x[15],lsl#32
587         ldp     @x[13],@x[15],[$inp,#48]
588          add    $xd0,$xd0,@K[3]
589         add     $inp,$inp,#64
590 #ifdef  __AARCH64EB__
591         rev     @x[0],@x[0]
592         rev     @x[2],@x[2]
593         rev     @x[4],@x[4]
594         rev     @x[6],@x[6]
595         rev     @x[8],@x[8]
596         rev     @x[10],@x[10]
597         rev     @x[12],@x[12]
598         rev     @x[14],@x[14]
599 #endif
600         ld1.8   {$xt0-$xt3},[$inp],#64
601         eor     @x[0],@x[0],@x[1]
602          add    $xa1,$xa1,@K[0]
603         eor     @x[2],@x[2],@x[3]
604          add    $xb1,$xb1,@K[1]
605         eor     @x[4],@x[4],@x[5]
606          add    $xc1,$xc1,@K[2]
607         eor     @x[6],@x[6],@x[7]
608          add    $xd1,$xd1,@K[3]
609         eor     @x[8],@x[8],@x[9]
610          eor    $xa0,$xa0,$xt0
611          movi   $xt0,#5
612         eor     @x[10],@x[10],@x[11]
613          eor    $xb0,$xb0,$xt1
614         eor     @x[12],@x[12],@x[13]
615          eor    $xc0,$xc0,$xt2
616         eor     @x[14],@x[14],@x[15]
617          eor    $xd0,$xd0,$xt3
618          add    $CTR,$CTR,$xt0                  // += 5
619          ld1.8  {$xt0-$xt3},[$inp],#64
620
621         stp     @x[0],@x[2],[$out,#0]           // store output
622          add    @d[6],@d[6],#5                  // increment counter
623         stp     @x[4],@x[6],[$out,#16]
624         stp     @x[8],@x[10],[$out,#32]
625         stp     @x[12],@x[14],[$out,#48]
626         add     $out,$out,#64
627
628         st1.8   {$xa0-$xd0},[$out],#64
629          add    $xa2,$xa2,@K[0]
630          add    $xb2,$xb2,@K[1]
631          add    $xc2,$xc2,@K[2]
632          add    $xd2,$xd2,@K[3]
633         ld1.8   {$xa0-$xd0},[$inp],#64
634
635         eor     $xa1,$xa1,$xt0
636         eor     $xb1,$xb1,$xt1
637         eor     $xc1,$xc1,$xt2
638         eor     $xd1,$xd1,$xt3
639         st1.8   {$xa1-$xd1},[$out],#64
640          add    $xa3,$xa3,@K[0]
641          add    $xb3,$xb3,@K[1]
642          add    $xc3,$xc3,@K[2]
643          add    $xd3,$xd3,@K[3]
644         ld1.8   {$xa1-$xd1},[$inp],#64
645
646         eor     $xa2,$xa2,$xa0
647         eor     $xb2,$xb2,$xb0
648         eor     $xc2,$xc2,$xc0
649         eor     $xd2,$xd2,$xd0
650         st1.8   {$xa2-$xd2},[$out],#64
651
652         eor     $xa3,$xa3,$xa1
653         eor     $xb3,$xb3,$xb1
654         eor     $xc3,$xc3,$xc1
655         eor     $xd3,$xd3,$xd1
656         st1.8   {$xa3-$xd3},[$out],#64
657
658         b.hi    .Loop_outer_neon
659
660         ldp     d8,d9,[sp]                      // meet ABI requirements
661
662         ldp     x19,x20,[x29,#16]
663         add     sp,sp,#64
664         ldp     x21,x22,[x29,#32]
665         ldp     x23,x24,[x29,#48]
666         ldp     x25,x26,[x29,#64]
667         ldp     x27,x28,[x29,#80]
668         ldp     x29,x30,[sp],#96
669         .inst   0xd50323bf                      // autiasp
670         ret
671
672 .align  4
673 .Ltail_neon:
674         add     $len,$len,#320
675         ldp     d8,d9,[sp]                      // meet ABI requirements
676         cmp     $len,#64
677         b.lo    .Less_than_64
678
679         add     @x[0],@x[0],@x[1],lsl#32        // pack
680         add     @x[2],@x[2],@x[3],lsl#32
681         ldp     @x[1],@x[3],[$inp,#0]           // load input
682         add     @x[4],@x[4],@x[5],lsl#32
683         add     @x[6],@x[6],@x[7],lsl#32
684         ldp     @x[5],@x[7],[$inp,#16]
685         add     @x[8],@x[8],@x[9],lsl#32
686         add     @x[10],@x[10],@x[11],lsl#32
687         ldp     @x[9],@x[11],[$inp,#32]
688         add     @x[12],@x[12],@x[13],lsl#32
689         add     @x[14],@x[14],@x[15],lsl#32
690         ldp     @x[13],@x[15],[$inp,#48]
691         add     $inp,$inp,#64
692 #ifdef  __AARCH64EB__
693         rev     @x[0],@x[0]
694         rev     @x[2],@x[2]
695         rev     @x[4],@x[4]
696         rev     @x[6],@x[6]
697         rev     @x[8],@x[8]
698         rev     @x[10],@x[10]
699         rev     @x[12],@x[12]
700         rev     @x[14],@x[14]
701 #endif
702         eor     @x[0],@x[0],@x[1]
703         eor     @x[2],@x[2],@x[3]
704         eor     @x[4],@x[4],@x[5]
705         eor     @x[6],@x[6],@x[7]
706         eor     @x[8],@x[8],@x[9]
707         eor     @x[10],@x[10],@x[11]
708         eor     @x[12],@x[12],@x[13]
709         eor     @x[14],@x[14],@x[15]
710
711         stp     @x[0],@x[2],[$out,#0]           // store output
712          add    $xa0,$xa0,@K[0]                 // accumulate key block
713         stp     @x[4],@x[6],[$out,#16]
714          add    $xb0,$xb0,@K[1]
715         stp     @x[8],@x[10],[$out,#32]
716          add    $xc0,$xc0,@K[2]
717         stp     @x[12],@x[14],[$out,#48]
718          add    $xd0,$xd0,@K[3]
719         add     $out,$out,#64
720         b.eq    .Ldone_neon
721         sub     $len,$len,#64
722         cmp     $len,#64
723         b.lo    .Last_neon
724
725         ld1.8   {$xt0-$xt3},[$inp],#64
726         eor     $xa0,$xa0,$xt0
727         eor     $xb0,$xb0,$xt1
728         eor     $xc0,$xc0,$xt2
729         eor     $xd0,$xd0,$xt3
730         st1.8   {$xa0-$xd0},[$out],#64
731         b.eq    .Ldone_neon
732
733         add     $xa0,$xa1,@K[0]
734         add     $xb0,$xb1,@K[1]
735         sub     $len,$len,#64
736         add     $xc0,$xc1,@K[2]
737         cmp     $len,#64
738         add     $xd0,$xd1,@K[3]
739         b.lo    .Last_neon
740
741         ld1.8   {$xt0-$xt3},[$inp],#64
742         eor     $xa1,$xa0,$xt0
743         eor     $xb1,$xb0,$xt1
744         eor     $xc1,$xc0,$xt2
745         eor     $xd1,$xd0,$xt3
746         st1.8   {$xa1-$xd1},[$out],#64
747         b.eq    .Ldone_neon
748
749         add     $xa0,$xa2,@K[0]
750         add     $xb0,$xb2,@K[1]
751         sub     $len,$len,#64
752         add     $xc0,$xc2,@K[2]
753         cmp     $len,#64
754         add     $xd0,$xd2,@K[3]
755         b.lo    .Last_neon
756
757         ld1.8   {$xt0-$xt3},[$inp],#64
758         eor     $xa2,$xa0,$xt0
759         eor     $xb2,$xb0,$xt1
760         eor     $xc2,$xc0,$xt2
761         eor     $xd2,$xd0,$xt3
762         st1.8   {$xa2-$xd2},[$out],#64
763         b.eq    .Ldone_neon
764
765         add     $xa0,$xa3,@K[0]
766         add     $xb0,$xb3,@K[1]
767         add     $xc0,$xc3,@K[2]
768         add     $xd0,$xd3,@K[3]
769         sub     $len,$len,#64
770
771 .Last_neon:
772         st1.8   {$xa0-$xd0},[sp]
773
774         sub     $out,$out,#1
775         add     $inp,$inp,$len
776         add     $out,$out,$len
777         add     $ctr,sp,$len
778         neg     $len,$len
779
780 .Loop_tail_neon:
781         ldrb    w10,[$inp,$len]
782         ldrb    w11,[$ctr,$len]
783         add     $len,$len,#1
784         eor     w10,w10,w11
785         strb    w10,[$out,$len]
786         cbnz    $len,.Loop_tail_neon
787
788         stp     xzr,xzr,[sp,#0]
789         stp     xzr,xzr,[sp,#16]
790         stp     xzr,xzr,[sp,#32]
791         stp     xzr,xzr,[sp,#48]
792
793 .Ldone_neon:
794         ldp     x19,x20,[x29,#16]
795         add     sp,sp,#64
796         ldp     x21,x22,[x29,#32]
797         ldp     x23,x24,[x29,#48]
798         ldp     x25,x26,[x29,#64]
799         ldp     x27,x28,[x29,#80]
800         ldp     x29,x30,[sp],#96
801         .inst   0xd50323bf                      // autiasp
802         ret
803 .size   ChaCha20_neon,.-ChaCha20_neon
804 ___
805 {
# Register assignment for the 6-blocks-at-a-time (>=512 bytes) code path:
# @K[0..2] hold sigma+key, @K[3..6] successively incremented counter rows,
# $T0-$T5 alias @K as scratch names, and v8-v31 carry six parallel blocks.
# NOTE(review): @K[6] is also aliased as $rot24 (used by NEONROUND's tbl);
# presumably it is reloaded from .Lrot24 between uses — the reload is not
# visible in this chunk, confirm against the full file.
806 my @K = map("v$_.4s",(0..6));
807 my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
808 my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
809     $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(8..31));
810 my $rot24 = @K[6];
811 my $ONE = "v7.4s";
812
# Single-register-set NEON quarter-round used by the 6x path: one vector
# holds a whole row of one block.  After the arithmetic, the ext
# instructions rotate rows b/c/d within the vector so that consecutive
# calls alternate between column and diagonal rounds; $odd selects the
# forward or reverse rotation of b and d.  Returns strings for the caller
# to eval (AUTOLOAD emits the instructions).
813 sub NEONROUND {
814 my $odd = pop;
815 my ($a,$b,$c,$d,$t)=@_;
816
817         (
818         "&add           ('$a','$a','$b')",
819         "&eor           ('$d','$d','$a')",
820         "&rev32_16      ('$d','$d')",           # vrot ($d,16)
821
822         "&add           ('$c','$c','$d')",
823         "&eor           ('$t','$b','$c')",
824         "&ushr          ('$b','$t',20)",
825         "&sli           ('$b','$t',12)",
826
827         "&add           ('$a','$a','$b')",
828         "&eor           ('$d','$d','$a')",
829         "&tbl           ('$d','{$d}','$rot24')",
830
831         "&add           ('$c','$c','$d')",
832         "&eor           ('$t','$b','$c')",
833         "&ushr          ('$b','$t',25)",
834         "&sli           ('$b','$t',7)",
835
836         "&ext           ('$c','$c','$c',8)",
837         "&ext           ('$d','$d','$d',$odd?4:12)",
838         "&ext           ('$b','$b','$b',$odd?12:4)"
839         );
840 }
841
# ChaCha20_512_neon: 6xNEON+2xIALU path for 512+ byte inputs.  Prologue:
# PAC-sign the return address, save frame and x19-x28, reserve 128 bytes of
# key off-load area plus 64 bytes for d8-d15.  Sigma, key and counter are
# loaded twice — into scalar pairs (@d) for the IALU blocks and into vectors
# (@K) for the NEON blocks.  @K[3] ends up at counter+2 ("not typo": the two
# scalar blocks come first), @K[4..6] at +3..+5, with $D4/$D5 derived later
# by adding $ONE<<2.  The invariant part of the key block is off-loaded to
# the stack so v0-v5 can serve as scratch, the scalar state is unpacked
# into @x, and the 5-iteration upper loop begins.
$code.=<<___;
.type   ChaCha20_512_neon,%function
.align  5
ChaCha20_512_neon:
        .inst   0xd503233f                      // paciasp
        stp     x29,x30,[sp,#-96]!
        add     x29,sp,#0

        adr     @x[0],.Lsigma
        stp     x19,x20,[sp,#16]
        stp     x21,x22,[sp,#32]
        stp     x23,x24,[sp,#48]
        stp     x25,x26,[sp,#64]
        stp     x27,x28,[sp,#80]

.L512_or_more_neon:
        sub     sp,sp,#128+64

        eor     $ONE,$ONE,$ONE
        ldp     @d[0],@d[1],[@x[0]]             // load sigma
        ld1     {@K[0]},[@x[0]],#16
        ldp     @d[2],@d[3],[$key]              // load key
        ldp     @d[4],@d[5],[$key,#16]
        ld1     {@K[1],@K[2]},[$key]
        ldp     @d[6],@d[7],[$ctr]              // load counter
        ld1     {@K[3]},[$ctr]
        ld1     {$ONE}[0],[@x[0]]
        add     $key,@x[0],#16                  // .Lrot24
#ifdef  __AARCH64EB__
        rev64   @K[0],@K[0]
        ror     @d[2],@d[2],#32
        ror     @d[3],@d[3],#32
        ror     @d[4],@d[4],#32
        ror     @d[5],@d[5],#32
        ror     @d[6],@d[6],#32
        ror     @d[7],@d[7],#32
#endif
        add     @K[3],@K[3],$ONE                // += 1
        stp     @K[0],@K[1],[sp,#0]             // off-load key block, invariant part
        add     @K[3],@K[3],$ONE                // not typo
        str     @K[2],[sp,#32]
        add     @K[4],@K[3],$ONE
        add     @K[5],@K[4],$ONE
        add     @K[6],@K[5],$ONE
        shl     $ONE,$ONE,#2                    // 1 -> 4

        stp     d8,d9,[sp,#128+0]               // meet ABI requirements
        stp     d10,d11,[sp,#128+16]
        stp     d12,d13,[sp,#128+32]
        stp     d14,d15,[sp,#128+48]

        sub     $len,$len,#512                  // not typo

.Loop_outer_512_neon:
         mov    $A0,@K[0]
         mov    $A1,@K[0]
         mov    $A2,@K[0]
         mov    $A3,@K[0]
         mov    $A4,@K[0]
         mov    $A5,@K[0]
         mov    $B0,@K[1]
        mov.32  @x[0],@d[0]                     // unpack key block
         mov    $B1,@K[1]
        lsr     @x[1],@d[0],#32
         mov    $B2,@K[1]
        mov.32  @x[2],@d[1]
         mov    $B3,@K[1]
        lsr     @x[3],@d[1],#32
         mov    $B4,@K[1]
        mov.32  @x[4],@d[2]
         mov    $B5,@K[1]
        lsr     @x[5],@d[2],#32
         mov    $D0,@K[3]
        mov.32  @x[6],@d[3]
         mov    $D1,@K[4]
        lsr     @x[7],@d[3],#32
         mov    $D2,@K[5]
        mov.32  @x[8],@d[4]
         mov    $D3,@K[6]
        lsr     @x[9],@d[4],#32
         mov    $C0,@K[2]
        mov.32  @x[10],@d[5]
         mov    $C1,@K[2]
        lsr     @x[11],@d[5],#32
         add    $D4,$D0,$ONE                    // +4
        mov.32  @x[12],@d[6]
         add    $D5,$D1,$ONE                    // +4
        lsr     @x[13],@d[6],#32
         mov    $C2,@K[2]
        mov.32  @x[14],@d[7]
         mov    $C3,@K[2]
        lsr     @x[15],@d[7],#32
         mov    $C4,@K[2]
         stp    @K[3],@K[4],[sp,#48]            // off-load key block, variable part
         mov    $C5,@K[2]
         stp    @K[5],@K[6],[sp,#80]

        mov     $ctr,#5
        ld1     {$rot24},[$key]
        subs    $len,$len,#512
.Loop_upper_neon:
        sub     $ctr,$ctr,#1
___
        # Even ("column") half of one NEON double round for each of the six
        # vector blocks; the scalar (IALU) block gets a full column+diagonal
        # round pair, i.e. it runs at twice the NEON round rate.
        my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
        my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
        my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
        my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
        my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
        my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
        my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
        my $diff = ($#thread0+1)*6 - $#thread67 - 1;    # NOTE(review): $diff and $i
        my $i = 0;                                      # appear unused below — confirm

        # Interleave: each pass emits one instruction from every NEON thread,
        # each followed by one scalar instruction (if @thread67 runs out
        # first, the surplus shifts yield undef and eval emits nothing).
        foreach (@thread0) {
                eval;                   eval(shift(@thread67));
                eval(shift(@thread1));  eval(shift(@thread67));
                eval(shift(@thread2));  eval(shift(@thread67));
                eval(shift(@thread3));  eval(shift(@thread67));
                eval(shift(@thread4));  eval(shift(@thread67));
                eval(shift(@thread5));  eval(shift(@thread67));
        }

        # Odd ("diagonal") half of the NEON double round, again interleaved
        # with a full scalar column+diagonal round pair.  After 5 loop
        # iterations the scalar block has done all 20 rounds while each
        # NEON block is half-way (5 of 10 double rounds).
        @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
        @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
        @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
        @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
        @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
        @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
        @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

        foreach (@thread0) {
                eval;                   eval(shift(@thread67));
                eval(shift(@thread1));  eval(shift(@thread67));
                eval(shift(@thread2));  eval(shift(@thread67));
                eval(shift(@thread3));  eval(shift(@thread67));
                eval(shift(@thread4));  eval(shift(@thread67));
                eval(shift(@thread5));  eval(shift(@thread67));
        }
# Upper loop done: the first scalar block has completed all 20 rounds.
# Emit its output — accumulate the key block into @x, repack the 32-bit
# halves into 64-bit registers, xor with input and store — then bump the
# scalar counter by 1, re-unpack the state for the second scalar block and
# enter the 5-iteration lower loop (which also finishes the NEON blocks).
$code.=<<___;
        cbnz    $ctr,.Loop_upper_neon

        add.32  @x[0],@x[0],@d[0]               // accumulate key block
        add     @x[1],@x[1],@d[0],lsr#32
        add.32  @x[2],@x[2],@d[1]
        add     @x[3],@x[3],@d[1],lsr#32
        add.32  @x[4],@x[4],@d[2]
        add     @x[5],@x[5],@d[2],lsr#32
        add.32  @x[6],@x[6],@d[3]
        add     @x[7],@x[7],@d[3],lsr#32
        add.32  @x[8],@x[8],@d[4]
        add     @x[9],@x[9],@d[4],lsr#32
        add.32  @x[10],@x[10],@d[5]
        add     @x[11],@x[11],@d[5],lsr#32
        add.32  @x[12],@x[12],@d[6]
        add     @x[13],@x[13],@d[6],lsr#32
        add.32  @x[14],@x[14],@d[7]
        add     @x[15],@x[15],@d[7],lsr#32

        add     @x[0],@x[0],@x[1],lsl#32        // pack
        add     @x[2],@x[2],@x[3],lsl#32
        ldp     @x[1],@x[3],[$inp,#0]           // load input
        add     @x[4],@x[4],@x[5],lsl#32
        add     @x[6],@x[6],@x[7],lsl#32
        ldp     @x[5],@x[7],[$inp,#16]
        add     @x[8],@x[8],@x[9],lsl#32
        add     @x[10],@x[10],@x[11],lsl#32
        ldp     @x[9],@x[11],[$inp,#32]
        add     @x[12],@x[12],@x[13],lsl#32
        add     @x[14],@x[14],@x[15],lsl#32
        ldp     @x[13],@x[15],[$inp,#48]
        add     $inp,$inp,#64
#ifdef  __AARCH64EB__
        rev     @x[0],@x[0]
        rev     @x[2],@x[2]
        rev     @x[4],@x[4]
        rev     @x[6],@x[6]
        rev     @x[8],@x[8]
        rev     @x[10],@x[10]
        rev     @x[12],@x[12]
        rev     @x[14],@x[14]
#endif
        eor     @x[0],@x[0],@x[1]
        eor     @x[2],@x[2],@x[3]
        eor     @x[4],@x[4],@x[5]
        eor     @x[6],@x[6],@x[7]
        eor     @x[8],@x[8],@x[9]
        eor     @x[10],@x[10],@x[11]
        eor     @x[12],@x[12],@x[13]
        eor     @x[14],@x[14],@x[15]

         stp    @x[0],@x[2],[$out,#0]           // store output
         add    @d[6],@d[6],#1                  // increment counter
        mov.32  @x[0],@d[0]                     // unpack key block
        lsr     @x[1],@d[0],#32
         stp    @x[4],@x[6],[$out,#16]
        mov.32  @x[2],@d[1]
        lsr     @x[3],@d[1],#32
         stp    @x[8],@x[10],[$out,#32]
        mov.32  @x[4],@d[2]
        lsr     @x[5],@d[2],#32
         stp    @x[12],@x[14],[$out,#48]
         add    $out,$out,#64
        mov.32  @x[6],@d[3]
        lsr     @x[7],@d[3],#32
        mov.32  @x[8],@d[4]
        lsr     @x[9],@d[4],#32
        mov.32  @x[10],@d[5]
        lsr     @x[11],@d[5],#32
        mov.32  @x[12],@d[6]
        lsr     @x[13],@d[6],#32
        mov.32  @x[14],@d[7]
        lsr     @x[15],@d[7],#32

        mov     $ctr,#5
.Loop_lower_neon:
        sub     $ctr,$ctr,#1
___
        # Lower loop body: identical interleaving scheme to the upper loop —
        # even then odd NEON quarter-round steps for the six vector blocks,
        # each half paired with a full scalar column+diagonal round pair for
        # the second IALU block.
        @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
        @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
        @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
        @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
        @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
        @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
        @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

        foreach (@thread0) {
                eval;                   eval(shift(@thread67));
                eval(shift(@thread1));  eval(shift(@thread67));
                eval(shift(@thread2));  eval(shift(@thread67));
                eval(shift(@thread3));  eval(shift(@thread67));
                eval(shift(@thread4));  eval(shift(@thread67));
                eval(shift(@thread5));  eval(shift(@thread67));
        }

        # Odd ("diagonal") half.
        @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
        @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
        @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
        @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
        @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
        @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
        @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

        foreach (@thread0) {
                eval;                   eval(shift(@thread67));
                eval(shift(@thread1));  eval(shift(@thread67));
                eval(shift(@thread2));  eval(shift(@thread67));
                eval(shift(@thread3));  eval(shift(@thread67));
                eval(shift(@thread4));  eval(shift(@thread67));
                eval(shift(@thread5));  eval(shift(@thread67));
        }
# Lower loop done: the second scalar block and all six NEON blocks are
# complete.  Finish the scalar block as before (accumulate/pack/xor/store),
# reload the off-loaded key blocks and add them into the NEON state, then
# xor the six 64-byte NEON blocks against the input and store them.  The
# counter advances by 8 per outer iteration (1 earlier + 7 here for the
# scalar view; @K[3..6] get += 8), i.e. 512 bytes of keystream.  The tail
# either loops, drops back to .Loop_outer_neon (>=192 bytes left) or
# .Loop_outer, or takes the epilogue, which restores d8-d15 and x19-x28,
# wipes the on-stack key off-load area with public sigma values, and
# authenticates the return address.
$code.=<<___;
        cbnz    $ctr,.Loop_lower_neon

        add.32  @x[0],@x[0],@d[0]               // accumulate key block
         ldp    @K[0],@K[1],[sp,#0]
        add     @x[1],@x[1],@d[0],lsr#32
         ldp    @K[2],@K[3],[sp,#32]
        add.32  @x[2],@x[2],@d[1]
         ldp    @K[4],@K[5],[sp,#64]
        add     @x[3],@x[3],@d[1],lsr#32
         ldr    @K[6],[sp,#96]
         add    $A0,$A0,@K[0]
        add.32  @x[4],@x[4],@d[2]
         add    $A1,$A1,@K[0]
        add     @x[5],@x[5],@d[2],lsr#32
         add    $A2,$A2,@K[0]
        add.32  @x[6],@x[6],@d[3]
         add    $A3,$A3,@K[0]
        add     @x[7],@x[7],@d[3],lsr#32
         add    $A4,$A4,@K[0]
        add.32  @x[8],@x[8],@d[4]
         add    $A5,$A5,@K[0]
        add     @x[9],@x[9],@d[4],lsr#32
         add    $C0,$C0,@K[2]
        add.32  @x[10],@x[10],@d[5]
         add    $C1,$C1,@K[2]
        add     @x[11],@x[11],@d[5],lsr#32
         add    $C2,$C2,@K[2]
        add.32  @x[12],@x[12],@d[6]
         add    $C3,$C3,@K[2]
        add     @x[13],@x[13],@d[6],lsr#32
         add    $C4,$C4,@K[2]
        add.32  @x[14],@x[14],@d[7]
         add    $C5,$C5,@K[2]
        add     @x[15],@x[15],@d[7],lsr#32
         add    $D4,$D4,$ONE                    // +4
        add     @x[0],@x[0],@x[1],lsl#32        // pack
         add    $D5,$D5,$ONE                    // +4
        add     @x[2],@x[2],@x[3],lsl#32
         add    $D0,$D0,@K[3]
        ldp     @x[1],@x[3],[$inp,#0]           // load input
         add    $D1,$D1,@K[4]
        add     @x[4],@x[4],@x[5],lsl#32
         add    $D2,$D2,@K[5]
        add     @x[6],@x[6],@x[7],lsl#32
         add    $D3,$D3,@K[6]
        ldp     @x[5],@x[7],[$inp,#16]
         add    $D4,$D4,@K[3]
        add     @x[8],@x[8],@x[9],lsl#32
         add    $D5,$D5,@K[4]
        add     @x[10],@x[10],@x[11],lsl#32
         add    $B0,$B0,@K[1]
        ldp     @x[9],@x[11],[$inp,#32]
         add    $B1,$B1,@K[1]
        add     @x[12],@x[12],@x[13],lsl#32
         add    $B2,$B2,@K[1]
        add     @x[14],@x[14],@x[15],lsl#32
         add    $B3,$B3,@K[1]
        ldp     @x[13],@x[15],[$inp,#48]
         add    $B4,$B4,@K[1]
        add     $inp,$inp,#64
         add    $B5,$B5,@K[1]

#ifdef  __AARCH64EB__
        rev     @x[0],@x[0]
        rev     @x[2],@x[2]
        rev     @x[4],@x[4]
        rev     @x[6],@x[6]
        rev     @x[8],@x[8]
        rev     @x[10],@x[10]
        rev     @x[12],@x[12]
        rev     @x[14],@x[14]
#endif
        ld1.8   {$T0-$T3},[$inp],#64
        eor     @x[0],@x[0],@x[1]
        eor     @x[2],@x[2],@x[3]
        eor     @x[4],@x[4],@x[5]
        eor     @x[6],@x[6],@x[7]
        eor     @x[8],@x[8],@x[9]
         eor    $A0,$A0,$T0
        eor     @x[10],@x[10],@x[11]
         eor    $B0,$B0,$T1
        eor     @x[12],@x[12],@x[13]
         eor    $C0,$C0,$T2
        eor     @x[14],@x[14],@x[15]
         eor    $D0,$D0,$T3
         ld1.8  {$T0-$T3},[$inp],#64

        stp     @x[0],@x[2],[$out,#0]           // store output
         add    @d[6],@d[6],#7                  // increment counter
        stp     @x[4],@x[6],[$out,#16]
        stp     @x[8],@x[10],[$out,#32]
        stp     @x[12],@x[14],[$out,#48]
        add     $out,$out,#64
        st1.8   {$A0-$D0},[$out],#64

        ld1.8   {$A0-$D0},[$inp],#64
        eor     $A1,$A1,$T0
        eor     $B1,$B1,$T1
        eor     $C1,$C1,$T2
        eor     $D1,$D1,$T3
        st1.8   {$A1-$D1},[$out],#64

        ld1.8   {$A1-$D1},[$inp],#64
        eor     $A2,$A2,$A0
         ldp    @K[0],@K[1],[sp,#0]
        eor     $B2,$B2,$B0
         ldp    @K[2],@K[3],[sp,#32]
        eor     $C2,$C2,$C0
        eor     $D2,$D2,$D0
        st1.8   {$A2-$D2},[$out],#64

        ld1.8   {$A2-$D2},[$inp],#64
        eor     $A3,$A3,$A1
        eor     $B3,$B3,$B1
        eor     $C3,$C3,$C1
        eor     $D3,$D3,$D1
        st1.8   {$A3-$D3},[$out],#64

        ld1.8   {$A3-$D3},[$inp],#64
        eor     $A4,$A4,$A2
        eor     $B4,$B4,$B2
        eor     $C4,$C4,$C2
        eor     $D4,$D4,$D2
        st1.8   {$A4-$D4},[$out],#64

        shl     $A0,$ONE,#1                     // 4 -> 8
        eor     $A5,$A5,$A3
        eor     $B5,$B5,$B3
        eor     $C5,$C5,$C3
        eor     $D5,$D5,$D3
        st1.8   {$A5-$D5},[$out],#64

        add     @K[3],@K[3],$A0                 // += 8
        add     @K[4],@K[4],$A0
        add     @K[5],@K[5],$A0
        add     @K[6],@K[6],$A0

        b.hs    .Loop_outer_512_neon

        adds    $len,$len,#512
        ushr    $ONE,$ONE,#1                    // 4 -> 2

        ldp     d10,d11,[sp,#128+16]            // meet ABI requirements
        ldp     d12,d13,[sp,#128+32]
        ldp     d14,d15,[sp,#128+48]

        stp     @K[0],@K[0],[sp,#0]             // wipe off-load area
        stp     @K[0],@K[0],[sp,#32]
        stp     @K[0],@K[0],[sp,#64]

        b.eq    .Ldone_512_neon

        sub     $key,$key,#16                   // .Lone
        cmp     $len,#192
        add     sp,sp,#128
        sub     @K[3],@K[3],$ONE                // -= 2
        ld1     {$CTR,$ROT24},[$key]
        b.hs    .Loop_outer_neon

        ldp     d8,d9,[sp,#0]                   // meet ABI requirements
        eor     @K[1],@K[1],@K[1]
        eor     @K[2],@K[2],@K[2]
        eor     @K[3],@K[3],@K[3]
        eor     @K[4],@K[4],@K[4]
        eor     @K[5],@K[5],@K[5]
        eor     @K[6],@K[6],@K[6]
        b       .Loop_outer

.Ldone_512_neon:
        ldp     d8,d9,[sp,#128+0]               // meet ABI requirements
        ldp     x19,x20,[x29,#16]
        add     sp,sp,#128+64
        ldp     x21,x22,[x29,#32]
        ldp     x23,x24,[x29,#48]
        ldp     x25,x26,[x29,#64]
        ldp     x27,x28,[x29,#80]
        ldp     x29,x30,[sp],#96
        .inst   0xd50323bf                      // autiasp
        ret
.size   ChaCha20_512_neon,.-ChaCha20_512_neon
___
1274 }
1275 }}}
1276
# Post-process the accumulated perlasm in $code: expand `...` constant
# expressions, then lower the pseudo-mnemonics used above into real
# AArch64 syntax and print.  The substitution chain is order-sensitive —
# each line is claimed by the first alternative that matches it.
foreach (split("\n",$code)) {
        s/\`([^\`]*)\`/eval $1/geo;

        (s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1))   or      # op.32 -> 32-bit W regs
        (m/\b(eor|ext|mov|tbl)\b/ and (s/\.4s/\.16b/g or 1))    or      # bitwise ops: .4s -> .16b
        (s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1))   or      # ld1.8/st1.8: byte element form
        (m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1))   or      # ldr/ldp/str/stp: Q-reg operands
        (m/\b(dup|ld1)\b/ and (s/\.4(s}?\[[0-3]\])/.$1/g or 1)) or      # lane forms: .4s[i] -> .s[i]
        (s/\b(zip[12])\.64\b/$1/ and (s/\.4s/\.2d/g or 1))      or      # zip.64: .4s -> .2d
        (s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));              # rev32.16: .4s -> .8h

        #s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

        print $_,"\n";
}
# Check close: STDOUT is a buffered write handle, so a full disk or broken
# pipe may only surface here; the bare "close STDOUT" silently dropped it.
close STDOUT or die "error closing STDOUT: $!";