1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # November 2014
18 #
19 # ChaCha20 for x86_64.
20 #
21 # December 2016
22 #
23 # Add AVX512F code path.
24 #
25 # Performance in cycles per byte out of large buffer.
26 #
27 #               IALU/gcc 4.8(i) 1xSSSE3/SSE2    4xSSSE3     8xAVX2
28 #
29 # P4            9.48/+99%       -/22.7(ii)      -
30 # Core2         7.83/+55%       7.90/8.08       4.35
31 # Westmere      7.19/+50%       5.60/6.70       3.00
32 # Sandy Bridge  8.31/+42%       5.45/6.76       2.72
33 # Ivy Bridge    6.71/+46%       5.40/6.49       2.41
34 # Haswell       5.92/+43%       5.20/6.45       2.42        1.23
35 # Skylake       5.87/+39%       4.70/-          2.31        1.19
36 # Silvermont    12.0/+33%       7.75/7.40       7.03(iii)
37 # Goldmont      10.6/+17%       5.10/-          3.28
38 # Sledgehammer  7.28/+52%       -/14.2(ii)      -
39 # Bulldozer     9.66/+28%       9.85/11.1       3.06(iv)
40 # VIA Nano      10.5/+46%       6.72/8.60       6.05
41 #
42 # (i)   compared to older gcc 3.x one can observe >2x improvement on
43 #       most platforms;
44 # (ii)  as can be seen, SSE2 performance is too low on legacy
45 #       processors; NxSSE2 results are naturally better, but not
46 #       impressively better than IALU ones, which is why you won't
47 #       find SSE2 code below;
48 # (iii) this is not an optimal result for Atom because of MSROM
49 #       limitations; SSE2 can do better, but the gain is considered too
50 #       low to justify the [maintenance] effort;
51 # (iv)  Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
52
53 $flavour = shift;
54 $output  = shift;
55 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
56
57 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
58
59 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
60 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
61 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
62 die "can't locate x86_64-xlate.pl";
63
64 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
65                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
66         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
67 }
68
69 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
70            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
71         $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
72         $avx += 1 if ($1==2.11 && $2>=8);
73 }
74
75 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
76            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
77         $avx = ($1>=10) + ($1>=11);
78 }
79
80 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
81         $avx = ($2>=3.0) + ($2>3.0);
82 }
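# $avx gates which vector code paths are emitted below: >=1 adds the XOP
# path, >=2 the AVX2 dispatch, >=3 the AVX512F dispatch, capped by what
# the detected assembler (gas/nasm/ml64/LLVM) can handle. The IALU and
# SSSE3 paths are always emitted.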
83
84 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
85 *STDOUT=*OUT;
86
87 # input parameter block
88 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
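# For reference, the register assignment above corresponds roughly to the
# C prototype used by the callers (shown here only as a reminder of the
# calling convention, not generated by this script):
#
#   void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                       size_t len, const unsigned int key[8],
#                       const unsigned int counter[4]);
#
# i.e. a 256-bit key followed by a 128-bit counter/nonce block.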
89
90 $code.=<<___;
91 .text
92
93 .extern OPENSSL_ia32cap_P
94
95 .align  64
96 .Lzero:
97 .long   0,0,0,0
98 .Lone:
99 .long   1,0,0,0
100 .Linc:
101 .long   0,1,2,3
102 .Lfour:
103 .long   4,4,4,4
104 .Lincy:
105 .long   0,2,4,6,1,3,5,7
106 .Leight:
107 .long   8,8,8,8,8,8,8,8
108 .Lrot16:
109 .byte   0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
110 .Lrot24:
111 .byte   0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
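# .Lrot16 and .Lrot24 above are pshufb byte-permutation masks: applied to
# a register of 32-bit lanes they rotate each lane left by 16 and by 8
# bits respectively (the latter is equivalent to rotating right by 24,
# hence the label).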
112 .Lsigma:
113 .asciz  "expand 32-byte k"
114 .align  64
115 .Lzeroz:
116 .long   0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
117 .Lfourz:
118 .long   4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
119 .Lincz:
120 .long   0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
121 .Lsixteen:
122 .long   16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
123 .asciz  "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
124 ___
125
126 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
127 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
128   my $arg = pop;
129     $arg = "\$$arg" if ($arg*1 eq $arg);
130     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
131 }
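# For example, &rol("%eax",16) appends "rol	$16,%eax" to $code:
# arguments are passed destination-first (32-bit perlasm style) and
# emitted in AT&T operand order, with numeric arguments picking up the
# immediate '$' prefix automatically.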
132
133 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
134     "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
135 @t=("%esi","%edi");
136
137 sub ROUND {                     # critical path is 24 cycles per round
138 my ($a0,$b0,$c0,$d0)=@_;
139 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
140 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
141 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
142 my ($xc,$xc_)=map("\"$_\"",@t);
143 my @x=map("\"$_\"",@x);
144
145         # Consider order in which variables are addressed by their
146         # index:
147         #
148         #       a   b   c   d
149         #
150         #       0   4   8  12 < even round
151         #       1   5   9  13
152         #       2   6  10  14
153         #       3   7  11  15
154         #       0   5  10  15 < odd round
155         #       1   6  11  12
156         #       2   7   8  13
157         #       3   4   9  14
158         #
159         # 'a', 'b' and 'd's are permanently allocated in registers,
160         # @x[0..7,12..15], while 'c's are maintained in memory. If
161         # you observe the 'c' column, you'll notice that a pair of 'c's
162         # is invariant between rounds. This means that we have to reload
163         # them once per round, in the middle. This is why you'll see a
164         # bunch of 'c' stores and loads in the middle, but none at
165         # the beginning or end.
166
167         # Normally instructions would be interleaved to favour in-order
168         # execution. Generally out-of-order cores manage it gracefully,
169         # but not this time for some reason. As in-order execution
170         # cores are a dying breed and old Atom is the only one around,
171         # instructions are left uninterleaved. Besides, Atom is better
172         # off executing the 1xSSSE3 code anyway...
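        # For reference, each group of instructions below is one standard
        # ChaCha quarter-round, interleaved two at a time:
        #
        #       a += b; d ^= a; d <<<= 16;
        #       c += d; b ^= c; b <<<= 12;
        #       a += b; d ^= a; d <<<=  8;
        #       c += d; b ^= c; b <<<=  7;
        #
        # ROUND(0,4,8,12) therefore processes the four columns of the 4x4
        # state and ROUND(0,5,10,15) the four diagonals; the other three
        # quarter-round index tuples are derived by the map() expressions
        # above, e.g. (0,4,8,12) -> (1,5,9,13) -> (2,6,10,14).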
173
174         (
175         "&add   (@x[$a0],@x[$b0])",     # Q1
176         "&xor   (@x[$d0],@x[$a0])",
177         "&rol   (@x[$d0],16)",
178          "&add  (@x[$a1],@x[$b1])",     # Q2
179          "&xor  (@x[$d1],@x[$a1])",
180          "&rol  (@x[$d1],16)",
181
182         "&add   ($xc,@x[$d0])",
183         "&xor   (@x[$b0],$xc)",
184         "&rol   (@x[$b0],12)",
185          "&add  ($xc_,@x[$d1])",
186          "&xor  (@x[$b1],$xc_)",
187          "&rol  (@x[$b1],12)",
188
189         "&add   (@x[$a0],@x[$b0])",
190         "&xor   (@x[$d0],@x[$a0])",
191         "&rol   (@x[$d0],8)",
192          "&add  (@x[$a1],@x[$b1])",
193          "&xor  (@x[$d1],@x[$a1])",
194          "&rol  (@x[$d1],8)",
195
196         "&add   ($xc,@x[$d0])",
197         "&xor   (@x[$b0],$xc)",
198         "&rol   (@x[$b0],7)",
199          "&add  ($xc_,@x[$d1])",
200          "&xor  (@x[$b1],$xc_)",
201          "&rol  (@x[$b1],7)",
202
203         "&mov   (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
204          "&mov  (\"4*$c1(%rsp)\",$xc_)",
205         "&mov   ($xc,\"4*$c2(%rsp)\")",
206          "&mov  ($xc_,\"4*$c3(%rsp)\")",
207
208         "&add   (@x[$a2],@x[$b2])",     # Q3
209         "&xor   (@x[$d2],@x[$a2])",
210         "&rol   (@x[$d2],16)",
211          "&add  (@x[$a3],@x[$b3])",     # Q4
212          "&xor  (@x[$d3],@x[$a3])",
213          "&rol  (@x[$d3],16)",
214
215         "&add   ($xc,@x[$d2])",
216         "&xor   (@x[$b2],$xc)",
217         "&rol   (@x[$b2],12)",
218          "&add  ($xc_,@x[$d3])",
219          "&xor  (@x[$b3],$xc_)",
220          "&rol  (@x[$b3],12)",
221
222         "&add   (@x[$a2],@x[$b2])",
223         "&xor   (@x[$d2],@x[$a2])",
224         "&rol   (@x[$d2],8)",
225          "&add  (@x[$a3],@x[$b3])",
226          "&xor  (@x[$d3],@x[$a3])",
227          "&rol  (@x[$d3],8)",
228
229         "&add   ($xc,@x[$d2])",
230         "&xor   (@x[$b2],$xc)",
231         "&rol   (@x[$b2],7)",
232          "&add  ($xc_,@x[$d3])",
233          "&xor  (@x[$b3],$xc_)",
234          "&rol  (@x[$b3],7)"
235         );
236 }
237
238 ########################################################################
239 # Generic code path that handles all lengths on pre-SSSE3 processors.
240 $code.=<<___;
241 .globl  ChaCha20_ctr32
242 .type   ChaCha20_ctr32,\@function,5
243 .align  64
244 ChaCha20_ctr32:
245         cmp     \$0,$len
246         je      .Lno_data
247         mov     OPENSSL_ia32cap_P+4(%rip),%r10
248 ___
249 $code.=<<___    if ($avx>2);
250         bt      \$48,%r10               # check for AVX512F
251         jc      .LChaCha20_avx512
252 ___
253 $code.=<<___;
254         test    \$`1<<(41-32)`,%r10d
255         jnz     .LChaCha20_ssse3
256
257         push    %rbx
258         push    %rbp
259         push    %r12
260         push    %r13
261         push    %r14
262         push    %r15
263         sub     \$64+24,%rsp
264 .Lctr32_body:
265
266         #movdqa .Lsigma(%rip),%xmm0
267         movdqu  ($key),%xmm1
268         movdqu  16($key),%xmm2
269         movdqu  ($counter),%xmm3
270         movdqa  .Lone(%rip),%xmm4
271
272         #movdqa %xmm0,4*0(%rsp)         # key[0]
273         movdqa  %xmm1,4*4(%rsp)         # key[1]
274         movdqa  %xmm2,4*8(%rsp)         # key[2]
275         movdqa  %xmm3,4*12(%rsp)        # key[3]
276         mov     $len,%rbp               # reassign $len
277         jmp     .Loop_outer
278
279 .align  32
280 .Loop_outer:
281         mov     \$0x61707865,@x[0]      # 'expa'
282         mov     \$0x3320646e,@x[1]      # 'nd 3'
283         mov     \$0x79622d32,@x[2]      # '2-by'
284         mov     \$0x6b206574,@x[3]      # 'te k'
285         mov     4*4(%rsp),@x[4]
286         mov     4*5(%rsp),@x[5]
287         mov     4*6(%rsp),@x[6]
288         mov     4*7(%rsp),@x[7]
289         movd    %xmm3,@x[12]
290         mov     4*13(%rsp),@x[13]
291         mov     4*14(%rsp),@x[14]
292         mov     4*15(%rsp),@x[15]
293
294         mov     %rbp,64+0(%rsp)         # save len
295         mov     \$10,%ebp
296         mov     $inp,64+8(%rsp)         # save inp
297         movq    %xmm2,%rsi              # "@x[8]"
298         mov     $out,64+16(%rsp)        # save out
299         mov     %rsi,%rdi
300         shr     \$32,%rdi               # "@x[9]"
301         jmp     .Loop
302
303 .align  32
304 .Loop:
305 ___
306         foreach (&ROUND (0, 4, 8,12)) { eval; }
307         foreach (&ROUND (0, 5,10,15)) { eval; }
308         &dec    ("%ebp");
309         &jnz    (".Loop");
310
311 $code.=<<___;
312         mov     @t[1],4*9(%rsp)         # modulo-scheduled
313         mov     @t[0],4*8(%rsp)
314         mov     64(%rsp),%rbp           # load len
315         movdqa  %xmm2,%xmm1
316         mov     64+8(%rsp),$inp         # load inp
317         paddd   %xmm4,%xmm3             # increment counter
318         mov     64+16(%rsp),$out        # load out
319
320         add     \$0x61707865,@x[0]      # 'expa'
321         add     \$0x3320646e,@x[1]      # 'nd 3'
322         add     \$0x79622d32,@x[2]      # '2-by'
323         add     \$0x6b206574,@x[3]      # 'te k'
324         add     4*4(%rsp),@x[4]
325         add     4*5(%rsp),@x[5]
326         add     4*6(%rsp),@x[6]
327         add     4*7(%rsp),@x[7]
328         add     4*12(%rsp),@x[12]
329         add     4*13(%rsp),@x[13]
330         add     4*14(%rsp),@x[14]
331         add     4*15(%rsp),@x[15]
332         paddd   4*8(%rsp),%xmm1
333
334         cmp     \$64,%rbp
335         jb      .Ltail
336
337         xor     4*0($inp),@x[0]         # xor with input
338         xor     4*1($inp),@x[1]
339         xor     4*2($inp),@x[2]
340         xor     4*3($inp),@x[3]
341         xor     4*4($inp),@x[4]
342         xor     4*5($inp),@x[5]
343         xor     4*6($inp),@x[6]
344         xor     4*7($inp),@x[7]
345         movdqu  4*8($inp),%xmm0
346         xor     4*12($inp),@x[12]
347         xor     4*13($inp),@x[13]
348         xor     4*14($inp),@x[14]
349         xor     4*15($inp),@x[15]
350         lea     4*16($inp),$inp         # inp+=64
351         pxor    %xmm1,%xmm0
352
353         movdqa  %xmm2,4*8(%rsp)
354         movd    %xmm3,4*12(%rsp)
355
356         mov     @x[0],4*0($out)         # write output
357         mov     @x[1],4*1($out)
358         mov     @x[2],4*2($out)
359         mov     @x[3],4*3($out)
360         mov     @x[4],4*4($out)
361         mov     @x[5],4*5($out)
362         mov     @x[6],4*6($out)
363         mov     @x[7],4*7($out)
364         movdqu  %xmm0,4*8($out)
365         mov     @x[12],4*12($out)
366         mov     @x[13],4*13($out)
367         mov     @x[14],4*14($out)
368         mov     @x[15],4*15($out)
369         lea     4*16($out),$out         # out+=64
370
371         sub     \$64,%rbp
372         jnz     .Loop_outer
373
374         jmp     .Ldone
375
376 .align  16
377 .Ltail:
378         mov     @x[0],4*0(%rsp)
379         mov     @x[1],4*1(%rsp)
380         xor     %rbx,%rbx
381         mov     @x[2],4*2(%rsp)
382         mov     @x[3],4*3(%rsp)
383         mov     @x[4],4*4(%rsp)
384         mov     @x[5],4*5(%rsp)
385         mov     @x[6],4*6(%rsp)
386         mov     @x[7],4*7(%rsp)
387         movdqa  %xmm1,4*8(%rsp)
388         mov     @x[12],4*12(%rsp)
389         mov     @x[13],4*13(%rsp)
390         mov     @x[14],4*14(%rsp)
391         mov     @x[15],4*15(%rsp)
392
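# the 64-byte keystream block was just stored to the stack; the loop
# below xors it with the remaining input one byte at a time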
393 .Loop_tail:
394         movzb   ($inp,%rbx),%eax
395         movzb   (%rsp,%rbx),%edx
396         lea     1(%rbx),%rbx
397         xor     %edx,%eax
398         mov     %al,-1($out,%rbx)
399         dec     %rbp
400         jnz     .Loop_tail
401
402 .Ldone:
403         lea     64+24+48(%rsp),%rsi
404         mov     -48(%rsi),%r15
405         mov     -40(%rsi),%r14
406         mov     -32(%rsi),%r13
407         mov     -24(%rsi),%r12
408         mov     -16(%rsi),%rbp
409         mov     -8(%rsi),%rbx
410         lea     (%rsi),%rsp
411 .Lno_data:
412         ret
413 .size   ChaCha20_ctr32,.-ChaCha20_ctr32
414 ___
415
416 ########################################################################
417 # SSSE3 code path that handles shorter lengths
418 {
419 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
420
421 sub SSSE3ROUND {        # critical path is 20 "SIMD ticks" per round
422         &paddd  ($a,$b);
423         &pxor   ($d,$a);
424         &pshufb ($d,$rot16);
425
426         &paddd  ($c,$d);
427         &pxor   ($b,$c);
428         &movdqa ($t,$b);
429         &psrld  ($b,20);
430         &pslld  ($t,12);
431         &por    ($b,$t);
432
433         &paddd  ($a,$b);
434         &pxor   ($d,$a);
435         &pshufb ($d,$rot24);
436
437         &paddd  ($c,$d);
438         &pxor   ($b,$c);
439         &movdqa ($t,$b);
440         &psrld  ($b,25);
441         &pslld  ($t,7);
442         &por    ($b,$t);
443 }
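# Note that SSE has no packed rotate instruction: the 12- and 7-bit
# rotations above are emulated with pslld/psrld/por, while the 16- and
# 8-bit ones are done with a single pshufb through the .Lrot16/.Lrot24
# masks.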
444
445 my $xframe = $win64 ? 32+8 : 8;
446
447 $code.=<<___;
448 .type   ChaCha20_ssse3,\@function,5
449 .align  32
450 ChaCha20_ssse3:
451 .LChaCha20_ssse3:
452         mov     %rsp,%r9                # frame pointer
453 ___
454 $code.=<<___    if ($avx);
455         test    \$`1<<(43-32)`,%r10d
456         jnz     .LChaCha20_4xop         # XOP is fastest even if we use 1/4
457 ___
458 $code.=<<___;
459         cmp     \$128,$len              # we might throw away some data,
460         ja      .LChaCha20_4x           # but overall it won't be slower
461
462 .Ldo_sse3_after_all:
463         sub     \$64+$xframe,%rsp
464 ___
465 $code.=<<___    if ($win64);
466         movaps  %xmm6,-0x28(%r9)
467         movaps  %xmm7,-0x18(%r9)
468 .Lssse3_body:
469 ___
470 $code.=<<___;
471         movdqa  .Lsigma(%rip),$a
472         movdqu  ($key),$b
473         movdqu  16($key),$c
474         movdqu  ($counter),$d
475         movdqa  .Lrot16(%rip),$rot16
476         movdqa  .Lrot24(%rip),$rot24
477
478         movdqa  $a,0x00(%rsp)
479         movdqa  $b,0x10(%rsp)
480         movdqa  $c,0x20(%rsp)
481         movdqa  $d,0x30(%rsp)
482         mov     \$10,$counter           # reuse $counter
483         jmp     .Loop_ssse3
484
485 .align  32
486 .Loop_outer_ssse3:
487         movdqa  .Lone(%rip),$d
488         movdqa  0x00(%rsp),$a
489         movdqa  0x10(%rsp),$b
490         movdqa  0x20(%rsp),$c
491         paddd   0x30(%rsp),$d
492         mov     \$10,$counter
493         movdqa  $d,0x30(%rsp)
494         jmp     .Loop_ssse3
495
496 .align  32
497 .Loop_ssse3:
498 ___
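        # The pshufd shuffles below rotate the 'b', 'c' and 'd' lanes so
        # that the second SSSE3ROUND operates on the diagonals of the
        # state; the complementary shuffles after it restore column order.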
499         &SSSE3ROUND();
500         &pshufd ($c,$c,0b01001110);
501         &pshufd ($b,$b,0b00111001);
502         &pshufd ($d,$d,0b10010011);
503         &nop    ();
504
505         &SSSE3ROUND();
506         &pshufd ($c,$c,0b01001110);
507         &pshufd ($b,$b,0b10010011);
508         &pshufd ($d,$d,0b00111001);
509
510         &dec    ($counter);
511         &jnz    (".Loop_ssse3");
512
513 $code.=<<___;
514         paddd   0x00(%rsp),$a
515         paddd   0x10(%rsp),$b
516         paddd   0x20(%rsp),$c
517         paddd   0x30(%rsp),$d
518
519         cmp     \$64,$len
520         jb      .Ltail_ssse3
521
522         movdqu  0x00($inp),$t
523         movdqu  0x10($inp),$t1
524         pxor    $t,$a                   # xor with input
525         movdqu  0x20($inp),$t
526         pxor    $t1,$b
527         movdqu  0x30($inp),$t1
528         lea     0x40($inp),$inp         # inp+=64
529         pxor    $t,$c
530         pxor    $t1,$d
531
532         movdqu  $a,0x00($out)           # write output
533         movdqu  $b,0x10($out)
534         movdqu  $c,0x20($out)
535         movdqu  $d,0x30($out)
536         lea     0x40($out),$out         # out+=64
537
538         sub     \$64,$len
539         jnz     .Loop_outer_ssse3
540
541         jmp     .Ldone_ssse3
542
543 .align  16
544 .Ltail_ssse3:
545         movdqa  $a,0x00(%rsp)
546         movdqa  $b,0x10(%rsp)
547         movdqa  $c,0x20(%rsp)
548         movdqa  $d,0x30(%rsp)
549         xor     $counter,$counter
550
551 .Loop_tail_ssse3:
552         movzb   ($inp,$counter),%eax
553         movzb   (%rsp,$counter),%ecx
554         lea     1($counter),$counter
555         xor     %ecx,%eax
556         mov     %al,-1($out,$counter)
557         dec     $len
558         jnz     .Loop_tail_ssse3
559
560 .Ldone_ssse3:
561 ___
562 $code.=<<___    if ($win64);
563         movaps  -0x28(%r9),%xmm6
564         movaps  -0x18(%r9),%xmm7
565 ___
566 $code.=<<___;
567         lea     (%r9),%rsp
568 .Lssse3_epilogue:
569         ret
570 .size   ChaCha20_ssse3,.-ChaCha20_ssse3
571 ___
572 }
573
574 ########################################################################
575 # SSSE3 code path that handles longer messages.
576 {
577 # assign variables to favor Atom front-end
578 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
579     $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
580 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
581         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
582
583 sub SSSE3_lane_ROUND {
584 my ($a0,$b0,$c0,$d0)=@_;
585 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
586 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
587 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
588 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
589 my @x=map("\"$_\"",@xx);
590
591         # Consider order in which variables are addressed by their
592         # index:
593         #
594         #       a   b   c   d
595         #
596         #       0   4   8  12 < even round
597         #       1   5   9  13
598         #       2   6  10  14
599         #       3   7  11  15
600         #       0   5  10  15 < odd round
601         #       1   6  11  12
602         #       2   7   8  13
603         #       3   4   9  14
604         #
605         # 'a', 'b' and 'd's are permanently allocated in registers,
606         # @x[0..7,12..15], while 'c's are maintained in memory. If
607         # you observe the 'c' column, you'll notice that a pair of 'c's
608         # is invariant between rounds. This means that we have to reload
609         # them once per round, in the middle. This is why you'll see a
610         # bunch of 'c' stores and loads in the middle, but none at
611         # the beginning or end.
612
613         (
614         "&paddd         (@x[$a0],@x[$b0])",     # Q1
615          "&paddd        (@x[$a1],@x[$b1])",     # Q2
616         "&pxor          (@x[$d0],@x[$a0])",
617          "&pxor         (@x[$d1],@x[$a1])",
618         "&pshufb        (@x[$d0],$t1)",
619          "&pshufb       (@x[$d1],$t1)",
620
621         "&paddd         ($xc,@x[$d0])",
622          "&paddd        ($xc_,@x[$d1])",
623         "&pxor          (@x[$b0],$xc)",
624          "&pxor         (@x[$b1],$xc_)",
625         "&movdqa        ($t0,@x[$b0])",
626         "&pslld         (@x[$b0],12)",
627         "&psrld         ($t0,20)",
628          "&movdqa       ($t1,@x[$b1])",
629          "&pslld        (@x[$b1],12)",
630         "&por           (@x[$b0],$t0)",
631          "&psrld        ($t1,20)",
632         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
633          "&por          (@x[$b1],$t1)",
634
635         "&paddd         (@x[$a0],@x[$b0])",
636          "&paddd        (@x[$a1],@x[$b1])",
637         "&pxor          (@x[$d0],@x[$a0])",
638          "&pxor         (@x[$d1],@x[$a1])",
639         "&pshufb        (@x[$d0],$t0)",
640          "&pshufb       (@x[$d1],$t0)",
641
642         "&paddd         ($xc,@x[$d0])",
643          "&paddd        ($xc_,@x[$d1])",
644         "&pxor          (@x[$b0],$xc)",
645          "&pxor         (@x[$b1],$xc_)",
646         "&movdqa        ($t1,@x[$b0])",
647         "&pslld         (@x[$b0],7)",
648         "&psrld         ($t1,25)",
649          "&movdqa       ($t0,@x[$b1])",
650          "&pslld        (@x[$b1],7)",
651         "&por           (@x[$b0],$t1)",
652          "&psrld        ($t0,25)",
653         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
654          "&por          (@x[$b1],$t0)",
655
656         "&movdqa        (\"`16*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
657          "&movdqa       (\"`16*($c1-8)`(%rsp)\",$xc_)",
658         "&movdqa        ($xc,\"`16*($c2-8)`(%rsp)\")",
659          "&movdqa       ($xc_,\"`16*($c3-8)`(%rsp)\")",
660
661         "&paddd         (@x[$a2],@x[$b2])",     # Q3
662          "&paddd        (@x[$a3],@x[$b3])",     # Q4
663         "&pxor          (@x[$d2],@x[$a2])",
664          "&pxor         (@x[$d3],@x[$a3])",
665         "&pshufb        (@x[$d2],$t1)",
666          "&pshufb       (@x[$d3],$t1)",
667
668         "&paddd         ($xc,@x[$d2])",
669          "&paddd        ($xc_,@x[$d3])",
670         "&pxor          (@x[$b2],$xc)",
671          "&pxor         (@x[$b3],$xc_)",
672         "&movdqa        ($t0,@x[$b2])",
673         "&pslld         (@x[$b2],12)",
674         "&psrld         ($t0,20)",
675          "&movdqa       ($t1,@x[$b3])",
676          "&pslld        (@x[$b3],12)",
677         "&por           (@x[$b2],$t0)",
678          "&psrld        ($t1,20)",
679         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
680          "&por          (@x[$b3],$t1)",
681
682         "&paddd         (@x[$a2],@x[$b2])",
683          "&paddd        (@x[$a3],@x[$b3])",
684         "&pxor          (@x[$d2],@x[$a2])",
685          "&pxor         (@x[$d3],@x[$a3])",
686         "&pshufb        (@x[$d2],$t0)",
687          "&pshufb       (@x[$d3],$t0)",
688
689         "&paddd         ($xc,@x[$d2])",
690          "&paddd        ($xc_,@x[$d3])",
691         "&pxor          (@x[$b2],$xc)",
692          "&pxor         (@x[$b3],$xc_)",
693         "&movdqa        ($t1,@x[$b2])",
694         "&pslld         (@x[$b2],7)",
695         "&psrld         ($t1,25)",
696          "&movdqa       ($t0,@x[$b3])",
697          "&pslld        (@x[$b3],7)",
698         "&por           (@x[$b2],$t1)",
699          "&psrld        ($t0,25)",
700         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
701          "&por          (@x[$b3],$t0)"
702         );
703 }
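# Unlike the 1x path there are no spare xmm registers for the rot16/rot24
# masks here, so they are reloaded from memory mid-round through the
# .Lrot16/.Lrot24 pointers kept in %r10/%r11.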
704
705 my $xframe = $win64 ? 0xa8 : 8;
706
707 $code.=<<___;
708 .type   ChaCha20_4x,\@function,5
709 .align  32
710 ChaCha20_4x:
711 .LChaCha20_4x:
712         mov             %rsp,%r9                # frame pointer
713         mov             %r10,%r11
714 ___
715 $code.=<<___    if ($avx>1);
716         shr             \$32,%r10               # OPENSSL_ia32cap_P+8
717         test            \$`1<<5`,%r10           # test AVX2
718         jnz             .LChaCha20_8x
719 ___
720 $code.=<<___;
721         cmp             \$192,$len
722         ja              .Lproceed4x
723
724         and             \$`1<<26|1<<22`,%r11    # isolate XSAVE+MOVBE
725         cmp             \$`1<<22`,%r11          # check for MOVBE without XSAVE
726         je              .Ldo_sse3_after_all     # to detect Atom
727
728 .Lproceed4x:
729         sub             \$0x140+$xframe,%rsp
730 ___
731         ################ stack layout
732         # +0x00         SIMD equivalent of @x[8-12]
733         # ...
734         # +0x40         constant copy of key[0-2] smashed by lanes
735         # ...
736         # +0x100        SIMD counters (with nonce smashed by lanes)
737         # ...
738         # +0x140
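        # In this 4x path each xmm register holds one state word for four
        # independent 64-byte blocks ("smashed by lanes"), so four blocks
        # are computed in parallel; the results are transposed back
        # ("de-interlaced") before being xored with the input.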
739 $code.=<<___    if ($win64);
740         movaps          %xmm6,-0xa8(%r9)
741         movaps          %xmm7,-0x98(%r9)
742         movaps          %xmm8,-0x88(%r9)
743         movaps          %xmm9,-0x78(%r9)
744         movaps          %xmm10,-0x68(%r9)
745         movaps          %xmm11,-0x58(%r9)
746         movaps          %xmm12,-0x48(%r9)
747         movaps          %xmm13,-0x38(%r9)
748         movaps          %xmm14,-0x28(%r9)
749         movaps          %xmm15,-0x18(%r9)
750 .L4x_body:
751 ___
752 $code.=<<___;
753         movdqa          .Lsigma(%rip),$xa3      # key[0]
754         movdqu          ($key),$xb3             # key[1]
755         movdqu          16($key),$xt3           # key[2]
756         movdqu          ($counter),$xd3         # key[3]
757         lea             0x100(%rsp),%rcx        # size optimization
758         lea             .Lrot16(%rip),%r10
759         lea             .Lrot24(%rip),%r11
760
761         pshufd          \$0x00,$xa3,$xa0        # smash key by lanes...
762         pshufd          \$0x55,$xa3,$xa1
763         movdqa          $xa0,0x40(%rsp)         # ... and offload
764         pshufd          \$0xaa,$xa3,$xa2
765         movdqa          $xa1,0x50(%rsp)
766         pshufd          \$0xff,$xa3,$xa3
767         movdqa          $xa2,0x60(%rsp)
768         movdqa          $xa3,0x70(%rsp)
769
770         pshufd          \$0x00,$xb3,$xb0
771         pshufd          \$0x55,$xb3,$xb1
772         movdqa          $xb0,0x80-0x100(%rcx)
773         pshufd          \$0xaa,$xb3,$xb2
774         movdqa          $xb1,0x90-0x100(%rcx)
775         pshufd          \$0xff,$xb3,$xb3
776         movdqa          $xb2,0xa0-0x100(%rcx)
777         movdqa          $xb3,0xb0-0x100(%rcx)
778
779         pshufd          \$0x00,$xt3,$xt0        # "$xc0"
780         pshufd          \$0x55,$xt3,$xt1        # "$xc1"
781         movdqa          $xt0,0xc0-0x100(%rcx)
782         pshufd          \$0xaa,$xt3,$xt2        # "$xc2"
783         movdqa          $xt1,0xd0-0x100(%rcx)
784         pshufd          \$0xff,$xt3,$xt3        # "$xc3"
785         movdqa          $xt2,0xe0-0x100(%rcx)
786         movdqa          $xt3,0xf0-0x100(%rcx)
787
788         pshufd          \$0x00,$xd3,$xd0
789         pshufd          \$0x55,$xd3,$xd1
790         paddd           .Linc(%rip),$xd0        # don't save counters yet
791         pshufd          \$0xaa,$xd3,$xd2
792         movdqa          $xd1,0x110-0x100(%rcx)
793         pshufd          \$0xff,$xd3,$xd3
794         movdqa          $xd2,0x120-0x100(%rcx)
795         movdqa          $xd3,0x130-0x100(%rcx)
796
797         jmp             .Loop_enter4x
798
799 .align  32
800 .Loop_outer4x:
801         movdqa          0x40(%rsp),$xa0         # re-load smashed key
802         movdqa          0x50(%rsp),$xa1
803         movdqa          0x60(%rsp),$xa2
804         movdqa          0x70(%rsp),$xa3
805         movdqa          0x80-0x100(%rcx),$xb0
806         movdqa          0x90-0x100(%rcx),$xb1
807         movdqa          0xa0-0x100(%rcx),$xb2
808         movdqa          0xb0-0x100(%rcx),$xb3
809         movdqa          0xc0-0x100(%rcx),$xt0   # "$xc0"
810         movdqa          0xd0-0x100(%rcx),$xt1   # "$xc1"
811         movdqa          0xe0-0x100(%rcx),$xt2   # "$xc2"
812         movdqa          0xf0-0x100(%rcx),$xt3   # "$xc3"
813         movdqa          0x100-0x100(%rcx),$xd0
814         movdqa          0x110-0x100(%rcx),$xd1
815         movdqa          0x120-0x100(%rcx),$xd2
816         movdqa          0x130-0x100(%rcx),$xd3
817         paddd           .Lfour(%rip),$xd0       # next SIMD counters
818
819 .Loop_enter4x:
820         movdqa          $xt2,0x20(%rsp)         # SIMD equivalent of "@x[10]"
821         movdqa          $xt3,0x30(%rsp)         # SIMD equivalent of "@x[11]"
822         movdqa          (%r10),$xt3             # .Lrot16(%rip)
823         mov             \$10,%eax
824         movdqa          $xd0,0x100-0x100(%rcx)  # save SIMD counters
825         jmp             .Loop4x
826
827 .align  32
828 .Loop4x:
829 ___
830         foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
831         foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
832 $code.=<<___;
833         dec             %eax
834         jnz             .Loop4x
835
836         paddd           0x40(%rsp),$xa0         # accumulate key material
837         paddd           0x50(%rsp),$xa1
838         paddd           0x60(%rsp),$xa2
839         paddd           0x70(%rsp),$xa3
840
841         movdqa          $xa0,$xt2               # "de-interlace" data
842         punpckldq       $xa1,$xa0
843         movdqa          $xa2,$xt3
844         punpckldq       $xa3,$xa2
845         punpckhdq       $xa1,$xt2
846         punpckhdq       $xa3,$xt3
847         movdqa          $xa0,$xa1
848         punpcklqdq      $xa2,$xa0               # "a0"
849         movdqa          $xt2,$xa3
850         punpcklqdq      $xt3,$xt2               # "a2"
851         punpckhqdq      $xa2,$xa1               # "a1"
852         punpckhqdq      $xt3,$xa3               # "a3"
853 ___
854         ($xa2,$xt2)=($xt2,$xa2);
855 $code.=<<___;
856         paddd           0x80-0x100(%rcx),$xb0
857         paddd           0x90-0x100(%rcx),$xb1
858         paddd           0xa0-0x100(%rcx),$xb2
859         paddd           0xb0-0x100(%rcx),$xb3
860
861         movdqa          $xa0,0x00(%rsp)         # offload $xaN
862         movdqa          $xa1,0x10(%rsp)
863         movdqa          0x20(%rsp),$xa0         # "xc2"
864         movdqa          0x30(%rsp),$xa1         # "xc3"
865
866         movdqa          $xb0,$xt2
867         punpckldq       $xb1,$xb0
868         movdqa          $xb2,$xt3
869         punpckldq       $xb3,$xb2
870         punpckhdq       $xb1,$xt2
871         punpckhdq       $xb3,$xt3
872         movdqa          $xb0,$xb1
873         punpcklqdq      $xb2,$xb0               # "b0"
874         movdqa          $xt2,$xb3
875         punpcklqdq      $xt3,$xt2               # "b2"
876         punpckhqdq      $xb2,$xb1               # "b1"
877         punpckhqdq      $xt3,$xb3               # "b3"
878 ___
879         ($xb2,$xt2)=($xt2,$xb2);
880         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
881 $code.=<<___;
882         paddd           0xc0-0x100(%rcx),$xc0
883         paddd           0xd0-0x100(%rcx),$xc1
884         paddd           0xe0-0x100(%rcx),$xc2
885         paddd           0xf0-0x100(%rcx),$xc3
886
887         movdqa          $xa2,0x20(%rsp)         # keep offloading $xaN
888         movdqa          $xa3,0x30(%rsp)
889
890         movdqa          $xc0,$xt2
891         punpckldq       $xc1,$xc0
892         movdqa          $xc2,$xt3
893         punpckldq       $xc3,$xc2
894         punpckhdq       $xc1,$xt2
895         punpckhdq       $xc3,$xt3
896         movdqa          $xc0,$xc1
897         punpcklqdq      $xc2,$xc0               # "c0"
898         movdqa          $xt2,$xc3
899         punpcklqdq      $xt3,$xt2               # "c2"
900         punpckhqdq      $xc2,$xc1               # "c1"
901         punpckhqdq      $xt3,$xc3               # "c3"
902 ___
903         ($xc2,$xt2)=($xt2,$xc2);
904         ($xt0,$xt1)=($xa2,$xa3);                # use $xaN as temporary
905 $code.=<<___;
906         paddd           0x100-0x100(%rcx),$xd0
907         paddd           0x110-0x100(%rcx),$xd1
908         paddd           0x120-0x100(%rcx),$xd2
909         paddd           0x130-0x100(%rcx),$xd3
910
911         movdqa          $xd0,$xt2
912         punpckldq       $xd1,$xd0
913         movdqa          $xd2,$xt3
914         punpckldq       $xd3,$xd2
915         punpckhdq       $xd1,$xt2
916         punpckhdq       $xd3,$xt3
917         movdqa          $xd0,$xd1
918         punpcklqdq      $xd2,$xd0               # "d0"
919         movdqa          $xt2,$xd3
920         punpcklqdq      $xt3,$xt2               # "d2"
921         punpckhqdq      $xd2,$xd1               # "d1"
922         punpckhqdq      $xt3,$xd3               # "d3"
923 ___
924         ($xd2,$xt2)=($xt2,$xd2);
925 $code.=<<___;
926         cmp             \$64*4,$len
927         jb              .Ltail4x
928
929         movdqu          0x00($inp),$xt0         # xor with input
930         movdqu          0x10($inp),$xt1
931         movdqu          0x20($inp),$xt2
932         movdqu          0x30($inp),$xt3
933         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
934         pxor            $xb0,$xt1
935         pxor            $xc0,$xt2
936         pxor            $xd0,$xt3
937
938          movdqu         $xt0,0x00($out)
939         movdqu          0x40($inp),$xt0
940          movdqu         $xt1,0x10($out)
941         movdqu          0x50($inp),$xt1
942          movdqu         $xt2,0x20($out)
943         movdqu          0x60($inp),$xt2
944          movdqu         $xt3,0x30($out)
945         movdqu          0x70($inp),$xt3
946         lea             0x80($inp),$inp         # size optimization
947         pxor            0x10(%rsp),$xt0
948         pxor            $xb1,$xt1
949         pxor            $xc1,$xt2
950         pxor            $xd1,$xt3
951
952          movdqu         $xt0,0x40($out)
953         movdqu          0x00($inp),$xt0
954          movdqu         $xt1,0x50($out)
955         movdqu          0x10($inp),$xt1
956          movdqu         $xt2,0x60($out)
957         movdqu          0x20($inp),$xt2
958          movdqu         $xt3,0x70($out)
959          lea            0x80($out),$out         # size optimization
960         movdqu          0x30($inp),$xt3
961         pxor            0x20(%rsp),$xt0
962         pxor            $xb2,$xt1
963         pxor            $xc2,$xt2
964         pxor            $xd2,$xt3
965
966          movdqu         $xt0,0x00($out)
967         movdqu          0x40($inp),$xt0
968          movdqu         $xt1,0x10($out)
969         movdqu          0x50($inp),$xt1
970          movdqu         $xt2,0x20($out)
971         movdqu          0x60($inp),$xt2
972          movdqu         $xt3,0x30($out)
973         movdqu          0x70($inp),$xt3
974         lea             0x80($inp),$inp         # inp+=64*4
975         pxor            0x30(%rsp),$xt0
976         pxor            $xb3,$xt1
977         pxor            $xc3,$xt2
978         pxor            $xd3,$xt3
979         movdqu          $xt0,0x40($out)
980         movdqu          $xt1,0x50($out)
981         movdqu          $xt2,0x60($out)
982         movdqu          $xt3,0x70($out)
983         lea             0x80($out),$out         # out+=64*4
984
985         sub             \$64*4,$len
986         jnz             .Loop_outer4x
987
988         jmp             .Ldone4x
989
990 .Ltail4x:
991         cmp             \$192,$len
992         jae             .L192_or_more4x
993         cmp             \$128,$len
994         jae             .L128_or_more4x
995         cmp             \$64,$len
996         jae             .L64_or_more4x
997
998         #movdqa         0x00(%rsp),$xt0         # $xaN is offloaded, remember?
999         xor             %r10,%r10
1000         #movdqa         $xt0,0x00(%rsp)
1001         movdqa          $xb0,0x10(%rsp)
1002         movdqa          $xc0,0x20(%rsp)
1003         movdqa          $xd0,0x30(%rsp)
1004         jmp             .Loop_tail4x
1005
1006 .align  32
1007 .L64_or_more4x:
1008         movdqu          0x00($inp),$xt0         # xor with input
1009         movdqu          0x10($inp),$xt1
1010         movdqu          0x20($inp),$xt2
1011         movdqu          0x30($inp),$xt3
1012         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1013         pxor            $xb0,$xt1
1014         pxor            $xc0,$xt2
1015         pxor            $xd0,$xt3
1016         movdqu          $xt0,0x00($out)
1017         movdqu          $xt1,0x10($out)
1018         movdqu          $xt2,0x20($out)
1019         movdqu          $xt3,0x30($out)
1020         je              .Ldone4x
1021
1022         movdqa          0x10(%rsp),$xt0         # $xaN is offloaded, remember?
1023         lea             0x40($inp),$inp         # inp+=64*1
1024         xor             %r10,%r10
1025         movdqa          $xt0,0x00(%rsp)
1026         movdqa          $xb1,0x10(%rsp)
1027         lea             0x40($out),$out         # out+=64*1
1028         movdqa          $xc1,0x20(%rsp)
1029         sub             \$64,$len               # len-=64*1
1030         movdqa          $xd1,0x30(%rsp)
1031         jmp             .Loop_tail4x
1032
1033 .align  32
1034 .L128_or_more4x:
1035         movdqu          0x00($inp),$xt0         # xor with input
1036         movdqu          0x10($inp),$xt1
1037         movdqu          0x20($inp),$xt2
1038         movdqu          0x30($inp),$xt3
1039         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1040         pxor            $xb0,$xt1
1041         pxor            $xc0,$xt2
1042         pxor            $xd0,$xt3
1043
1044          movdqu         $xt0,0x00($out)
1045         movdqu          0x40($inp),$xt0
1046          movdqu         $xt1,0x10($out)
1047         movdqu          0x50($inp),$xt1
1048          movdqu         $xt2,0x20($out)
1049         movdqu          0x60($inp),$xt2
1050          movdqu         $xt3,0x30($out)
1051         movdqu          0x70($inp),$xt3
1052         pxor            0x10(%rsp),$xt0
1053         pxor            $xb1,$xt1
1054         pxor            $xc1,$xt2
1055         pxor            $xd1,$xt3
1056         movdqu          $xt0,0x40($out)
1057         movdqu          $xt1,0x50($out)
1058         movdqu          $xt2,0x60($out)
1059         movdqu          $xt3,0x70($out)
1060         je              .Ldone4x
1061
1062         movdqa          0x20(%rsp),$xt0         # $xaN is offloaded, remember?
1063         lea             0x80($inp),$inp         # inp+=64*2
1064         xor             %r10,%r10
1065         movdqa          $xt0,0x00(%rsp)
1066         movdqa          $xb2,0x10(%rsp)
1067         lea             0x80($out),$out         # out+=64*2
1068         movdqa          $xc2,0x20(%rsp)
1069         sub             \$128,$len              # len-=64*2
1070         movdqa          $xd2,0x30(%rsp)
1071         jmp             .Loop_tail4x
1072
1073 .align  32
1074 .L192_or_more4x:
1075         movdqu          0x00($inp),$xt0         # xor with input
1076         movdqu          0x10($inp),$xt1
1077         movdqu          0x20($inp),$xt2
1078         movdqu          0x30($inp),$xt3
1079         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1080         pxor            $xb0,$xt1
1081         pxor            $xc0,$xt2
1082         pxor            $xd0,$xt3
1083
1084          movdqu         $xt0,0x00($out)
1085         movdqu          0x40($inp),$xt0
1086          movdqu         $xt1,0x10($out)
1087         movdqu          0x50($inp),$xt1
1088          movdqu         $xt2,0x20($out)
1089         movdqu          0x60($inp),$xt2
1090          movdqu         $xt3,0x30($out)
1091         movdqu          0x70($inp),$xt3
1092         lea             0x80($inp),$inp         # size optimization
1093         pxor            0x10(%rsp),$xt0
1094         pxor            $xb1,$xt1
1095         pxor            $xc1,$xt2
1096         pxor            $xd1,$xt3
1097
1098          movdqu         $xt0,0x40($out)
1099         movdqu          0x00($inp),$xt0
1100          movdqu         $xt1,0x50($out)
1101         movdqu          0x10($inp),$xt1
1102          movdqu         $xt2,0x60($out)
1103         movdqu          0x20($inp),$xt2
1104          movdqu         $xt3,0x70($out)
1105          lea            0x80($out),$out         # size optimization
1106         movdqu          0x30($inp),$xt3
1107         pxor            0x20(%rsp),$xt0
1108         pxor            $xb2,$xt1
1109         pxor            $xc2,$xt2
1110         pxor            $xd2,$xt3
1111         movdqu          $xt0,0x00($out)
1112         movdqu          $xt1,0x10($out)
1113         movdqu          $xt2,0x20($out)
1114         movdqu          $xt3,0x30($out)
1115         je              .Ldone4x
1116
1117         movdqa          0x30(%rsp),$xt0         # $xaN is offloaded, remember?
1118         lea             0x40($inp),$inp         # inp+=64*3
1119         xor             %r10,%r10
1120         movdqa          $xt0,0x00(%rsp)
1121         movdqa          $xb3,0x10(%rsp)
1122         lea             0x40($out),$out         # out+=64*3
1123         movdqa          $xc3,0x20(%rsp)
1124         sub             \$192,$len              # len-=64*3
1125         movdqa          $xd3,0x30(%rsp)
1126
1127 .Loop_tail4x:
1128         movzb           ($inp,%r10),%eax
1129         movzb           (%rsp,%r10),%ecx
1130         lea             1(%r10),%r10
1131         xor             %ecx,%eax
1132         mov             %al,-1($out,%r10)
1133         dec             $len
1134         jnz             .Loop_tail4x
1135
1136 .Ldone4x:
1137 ___
1138 $code.=<<___    if ($win64);
1139         movaps          -0xa8(%r9),%xmm6
1140         movaps          -0x98(%r9),%xmm7
1141         movaps          -0x88(%r9),%xmm8
1142         movaps          -0x78(%r9),%xmm9
1143         movaps          -0x68(%r9),%xmm10
1144         movaps          -0x58(%r9),%xmm11
1145         movaps          -0x48(%r9),%xmm12
1146         movaps          -0x38(%r9),%xmm13
1147         movaps          -0x28(%r9),%xmm14
1148         movaps          -0x18(%r9),%xmm15
1149 ___
1150 $code.=<<___;
1151         lea             (%r9),%rsp
1152 .L4x_epilogue:
1153         ret
1154 .size   ChaCha20_4x,.-ChaCha20_4x
1155 ___
1156 }
1157
1158 ########################################################################
1159 # XOP code path that handles all lengths.
1160 if ($avx) {
1161 # There is some "anomaly" observed depending on instructions' size or
1162 # alignment. If you look closely at the code below you'll notice that
1163 # the argument order sometimes varies. The order affects instruction
1164 # encoding by making it larger, and such fiddling gives a 5% performance
1165 # improvement. This is on FX-4100...
1166
1167 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1168     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1169 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1170          $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1171
1172 sub XOP_lane_ROUND {
1173 my ($a0,$b0,$c0,$d0)=@_;
1174 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1175 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1176 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1177 my @x=map("\"$_\"",@xx);
1178
1179         (
1180         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1181          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1182           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1183            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1184         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1185          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1186           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1187            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1188         "&vprotd        (@x[$d0],@x[$d0],16)",
1189          "&vprotd       (@x[$d1],@x[$d1],16)",
1190           "&vprotd      (@x[$d2],@x[$d2],16)",
1191            "&vprotd     (@x[$d3],@x[$d3],16)",
1192
1193         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1194          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1195           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1196            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1197         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1198          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1199           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1200            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1201         "&vprotd        (@x[$b0],@x[$b0],12)",
1202          "&vprotd       (@x[$b1],@x[$b1],12)",
1203           "&vprotd      (@x[$b2],@x[$b2],12)",
1204            "&vprotd     (@x[$b3],@x[$b3],12)",
1205
1206         "&vpaddd        (@x[$a0],@x[$b0],@x[$a0])",     # flip
1207          "&vpaddd       (@x[$a1],@x[$b1],@x[$a1])",     # flip
1208           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
1209            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
1210         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1211          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1212           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1213            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1214         "&vprotd        (@x[$d0],@x[$d0],8)",
1215          "&vprotd       (@x[$d1],@x[$d1],8)",
1216           "&vprotd      (@x[$d2],@x[$d2],8)",
1217            "&vprotd     (@x[$d3],@x[$d3],8)",
1218
1219         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1220          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1221           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1222            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1223         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1224          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1225           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1226            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1227         "&vprotd        (@x[$b0],@x[$b0],7)",
1228          "&vprotd       (@x[$b1],@x[$b1],7)",
1229           "&vprotd      (@x[$b2],@x[$b2],7)",
1230            "&vprotd     (@x[$b3],@x[$b3],7)"
1231         );
1232 }
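# XOP provides a native packed rotate (vprotd), so no shift/por or pshufb
# emulation is needed here; this is what makes the 4xXOP path the fastest
# choice on Bulldozer (see footnote (iv) above).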
1233
1234 my $xframe = $win64 ? 0xa8 : 8;
1235
1236 $code.=<<___;
1237 .type   ChaCha20_4xop,\@function,5
1238 .align  32
1239 ChaCha20_4xop:
1240 .LChaCha20_4xop:
1241         mov             %rsp,%r9                # frame pointer
1242         sub             \$0x140+$xframe,%rsp
1243 ___
1244         ################ stack layout
1245         # +0x00         SIMD equivalent of @x[8-12]
1246         # ...
1247         # +0x40         constant copy of key[0-2] smashed by lanes
1248         # ...
1249         # +0x100        SIMD counters (with nonce smashed by lanes)
1250         # ...
1251         # +0x140
1252 $code.=<<___    if ($win64);
1253         movaps          %xmm6,-0xa8(%r9)
1254         movaps          %xmm7,-0x98(%r9)
1255         movaps          %xmm8,-0x88(%r9)
1256         movaps          %xmm9,-0x78(%r9)
1257         movaps          %xmm10,-0x68(%r9)
1258         movaps          %xmm11,-0x58(%r9)
1259         movaps          %xmm12,-0x48(%r9)
1260         movaps          %xmm13,-0x38(%r9)
1261         movaps          %xmm14,-0x28(%r9)
1262         movaps          %xmm15,-0x18(%r9)
1263 .L4xop_body:
1264 ___
1265 $code.=<<___;
1266         vzeroupper
1267
1268         vmovdqa         .Lsigma(%rip),$xa3      # key[0]
1269         vmovdqu         ($key),$xb3             # key[1]
1270         vmovdqu         16($key),$xt3           # key[2]
1271         vmovdqu         ($counter),$xd3         # key[3]
1272         lea             0x100(%rsp),%rcx        # size optimization
1273
1274         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1275         vpshufd         \$0x55,$xa3,$xa1
1276         vmovdqa         $xa0,0x40(%rsp)         # ... and offload
1277         vpshufd         \$0xaa,$xa3,$xa2
1278         vmovdqa         $xa1,0x50(%rsp)
1279         vpshufd         \$0xff,$xa3,$xa3
1280         vmovdqa         $xa2,0x60(%rsp)
1281         vmovdqa         $xa3,0x70(%rsp)
1282
1283         vpshufd         \$0x00,$xb3,$xb0
1284         vpshufd         \$0x55,$xb3,$xb1
1285         vmovdqa         $xb0,0x80-0x100(%rcx)
1286         vpshufd         \$0xaa,$xb3,$xb2
1287         vmovdqa         $xb1,0x90-0x100(%rcx)
1288         vpshufd         \$0xff,$xb3,$xb3
1289         vmovdqa         $xb2,0xa0-0x100(%rcx)
1290         vmovdqa         $xb3,0xb0-0x100(%rcx)
1291
1292         vpshufd         \$0x00,$xt3,$xt0        # "$xc0"
1293         vpshufd         \$0x55,$xt3,$xt1        # "$xc1"
1294         vmovdqa         $xt0,0xc0-0x100(%rcx)
1295         vpshufd         \$0xaa,$xt3,$xt2        # "$xc2"
1296         vmovdqa         $xt1,0xd0-0x100(%rcx)
1297         vpshufd         \$0xff,$xt3,$xt3        # "$xc3"
1298         vmovdqa         $xt2,0xe0-0x100(%rcx)
1299         vmovdqa         $xt3,0xf0-0x100(%rcx)
1300
1301         vpshufd         \$0x00,$xd3,$xd0
1302         vpshufd         \$0x55,$xd3,$xd1
1303         vpaddd          .Linc(%rip),$xd0,$xd0   # don't save counters yet
1304         vpshufd         \$0xaa,$xd3,$xd2
1305         vmovdqa         $xd1,0x110-0x100(%rcx)
1306         vpshufd         \$0xff,$xd3,$xd3
1307         vmovdqa         $xd2,0x120-0x100(%rcx)
1308         vmovdqa         $xd3,0x130-0x100(%rcx)
1309
1310         jmp             .Loop_enter4xop
1311
1312 .align  32
1313 .Loop_outer4xop:
1314         vmovdqa         0x40(%rsp),$xa0         # re-load smashed key
1315         vmovdqa         0x50(%rsp),$xa1
1316         vmovdqa         0x60(%rsp),$xa2
1317         vmovdqa         0x70(%rsp),$xa3
1318         vmovdqa         0x80-0x100(%rcx),$xb0
1319         vmovdqa         0x90-0x100(%rcx),$xb1
1320         vmovdqa         0xa0-0x100(%rcx),$xb2
1321         vmovdqa         0xb0-0x100(%rcx),$xb3
1322         vmovdqa         0xc0-0x100(%rcx),$xt0   # "$xc0"
1323         vmovdqa         0xd0-0x100(%rcx),$xt1   # "$xc1"
1324         vmovdqa         0xe0-0x100(%rcx),$xt2   # "$xc2"
1325         vmovdqa         0xf0-0x100(%rcx),$xt3   # "$xc3"
1326         vmovdqa         0x100-0x100(%rcx),$xd0
1327         vmovdqa         0x110-0x100(%rcx),$xd1
1328         vmovdqa         0x120-0x100(%rcx),$xd2
1329         vmovdqa         0x130-0x100(%rcx),$xd3
1330         vpaddd          .Lfour(%rip),$xd0,$xd0  # next SIMD counters
1331
1332 .Loop_enter4xop:
1333         mov             \$10,%eax
1334         vmovdqa         $xd0,0x100-0x100(%rcx)  # save SIMD counters
1335         jmp             .Loop4xop
1336
1337 .align  32
1338 .Loop4xop:
1339 ___
1340         foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1341         foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1342 $code.=<<___;
1343         dec             %eax
1344         jnz             .Loop4xop
1345
1346         vpaddd          0x40(%rsp),$xa0,$xa0    # accumulate key material
1347         vpaddd          0x50(%rsp),$xa1,$xa1
1348         vpaddd          0x60(%rsp),$xa2,$xa2
1349         vpaddd          0x70(%rsp),$xa3,$xa3
1350
1351         vmovdqa         $xt2,0x20(%rsp)         # offload $xc2,3
1352         vmovdqa         $xt3,0x30(%rsp)
1353
1354         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1355         vpunpckldq      $xa3,$xa2,$xt3
1356         vpunpckhdq      $xa1,$xa0,$xa0
1357         vpunpckhdq      $xa3,$xa2,$xa2
1358         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1359         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1360         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1361         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1362 ___
1363         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1364 $code.=<<___;
1365         vpaddd          0x80-0x100(%rcx),$xb0,$xb0
1366         vpaddd          0x90-0x100(%rcx),$xb1,$xb1
1367         vpaddd          0xa0-0x100(%rcx),$xb2,$xb2
1368         vpaddd          0xb0-0x100(%rcx),$xb3,$xb3
1369
1370         vmovdqa         $xa0,0x00(%rsp)         # offload $xa0,1
1371         vmovdqa         $xa1,0x10(%rsp)
1372         vmovdqa         0x20(%rsp),$xa0         # "xc2"
1373         vmovdqa         0x30(%rsp),$xa1         # "xc3"
1374
1375         vpunpckldq      $xb1,$xb0,$xt2
1376         vpunpckldq      $xb3,$xb2,$xt3
1377         vpunpckhdq      $xb1,$xb0,$xb0
1378         vpunpckhdq      $xb3,$xb2,$xb2
1379         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1380         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1381         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1382         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1383 ___
1384         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1385         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1386 $code.=<<___;
1387         vpaddd          0xc0-0x100(%rcx),$xc0,$xc0
1388         vpaddd          0xd0-0x100(%rcx),$xc1,$xc1
1389         vpaddd          0xe0-0x100(%rcx),$xc2,$xc2
1390         vpaddd          0xf0-0x100(%rcx),$xc3,$xc3
1391
1392         vpunpckldq      $xc1,$xc0,$xt2
1393         vpunpckldq      $xc3,$xc2,$xt3
1394         vpunpckhdq      $xc1,$xc0,$xc0
1395         vpunpckhdq      $xc3,$xc2,$xc2
1396         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1397         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1398         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1399         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1400 ___
1401         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1402 $code.=<<___;
1403         vpaddd          0x100-0x100(%rcx),$xd0,$xd0
1404         vpaddd          0x110-0x100(%rcx),$xd1,$xd1
1405         vpaddd          0x120-0x100(%rcx),$xd2,$xd2
1406         vpaddd          0x130-0x100(%rcx),$xd3,$xd3
1407
1408         vpunpckldq      $xd1,$xd0,$xt2
1409         vpunpckldq      $xd3,$xd2,$xt3
1410         vpunpckhdq      $xd1,$xd0,$xd0
1411         vpunpckhdq      $xd3,$xd2,$xd2
1412         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1413         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1414         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1415         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1416 ___
1417         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1418         ($xa0,$xa1)=($xt2,$xt3);
1419 $code.=<<___;
1420         vmovdqa         0x00(%rsp),$xa0         # restore $xa0,1
1421         vmovdqa         0x10(%rsp),$xa1
1422
1423         cmp             \$64*4,$len
1424         jb              .Ltail4xop
1425
1426         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1427         vpxor           0x10($inp),$xb0,$xb0
1428         vpxor           0x20($inp),$xc0,$xc0
1429         vpxor           0x30($inp),$xd0,$xd0
1430         vpxor           0x40($inp),$xa1,$xa1
1431         vpxor           0x50($inp),$xb1,$xb1
1432         vpxor           0x60($inp),$xc1,$xc1
1433         vpxor           0x70($inp),$xd1,$xd1
1434         lea             0x80($inp),$inp         # size optimization
1435         vpxor           0x00($inp),$xa2,$xa2
1436         vpxor           0x10($inp),$xb2,$xb2
1437         vpxor           0x20($inp),$xc2,$xc2
1438         vpxor           0x30($inp),$xd2,$xd2
1439         vpxor           0x40($inp),$xa3,$xa3
1440         vpxor           0x50($inp),$xb3,$xb3
1441         vpxor           0x60($inp),$xc3,$xc3
1442         vpxor           0x70($inp),$xd3,$xd3
1443         lea             0x80($inp),$inp         # inp+=64*4
1444
1445         vmovdqu         $xa0,0x00($out)
1446         vmovdqu         $xb0,0x10($out)
1447         vmovdqu         $xc0,0x20($out)
1448         vmovdqu         $xd0,0x30($out)
1449         vmovdqu         $xa1,0x40($out)
1450         vmovdqu         $xb1,0x50($out)
1451         vmovdqu         $xc1,0x60($out)
1452         vmovdqu         $xd1,0x70($out)
1453         lea             0x80($out),$out         # size optimization
1454         vmovdqu         $xa2,0x00($out)
1455         vmovdqu         $xb2,0x10($out)
1456         vmovdqu         $xc2,0x20($out)
1457         vmovdqu         $xd2,0x30($out)
1458         vmovdqu         $xa3,0x40($out)
1459         vmovdqu         $xb3,0x50($out)
1460         vmovdqu         $xc3,0x60($out)
1461         vmovdqu         $xd3,0x70($out)
1462         lea             0x80($out),$out         # out+=64*4
1463
1464         sub             \$64*4,$len
1465         jnz             .Loop_outer4xop
1466
1467         jmp             .Ldone4xop
1468
1469 .align  32
1470 .Ltail4xop:
1471         cmp             \$192,$len
1472         jae             .L192_or_more4xop
1473         cmp             \$128,$len
1474         jae             .L128_or_more4xop
1475         cmp             \$64,$len
1476         jae             .L64_or_more4xop
1477
1478         xor             %r10,%r10
1479         vmovdqa         $xa0,0x00(%rsp)
1480         vmovdqa         $xb0,0x10(%rsp)
1481         vmovdqa         $xc0,0x20(%rsp)
1482         vmovdqa         $xd0,0x30(%rsp)
1483         jmp             .Loop_tail4xop
1484
1485 .align  32
1486 .L64_or_more4xop:
1487         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1488         vpxor           0x10($inp),$xb0,$xb0
1489         vpxor           0x20($inp),$xc0,$xc0
1490         vpxor           0x30($inp),$xd0,$xd0
1491         vmovdqu         $xa0,0x00($out)
1492         vmovdqu         $xb0,0x10($out)
1493         vmovdqu         $xc0,0x20($out)
1494         vmovdqu         $xd0,0x30($out)
1495         je              .Ldone4xop
1496
1497         lea             0x40($inp),$inp         # inp+=64*1
1498         vmovdqa         $xa1,0x00(%rsp)
1499         xor             %r10,%r10
1500         vmovdqa         $xb1,0x10(%rsp)
1501         lea             0x40($out),$out         # out+=64*1
1502         vmovdqa         $xc1,0x20(%rsp)
1503         sub             \$64,$len               # len-=64*1
1504         vmovdqa         $xd1,0x30(%rsp)
1505         jmp             .Loop_tail4xop
1506
1507 .align  32
1508 .L128_or_more4xop:
1509         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1510         vpxor           0x10($inp),$xb0,$xb0
1511         vpxor           0x20($inp),$xc0,$xc0
1512         vpxor           0x30($inp),$xd0,$xd0
1513         vpxor           0x40($inp),$xa1,$xa1
1514         vpxor           0x50($inp),$xb1,$xb1
1515         vpxor           0x60($inp),$xc1,$xc1
1516         vpxor           0x70($inp),$xd1,$xd1
1517
1518         vmovdqu         $xa0,0x00($out)
1519         vmovdqu         $xb0,0x10($out)
1520         vmovdqu         $xc0,0x20($out)
1521         vmovdqu         $xd0,0x30($out)
1522         vmovdqu         $xa1,0x40($out)
1523         vmovdqu         $xb1,0x50($out)
1524         vmovdqu         $xc1,0x60($out)
1525         vmovdqu         $xd1,0x70($out)
1526         je              .Ldone4xop
1527
1528         lea             0x80($inp),$inp         # inp+=64*2
1529         vmovdqa         $xa2,0x00(%rsp)
1530         xor             %r10,%r10
1531         vmovdqa         $xb2,0x10(%rsp)
1532         lea             0x80($out),$out         # out+=64*2
1533         vmovdqa         $xc2,0x20(%rsp)
1534         sub             \$128,$len              # len-=64*2
1535         vmovdqa         $xd2,0x30(%rsp)
1536         jmp             .Loop_tail4xop
1537
1538 .align  32
1539 .L192_or_more4xop:
1540         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1541         vpxor           0x10($inp),$xb0,$xb0
1542         vpxor           0x20($inp),$xc0,$xc0
1543         vpxor           0x30($inp),$xd0,$xd0
1544         vpxor           0x40($inp),$xa1,$xa1
1545         vpxor           0x50($inp),$xb1,$xb1
1546         vpxor           0x60($inp),$xc1,$xc1
1547         vpxor           0x70($inp),$xd1,$xd1
1548         lea             0x80($inp),$inp         # size optimization
1549         vpxor           0x00($inp),$xa2,$xa2
1550         vpxor           0x10($inp),$xb2,$xb2
1551         vpxor           0x20($inp),$xc2,$xc2
1552         vpxor           0x30($inp),$xd2,$xd2
1553
1554         vmovdqu         $xa0,0x00($out)
1555         vmovdqu         $xb0,0x10($out)
1556         vmovdqu         $xc0,0x20($out)
1557         vmovdqu         $xd0,0x30($out)
1558         vmovdqu         $xa1,0x40($out)
1559         vmovdqu         $xb1,0x50($out)
1560         vmovdqu         $xc1,0x60($out)
1561         vmovdqu         $xd1,0x70($out)
1562         lea             0x80($out),$out         # size optimization
1563         vmovdqu         $xa2,0x00($out)
1564         vmovdqu         $xb2,0x10($out)
1565         vmovdqu         $xc2,0x20($out)
1566         vmovdqu         $xd2,0x30($out)
1567         je              .Ldone4xop
1568
1569         lea             0x40($inp),$inp         # inp+=64*3
1570         vmovdqa         $xa3,0x00(%rsp)
1571         xor             %r10,%r10
1572         vmovdqa         $xb3,0x10(%rsp)
1573         lea             0x40($out),$out         # out+=64*3
1574         vmovdqa         $xc3,0x20(%rsp)
1575         sub             \$192,$len              # len-=64*3
1576         vmovdqa         $xd3,0x30(%rsp)
1577
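	# xor the remaining 1..63 bytes one at a time against the keystream
	# block saved at (%rsp) just above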
1578 .Loop_tail4xop:
1579         movzb           ($inp,%r10),%eax
1580         movzb           (%rsp,%r10),%ecx
1581         lea             1(%r10),%r10
1582         xor             %ecx,%eax
1583         mov             %al,-1($out,%r10)
1584         dec             $len
1585         jnz             .Loop_tail4xop
1586
1587 .Ldone4xop:
1588         vzeroupper
1589 ___
1590 $code.=<<___    if ($win64);
1591         movaps          -0xa8(%r9),%xmm6
1592         movaps          -0x98(%r9),%xmm7
1593         movaps          -0x88(%r9),%xmm8
1594         movaps          -0x78(%r9),%xmm9
1595         movaps          -0x68(%r9),%xmm10
1596         movaps          -0x58(%r9),%xmm11
1597         movaps          -0x48(%r9),%xmm12
1598         movaps          -0x38(%r9),%xmm13
1599         movaps          -0x28(%r9),%xmm14
1600         movaps          -0x18(%r9),%xmm15
1601 ___
1602 $code.=<<___;
1603         lea             (%r9),%rsp
1604 .L4xop_epilogue:
1605         ret
1606 .size   ChaCha20_4xop,.-ChaCha20_4xop
1607 ___
1608 }
1609
1610 ########################################################################
1611 # AVX2 code path
1612 if ($avx>1) {
1613 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1614     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1615 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1616         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1617
1618 sub AVX2_lane_ROUND {
1619 my ($a0,$b0,$c0,$d0)=@_;
1620 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1621 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1622 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1623 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1624 my @x=map("\"$_\"",@xx);
1625
1626         # Consider the order in which the variables are addressed by their
1627         # index:
1628         #
1629         #       a   b   c   d
1630         #
1631         #       0   4   8  12 < even round
1632         #       1   5   9  13
1633         #       2   6  10  14
1634         #       3   7  11  15
1635         #       0   5  10  15 < odd round
1636         #       1   6  11  12
1637         #       2   7   8  13
1638         #       3   4   9  14
1639         #
1640         # The 'a', 'b' and 'd' rows are permanently allocated in registers,
1641         # @x[0..7,12..15], while the 'c' rows are maintained in memory. If
1642         # you observe the 'c' column, you'll notice that the pair of 'c's is
1643         # invariant between rounds. This means that they have to be reloaded
1644         # only once per round, in the middle. This is why you'll see a bunch
1645         # of 'c' stores and loads in the middle, but none at the beginning
1646         # or end. (A scalar sketch of the quarter-round follows this sub.)
1647
1648         (
1649         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1650         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1651         "&vpshufb       (@x[$d0],@x[$d0],$t1)",
1652          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1653          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1654          "&vpshufb      (@x[$d1],@x[$d1],$t1)",
1655
1656         "&vpaddd        ($xc,$xc,@x[$d0])",
1657         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1658         "&vpslld        ($t0,@x[$b0],12)",
1659         "&vpsrld        (@x[$b0],@x[$b0],20)",
1660         "&vpor          (@x[$b0],$t0,@x[$b0])",
1661         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1662          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1663          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1664          "&vpslld       ($t1,@x[$b1],12)",
1665          "&vpsrld       (@x[$b1],@x[$b1],20)",
1666          "&vpor         (@x[$b1],$t1,@x[$b1])",
1667
1668         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
1669         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1670         "&vpshufb       (@x[$d0],@x[$d0],$t0)",
1671          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
1672          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1673          "&vpshufb      (@x[$d1],@x[$d1],$t0)",
1674
1675         "&vpaddd        ($xc,$xc,@x[$d0])",
1676         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1677         "&vpslld        ($t1,@x[$b0],7)",
1678         "&vpsrld        (@x[$b0],@x[$b0],25)",
1679         "&vpor          (@x[$b0],$t1,@x[$b0])",
1680         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1681          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1682          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1683          "&vpslld       ($t0,@x[$b1],7)",
1684          "&vpsrld       (@x[$b1],@x[$b1],25)",
1685          "&vpor         (@x[$b1],$t0,@x[$b1])",
1686
1687         "&vmovdqa       (\"`32*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
1688          "&vmovdqa      (\"`32*($c1-8)`(%rsp)\",$xc_)",
1689         "&vmovdqa       ($xc,\"`32*($c2-8)`(%rsp)\")",
1690          "&vmovdqa      ($xc_,\"`32*($c3-8)`(%rsp)\")",
1691
1692         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1693         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1694         "&vpshufb       (@x[$d2],@x[$d2],$t1)",
1695          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1696          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1697          "&vpshufb      (@x[$d3],@x[$d3],$t1)",
1698
1699         "&vpaddd        ($xc,$xc,@x[$d2])",
1700         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1701         "&vpslld        ($t0,@x[$b2],12)",
1702         "&vpsrld        (@x[$b2],@x[$b2],20)",
1703         "&vpor          (@x[$b2],$t0,@x[$b2])",
1704         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1705          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1706          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1707          "&vpslld       ($t1,@x[$b3],12)",
1708          "&vpsrld       (@x[$b3],@x[$b3],20)",
1709          "&vpor         (@x[$b3],$t1,@x[$b3])",
1710
1711         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",
1712         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1713         "&vpshufb       (@x[$d2],@x[$d2],$t0)",
1714          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",
1715          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1716          "&vpshufb      (@x[$d3],@x[$d3],$t0)",
1717
1718         "&vpaddd        ($xc,$xc,@x[$d2])",
1719         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1720         "&vpslld        ($t1,@x[$b2],7)",
1721         "&vpsrld        (@x[$b2],@x[$b2],25)",
1722         "&vpor          (@x[$b2],$t1,@x[$b2])",
1723         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1724          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1725          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1726          "&vpslld       ($t0,@x[$b3],7)",
1727          "&vpsrld       (@x[$b3],@x[$b3],25)",
1728          "&vpor         (@x[$b3],$t0,@x[$b3])"
1729         );
1730 }
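# Illustrative only, not used by the generated code: a scalar Perl sketch
# of the quarter-round that each vpaddd/vpxor/rotate group in the round
# above applies to every 32-bit lane.  The __*_ref names are additions
# for exposition and are not part of this module.
sub __rotl32_ref { my ($v,$n)=@_; $v&=0xffffffff; (($v<<$n)|($v>>(32-$n)))&0xffffffff; }
sub __chacha_qr_ref {
my ($a,$b,$c,$d)=@_;
	$a=($a+$b)&0xffffffff;	$d=__rotl32_ref($d^$a,16);
	$c=($c+$d)&0xffffffff;	$b=__rotl32_ref($b^$c,12);
	$a=($a+$b)&0xffffffff;	$d=__rotl32_ref($d^$a,8);
	$c=($c+$d)&0xffffffff;	$b=__rotl32_ref($b^$c,7);
	($a,$b,$c,$d);
}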
1731
1732 my $xframe = $win64 ? 0xa8 : 8;
1733
1734 $code.=<<___;
1735 .type   ChaCha20_8x,\@function,5
1736 .align  32
1737 ChaCha20_8x:
1738 .LChaCha20_8x:
1739         mov             %rsp,%r9                # frame register
1740         sub             \$0x280+$xframe,%rsp
1741         and             \$-32,%rsp
1742 ___
1743 $code.=<<___    if ($win64);
1744         movaps          %xmm6,-0xa8(%r9)
1745         movaps          %xmm7,-0x98(%r9)
1746         movaps          %xmm8,-0x88(%r9)
1747         movaps          %xmm9,-0x78(%r9)
1748         movaps          %xmm10,-0x68(%r9)
1749         movaps          %xmm11,-0x58(%r9)
1750         movaps          %xmm12,-0x48(%r9)
1751         movaps          %xmm13,-0x38(%r9)
1752         movaps          %xmm14,-0x28(%r9)
1753         movaps          %xmm15,-0x18(%r9)
1754 .L8x_body:
1755 ___
1756 $code.=<<___;
1757         vzeroupper
1758
1759         ################ stack layout
1760         # +0x00         SIMD equivalent of @x[8..11]
1761         # ...
1762         # +0x80         constant copy of key[0-2] smashed by lanes
1763         # ...
1764         # +0x200        SIMD counters (with nonce smashed by lanes)
1765         # ...
1766         # +0x280
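	# (each row is one 32-byte %ymm slot: the four 'c' rows occupy
	#  +0x00..+0x60, the twelve key rows +0x80..+0x1e0 and the four
	#  counter rows +0x200..+0x260, 0x280 bytes in total)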
1767
1768         vbroadcasti128  .Lsigma(%rip),$xa3      # key[0]
1769         vbroadcasti128  ($key),$xb3             # key[1]
1770         vbroadcasti128  16($key),$xt3           # key[2]
1771         vbroadcasti128  ($counter),$xd3         # key[3]
1772         lea             0x100(%rsp),%rcx        # size optimization
1773         lea             0x200(%rsp),%rax        # size optimization
1774         lea             .Lrot16(%rip),%r10
1775         lea             .Lrot24(%rip),%r11
1776
1777         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1778         vpshufd         \$0x55,$xa3,$xa1
1779         vmovdqa         $xa0,0x80-0x100(%rcx)   # ... and offload
1780         vpshufd         \$0xaa,$xa3,$xa2
1781         vmovdqa         $xa1,0xa0-0x100(%rcx)
1782         vpshufd         \$0xff,$xa3,$xa3
1783         vmovdqa         $xa2,0xc0-0x100(%rcx)
1784         vmovdqa         $xa3,0xe0-0x100(%rcx)
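	# (vpshufd with \$0x00/\$0x55/\$0xaa/\$0xff replicates dword 0/1/2/3
	#  within each 128-bit lane; as the rows were loaded with
	#  vbroadcasti128, every dword of the result is the same state word,
	#  i.e. one word smashed across all 8 lanes)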
1785
1786         vpshufd         \$0x00,$xb3,$xb0
1787         vpshufd         \$0x55,$xb3,$xb1
1788         vmovdqa         $xb0,0x100-0x100(%rcx)
1789         vpshufd         \$0xaa,$xb3,$xb2
1790         vmovdqa         $xb1,0x120-0x100(%rcx)
1791         vpshufd         \$0xff,$xb3,$xb3
1792         vmovdqa         $xb2,0x140-0x100(%rcx)
1793         vmovdqa         $xb3,0x160-0x100(%rcx)
1794
1795         vpshufd         \$0x00,$xt3,$xt0        # "xc0"
1796         vpshufd         \$0x55,$xt3,$xt1        # "xc1"
1797         vmovdqa         $xt0,0x180-0x200(%rax)
1798         vpshufd         \$0xaa,$xt3,$xt2        # "xc2"
1799         vmovdqa         $xt1,0x1a0-0x200(%rax)
1800         vpshufd         \$0xff,$xt3,$xt3        # "xc3"
1801         vmovdqa         $xt2,0x1c0-0x200(%rax)
1802         vmovdqa         $xt3,0x1e0-0x200(%rax)
1803
1804         vpshufd         \$0x00,$xd3,$xd0
1805         vpshufd         \$0x55,$xd3,$xd1
1806         vpaddd          .Lincy(%rip),$xd0,$xd0  # don't save counters yet
1807         vpshufd         \$0xaa,$xd3,$xd2
1808         vmovdqa         $xd1,0x220-0x200(%rax)
1809         vpshufd         \$0xff,$xd3,$xd3
1810         vmovdqa         $xd2,0x240-0x200(%rax)
1811         vmovdqa         $xd3,0x260-0x200(%rax)
1812
1813         jmp             .Loop_enter8x
1814
1815 .align  32
1816 .Loop_outer8x:
1817         vmovdqa         0x80-0x100(%rcx),$xa0   # re-load smashed key
1818         vmovdqa         0xa0-0x100(%rcx),$xa1
1819         vmovdqa         0xc0-0x100(%rcx),$xa2
1820         vmovdqa         0xe0-0x100(%rcx),$xa3
1821         vmovdqa         0x100-0x100(%rcx),$xb0
1822         vmovdqa         0x120-0x100(%rcx),$xb1
1823         vmovdqa         0x140-0x100(%rcx),$xb2
1824         vmovdqa         0x160-0x100(%rcx),$xb3
1825         vmovdqa         0x180-0x200(%rax),$xt0  # "xc0"
1826         vmovdqa         0x1a0-0x200(%rax),$xt1  # "xc1"
1827         vmovdqa         0x1c0-0x200(%rax),$xt2  # "xc2"
1828         vmovdqa         0x1e0-0x200(%rax),$xt3  # "xc3"
1829         vmovdqa         0x200-0x200(%rax),$xd0
1830         vmovdqa         0x220-0x200(%rax),$xd1
1831         vmovdqa         0x240-0x200(%rax),$xd2
1832         vmovdqa         0x260-0x200(%rax),$xd3
1833         vpaddd          .Leight(%rip),$xd0,$xd0 # next SIMD counters
1834
1835 .Loop_enter8x:
1836         vmovdqa         $xt2,0x40(%rsp)         # SIMD equivalent of "@x[10]"
1837         vmovdqa         $xt3,0x60(%rsp)         # SIMD equivalent of "@x[11]"
1838         vbroadcasti128  (%r10),$xt3
1839         vmovdqa         $xd0,0x200-0x200(%rax)  # save SIMD counters
1840         mov             \$10,%eax
1841         jmp             .Loop8x
1842
1843 .align  32
1844 .Loop8x:
1845 ___
1846         foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1847         foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1848 $code.=<<___;
1849         dec             %eax
1850         jnz             .Loop8x
1851
1852         lea             0x200(%rsp),%rax        # size optimization
1853         vpaddd          0x80-0x100(%rcx),$xa0,$xa0      # accumulate key
1854         vpaddd          0xa0-0x100(%rcx),$xa1,$xa1
1855         vpaddd          0xc0-0x100(%rcx),$xa2,$xa2
1856         vpaddd          0xe0-0x100(%rcx),$xa3,$xa3
1857
1858         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1859         vpunpckldq      $xa3,$xa2,$xt3
1860         vpunpckhdq      $xa1,$xa0,$xa0
1861         vpunpckhdq      $xa3,$xa2,$xa2
1862         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1863         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1864         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1865         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1866 ___
1867         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1868 $code.=<<___;
1869         vpaddd          0x100-0x100(%rcx),$xb0,$xb0
1870         vpaddd          0x120-0x100(%rcx),$xb1,$xb1
1871         vpaddd          0x140-0x100(%rcx),$xb2,$xb2
1872         vpaddd          0x160-0x100(%rcx),$xb3,$xb3
1873
1874         vpunpckldq      $xb1,$xb0,$xt2
1875         vpunpckldq      $xb3,$xb2,$xt3
1876         vpunpckhdq      $xb1,$xb0,$xb0
1877         vpunpckhdq      $xb3,$xb2,$xb2
1878         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1879         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1880         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1881         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1882 ___
1883         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1884 $code.=<<___;
1885         vperm2i128      \$0x20,$xb0,$xa0,$xt3   # "de-interlace" further
1886         vperm2i128      \$0x31,$xb0,$xa0,$xb0
1887         vperm2i128      \$0x20,$xb1,$xa1,$xa0
1888         vperm2i128      \$0x31,$xb1,$xa1,$xb1
1889         vperm2i128      \$0x20,$xb2,$xa2,$xa1
1890         vperm2i128      \$0x31,$xb2,$xa2,$xb2
1891         vperm2i128      \$0x20,$xb3,$xa3,$xa2
1892         vperm2i128      \$0x31,$xb3,$xa3,$xb3
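	# (the dword unpacks above transpose a 4x4 block of state words
	#  within each 128-bit lane; the vperm2i128 steps then combine
	#  matching 128-bit halves so that the registers hold contiguous
	#  keystream, ready to be xored against the input)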
1893 ___
1894         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1895         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1896 $code.=<<___;
1897         vmovdqa         $xa0,0x00(%rsp)         # offload $xaN
1898         vmovdqa         $xa1,0x20(%rsp)
1899         vmovdqa         0x40(%rsp),$xc2         # $xa0
1900         vmovdqa         0x60(%rsp),$xc3         # $xa1
1901
1902         vpaddd          0x180-0x200(%rax),$xc0,$xc0
1903         vpaddd          0x1a0-0x200(%rax),$xc1,$xc1
1904         vpaddd          0x1c0-0x200(%rax),$xc2,$xc2
1905         vpaddd          0x1e0-0x200(%rax),$xc3,$xc3
1906
1907         vpunpckldq      $xc1,$xc0,$xt2
1908         vpunpckldq      $xc3,$xc2,$xt3
1909         vpunpckhdq      $xc1,$xc0,$xc0
1910         vpunpckhdq      $xc3,$xc2,$xc2
1911         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1912         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1913         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1914         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1915 ___
1916         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1917 $code.=<<___;
1918         vpaddd          0x200-0x200(%rax),$xd0,$xd0
1919         vpaddd          0x220-0x200(%rax),$xd1,$xd1
1920         vpaddd          0x240-0x200(%rax),$xd2,$xd2
1921         vpaddd          0x260-0x200(%rax),$xd3,$xd3
1922
1923         vpunpckldq      $xd1,$xd0,$xt2
1924         vpunpckldq      $xd3,$xd2,$xt3
1925         vpunpckhdq      $xd1,$xd0,$xd0
1926         vpunpckhdq      $xd3,$xd2,$xd2
1927         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1928         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1929         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1930         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1931 ___
1932         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1933 $code.=<<___;
1934         vperm2i128      \$0x20,$xd0,$xc0,$xt3   # "de-interlace" further
1935         vperm2i128      \$0x31,$xd0,$xc0,$xd0
1936         vperm2i128      \$0x20,$xd1,$xc1,$xc0
1937         vperm2i128      \$0x31,$xd1,$xc1,$xd1
1938         vperm2i128      \$0x20,$xd2,$xc2,$xc1
1939         vperm2i128      \$0x31,$xd2,$xc2,$xd2
1940         vperm2i128      \$0x20,$xd3,$xc3,$xc2
1941         vperm2i128      \$0x31,$xd3,$xc3,$xd3
1942 ___
1943         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1944         ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1945         ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1946         ($xa0,$xa1)=($xt2,$xt3);
1947 $code.=<<___;
1948         vmovdqa         0x00(%rsp),$xa0         # $xaN was offloaded, remember?
1949         vmovdqa         0x20(%rsp),$xa1
1950
1951         cmp             \$64*8,$len
1952         jb              .Ltail8x
1953
1954         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1955         vpxor           0x20($inp),$xb0,$xb0
1956         vpxor           0x40($inp),$xc0,$xc0
1957         vpxor           0x60($inp),$xd0,$xd0
1958         lea             0x80($inp),$inp         # size optimization
1959         vmovdqu         $xa0,0x00($out)
1960         vmovdqu         $xb0,0x20($out)
1961         vmovdqu         $xc0,0x40($out)
1962         vmovdqu         $xd0,0x60($out)
1963         lea             0x80($out),$out         # size optimization
1964
1965         vpxor           0x00($inp),$xa1,$xa1
1966         vpxor           0x20($inp),$xb1,$xb1
1967         vpxor           0x40($inp),$xc1,$xc1
1968         vpxor           0x60($inp),$xd1,$xd1
1969         lea             0x80($inp),$inp         # size optimization
1970         vmovdqu         $xa1,0x00($out)
1971         vmovdqu         $xb1,0x20($out)
1972         vmovdqu         $xc1,0x40($out)
1973         vmovdqu         $xd1,0x60($out)
1974         lea             0x80($out),$out         # size optimization
1975
1976         vpxor           0x00($inp),$xa2,$xa2
1977         vpxor           0x20($inp),$xb2,$xb2
1978         vpxor           0x40($inp),$xc2,$xc2
1979         vpxor           0x60($inp),$xd2,$xd2
1980         lea             0x80($inp),$inp         # size optimization
1981         vmovdqu         $xa2,0x00($out)
1982         vmovdqu         $xb2,0x20($out)
1983         vmovdqu         $xc2,0x40($out)
1984         vmovdqu         $xd2,0x60($out)
1985         lea             0x80($out),$out         # size optimization
1986
1987         vpxor           0x00($inp),$xa3,$xa3
1988         vpxor           0x20($inp),$xb3,$xb3
1989         vpxor           0x40($inp),$xc3,$xc3
1990         vpxor           0x60($inp),$xd3,$xd3
1991         lea             0x80($inp),$inp         # size optimization
1992         vmovdqu         $xa3,0x00($out)
1993         vmovdqu         $xb3,0x20($out)
1994         vmovdqu         $xc3,0x40($out)
1995         vmovdqu         $xd3,0x60($out)
1996         lea             0x80($out),$out         # size optimization
1997
1998         sub             \$64*8,$len
1999         jnz             .Loop_outer8x
2000
2001         jmp             .Ldone8x
2002
2003 .Ltail8x:
2004         cmp             \$448,$len
2005         jae             .L448_or_more8x
2006         cmp             \$384,$len
2007         jae             .L384_or_more8x
2008         cmp             \$320,$len
2009         jae             .L320_or_more8x
2010         cmp             \$256,$len
2011         jae             .L256_or_more8x
2012         cmp             \$192,$len
2013         jae             .L192_or_more8x
2014         cmp             \$128,$len
2015         jae             .L128_or_more8x
2016         cmp             \$64,$len
2017         jae             .L64_or_more8x
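	# fewer than 8 full blocks left: branch to the largest handler that
	# still fits; each handler writes out whole 64-byte blocks and leaves
	# any remainder of less than 64 bytes to .Loop_tail8x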
2018
2019         xor             %r10,%r10
2020         vmovdqa         $xa0,0x00(%rsp)
2021         vmovdqa         $xb0,0x20(%rsp)
2022         jmp             .Loop_tail8x
2023
2024 .align  32
2025 .L64_or_more8x:
2026         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2027         vpxor           0x20($inp),$xb0,$xb0
2028         vmovdqu         $xa0,0x00($out)
2029         vmovdqu         $xb0,0x20($out)
2030         je              .Ldone8x
2031
2032         lea             0x40($inp),$inp         # inp+=64*1
2033         xor             %r10,%r10
2034         vmovdqa         $xc0,0x00(%rsp)
2035         lea             0x40($out),$out         # out+=64*1
2036         sub             \$64,$len               # len-=64*1
2037         vmovdqa         $xd0,0x20(%rsp)
2038         jmp             .Loop_tail8x
2039
2040 .align  32
2041 .L128_or_more8x:
2042         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2043         vpxor           0x20($inp),$xb0,$xb0
2044         vpxor           0x40($inp),$xc0,$xc0
2045         vpxor           0x60($inp),$xd0,$xd0
2046         vmovdqu         $xa0,0x00($out)
2047         vmovdqu         $xb0,0x20($out)
2048         vmovdqu         $xc0,0x40($out)
2049         vmovdqu         $xd0,0x60($out)
2050         je              .Ldone8x
2051
2052         lea             0x80($inp),$inp         # inp+=64*2
2053         xor             %r10,%r10
2054         vmovdqa         $xa1,0x00(%rsp)
2055         lea             0x80($out),$out         # out+=64*2
2056         sub             \$128,$len              # len-=64*2
2057         vmovdqa         $xb1,0x20(%rsp)
2058         jmp             .Loop_tail8x
2059
2060 .align  32
2061 .L192_or_more8x:
2062         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2063         vpxor           0x20($inp),$xb0,$xb0
2064         vpxor           0x40($inp),$xc0,$xc0
2065         vpxor           0x60($inp),$xd0,$xd0
2066         vpxor           0x80($inp),$xa1,$xa1
2067         vpxor           0xa0($inp),$xb1,$xb1
2068         vmovdqu         $xa0,0x00($out)
2069         vmovdqu         $xb0,0x20($out)
2070         vmovdqu         $xc0,0x40($out)
2071         vmovdqu         $xd0,0x60($out)
2072         vmovdqu         $xa1,0x80($out)
2073         vmovdqu         $xb1,0xa0($out)
2074         je              .Ldone8x
2075
2076         lea             0xc0($inp),$inp         # inp+=64*3
2077         xor             %r10,%r10
2078         vmovdqa         $xc1,0x00(%rsp)
2079         lea             0xc0($out),$out         # out+=64*3
2080         sub             \$192,$len              # len-=64*3
2081         vmovdqa         $xd1,0x20(%rsp)
2082         jmp             .Loop_tail8x
2083
2084 .align  32
2085 .L256_or_more8x:
2086         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2087         vpxor           0x20($inp),$xb0,$xb0
2088         vpxor           0x40($inp),$xc0,$xc0
2089         vpxor           0x60($inp),$xd0,$xd0
2090         vpxor           0x80($inp),$xa1,$xa1
2091         vpxor           0xa0($inp),$xb1,$xb1
2092         vpxor           0xc0($inp),$xc1,$xc1
2093         vpxor           0xe0($inp),$xd1,$xd1
2094         vmovdqu         $xa0,0x00($out)
2095         vmovdqu         $xb0,0x20($out)
2096         vmovdqu         $xc0,0x40($out)
2097         vmovdqu         $xd0,0x60($out)
2098         vmovdqu         $xa1,0x80($out)
2099         vmovdqu         $xb1,0xa0($out)
2100         vmovdqu         $xc1,0xc0($out)
2101         vmovdqu         $xd1,0xe0($out)
2102         je              .Ldone8x
2103
2104         lea             0x100($inp),$inp        # inp+=64*4
2105         xor             %r10,%r10
2106         vmovdqa         $xa2,0x00(%rsp)
2107         lea             0x100($out),$out        # out+=64*4
2108         sub             \$256,$len              # len-=64*4
2109         vmovdqa         $xb2,0x20(%rsp)
2110         jmp             .Loop_tail8x
2111
2112 .align  32
2113 .L320_or_more8x:
2114         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2115         vpxor           0x20($inp),$xb0,$xb0
2116         vpxor           0x40($inp),$xc0,$xc0
2117         vpxor           0x60($inp),$xd0,$xd0
2118         vpxor           0x80($inp),$xa1,$xa1
2119         vpxor           0xa0($inp),$xb1,$xb1
2120         vpxor           0xc0($inp),$xc1,$xc1
2121         vpxor           0xe0($inp),$xd1,$xd1
2122         vpxor           0x100($inp),$xa2,$xa2
2123         vpxor           0x120($inp),$xb2,$xb2
2124         vmovdqu         $xa0,0x00($out)
2125         vmovdqu         $xb0,0x20($out)
2126         vmovdqu         $xc0,0x40($out)
2127         vmovdqu         $xd0,0x60($out)
2128         vmovdqu         $xa1,0x80($out)
2129         vmovdqu         $xb1,0xa0($out)
2130         vmovdqu         $xc1,0xc0($out)
2131         vmovdqu         $xd1,0xe0($out)
2132         vmovdqu         $xa2,0x100($out)
2133         vmovdqu         $xb2,0x120($out)
2134         je              .Ldone8x
2135
2136         lea             0x140($inp),$inp        # inp+=64*5
2137         xor             %r10,%r10
2138         vmovdqa         $xc2,0x00(%rsp)
2139         lea             0x140($out),$out        # out+=64*5
2140         sub             \$320,$len              # len-=64*5
2141         vmovdqa         $xd2,0x20(%rsp)
2142         jmp             .Loop_tail8x
2143
2144 .align  32
2145 .L384_or_more8x:
2146         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2147         vpxor           0x20($inp),$xb0,$xb0
2148         vpxor           0x40($inp),$xc0,$xc0
2149         vpxor           0x60($inp),$xd0,$xd0
2150         vpxor           0x80($inp),$xa1,$xa1
2151         vpxor           0xa0($inp),$xb1,$xb1
2152         vpxor           0xc0($inp),$xc1,$xc1
2153         vpxor           0xe0($inp),$xd1,$xd1
2154         vpxor           0x100($inp),$xa2,$xa2
2155         vpxor           0x120($inp),$xb2,$xb2
2156         vpxor           0x140($inp),$xc2,$xc2
2157         vpxor           0x160($inp),$xd2,$xd2
2158         vmovdqu         $xa0,0x00($out)
2159         vmovdqu         $xb0,0x20($out)
2160         vmovdqu         $xc0,0x40($out)
2161         vmovdqu         $xd0,0x60($out)
2162         vmovdqu         $xa1,0x80($out)
2163         vmovdqu         $xb1,0xa0($out)
2164         vmovdqu         $xc1,0xc0($out)
2165         vmovdqu         $xd1,0xe0($out)
2166         vmovdqu         $xa2,0x100($out)
2167         vmovdqu         $xb2,0x120($out)
2168         vmovdqu         $xc2,0x140($out)
2169         vmovdqu         $xd2,0x160($out)
2170         je              .Ldone8x
2171
2172         lea             0x180($inp),$inp        # inp+=64*6
2173         xor             %r10,%r10
2174         vmovdqa         $xa3,0x00(%rsp)
2175         lea             0x180($out),$out        # out+=64*6
2176         sub             \$384,$len              # len-=64*6
2177         vmovdqa         $xb3,0x20(%rsp)
2178         jmp             .Loop_tail8x
2179
2180 .align  32
2181 .L448_or_more8x:
2182         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2183         vpxor           0x20($inp),$xb0,$xb0
2184         vpxor           0x40($inp),$xc0,$xc0
2185         vpxor           0x60($inp),$xd0,$xd0
2186         vpxor           0x80($inp),$xa1,$xa1
2187         vpxor           0xa0($inp),$xb1,$xb1
2188         vpxor           0xc0($inp),$xc1,$xc1
2189         vpxor           0xe0($inp),$xd1,$xd1
2190         vpxor           0x100($inp),$xa2,$xa2
2191         vpxor           0x120($inp),$xb2,$xb2
2192         vpxor           0x140($inp),$xc2,$xc2
2193         vpxor           0x160($inp),$xd2,$xd2
2194         vpxor           0x180($inp),$xa3,$xa3
2195         vpxor           0x1a0($inp),$xb3,$xb3
2196         vmovdqu         $xa0,0x00($out)
2197         vmovdqu         $xb0,0x20($out)
2198         vmovdqu         $xc0,0x40($out)
2199         vmovdqu         $xd0,0x60($out)
2200         vmovdqu         $xa1,0x80($out)
2201         vmovdqu         $xb1,0xa0($out)
2202         vmovdqu         $xc1,0xc0($out)
2203         vmovdqu         $xd1,0xe0($out)
2204         vmovdqu         $xa2,0x100($out)
2205         vmovdqu         $xb2,0x120($out)
2206         vmovdqu         $xc2,0x140($out)
2207         vmovdqu         $xd2,0x160($out)
2208         vmovdqu         $xa3,0x180($out)
2209         vmovdqu         $xb3,0x1a0($out)
2210         je              .Ldone8x
2211
2212         lea             0x1c0($inp),$inp        # inp+=64*7
2213         xor             %r10,%r10
2214         vmovdqa         $xc3,0x00(%rsp)
2215         lea             0x1c0($out),$out        # out+=64*7
2216         sub             \$448,$len              # len-=64*7
2217         vmovdqa         $xd3,0x20(%rsp)
2218
2219 .Loop_tail8x:
2220         movzb           ($inp,%r10),%eax
2221         movzb           (%rsp,%r10),%ecx
2222         lea             1(%r10),%r10
2223         xor             %ecx,%eax
2224         mov             %al,-1($out,%r10)
2225         dec             $len
2226         jnz             .Loop_tail8x
2227
2228 .Ldone8x:
2229         vzeroall
2230 ___
2231 $code.=<<___    if ($win64);
2232         movaps          -0xa8(%r9),%xmm6
2233         movaps          -0x98(%r9),%xmm7
2234         movaps          -0x88(%r9),%xmm8
2235         movaps          -0x78(%r9),%xmm9
2236         movaps          -0x68(%r9),%xmm10
2237         movaps          -0x58(%r9),%xmm11
2238         movaps          -0x48(%r9),%xmm12
2239         movaps          -0x38(%r9),%xmm13
2240         movaps          -0x28(%r9),%xmm14
2241         movaps          -0x18(%r9),%xmm15
2242 ___
2243 $code.=<<___;
2244         lea             (%r9),%rsp
2245 .L8x_epilogue:
2246         ret
2247 .size   ChaCha20_8x,.-ChaCha20_8x
2248 ___
2249 }
2250
2251 ########################################################################
2252 # AVX512 code paths
2253 if ($avx>2) {
2254 # This one handles shorter inputs...
2255
2256 my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
2257 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
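# The whole 4x4 state lives in a handful of %zmm registers here: $a..$d
# hold the four state rows, each broadcast to all four 128-bit lanes, so
# every iteration of the loop below produces up to four 64-byte blocks.
# Presumably .Lzeroz supplies the per-lane counter offsets 0..3 and
# .Lfourz the +4 step; the tables themselves are defined with the other
# .L constants.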
2258
2259 sub AVX512ROUND {       # critical path is 14 "SIMD ticks" per round
2260         &vpaddd ($a,$a,$b);
2261         &vpxord ($d,$d,$a);
2262         &vprold ($d,$d,16);
2263
2264         &vpaddd ($c,$c,$d);
2265         &vpxord ($b,$b,$c);
2266         &vprold ($b,$b,12);
2267
2268         &vpaddd ($a,$a,$b);
2269         &vpxord ($d,$d,$a);
2270         &vprold ($d,$d,8);
2271
2272         &vpaddd ($c,$c,$d);
2273         &vpxord ($b,$b,$c);
2274         &vprold ($b,$b,7);
2275 }
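# Unlike the AVX2 rounds above, which rotate via shift/shift/or and via
# vpshufb with the .Lrot16/.Lrot24 tables, AVX512F has vprold, so every
# rotation here is a single instruction.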
2276
2277 my $xframe = $win64 ? 32+8 : 8;
2278
2279 $code.=<<___;
2280 .type   ChaCha20_avx512,\@function,5
2281 .align  32
2282 ChaCha20_avx512:
2283 .LChaCha20_avx512:
2284         mov     %rsp,%r9                # frame pointer
2285         cmp     \$512,$len
2286         ja      .LChaCha20_16x
2287
2288         sub     \$64+$xframe,%rsp
2289 ___
2290 $code.=<<___    if ($win64);
2291         movaps  %xmm6,-0x28(%r9)
2292         movaps  %xmm7,-0x18(%r9)
2293 .Lavx512_body:
2294 ___
2295 $code.=<<___;
2296         vbroadcasti32x4 .Lsigma(%rip),$a
2297         vbroadcasti32x4 ($key),$b
2298         vbroadcasti32x4 16($key),$c
2299         vbroadcasti32x4 ($counter),$d
2300
2301         vmovdqa32       $a,$a_
2302         vmovdqa32       $b,$b_
2303         vmovdqa32       $c,$c_
2304         vpaddd          .Lzeroz(%rip),$d,$d
2305         vmovdqa32       .Lfourz(%rip),$fourz
2306         mov             \$10,$counter   # reuse $counter
2307         vmovdqa32       $d,$d_
2308         jmp             .Loop_avx512
2309
2310 .align  16
2311 .Loop_outer_avx512:
2312         vmovdqa32       $a_,$a
2313         vmovdqa32       $b_,$b
2314         vmovdqa32       $c_,$c
2315         vpaddd          $fourz,$d_,$d
2316         mov             \$10,$counter
2317         vmovdqa32       $d,$d_
2318         jmp             .Loop_avx512
2319
2320 .align  32
2321 .Loop_avx512:
2322 ___
2323         &AVX512ROUND();
2324         &vpshufd        ($c,$c,0b01001110);
2325         &vpshufd        ($b,$b,0b00111001);
2326         &vpshufd        ($d,$d,0b10010011);
2327
2328         &AVX512ROUND();
2329         &vpshufd        ($c,$c,0b01001110);
2330         &vpshufd        ($b,$b,0b10010011);
2331         &vpshufd        ($d,$d,0b00111001);
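	# The vpshufd immediates rotate the four dwords within each 128-bit
	# lane: 0b00111001 by one position, 0b01001110 by two and 0b10010011
	# by three.  The first triple brings the state from column into
	# diagonal form for the second half of the double round; the second
	# triple, with the b/d immediates swapped, moves it back.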
2332
2333         &dec            ($counter);
2334         &jnz            (".Loop_avx512");
2335
2336 $code.=<<___;
2337         vpaddd          $a_,$a,$a
2338         vpaddd          $b_,$b,$b
2339         vpaddd          $c_,$c,$c
2340         vpaddd          $d_,$d,$d
2341
2342         sub             \$64,$len
2343         jb              .Ltail64_avx512
2344
2345         vpxor           0x00($inp),%x#$a,$t0    # xor with input
2346         vpxor           0x10($inp),%x#$b,$t1
2347         vpxor           0x20($inp),%x#$c,$t2
2348         vpxor           0x30($inp),%x#$d,$t3
2349         lea             0x40($inp),$inp         # inp+=64
2350
2351         vmovdqu         $t0,0x00($out)          # write output
2352         vmovdqu         $t1,0x10($out)
2353         vmovdqu         $t2,0x20($out)
2354         vmovdqu         $t3,0x30($out)
2355         lea             0x40($out),$out         # out+=64
2356
2357         jz              .Ldone_avx512
2358
2359         vextracti32x4   \$1,$a,$t0
2360         vextracti32x4   \$1,$b,$t1
2361         vextracti32x4   \$1,$c,$t2
2362         vextracti32x4   \$1,$d,$t3
2363
2364         sub             \$64,$len
2365         jb              .Ltail_avx512
2366
2367         vpxor           0x00($inp),$t0,$t0      # xor with input
2368         vpxor           0x10($inp),$t1,$t1
2369         vpxor           0x20($inp),$t2,$t2
2370         vpxor           0x30($inp),$t3,$t3
2371         lea             0x40($inp),$inp         # inp+=64
2372
2373         vmovdqu         $t0,0x00($out)          # write output
2374         vmovdqu         $t1,0x10($out)
2375         vmovdqu         $t2,0x20($out)
2376         vmovdqu         $t3,0x30($out)
2377         lea             0x40($out),$out         # out+=64
2378
2379         jz              .Ldone_avx512
2380
2381         vextracti32x4   \$2,$a,$t0
2382         vextracti32x4   \$2,$b,$t1
2383         vextracti32x4   \$2,$c,$t2
2384         vextracti32x4   \$2,$d,$t3
2385
2386         sub             \$64,$len
2387         jb              .Ltail_avx512
2388
2389         vpxor           0x00($inp),$t0,$t0      # xor with input
2390         vpxor           0x10($inp),$t1,$t1
2391         vpxor           0x20($inp),$t2,$t2
2392         vpxor           0x30($inp),$t3,$t3
2393         lea             0x40($inp),$inp         # inp+=64
2394
2395         vmovdqu         $t0,0x00($out)          # write output
2396         vmovdqu         $t1,0x10($out)
2397         vmovdqu         $t2,0x20($out)
2398         vmovdqu         $t3,0x30($out)
2399         lea             0x40($out),$out         # out+=64
2400
2401         jz              .Ldone_avx512
2402
2403         vextracti32x4   \$3,$a,$t0
2404         vextracti32x4   \$3,$b,$t1
2405         vextracti32x4   \$3,$c,$t2
2406         vextracti32x4   \$3,$d,$t3
2407
2408         sub             \$64,$len
2409         jb              .Ltail_avx512
2410
2411         vpxor           0x00($inp),$t0,$t0      # xor with input
2412         vpxor           0x10($inp),$t1,$t1
2413         vpxor           0x20($inp),$t2,$t2
2414         vpxor           0x30($inp),$t3,$t3
2415         lea             0x40($inp),$inp         # inp+=64
2416
2417         vmovdqu         $t0,0x00($out)          # write output
2418         vmovdqu         $t1,0x10($out)
2419         vmovdqu         $t2,0x20($out)
2420         vmovdqu         $t3,0x30($out)
2421         lea             0x40($out),$out         # out+=64
2422
2423         jnz             .Loop_outer_avx512
2424
2425         jmp             .Ldone_avx512
2426
2427 .align  16
2428 .Ltail64_avx512:
2429         vmovdqa         %x#$a,0x00(%rsp)
2430         vmovdqa         %x#$b,0x10(%rsp)
2431         vmovdqa         %x#$c,0x20(%rsp)
2432         vmovdqa         %x#$d,0x30(%rsp)
2433         add             \$64,$len
2434         jmp             .Loop_tail_avx512
2435
2436 .align  16
2437 .Ltail_avx512:
2438         vmovdqa         $t0,0x00(%rsp)
2439         vmovdqa         $t1,0x10(%rsp)
2440         vmovdqa         $t2,0x20(%rsp)
2441         vmovdqa         $t3,0x30(%rsp)
2442         add             \$64,$len
2443
2444 .Loop_tail_avx512:
2445         movzb           ($inp,$counter),%eax
2446         movzb           (%rsp,$counter),%ecx
2447         lea             1($counter),$counter
2448         xor             %ecx,%eax
2449         mov             %al,-1($out,$counter)
2450         dec             $len
2451         jnz             .Loop_tail_avx512
2452
2453         vmovdqa32       $a_,0x00(%rsp)
2454
2455 .Ldone_avx512:
2456         vzeroall
2457 ___
2458 $code.=<<___    if ($win64);
2459         movaps  -0x28(%r9),%xmm6
2460         movaps  -0x18(%r9),%xmm7
2461 ___
2462 $code.=<<___;
2463         lea     (%r9),%rsp
2464 .Lavx512_epilogue:
2465         ret
2466 .size   ChaCha20_avx512,.-ChaCha20_avx512
2467 ___
2468 }
2469 if ($avx>2) {
2470 # This one handles longer inputs...
2471
2472 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2473     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2474 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2475          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2476 my @key=map("%zmm$_",(16..31));
2477 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2478
2479 sub AVX512_lane_ROUND {
2480 my ($a0,$b0,$c0,$d0)=@_;
2481 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2482 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2483 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
2484 my @x=map("\"$_\"",@xx);
2485
2486         (
2487         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
2488          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
2489           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
2490            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
2491         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2492          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2493           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2494            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2495         "&vprold        (@x[$d0],@x[$d0],16)",
2496          "&vprold       (@x[$d1],@x[$d1],16)",
2497           "&vprold      (@x[$d2],@x[$d2],16)",
2498            "&vprold     (@x[$d3],@x[$d3],16)",
2499
2500         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2501          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2502           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2503            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2504         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2505          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2506           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2507            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2508         "&vprold        (@x[$b0],@x[$b0],12)",
2509          "&vprold       (@x[$b1],@x[$b1],12)",
2510           "&vprold      (@x[$b2],@x[$b2],12)",
2511            "&vprold     (@x[$b3],@x[$b3],12)",
2512
2513         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
2514          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
2515           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
2516            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
2517         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2518          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2519           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2520            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2521         "&vprold        (@x[$d0],@x[$d0],8)",
2522          "&vprold       (@x[$d1],@x[$d1],8)",
2523           "&vprold      (@x[$d2],@x[$d2],8)",
2524            "&vprold     (@x[$d3],@x[$d3],8)",
2525
2526         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2527          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2528           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2529            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2530         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2531          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2532           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2533            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2534         "&vprold        (@x[$b0],@x[$b0],7)",
2535          "&vprold       (@x[$b1],@x[$b1],7)",
2536           "&vprold      (@x[$b2],@x[$b2],7)",
2537            "&vprold     (@x[$b3],@x[$b3],7)"
2538         );
2539 }
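# With sixteen %zmm state registers (zmm0..15) and the key material held
# in zmm16..31, no 'c' rows need to be spilled inside the round, unlike
# the AVX2 variant above; the four quarter-rounds are simply interleaved
# to hide instruction latency.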
2540
2541 my $xframe = $win64 ? 0xa8 : 8;
2542
2543 $code.=<<___;
2544 .type   ChaCha20_16x,\@function,5
2545 .align  32
2546 ChaCha20_16x:
2547 .LChaCha20_16x:
2548         mov             %rsp,%r9                # frame register
2549         sub             \$64+$xframe,%rsp
2550         and             \$-64,%rsp
2551 ___
2552 $code.=<<___    if ($win64);
2553         movaps          %xmm6,-0xa8(%r9)
2554         movaps          %xmm7,-0x98(%r9)
2555         movaps          %xmm8,-0x88(%r9)
2556         movaps          %xmm9,-0x78(%r9)
2557         movaps          %xmm10,-0x68(%r9)
2558         movaps          %xmm11,-0x58(%r9)
2559         movaps          %xmm12,-0x48(%r9)
2560         movaps          %xmm13,-0x38(%r9)
2561         movaps          %xmm14,-0x28(%r9)
2562         movaps          %xmm15,-0x18(%r9)
2563 .L16x_body:
2564 ___
2565 $code.=<<___;
2566         vzeroupper
2567
2568         lea             .Lsigma(%rip),%r10
2569         vbroadcasti32x4 (%r10),$xa3             # key[0]
2570         vbroadcasti32x4 ($key),$xb3             # key[1]
2571         vbroadcasti32x4 16($key),$xc3           # key[2]
2572         vbroadcasti32x4 ($counter),$xd3         # key[3]
2573
2574         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
2575         vpshufd         \$0x55,$xa3,$xa1
2576         vpshufd         \$0xaa,$xa3,$xa2
2577         vpshufd         \$0xff,$xa3,$xa3
2578         vmovdqa64       $xa0,@key[0]
2579         vmovdqa64       $xa1,@key[1]
2580         vmovdqa64       $xa2,@key[2]
2581         vmovdqa64       $xa3,@key[3]
2582
2583         vpshufd         \$0x00,$xb3,$xb0
2584         vpshufd         \$0x55,$xb3,$xb1
2585         vpshufd         \$0xaa,$xb3,$xb2
2586         vpshufd         \$0xff,$xb3,$xb3
2587         vmovdqa64       $xb0,@key[4]
2588         vmovdqa64       $xb1,@key[5]
2589         vmovdqa64       $xb2,@key[6]
2590         vmovdqa64       $xb3,@key[7]
2591
2592         vpshufd         \$0x00,$xc3,$xc0
2593         vpshufd         \$0x55,$xc3,$xc1
2594         vpshufd         \$0xaa,$xc3,$xc2
2595         vpshufd         \$0xff,$xc3,$xc3
2596         vmovdqa64       $xc0,@key[8]
2597         vmovdqa64       $xc1,@key[9]
2598         vmovdqa64       $xc2,@key[10]
2599         vmovdqa64       $xc3,@key[11]
2600
2601         vpshufd         \$0x00,$xd3,$xd0
2602         vpshufd         \$0x55,$xd3,$xd1
2603         vpshufd         \$0xaa,$xd3,$xd2
2604         vpshufd         \$0xff,$xd3,$xd3
2605         vpaddd          .Lincz(%rip),$xd0,$xd0  # don't save counters yet
2606         vmovdqa64       $xd0,@key[12]
2607         vmovdqa64       $xd1,@key[13]
2608         vmovdqa64       $xd2,@key[14]
2609         vmovdqa64       $xd3,@key[15]
2610
2611         mov             \$10,%eax
2612         jmp             .Loop16x
2613
2614 .align  32
2615 .Loop_outer16x:
2616         vpbroadcastd    0(%r10),$xa0            # reload key
2617         vpbroadcastd    4(%r10),$xa1
2618         vpbroadcastd    8(%r10),$xa2
2619         vpbroadcastd    12(%r10),$xa3
2620         vpaddd          .Lsixteen(%rip),@key[12],@key[12]       # next SIMD counters
2621         vmovdqa64       @key[4],$xb0
2622         vmovdqa64       @key[5],$xb1
2623         vmovdqa64       @key[6],$xb2
2624         vmovdqa64       @key[7],$xb3
2625         vmovdqa64       @key[8],$xc0
2626         vmovdqa64       @key[9],$xc1
2627         vmovdqa64       @key[10],$xc2
2628         vmovdqa64       @key[11],$xc3
2629         vmovdqa64       @key[12],$xd0
2630         vmovdqa64       @key[13],$xd1
2631         vmovdqa64       @key[14],$xd2
2632         vmovdqa64       @key[15],$xd3
2633
2634         vmovdqa64       $xa0,@key[0]
2635         vmovdqa64       $xa1,@key[1]
2636         vmovdqa64       $xa2,@key[2]
2637         vmovdqa64       $xa3,@key[3]
2638
2639         mov             \$10,%eax
2640         jmp             .Loop16x
2641
2642 .align  32
2643 .Loop16x:
2644 ___
2645         foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
2646         foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
2647 $code.=<<___;
2648         dec             %eax
2649         jnz             .Loop16x
2650
2651         vpaddd          @key[0],$xa0,$xa0       # accumulate key
2652         vpaddd          @key[1],$xa1,$xa1
2653         vpaddd          @key[2],$xa2,$xa2
2654         vpaddd          @key[3],$xa3,$xa3
2655
2656         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
2657         vpunpckldq      $xa3,$xa2,$xt3
2658         vpunpckhdq      $xa1,$xa0,$xa0
2659         vpunpckhdq      $xa3,$xa2,$xa2
2660         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
2661         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
2662         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
2663         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
2664 ___
2665         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
2666 $code.=<<___;
2667         vpaddd          @key[4],$xb0,$xb0
2668         vpaddd          @key[5],$xb1,$xb1
2669         vpaddd          @key[6],$xb2,$xb2
2670         vpaddd          @key[7],$xb3,$xb3
2671
2672         vpunpckldq      $xb1,$xb0,$xt2
2673         vpunpckldq      $xb3,$xb2,$xt3
2674         vpunpckhdq      $xb1,$xb0,$xb0
2675         vpunpckhdq      $xb3,$xb2,$xb2
2676         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
2677         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
2678         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
2679         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
2680 ___
2681         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
2682 $code.=<<___;
2683         vshufi32x4      \$0x44,$xb0,$xa0,$xt3   # "de-interlace" further
2684         vshufi32x4      \$0xee,$xb0,$xa0,$xb0
2685         vshufi32x4      \$0x44,$xb1,$xa1,$xa0
2686         vshufi32x4      \$0xee,$xb1,$xa1,$xb1
2687         vshufi32x4      \$0x44,$xb2,$xa2,$xa1
2688         vshufi32x4      \$0xee,$xb2,$xa2,$xb2
2689         vshufi32x4      \$0x44,$xb3,$xa3,$xa2
2690         vshufi32x4      \$0xee,$xb3,$xa3,$xb3
2691 ___
2692         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
2693 $code.=<<___;
2694         vpaddd          @key[8],$xc0,$xc0
2695         vpaddd          @key[9],$xc1,$xc1
2696         vpaddd          @key[10],$xc2,$xc2
2697         vpaddd          @key[11],$xc3,$xc3
2698
2699         vpunpckldq      $xc1,$xc0,$xt2
2700         vpunpckldq      $xc3,$xc2,$xt3
2701         vpunpckhdq      $xc1,$xc0,$xc0
2702         vpunpckhdq      $xc3,$xc2,$xc2
2703         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
2704         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
2705         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
2706         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
2707 ___
2708         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
2709 $code.=<<___;
2710         vpaddd          @key[12],$xd0,$xd0
2711         vpaddd          @key[13],$xd1,$xd1
2712         vpaddd          @key[14],$xd2,$xd2
2713         vpaddd          @key[15],$xd3,$xd3
2714
2715         vpunpckldq      $xd1,$xd0,$xt2
2716         vpunpckldq      $xd3,$xd2,$xt3
2717         vpunpckhdq      $xd1,$xd0,$xd0
2718         vpunpckhdq      $xd3,$xd2,$xd2
2719         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
2720         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
2721         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
2722         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
2723 ___
2724         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
2725 $code.=<<___;
2726         vshufi32x4      \$0x44,$xd0,$xc0,$xt3   # "de-interlace" further
2727         vshufi32x4      \$0xee,$xd0,$xc0,$xd0
2728         vshufi32x4      \$0x44,$xd1,$xc1,$xc0
2729         vshufi32x4      \$0xee,$xd1,$xc1,$xd1
2730         vshufi32x4      \$0x44,$xd2,$xc2,$xc1
2731         vshufi32x4      \$0xee,$xd2,$xc2,$xd2
2732         vshufi32x4      \$0x44,$xd3,$xc3,$xc2
2733         vshufi32x4      \$0xee,$xd3,$xc3,$xd3
2734 ___
2735         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
2736 $code.=<<___;
2737         vshufi32x4      \$0x88,$xc0,$xa0,$xt0   # "de-interlace" further
2738         vshufi32x4      \$0xdd,$xc0,$xa0,$xa0
2739          vshufi32x4     \$0x88,$xd0,$xb0,$xc0
2740          vshufi32x4     \$0xdd,$xd0,$xb0,$xd0
2741         vshufi32x4      \$0x88,$xc1,$xa1,$xt1
2742         vshufi32x4      \$0xdd,$xc1,$xa1,$xa1
2743          vshufi32x4     \$0x88,$xd1,$xb1,$xc1
2744          vshufi32x4     \$0xdd,$xd1,$xb1,$xd1
2745         vshufi32x4      \$0x88,$xc2,$xa2,$xt2
2746         vshufi32x4      \$0xdd,$xc2,$xa2,$xa2
2747          vshufi32x4     \$0x88,$xd2,$xb2,$xc2
2748          vshufi32x4     \$0xdd,$xd2,$xb2,$xd2
2749         vshufi32x4      \$0x88,$xc3,$xa3,$xt3
2750         vshufi32x4      \$0xdd,$xc3,$xa3,$xa3
2751          vshufi32x4     \$0x88,$xd3,$xb3,$xc3
2752          vshufi32x4     \$0xdd,$xd3,$xb3,$xd3
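	# (after this final pass each 64-byte register holds one complete
	#  output block, so it can be xored and stored as a unit below)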
2753 ___
2754         ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
2755         ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
2756
2757         ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
2758          $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
2759         ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2760          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
$code.=<<___;
	cmp		\$64*16,$len
	jb		.Ltail16x

	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out

	sub		\$64*16,$len
	jnz		.Loop_outer16x

	jmp		.Ldone16x

.align	32
.Ltail16x:
	xor		%r10,%r10
	sub		$inp,$out
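	# out now holds the distance out-inp, so (out,inp) addressing below
	# hits the output buffer while inp alone advances through the input;
	# each full 64-byte register is consumed in turn, and the next block
	# is copied into the xa0 slot so the final partial block can always
	# be taken from there.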
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

.Less_than_64_16x:
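	# fewer than 64 bytes remain: park the pending keystream block on
	# the stack and xor it into the output byte by byte; the stack copy
	# is wiped again before .Ldone16x.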
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.L16x_epilogue:
	ret
.size	ChaCha20_16x,.-ChaCha20_16x
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

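# The handlers below follow the usual perlasm Win64 SEH pattern: given the
# faulting Rip they decide whether the frame was fully set up, recover the
# caller's stack pointer (kept in %r9 by the SIMD paths), copy the saved
# non-volatile registers back into the CONTEXT record (GPRs for the IALU
# path, XMM registers for the SIMD paths) and hand off to RtlVirtualUnwind
# via the common tail.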
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

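	# in range: copy 4 quadwords, i.e. the saved xmm6/xmm7 pair, from
	# -0x28 off the recovered frame pointer into context->Xmm6/Xmm7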
	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

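	# in range: copy 20 quadwords, i.e. xmm6 through xmm15, from -0xa8
	# off the recovered frame pointer into context->Xmm6..Xmm15, matching
	# the movaps spill/restore sequence used by the wider SIMD paths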
	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

.section	.pdata
.align	4
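	# one .pdata entry per SEH-covered function: the RVAs of its start,
	# its end and its unwind descriptor in .xdata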
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
___
$code.=<<___;
.section	.xdata
.align	8
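	# minimal UNWIND_INFO: version 1 with UNW_FLAG_EHANDLER and no unwind
	# codes, followed by the handler RVA and its HandlerData (the
	# prologue/epilogue label pair, where one is given)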
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4xop_body,.L4xop_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"
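	# the substitution above collapses register spellings such as
	# "%x#%ymm7" or "%x#%zmm7" to "%xmm7", i.e. it selects the 128-bit
	# alias of a wide register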

	print $_,"\n";
}

close STDOUT;