[openssl.git] / crypto / chacha / asm / chacha-x86_64.pl
1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # November 2014
18 #
19 # ChaCha20 for x86_64.
20 #
21 # December 2016
22 #
23 # Add AVX512F code path.
24 #
25 # Performance in cycles per byte out of large buffer.
26 #
27 #               IALU/gcc 4.8(i) 1xSSSE3/SSE2    4xSSSE3     8xAVX2
28 #
29 # P4            9.48/+99%       -/22.7(ii)      -
30 # Core2         7.83/+55%       7.90/8.08       4.35
31 # Westmere      7.19/+50%       5.60/6.70       3.00
32 # Sandy Bridge  8.31/+42%       5.45/6.76       2.72
33 # Ivy Bridge    6.71/+46%       5.40/6.49       2.41
34 # Haswell       5.92/+43%       5.20/6.45       2.42        1.23
35 # Skylake       5.87/+39%       4.70/-          2.31        1.19
36 # Silvermont    12.0/+33%       7.75/7.40       7.03(iii)
37 # Goldmont      10.6/+17%       5.10/-          3.28
38 # Sledgehammer  7.28/+52%       -/14.2(ii)      -
39 # Bulldozer     9.66/+28%       9.85/11.1       3.06(iv)
40 # VIA Nano      10.5/+46%       6.72/8.60       6.05
41 #
42 # (i)   compared to older gcc 3.x, one can observe a >2x improvement
43 #       on most platforms;
44 # (ii)  as can be seen, SSE2 performance is too low on legacy
45 #       processors; NxSSE2 results are naturally better, but not
46 #       impressively better than IALU ones, which is why you won't
47 #       find SSE2 code below;
48 # (iii) this is not an optimal result for Atom because of MSROM
49 #       limitations; SSE2 can do better, but the gain is considered
50 #       too low to justify the [maintenance] effort;
51 # (iv)  Bulldozer actually executes the 4xXOP code path that delivers 2.20;
52
53 $flavour = shift;
54 $output  = shift;
55 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
56
57 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
58
59 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
60 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
61 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
62 die "can't locate x86_64-xlate.pl";
63
64 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
65                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
66         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
67 }
68
69 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
70            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
71         $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
72         $avx += 1 if ($1==2.11 && $2>=8);
73 }
74
75 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
76            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
77         $avx = ($1>=10) + ($1>=11);
78 }
79
80 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
81         $avx = ($2>=3.0) + ($2>3.0);
82 }
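# Note: $avx encodes how far the assembler can be trusted: >=1 enables
# the AVX/XOP code paths, >=2 the AVX2 path and >=3 the AVX512F path,
# based on the minimum assembler/compiler versions probed above.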
83
84 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
85 *STDOUT=*OUT;
86
87 # input parameter block
88 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
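# For orientation only (a sketch, not part of the generated code): these
# are the SysV argument registers of what is roughly the C prototype
#
#       void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                           size_t len, const unsigned int key[8],
#                           const unsigned int counter[4]);
#
# the ",\@function,5" annotations below let x86_64-xlate.pl take care of
# the Win64 calling-convention translation.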
89
90 $code.=<<___;
91 .text
92
93 .extern OPENSSL_ia32cap_P
94
95 .align  64
96 .Lzero:
97 .long   0,0,0,0
98 .Lone:
99 .long   1,0,0,0
100 .Linc:
101 .long   0,1,2,3
102 .Lfour:
103 .long   4,4,4,4
104 .Lincy:
105 .long   0,2,4,6,1,3,5,7
106 .Leight:
107 .long   8,8,8,8,8,8,8,8
108 .Lrot16:
109 .byte   0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
110 .Lrot24:
111 .byte   0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
112 .Lsigma:
113 .asciz  "expand 32-byte k"
114 .align  64
115 .Lzeroz:
116 .long   0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
117 .Lfourz:
118 .long   4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
119 .Lincz:
120 .long   0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
121 .Lsixteen:
122 .long   16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
123 .asciz  "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
124 ___
125
126 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
127 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
128   my $arg = pop;
129     $arg = "\$$arg" if ($arg*1 eq $arg);
130     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
131 }
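# Illustration: a call such as &rol("%edx",16) has no sub of that name and
# falls through to AUTOLOAD, which emits "rol\t$16,%edx" (a numeric last
# argument becomes an immediate), while &xor("%edx","%eax") emits
# "xor\t%eax,%edx", i.e. AT&T src,dst order with the destination named
# first in the perlasm call.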
132
133 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
134     "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
135 @t=("%esi","%edi");
136
137 sub ROUND {                     # critical path is 24 cycles per round
138 my ($a0,$b0,$c0,$d0)=@_;
139 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
140 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
141 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
142 my ($xc,$xc_)=map("\"$_\"",@t);
143 my @x=map("\"$_\"",@x);
144
145         # Consider the order in which variables are addressed by
146         # their index:
147         #
148         #       a   b   c   d
149         #
150         #       0   4   8  12 < even round
151         #       1   5   9  13
152         #       2   6  10  14
153         #       3   7  11  15
154         #       0   5  10  15 < odd round
155         #       1   6  11  12
156         #       2   7   8  13
157         #       3   4   9  14
158         #
159         # 'a', 'b' and 'd's are permanently allocated in registers,
160         # @x[0..7,12..15], while 'c's are maintained in memory. If
161         # you observe the 'c' column, you'll notice that a pair of
162         # 'c's is invariant between rounds. This means that we only
163         # have to reload them once per round, in the middle. This is
164         # why you'll see a bunch of 'c' stores and loads in the
165         # middle, but none at the beginning or end.
166
167         # Normally instructions would be interleaved to favour in-order
168         # execution. Out-of-order cores generally manage it gracefully,
169         # but not this time for some reason. As in-order cores are a
170         # dying breed and old Atom is the only one still around, the
171         # instructions are left uninterleaved. Besides, Atom is better
172         # off executing the 1xSSSE3 code anyway...
173
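        # For reference, each Q1..Q4 group below is one standard ChaCha
        # quarter-round on (a,b,c,d), with the 'c' operand living on the
        # stack:
        #
        #       a += b; d ^= a; d <<<= 16;
        #       c += d; b ^= c; b <<<= 12;
        #       a += b; d ^= a; d <<<=  8;
        #       c += d; b ^= c; b <<<=  7;
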
174         (
175         "&add   (@x[$a0],@x[$b0])",     # Q1
176         "&xor   (@x[$d0],@x[$a0])",
177         "&rol   (@x[$d0],16)",
178          "&add  (@x[$a1],@x[$b1])",     # Q2
179          "&xor  (@x[$d1],@x[$a1])",
180          "&rol  (@x[$d1],16)",
181
182         "&add   ($xc,@x[$d0])",
183         "&xor   (@x[$b0],$xc)",
184         "&rol   (@x[$b0],12)",
185          "&add  ($xc_,@x[$d1])",
186          "&xor  (@x[$b1],$xc_)",
187          "&rol  (@x[$b1],12)",
188
189         "&add   (@x[$a0],@x[$b0])",
190         "&xor   (@x[$d0],@x[$a0])",
191         "&rol   (@x[$d0],8)",
192          "&add  (@x[$a1],@x[$b1])",
193          "&xor  (@x[$d1],@x[$a1])",
194          "&rol  (@x[$d1],8)",
195
196         "&add   ($xc,@x[$d0])",
197         "&xor   (@x[$b0],$xc)",
198         "&rol   (@x[$b0],7)",
199          "&add  ($xc_,@x[$d1])",
200          "&xor  (@x[$b1],$xc_)",
201          "&rol  (@x[$b1],7)",
202
203         "&mov   (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
204          "&mov  (\"4*$c1(%rsp)\",$xc_)",
205         "&mov   ($xc,\"4*$c2(%rsp)\")",
206          "&mov  ($xc_,\"4*$c3(%rsp)\")",
207
208         "&add   (@x[$a2],@x[$b2])",     # Q3
209         "&xor   (@x[$d2],@x[$a2])",
210         "&rol   (@x[$d2],16)",
211          "&add  (@x[$a3],@x[$b3])",     # Q4
212          "&xor  (@x[$d3],@x[$a3])",
213          "&rol  (@x[$d3],16)",
214
215         "&add   ($xc,@x[$d2])",
216         "&xor   (@x[$b2],$xc)",
217         "&rol   (@x[$b2],12)",
218          "&add  ($xc_,@x[$d3])",
219          "&xor  (@x[$b3],$xc_)",
220          "&rol  (@x[$b3],12)",
221
222         "&add   (@x[$a2],@x[$b2])",
223         "&xor   (@x[$d2],@x[$a2])",
224         "&rol   (@x[$d2],8)",
225          "&add  (@x[$a3],@x[$b3])",
226          "&xor  (@x[$d3],@x[$a3])",
227          "&rol  (@x[$d3],8)",
228
229         "&add   ($xc,@x[$d2])",
230         "&xor   (@x[$b2],$xc)",
231         "&rol   (@x[$b2],7)",
232          "&add  ($xc_,@x[$d3])",
233          "&xor  (@x[$b3],$xc_)",
234          "&rol  (@x[$b3],7)"
235         );
236 }
237
238 ########################################################################
239 # Generic code path that handles all lengths on pre-SSSE3 processors.
240 $code.=<<___;
241 .globl  ChaCha20_ctr32
242 .type   ChaCha20_ctr32,\@function,5
243 .align  64
244 ChaCha20_ctr32:
245 .cfi_startproc
246         cmp     \$0,$len
247         je      .Lno_data
248         mov     OPENSSL_ia32cap_P+4(%rip),%r10
249 ___
250 $code.=<<___    if ($avx>2);
251         bt      \$48,%r10               # check for AVX512F
252         jc      .LChaCha20_avx512
253 ___
254 $code.=<<___;
255         test    \$`1<<(41-32)`,%r10d     # check for SSSE3
256         jnz     .LChaCha20_ssse3
257
258         push    %rbx
259 .cfi_push       %rbx
260         push    %rbp
261 .cfi_push       %rbp
262         push    %r12
263 .cfi_push       %r12
264         push    %r13
265 .cfi_push       %r13
266         push    %r14
267 .cfi_push       %r14
268         push    %r15
269 .cfi_push       %r15
270         sub     \$64+24,%rsp
271 .cfi_adjust_cfa_offset  64+24
272 .Lctr32_body:
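        # The 64 low bytes of the frame hold the state words that cannot
        # stay in registers (the key/counter copy, including the 'c' row
        # x[8..11]) and double as keystream staging in the tail path; the
        # 24 bytes above them save len, inp and out across the inner loop.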
273
274         #movdqa .Lsigma(%rip),%xmm0
275         movdqu  ($key),%xmm1
276         movdqu  16($key),%xmm2
277         movdqu  ($counter),%xmm3
278         movdqa  .Lone(%rip),%xmm4
279
280         #movdqa %xmm0,4*0(%rsp)         # key[0]
281         movdqa  %xmm1,4*4(%rsp)         # key[1]
282         movdqa  %xmm2,4*8(%rsp)         # key[2]
283         movdqa  %xmm3,4*12(%rsp)        # key[3]
284         mov     $len,%rbp               # reassign $len
285         jmp     .Loop_outer
286
287 .align  32
288 .Loop_outer:
289         mov     \$0x61707865,@x[0]      # 'expa'
290         mov     \$0x3320646e,@x[1]      # 'nd 3'
291         mov     \$0x79622d32,@x[2]      # '2-by'
292         mov     \$0x6b206574,@x[3]      # 'te k'
293         mov     4*4(%rsp),@x[4]
294         mov     4*5(%rsp),@x[5]
295         mov     4*6(%rsp),@x[6]
296         mov     4*7(%rsp),@x[7]
297         movd    %xmm3,@x[12]
298         mov     4*13(%rsp),@x[13]
299         mov     4*14(%rsp),@x[14]
300         mov     4*15(%rsp),@x[15]
301
302         mov     %rbp,64+0(%rsp)         # save len
303         mov     \$10,%ebp
304         mov     $inp,64+8(%rsp)         # save inp
305         movq    %xmm2,%rsi              # "@x[8]"
306         mov     $out,64+16(%rsp)        # save out
307         mov     %rsi,%rdi
308         shr     \$32,%rdi               # "@x[9]"
309         jmp     .Loop
310
311 .align  32
312 .Loop:
313 ___
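        # each &ROUND expands to four quarter-rounds: (0,4,8,12) is the
        # column round and (0,5,10,15) the diagonal round; ten such pairs
        # make up ChaCha20's 20 rounds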
314         foreach (&ROUND (0, 4, 8,12)) { eval; }
315         foreach (&ROUND (0, 5,10,15)) { eval; }
316         &dec    ("%ebp");
317         &jnz    (".Loop");
318
319 $code.=<<___;
320         mov     @t[1],4*9(%rsp)         # modulo-scheduled
321         mov     @t[0],4*8(%rsp)
322         mov     64(%rsp),%rbp           # load len
323         movdqa  %xmm2,%xmm1
324         mov     64+8(%rsp),$inp         # load inp
325         paddd   %xmm4,%xmm3             # increment counter
326         mov     64+16(%rsp),$out        # load out
327
328         add     \$0x61707865,@x[0]      # 'expa'
329         add     \$0x3320646e,@x[1]      # 'nd 3'
330         add     \$0x79622d32,@x[2]      # '2-by'
331         add     \$0x6b206574,@x[3]      # 'te k'
332         add     4*4(%rsp),@x[4]
333         add     4*5(%rsp),@x[5]
334         add     4*6(%rsp),@x[6]
335         add     4*7(%rsp),@x[7]
336         add     4*12(%rsp),@x[12]
337         add     4*13(%rsp),@x[13]
338         add     4*14(%rsp),@x[14]
339         add     4*15(%rsp),@x[15]
340         paddd   4*8(%rsp),%xmm1
341
342         cmp     \$64,%rbp
343         jb      .Ltail
344
345         xor     4*0($inp),@x[0]         # xor with input
346         xor     4*1($inp),@x[1]
347         xor     4*2($inp),@x[2]
348         xor     4*3($inp),@x[3]
349         xor     4*4($inp),@x[4]
350         xor     4*5($inp),@x[5]
351         xor     4*6($inp),@x[6]
352         xor     4*7($inp),@x[7]
353         movdqu  4*8($inp),%xmm0
354         xor     4*12($inp),@x[12]
355         xor     4*13($inp),@x[13]
356         xor     4*14($inp),@x[14]
357         xor     4*15($inp),@x[15]
358         lea     4*16($inp),$inp         # inp+=64
359         pxor    %xmm1,%xmm0
360
361         movdqa  %xmm2,4*8(%rsp)
362         movd    %xmm3,4*12(%rsp)
363
364         mov     @x[0],4*0($out)         # write output
365         mov     @x[1],4*1($out)
366         mov     @x[2],4*2($out)
367         mov     @x[3],4*3($out)
368         mov     @x[4],4*4($out)
369         mov     @x[5],4*5($out)
370         mov     @x[6],4*6($out)
371         mov     @x[7],4*7($out)
372         movdqu  %xmm0,4*8($out)
373         mov     @x[12],4*12($out)
374         mov     @x[13],4*13($out)
375         mov     @x[14],4*14($out)
376         mov     @x[15],4*15($out)
377         lea     4*16($out),$out         # out+=64
378
379         sub     \$64,%rbp
380         jnz     .Loop_outer
381
382         jmp     .Ldone
383
384 .align  16
385 .Ltail:
386         mov     @x[0],4*0(%rsp)
387         mov     @x[1],4*1(%rsp)
388         xor     %rbx,%rbx
389         mov     @x[2],4*2(%rsp)
390         mov     @x[3],4*3(%rsp)
391         mov     @x[4],4*4(%rsp)
392         mov     @x[5],4*5(%rsp)
393         mov     @x[6],4*6(%rsp)
394         mov     @x[7],4*7(%rsp)
395         movdqa  %xmm1,4*8(%rsp)
396         mov     @x[12],4*12(%rsp)
397         mov     @x[13],4*13(%rsp)
398         mov     @x[14],4*14(%rsp)
399         mov     @x[15],4*15(%rsp)
400
401 .Loop_tail:
402         movzb   ($inp,%rbx),%eax
403         movzb   (%rsp,%rbx),%edx
404         lea     1(%rbx),%rbx
405         xor     %edx,%eax
406         mov     %al,-1($out,%rbx)
407         dec     %rbp
408         jnz     .Loop_tail
409
410 .Ldone:
411         lea     64+24+48(%rsp),%rsi
412 .cfi_def_cfa    %rsi,8
413         mov     -48(%rsi),%r15
414 .cfi_restore    %r15
415         mov     -40(%rsi),%r14
416 .cfi_restore    %r14
417         mov     -32(%rsi),%r13
418 .cfi_restore    %r13
419         mov     -24(%rsi),%r12
420 .cfi_restore    %r12
421         mov     -16(%rsi),%rbp
422 .cfi_restore    %rbp
423         mov     -8(%rsi),%rbx
424 .cfi_restore    %rbx
425         lea     (%rsi),%rsp
426 .cfi_def_cfa_register   %rsp
427 .Lno_data:
428         ret
429 .cfi_endproc
430 .size   ChaCha20_ctr32,.-ChaCha20_ctr32
431 ___
432
433 ########################################################################
434 # SSSE3 code path that handles shorter lengths
435 {
436 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
437
438 sub SSSE3ROUND {        # critical path is 20 "SIMD ticks" per round
439         &paddd  ($a,$b);
440         &pxor   ($d,$a);
441         &pshufb ($d,$rot16);
442
443         &paddd  ($c,$d);
444         &pxor   ($b,$c);
445         &movdqa ($t,$b);
446         &psrld  ($b,20);
447         &pslld  ($t,12);
448         &por    ($b,$t);
449
450         &paddd  ($a,$b);
451         &pxor   ($d,$a);
452         &pshufb ($d,$rot24);
453
454         &paddd  ($c,$d);
455         &pxor   ($b,$c);
456         &movdqa ($t,$b);
457         &psrld  ($b,25);
458         &pslld  ($t,7);
459         &por    ($b,$t);
460 }
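# Note on the rotations above: the 16- and 8-bit rotations are byte-granular
# and are done with a single pshufb through the .Lrot16/.Lrot24 masks, while
# the 12- and 7-bit amounts have no byte-shuffle equivalent and fall back to
# the pslld/psrld/por sequence.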
461
462 my $xframe = $win64 ? 32+8 : 8;
463
464 $code.=<<___;
465 .type   ChaCha20_ssse3,\@function,5
466 .align  32
467 ChaCha20_ssse3:
468 .cfi_startproc
469 .LChaCha20_ssse3:
470         mov     %rsp,%r9                # frame pointer
471 .cfi_def_cfa_register   %r9
472 ___
473 $code.=<<___    if ($avx);
474         test    \$`1<<(43-32)`,%r10d
475         jnz     .LChaCha20_4xop         # XOP is fastest even if we only use 1/4 of it
476 ___
477 $code.=<<___;
478         cmp     \$128,$len              # we might throw away some data,
479         ja      .LChaCha20_4x           # but overall it won't be slower
480
481 .Ldo_sse3_after_all:
482         sub     \$64+$xframe,%rsp
483 ___
484 $code.=<<___    if ($win64);
485         movaps  %xmm6,-0x28(%r9)
486         movaps  %xmm7,-0x18(%r9)
487 .Lssse3_body:
488 ___
489 $code.=<<___;
490         movdqa  .Lsigma(%rip),$a
491         movdqu  ($key),$b
492         movdqu  16($key),$c
493         movdqu  ($counter),$d
494         movdqa  .Lrot16(%rip),$rot16
495         movdqa  .Lrot24(%rip),$rot24
496
497         movdqa  $a,0x00(%rsp)
498         movdqa  $b,0x10(%rsp)
499         movdqa  $c,0x20(%rsp)
500         movdqa  $d,0x30(%rsp)
501         mov     \$10,$counter           # reuse $counter
502         jmp     .Loop_ssse3
503
504 .align  32
505 .Loop_outer_ssse3:
506         movdqa  .Lone(%rip),$d
507         movdqa  0x00(%rsp),$a
508         movdqa  0x10(%rsp),$b
509         movdqa  0x20(%rsp),$c
510         paddd   0x30(%rsp),$d
511         mov     \$10,$counter
512         movdqa  $d,0x30(%rsp)
513         jmp     .Loop_ssse3
514
515 .align  32
516 .Loop_ssse3:
517 ___
518         &SSSE3ROUND();
519         &pshufd ($c,$c,0b01001110);
520         &pshufd ($b,$b,0b00111001);
521         &pshufd ($d,$d,0b10010011);
522         &nop    ();
523
524         &SSSE3ROUND();
525         &pshufd ($c,$c,0b01001110);
526         &pshufd ($b,$b,0b10010011);
527         &pshufd ($d,$d,0b00111001);
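        # the first set of pshufd's above re-aligns rows 1-3 so that the
        # second SSSE3ROUND operates on diagonals instead of columns; the
        # second set restores the original column layout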
528
529         &dec    ($counter);
530         &jnz    (".Loop_ssse3");
531
532 $code.=<<___;
533         paddd   0x00(%rsp),$a
534         paddd   0x10(%rsp),$b
535         paddd   0x20(%rsp),$c
536         paddd   0x30(%rsp),$d
537
538         cmp     \$64,$len
539         jb      .Ltail_ssse3
540
541         movdqu  0x00($inp),$t
542         movdqu  0x10($inp),$t1
543         pxor    $t,$a                   # xor with input
544         movdqu  0x20($inp),$t
545         pxor    $t1,$b
546         movdqu  0x30($inp),$t1
547         lea     0x40($inp),$inp         # inp+=64
548         pxor    $t,$c
549         pxor    $t1,$d
550
551         movdqu  $a,0x00($out)           # write output
552         movdqu  $b,0x10($out)
553         movdqu  $c,0x20($out)
554         movdqu  $d,0x30($out)
555         lea     0x40($out),$out         # out+=64
556
557         sub     \$64,$len
558         jnz     .Loop_outer_ssse3
559
560         jmp     .Ldone_ssse3
561
562 .align  16
563 .Ltail_ssse3:
564         movdqa  $a,0x00(%rsp)
565         movdqa  $b,0x10(%rsp)
566         movdqa  $c,0x20(%rsp)
567         movdqa  $d,0x30(%rsp)
568         xor     $counter,$counter
569
570 .Loop_tail_ssse3:
571         movzb   ($inp,$counter),%eax
572         movzb   (%rsp,$counter),%ecx
573         lea     1($counter),$counter
574         xor     %ecx,%eax
575         mov     %al,-1($out,$counter)
576         dec     $len
577         jnz     .Loop_tail_ssse3
578
579 .Ldone_ssse3:
580 ___
581 $code.=<<___    if ($win64);
582         movaps  -0x28(%r9),%xmm6
583         movaps  -0x18(%r9),%xmm7
584 ___
585 $code.=<<___;
586         lea     (%r9),%rsp
587 .cfi_def_cfa_register   %rsp
588 .Lssse3_epilogue:
589         ret
590 .cfi_endproc
591 .size   ChaCha20_ssse3,.-ChaCha20_ssse3
592 ___
593 }
594
595 ########################################################################
596 # SSSE3 code path that handles longer messages.
597 {
598 # assign variables to favor Atom front-end
599 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
600     $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
601 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
602         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
603
604 sub SSSE3_lane_ROUND {
605 my ($a0,$b0,$c0,$d0)=@_;
606 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
607 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
608 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
609 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
610 my @x=map("\"$_\"",@xx);
611
612         # Consider the order in which variables are addressed by
613         # their index:
614         #
615         #       a   b   c   d
616         #
617         #       0   4   8  12 < even round
618         #       1   5   9  13
619         #       2   6  10  14
620         #       3   7  11  15
621         #       0   5  10  15 < odd round
622         #       1   6  11  12
623         #       2   7   8  13
624         #       3   4   9  14
625         #
626         # 'a', 'b' and 'd's are permanently allocated in registers,
627         # @x[0..7,12..15], while 'c's are maintained in memory. If
628         # you observe the 'c' column, you'll notice that a pair of
629         # 'c's is invariant between rounds. This means that we only
630         # have to reload them once per round, in the middle. This is
631         # why you'll see a bunch of 'c' stores and loads in the
632         # middle, but none at the beginning or end.
633
634         (
635         "&paddd         (@x[$a0],@x[$b0])",     # Q1
636          "&paddd        (@x[$a1],@x[$b1])",     # Q2
637         "&pxor          (@x[$d0],@x[$a0])",
638          "&pxor         (@x[$d1],@x[$a1])",
639         "&pshufb        (@x[$d0],$t1)",
640          "&pshufb       (@x[$d1],$t1)",
641
642         "&paddd         ($xc,@x[$d0])",
643          "&paddd        ($xc_,@x[$d1])",
644         "&pxor          (@x[$b0],$xc)",
645          "&pxor         (@x[$b1],$xc_)",
646         "&movdqa        ($t0,@x[$b0])",
647         "&pslld         (@x[$b0],12)",
648         "&psrld         ($t0,20)",
649          "&movdqa       ($t1,@x[$b1])",
650          "&pslld        (@x[$b1],12)",
651         "&por           (@x[$b0],$t0)",
652          "&psrld        ($t1,20)",
653         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
654          "&por          (@x[$b1],$t1)",
655
656         "&paddd         (@x[$a0],@x[$b0])",
657          "&paddd        (@x[$a1],@x[$b1])",
658         "&pxor          (@x[$d0],@x[$a0])",
659          "&pxor         (@x[$d1],@x[$a1])",
660         "&pshufb        (@x[$d0],$t0)",
661          "&pshufb       (@x[$d1],$t0)",
662
663         "&paddd         ($xc,@x[$d0])",
664          "&paddd        ($xc_,@x[$d1])",
665         "&pxor          (@x[$b0],$xc)",
666          "&pxor         (@x[$b1],$xc_)",
667         "&movdqa        ($t1,@x[$b0])",
668         "&pslld         (@x[$b0],7)",
669         "&psrld         ($t1,25)",
670          "&movdqa       ($t0,@x[$b1])",
671          "&pslld        (@x[$b1],7)",
672         "&por           (@x[$b0],$t1)",
673          "&psrld        ($t0,25)",
674         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
675          "&por          (@x[$b1],$t0)",
676
677         "&movdqa        (\"`16*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
678          "&movdqa       (\"`16*($c1-8)`(%rsp)\",$xc_)",
679         "&movdqa        ($xc,\"`16*($c2-8)`(%rsp)\")",
680          "&movdqa       ($xc_,\"`16*($c3-8)`(%rsp)\")",
681
682         "&paddd         (@x[$a2],@x[$b2])",     # Q3
683          "&paddd        (@x[$a3],@x[$b3])",     # Q4
684         "&pxor          (@x[$d2],@x[$a2])",
685          "&pxor         (@x[$d3],@x[$a3])",
686         "&pshufb        (@x[$d2],$t1)",
687          "&pshufb       (@x[$d3],$t1)",
688
689         "&paddd         ($xc,@x[$d2])",
690          "&paddd        ($xc_,@x[$d3])",
691         "&pxor          (@x[$b2],$xc)",
692          "&pxor         (@x[$b3],$xc_)",
693         "&movdqa        ($t0,@x[$b2])",
694         "&pslld         (@x[$b2],12)",
695         "&psrld         ($t0,20)",
696          "&movdqa       ($t1,@x[$b3])",
697          "&pslld        (@x[$b3],12)",
698         "&por           (@x[$b2],$t0)",
699          "&psrld        ($t1,20)",
700         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
701          "&por          (@x[$b3],$t1)",
702
703         "&paddd         (@x[$a2],@x[$b2])",
704          "&paddd        (@x[$a3],@x[$b3])",
705         "&pxor          (@x[$d2],@x[$a2])",
706          "&pxor         (@x[$d3],@x[$a3])",
707         "&pshufb        (@x[$d2],$t0)",
708          "&pshufb       (@x[$d3],$t0)",
709
710         "&paddd         ($xc,@x[$d2])",
711          "&paddd        ($xc_,@x[$d3])",
712         "&pxor          (@x[$b2],$xc)",
713          "&pxor         (@x[$b3],$xc_)",
714         "&movdqa        ($t1,@x[$b2])",
715         "&pslld         (@x[$b2],7)",
716         "&psrld         ($t1,25)",
717          "&movdqa       ($t0,@x[$b3])",
718          "&pslld        (@x[$b3],7)",
719         "&por           (@x[$b2],$t1)",
720          "&psrld        ($t0,25)",
721         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
722          "&por          (@x[$b3],$t0)"
723         );
724 }
725
726 my $xframe = $win64 ? 0xa8 : 8;
727
728 $code.=<<___;
729 .type   ChaCha20_4x,\@function,5
730 .align  32
731 ChaCha20_4x:
732 .cfi_startproc
733 .LChaCha20_4x:
734         mov             %rsp,%r9                # frame pointer
735 .cfi_def_cfa_register   %r9
736         mov             %r10,%r11
737 ___
738 $code.=<<___    if ($avx>1);
739         shr             \$32,%r10               # OPENSSL_ia32cap_P+8
740         test            \$`1<<5`,%r10           # test AVX2
741         jnz             .LChaCha20_8x
742 ___
743 $code.=<<___;
744         cmp             \$192,$len
745         ja              .Lproceed4x
746
747         and             \$`1<<26|1<<22`,%r11    # isolate XSAVE+MOVBE
748         cmp             \$`1<<22`,%r11          # check for MOVBE without XSAVE
749         je              .Ldo_sse3_after_all     # to detect Atom
750
751 .Lproceed4x:
752         sub             \$0x140+$xframe,%rsp
753 ___
754         ################ stack layout
755         # +0x00         SIMD equivalent of @x[8-11]
756         # ...
757         # +0x40         constant copy of key[0-2] smashed by lanes
758         # ...
759         # +0x100        SIMD counters (with nonce smashed by lanes)
760         # ...
761         # +0x140
762 $code.=<<___    if ($win64);
763         movaps          %xmm6,-0xa8(%r9)
764         movaps          %xmm7,-0x98(%r9)
765         movaps          %xmm8,-0x88(%r9)
766         movaps          %xmm9,-0x78(%r9)
767         movaps          %xmm10,-0x68(%r9)
768         movaps          %xmm11,-0x58(%r9)
769         movaps          %xmm12,-0x48(%r9)
770         movaps          %xmm13,-0x38(%r9)
771         movaps          %xmm14,-0x28(%r9)
772         movaps          %xmm15,-0x18(%r9)
773 .L4x_body:
774 ___
775 $code.=<<___;
776         movdqa          .Lsigma(%rip),$xa3      # key[0]
777         movdqu          ($key),$xb3             # key[1]
778         movdqu          16($key),$xt3           # key[2]
779         movdqu          ($counter),$xd3         # key[3]
780         lea             0x100(%rsp),%rcx        # size optimization
781         lea             .Lrot16(%rip),%r10
782         lea             .Lrot24(%rip),%r11
783
784         pshufd          \$0x00,$xa3,$xa0        # smash key by lanes...
785         pshufd          \$0x55,$xa3,$xa1
786         movdqa          $xa0,0x40(%rsp)         # ... and offload
787         pshufd          \$0xaa,$xa3,$xa2
788         movdqa          $xa1,0x50(%rsp)
789         pshufd          \$0xff,$xa3,$xa3
790         movdqa          $xa2,0x60(%rsp)
791         movdqa          $xa3,0x70(%rsp)
792
793         pshufd          \$0x00,$xb3,$xb0
794         pshufd          \$0x55,$xb3,$xb1
795         movdqa          $xb0,0x80-0x100(%rcx)
796         pshufd          \$0xaa,$xb3,$xb2
797         movdqa          $xb1,0x90-0x100(%rcx)
798         pshufd          \$0xff,$xb3,$xb3
799         movdqa          $xb2,0xa0-0x100(%rcx)
800         movdqa          $xb3,0xb0-0x100(%rcx)
801
802         pshufd          \$0x00,$xt3,$xt0        # "$xc0"
803         pshufd          \$0x55,$xt3,$xt1        # "$xc1"
804         movdqa          $xt0,0xc0-0x100(%rcx)
805         pshufd          \$0xaa,$xt3,$xt2        # "$xc2"
806         movdqa          $xt1,0xd0-0x100(%rcx)
807         pshufd          \$0xff,$xt3,$xt3        # "$xc3"
808         movdqa          $xt2,0xe0-0x100(%rcx)
809         movdqa          $xt3,0xf0-0x100(%rcx)
810
811         pshufd          \$0x00,$xd3,$xd0
812         pshufd          \$0x55,$xd3,$xd1
813         paddd           .Linc(%rip),$xd0        # don't save counters yet
814         pshufd          \$0xaa,$xd3,$xd2
815         movdqa          $xd1,0x110-0x100(%rcx)
816         pshufd          \$0xff,$xd3,$xd3
817         movdqa          $xd2,0x120-0x100(%rcx)
818         movdqa          $xd3,0x130-0x100(%rcx)
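        # At this point every state word has been replicated ("smashed")
        # across the four lanes of an xmm register and offloaded to the
        # stack; the lanes correspond to four consecutive 64-byte blocks,
        # and only the counter row differs per lane, .Linc having added
        # 0,1,2,3 to it.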
819
820         jmp             .Loop_enter4x
821
822 .align  32
823 .Loop_outer4x:
824         movdqa          0x40(%rsp),$xa0         # re-load smashed key
825         movdqa          0x50(%rsp),$xa1
826         movdqa          0x60(%rsp),$xa2
827         movdqa          0x70(%rsp),$xa3
828         movdqa          0x80-0x100(%rcx),$xb0
829         movdqa          0x90-0x100(%rcx),$xb1
830         movdqa          0xa0-0x100(%rcx),$xb2
831         movdqa          0xb0-0x100(%rcx),$xb3
832         movdqa          0xc0-0x100(%rcx),$xt0   # "$xc0"
833         movdqa          0xd0-0x100(%rcx),$xt1   # "$xc1"
834         movdqa          0xe0-0x100(%rcx),$xt2   # "$xc2"
835         movdqa          0xf0-0x100(%rcx),$xt3   # "$xc3"
836         movdqa          0x100-0x100(%rcx),$xd0
837         movdqa          0x110-0x100(%rcx),$xd1
838         movdqa          0x120-0x100(%rcx),$xd2
839         movdqa          0x130-0x100(%rcx),$xd3
840         paddd           .Lfour(%rip),$xd0       # next SIMD counters
841
842 .Loop_enter4x:
843         movdqa          $xt2,0x20(%rsp)         # SIMD equivalent of "@x[10]"
844         movdqa          $xt3,0x30(%rsp)         # SIMD equivalent of "@x[11]"
845         movdqa          (%r10),$xt3             # .Lrot16(%rip)
846         mov             \$10,%eax
847         movdqa          $xd0,0x100-0x100(%rcx)  # save SIMD counters
848         jmp             .Loop4x
849
850 .align  32
851 .Loop4x:
852 ___
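        # as in the scalar loop: (0,4,8,12) is the column round and
        # (0,5,10,15) the diagonal round, ten double-rounds in total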
853         foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
854         foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
855 $code.=<<___;
856         dec             %eax
857         jnz             .Loop4x
858
859         paddd           0x40(%rsp),$xa0         # accumulate key material
860         paddd           0x50(%rsp),$xa1
861         paddd           0x60(%rsp),$xa2
862         paddd           0x70(%rsp),$xa3
863
864         movdqa          $xa0,$xt2               # "de-interlace" data
865         punpckldq       $xa1,$xa0
866         movdqa          $xa2,$xt3
867         punpckldq       $xa3,$xa2
868         punpckhdq       $xa1,$xt2
869         punpckhdq       $xa3,$xt3
870         movdqa          $xa0,$xa1
871         punpcklqdq      $xa2,$xa0               # "a0"
872         movdqa          $xt2,$xa3
873         punpcklqdq      $xt3,$xt2               # "a2"
874         punpckhqdq      $xa2,$xa1               # "a1"
875         punpckhqdq      $xt3,$xa3               # "a3"
876 ___
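# The punpck{l,h}{dq,qdq} sequence above is one quarter of a 4x4 dword
# transpose: it converts the "same word from four blocks" lane layout back
# into contiguous 16-byte pieces of each block, ready to be xored with the
# input stream.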
877         ($xa2,$xt2)=($xt2,$xa2);
878 $code.=<<___;
879         paddd           0x80-0x100(%rcx),$xb0
880         paddd           0x90-0x100(%rcx),$xb1
881         paddd           0xa0-0x100(%rcx),$xb2
882         paddd           0xb0-0x100(%rcx),$xb3
883
884         movdqa          $xa0,0x00(%rsp)         # offload $xaN
885         movdqa          $xa1,0x10(%rsp)
886         movdqa          0x20(%rsp),$xa0         # "xc2"
887         movdqa          0x30(%rsp),$xa1         # "xc3"
888
889         movdqa          $xb0,$xt2
890         punpckldq       $xb1,$xb0
891         movdqa          $xb2,$xt3
892         punpckldq       $xb3,$xb2
893         punpckhdq       $xb1,$xt2
894         punpckhdq       $xb3,$xt3
895         movdqa          $xb0,$xb1
896         punpcklqdq      $xb2,$xb0               # "b0"
897         movdqa          $xt2,$xb3
898         punpcklqdq      $xt3,$xt2               # "b2"
899         punpckhqdq      $xb2,$xb1               # "b1"
900         punpckhqdq      $xt3,$xb3               # "b3"
901 ___
902         ($xb2,$xt2)=($xt2,$xb2);
903         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
904 $code.=<<___;
905         paddd           0xc0-0x100(%rcx),$xc0
906         paddd           0xd0-0x100(%rcx),$xc1
907         paddd           0xe0-0x100(%rcx),$xc2
908         paddd           0xf0-0x100(%rcx),$xc3
909
910         movdqa          $xa2,0x20(%rsp)         # keep offloading $xaN
911         movdqa          $xa3,0x30(%rsp)
912
913         movdqa          $xc0,$xt2
914         punpckldq       $xc1,$xc0
915         movdqa          $xc2,$xt3
916         punpckldq       $xc3,$xc2
917         punpckhdq       $xc1,$xt2
918         punpckhdq       $xc3,$xt3
919         movdqa          $xc0,$xc1
920         punpcklqdq      $xc2,$xc0               # "c0"
921         movdqa          $xt2,$xc3
922         punpcklqdq      $xt3,$xt2               # "c2"
923         punpckhqdq      $xc2,$xc1               # "c1"
924         punpckhqdq      $xt3,$xc3               # "c3"
925 ___
926         ($xc2,$xt2)=($xt2,$xc2);
927         ($xt0,$xt1)=($xa2,$xa3);                # use $xaN as temporary
928 $code.=<<___;
929         paddd           0x100-0x100(%rcx),$xd0
930         paddd           0x110-0x100(%rcx),$xd1
931         paddd           0x120-0x100(%rcx),$xd2
932         paddd           0x130-0x100(%rcx),$xd3
933
934         movdqa          $xd0,$xt2
935         punpckldq       $xd1,$xd0
936         movdqa          $xd2,$xt3
937         punpckldq       $xd3,$xd2
938         punpckhdq       $xd1,$xt2
939         punpckhdq       $xd3,$xt3
940         movdqa          $xd0,$xd1
941         punpcklqdq      $xd2,$xd0               # "d0"
942         movdqa          $xt2,$xd3
943         punpcklqdq      $xt3,$xt2               # "d2"
944         punpckhqdq      $xd2,$xd1               # "d1"
945         punpckhqdq      $xt3,$xd3               # "d3"
946 ___
947         ($xd2,$xt2)=($xt2,$xd2);
948 $code.=<<___;
949         cmp             \$64*4,$len
950         jb              .Ltail4x
951
952         movdqu          0x00($inp),$xt0         # xor with input
953         movdqu          0x10($inp),$xt1
954         movdqu          0x20($inp),$xt2
955         movdqu          0x30($inp),$xt3
956         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
957         pxor            $xb0,$xt1
958         pxor            $xc0,$xt2
959         pxor            $xd0,$xt3
960
961          movdqu         $xt0,0x00($out)
962         movdqu          0x40($inp),$xt0
963          movdqu         $xt1,0x10($out)
964         movdqu          0x50($inp),$xt1
965          movdqu         $xt2,0x20($out)
966         movdqu          0x60($inp),$xt2
967          movdqu         $xt3,0x30($out)
968         movdqu          0x70($inp),$xt3
969         lea             0x80($inp),$inp         # size optimization
970         pxor            0x10(%rsp),$xt0
971         pxor            $xb1,$xt1
972         pxor            $xc1,$xt2
973         pxor            $xd1,$xt3
974
975          movdqu         $xt0,0x40($out)
976         movdqu          0x00($inp),$xt0
977          movdqu         $xt1,0x50($out)
978         movdqu          0x10($inp),$xt1
979          movdqu         $xt2,0x60($out)
980         movdqu          0x20($inp),$xt2
981          movdqu         $xt3,0x70($out)
982          lea            0x80($out),$out         # size optimization
983         movdqu          0x30($inp),$xt3
984         pxor            0x20(%rsp),$xt0
985         pxor            $xb2,$xt1
986         pxor            $xc2,$xt2
987         pxor            $xd2,$xt3
988
989          movdqu         $xt0,0x00($out)
990         movdqu          0x40($inp),$xt0
991          movdqu         $xt1,0x10($out)
992         movdqu          0x50($inp),$xt1
993          movdqu         $xt2,0x20($out)
994         movdqu          0x60($inp),$xt2
995          movdqu         $xt3,0x30($out)
996         movdqu          0x70($inp),$xt3
997         lea             0x80($inp),$inp         # inp+=64*4
998         pxor            0x30(%rsp),$xt0
999         pxor            $xb3,$xt1
1000         pxor            $xc3,$xt2
1001         pxor            $xd3,$xt3
1002         movdqu          $xt0,0x40($out)
1003         movdqu          $xt1,0x50($out)
1004         movdqu          $xt2,0x60($out)
1005         movdqu          $xt3,0x70($out)
1006         lea             0x80($out),$out         # out+=64*4
1007
1008         sub             \$64*4,$len
1009         jnz             .Loop_outer4x
1010
1011         jmp             .Ldone4x
1012
1013 .Ltail4x:
1014         cmp             \$192,$len
1015         jae             .L192_or_more4x
1016         cmp             \$128,$len
1017         jae             .L128_or_more4x
1018         cmp             \$64,$len
1019         jae             .L64_or_more4x
1020
1021         #movdqa         0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1022         xor             %r10,%r10
1023         #movdqa         $xt0,0x00(%rsp)
1024         movdqa          $xb0,0x10(%rsp)
1025         movdqa          $xc0,0x20(%rsp)
1026         movdqa          $xd0,0x30(%rsp)
1027         jmp             .Loop_tail4x
1028
1029 .align  32
1030 .L64_or_more4x:
1031         movdqu          0x00($inp),$xt0         # xor with input
1032         movdqu          0x10($inp),$xt1
1033         movdqu          0x20($inp),$xt2
1034         movdqu          0x30($inp),$xt3
1035         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1036         pxor            $xb0,$xt1
1037         pxor            $xc0,$xt2
1038         pxor            $xd0,$xt3
1039         movdqu          $xt0,0x00($out)
1040         movdqu          $xt1,0x10($out)
1041         movdqu          $xt2,0x20($out)
1042         movdqu          $xt3,0x30($out)
1043         je              .Ldone4x
1044
1045         movdqa          0x10(%rsp),$xt0         # $xaN is offloaded, remember?
1046         lea             0x40($inp),$inp         # inp+=64*1
1047         xor             %r10,%r10
1048         movdqa          $xt0,0x00(%rsp)
1049         movdqa          $xb1,0x10(%rsp)
1050         lea             0x40($out),$out         # out+=64*1
1051         movdqa          $xc1,0x20(%rsp)
1052         sub             \$64,$len               # len-=64*1
1053         movdqa          $xd1,0x30(%rsp)
1054         jmp             .Loop_tail4x
1055
1056 .align  32
1057 .L128_or_more4x:
1058         movdqu          0x00($inp),$xt0         # xor with input
1059         movdqu          0x10($inp),$xt1
1060         movdqu          0x20($inp),$xt2
1061         movdqu          0x30($inp),$xt3
1062         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1063         pxor            $xb0,$xt1
1064         pxor            $xc0,$xt2
1065         pxor            $xd0,$xt3
1066
1067          movdqu         $xt0,0x00($out)
1068         movdqu          0x40($inp),$xt0
1069          movdqu         $xt1,0x10($out)
1070         movdqu          0x50($inp),$xt1
1071          movdqu         $xt2,0x20($out)
1072         movdqu          0x60($inp),$xt2
1073          movdqu         $xt3,0x30($out)
1074         movdqu          0x70($inp),$xt3
1075         pxor            0x10(%rsp),$xt0
1076         pxor            $xb1,$xt1
1077         pxor            $xc1,$xt2
1078         pxor            $xd1,$xt3
1079         movdqu          $xt0,0x40($out)
1080         movdqu          $xt1,0x50($out)
1081         movdqu          $xt2,0x60($out)
1082         movdqu          $xt3,0x70($out)
1083         je              .Ldone4x
1084
1085         movdqa          0x20(%rsp),$xt0         # $xaN is offloaded, remember?
1086         lea             0x80($inp),$inp         # inp+=64*2
1087         xor             %r10,%r10
1088         movdqa          $xt0,0x00(%rsp)
1089         movdqa          $xb2,0x10(%rsp)
1090         lea             0x80($out),$out         # out+=64*2
1091         movdqa          $xc2,0x20(%rsp)
1092         sub             \$128,$len              # len-=64*2
1093         movdqa          $xd2,0x30(%rsp)
1094         jmp             .Loop_tail4x
1095
1096 .align  32
1097 .L192_or_more4x:
1098         movdqu          0x00($inp),$xt0         # xor with input
1099         movdqu          0x10($inp),$xt1
1100         movdqu          0x20($inp),$xt2
1101         movdqu          0x30($inp),$xt3
1102         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1103         pxor            $xb0,$xt1
1104         pxor            $xc0,$xt2
1105         pxor            $xd0,$xt3
1106
1107          movdqu         $xt0,0x00($out)
1108         movdqu          0x40($inp),$xt0
1109          movdqu         $xt1,0x10($out)
1110         movdqu          0x50($inp),$xt1
1111          movdqu         $xt2,0x20($out)
1112         movdqu          0x60($inp),$xt2
1113          movdqu         $xt3,0x30($out)
1114         movdqu          0x70($inp),$xt3
1115         lea             0x80($inp),$inp         # size optimization
1116         pxor            0x10(%rsp),$xt0
1117         pxor            $xb1,$xt1
1118         pxor            $xc1,$xt2
1119         pxor            $xd1,$xt3
1120
1121          movdqu         $xt0,0x40($out)
1122         movdqu          0x00($inp),$xt0
1123          movdqu         $xt1,0x50($out)
1124         movdqu          0x10($inp),$xt1
1125          movdqu         $xt2,0x60($out)
1126         movdqu          0x20($inp),$xt2
1127          movdqu         $xt3,0x70($out)
1128          lea            0x80($out),$out         # size optimization
1129         movdqu          0x30($inp),$xt3
1130         pxor            0x20(%rsp),$xt0
1131         pxor            $xb2,$xt1
1132         pxor            $xc2,$xt2
1133         pxor            $xd2,$xt3
1134         movdqu          $xt0,0x00($out)
1135         movdqu          $xt1,0x10($out)
1136         movdqu          $xt2,0x20($out)
1137         movdqu          $xt3,0x30($out)
1138         je              .Ldone4x
1139
1140         movdqa          0x30(%rsp),$xt0         # $xaN is offloaded, remember?
1141         lea             0x40($inp),$inp         # inp+=64*3
1142         xor             %r10,%r10
1143         movdqa          $xt0,0x00(%rsp)
1144         movdqa          $xb3,0x10(%rsp)
1145         lea             0x40($out),$out         # out+=64*3
1146         movdqa          $xc3,0x20(%rsp)
1147         sub             \$192,$len              # len-=64*3
1148         movdqa          $xd3,0x30(%rsp)
1149
1150 .Loop_tail4x:
1151         movzb           ($inp,%r10),%eax
1152         movzb           (%rsp,%r10),%ecx
1153         lea             1(%r10),%r10
1154         xor             %ecx,%eax
1155         mov             %al,-1($out,%r10)
1156         dec             $len
1157         jnz             .Loop_tail4x
1158
1159 .Ldone4x:
1160 ___
1161 $code.=<<___    if ($win64);
1162         movaps          -0xa8(%r9),%xmm6
1163         movaps          -0x98(%r9),%xmm7
1164         movaps          -0x88(%r9),%xmm8
1165         movaps          -0x78(%r9),%xmm9
1166         movaps          -0x68(%r9),%xmm10
1167         movaps          -0x58(%r9),%xmm11
1168         movaps          -0x48(%r9),%xmm12
1169         movaps          -0x38(%r9),%xmm13
1170         movaps          -0x28(%r9),%xmm14
1171         movaps          -0x18(%r9),%xmm15
1172 ___
1173 $code.=<<___;
1174         lea             (%r9),%rsp
1175 .cfi_def_cfa_register   %rsp
1176 .L4x_epilogue:
1177         ret
1178 .cfi_endproc
1179 .size   ChaCha20_4x,.-ChaCha20_4x
1180 ___
1181 }
1182
1183 ########################################################################
1184 # XOP code path that handles all lengths.
1185 if ($avx) {
1186 # There is some "anomaly" observed depending on instruction size or
1187 # alignment. If you look closely at the code below, you'll notice that
1188 # the argument order sometimes varies. The order affects instruction
1189 # encoding by making it larger, and such fiddling gives a 5% performance
1190 # improvement. This is on FX-4100...
1191
1192 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1193     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1194 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1195          $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1196
1197 sub XOP_lane_ROUND {
1198 my ($a0,$b0,$c0,$d0)=@_;
1199 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1200 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1201 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1202 my @x=map("\"$_\"",@xx);
1203
1204         (
1205         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1206          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1207           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1208            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1209         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1210          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1211           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1212            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1213         "&vprotd        (@x[$d0],@x[$d0],16)",
1214          "&vprotd       (@x[$d1],@x[$d1],16)",
1215           "&vprotd      (@x[$d2],@x[$d2],16)",
1216            "&vprotd     (@x[$d3],@x[$d3],16)",
1217
1218         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1219          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1220           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1221            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1222         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1223          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1224           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1225            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1226         "&vprotd        (@x[$b0],@x[$b0],12)",
1227          "&vprotd       (@x[$b1],@x[$b1],12)",
1228           "&vprotd      (@x[$b2],@x[$b2],12)",
1229            "&vprotd     (@x[$b3],@x[$b3],12)",
1230
1231         "&vpaddd        (@x[$a0],@x[$b0],@x[$a0])",     # flip
1232          "&vpaddd       (@x[$a1],@x[$b1],@x[$a1])",     # flip
1233           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
1234            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
1235         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1236          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1237           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1238            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1239         "&vprotd        (@x[$d0],@x[$d0],8)",
1240          "&vprotd       (@x[$d1],@x[$d1],8)",
1241           "&vprotd      (@x[$d2],@x[$d2],8)",
1242            "&vprotd     (@x[$d3],@x[$d3],8)",
1243
1244         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1245          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1246           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1247            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1248         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1249          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1250           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1251            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1252         "&vprotd        (@x[$b0],@x[$b0],7)",
1253          "&vprotd       (@x[$b1],@x[$b1],7)",
1254           "&vprotd      (@x[$b2],@x[$b2],7)",
1255            "&vprotd     (@x[$b3],@x[$b3],7)"
1256         );
1257 }
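# Note: XOP's vprotd rotates a dword by an arbitrary amount in a single
# instruction, so this path needs neither the .Lrot16/.Lrot24 pshufb masks
# nor the shift/or pairs used in the SSSE3 code.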
1258
1259 my $xframe = $win64 ? 0xa8 : 8;
1260
1261 $code.=<<___;
1262 .type   ChaCha20_4xop,\@function,5
1263 .align  32
1264 ChaCha20_4xop:
1265 .cfi_startproc
1266 .LChaCha20_4xop:
1267         mov             %rsp,%r9                # frame pointer
1268 .cfi_def_cfa_register   %r9
1269         sub             \$0x140+$xframe,%rsp
1270 ___
1271         ################ stack layout
1272         # +0x00         SIMD equivalent of @x[8-11]
1273         # ...
1274         # +0x40         constant copy of key[0-2] smashed by lanes
1275         # ...
1276         # +0x100        SIMD counters (with nonce smashed by lanes)
1277         # ...
1278         # +0x140
1279 $code.=<<___    if ($win64);
1280         movaps          %xmm6,-0xa8(%r9)
1281         movaps          %xmm7,-0x98(%r9)
1282         movaps          %xmm8,-0x88(%r9)
1283         movaps          %xmm9,-0x78(%r9)
1284         movaps          %xmm10,-0x68(%r9)
1285         movaps          %xmm11,-0x58(%r9)
1286         movaps          %xmm12,-0x48(%r9)
1287         movaps          %xmm13,-0x38(%r9)
1288         movaps          %xmm14,-0x28(%r9)
1289         movaps          %xmm15,-0x18(%r9)
1290 .L4xop_body:
1291 ___
1292 $code.=<<___;
1293         vzeroupper
1294
1295         vmovdqa         .Lsigma(%rip),$xa3      # key[0]
1296         vmovdqu         ($key),$xb3             # key[1]
1297         vmovdqu         16($key),$xt3           # key[2]
1298         vmovdqu         ($counter),$xd3         # key[3]
1299         lea             0x100(%rsp),%rcx        # size optimization
1300
1301         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1302         vpshufd         \$0x55,$xa3,$xa1
1303         vmovdqa         $xa0,0x40(%rsp)         # ... and offload
1304         vpshufd         \$0xaa,$xa3,$xa2
1305         vmovdqa         $xa1,0x50(%rsp)
1306         vpshufd         \$0xff,$xa3,$xa3
1307         vmovdqa         $xa2,0x60(%rsp)
1308         vmovdqa         $xa3,0x70(%rsp)
1309
1310         vpshufd         \$0x00,$xb3,$xb0
1311         vpshufd         \$0x55,$xb3,$xb1
1312         vmovdqa         $xb0,0x80-0x100(%rcx)
1313         vpshufd         \$0xaa,$xb3,$xb2
1314         vmovdqa         $xb1,0x90-0x100(%rcx)
1315         vpshufd         \$0xff,$xb3,$xb3
1316         vmovdqa         $xb2,0xa0-0x100(%rcx)
1317         vmovdqa         $xb3,0xb0-0x100(%rcx)
1318
1319         vpshufd         \$0x00,$xt3,$xt0        # "$xc0"
1320         vpshufd         \$0x55,$xt3,$xt1        # "$xc1"
1321         vmovdqa         $xt0,0xc0-0x100(%rcx)
1322         vpshufd         \$0xaa,$xt3,$xt2        # "$xc2"
1323         vmovdqa         $xt1,0xd0-0x100(%rcx)
1324         vpshufd         \$0xff,$xt3,$xt3        # "$xc3"
1325         vmovdqa         $xt2,0xe0-0x100(%rcx)
1326         vmovdqa         $xt3,0xf0-0x100(%rcx)
1327
1328         vpshufd         \$0x00,$xd3,$xd0
1329         vpshufd         \$0x55,$xd3,$xd1
1330         vpaddd          .Linc(%rip),$xd0,$xd0   # don't save counters yet
1331         vpshufd         \$0xaa,$xd3,$xd2
1332         vmovdqa         $xd1,0x110-0x100(%rcx)
1333         vpshufd         \$0xff,$xd3,$xd3
1334         vmovdqa         $xd2,0x120-0x100(%rcx)
1335         vmovdqa         $xd3,0x130-0x100(%rcx)
1336
1337         jmp             .Loop_enter4xop
1338
1339 .align  32
1340 .Loop_outer4xop:
1341         vmovdqa         0x40(%rsp),$xa0         # re-load smashed key
1342         vmovdqa         0x50(%rsp),$xa1
1343         vmovdqa         0x60(%rsp),$xa2
1344         vmovdqa         0x70(%rsp),$xa3
1345         vmovdqa         0x80-0x100(%rcx),$xb0
1346         vmovdqa         0x90-0x100(%rcx),$xb1
1347         vmovdqa         0xa0-0x100(%rcx),$xb2
1348         vmovdqa         0xb0-0x100(%rcx),$xb3
1349         vmovdqa         0xc0-0x100(%rcx),$xt0   # "$xc0"
1350         vmovdqa         0xd0-0x100(%rcx),$xt1   # "$xc1"
1351         vmovdqa         0xe0-0x100(%rcx),$xt2   # "$xc2"
1352         vmovdqa         0xf0-0x100(%rcx),$xt3   # "$xc3"
1353         vmovdqa         0x100-0x100(%rcx),$xd0
1354         vmovdqa         0x110-0x100(%rcx),$xd1
1355         vmovdqa         0x120-0x100(%rcx),$xd2
1356         vmovdqa         0x130-0x100(%rcx),$xd3
1357         vpaddd          .Lfour(%rip),$xd0,$xd0  # next SIMD counters
1358
1359 .Loop_enter4xop:
1360         mov             \$10,%eax
1361         vmovdqa         $xd0,0x100-0x100(%rcx)  # save SIMD counters
1362         jmp             .Loop4xop
1363
1364 .align  32
1365 .Loop4xop:
1366 ___
1367         foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1368         foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1369 $code.=<<___;
1370         dec             %eax
1371         jnz             .Loop4xop
1372
1373         vpaddd          0x40(%rsp),$xa0,$xa0    # accumulate key material
1374         vpaddd          0x50(%rsp),$xa1,$xa1
1375         vpaddd          0x60(%rsp),$xa2,$xa2
1376         vpaddd          0x70(%rsp),$xa3,$xa3
1377
1378         vmovdqa         $xt2,0x20(%rsp)         # offload $xc2,3
1379         vmovdqa         $xt3,0x30(%rsp)
1380
1381         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1382         vpunpckldq      $xa3,$xa2,$xt3
1383         vpunpckhdq      $xa1,$xa0,$xa0
1384         vpunpckhdq      $xa3,$xa2,$xa2
1385         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1386         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1387         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1388         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1389 ___
1390         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1391 $code.=<<___;
1392         vpaddd          0x80-0x100(%rcx),$xb0,$xb0
1393         vpaddd          0x90-0x100(%rcx),$xb1,$xb1
1394         vpaddd          0xa0-0x100(%rcx),$xb2,$xb2
1395         vpaddd          0xb0-0x100(%rcx),$xb3,$xb3
1396
1397         vmovdqa         $xa0,0x00(%rsp)         # offload $xa0,1
1398         vmovdqa         $xa1,0x10(%rsp)
1399         vmovdqa         0x20(%rsp),$xa0         # "xc2"
1400         vmovdqa         0x30(%rsp),$xa1         # "xc3"
1401
1402         vpunpckldq      $xb1,$xb0,$xt2
1403         vpunpckldq      $xb3,$xb2,$xt3
1404         vpunpckhdq      $xb1,$xb0,$xb0
1405         vpunpckhdq      $xb3,$xb2,$xb2
1406         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1407         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1408         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1409         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1410 ___
1411         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1412         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1413 $code.=<<___;
1414         vpaddd          0xc0-0x100(%rcx),$xc0,$xc0
1415         vpaddd          0xd0-0x100(%rcx),$xc1,$xc1
1416         vpaddd          0xe0-0x100(%rcx),$xc2,$xc2
1417         vpaddd          0xf0-0x100(%rcx),$xc3,$xc3
1418
1419         vpunpckldq      $xc1,$xc0,$xt2
1420         vpunpckldq      $xc3,$xc2,$xt3
1421         vpunpckhdq      $xc1,$xc0,$xc0
1422         vpunpckhdq      $xc3,$xc2,$xc2
1423         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1424         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1425         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1426         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1427 ___
1428         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1429 $code.=<<___;
1430         vpaddd          0x100-0x100(%rcx),$xd0,$xd0
1431         vpaddd          0x110-0x100(%rcx),$xd1,$xd1
1432         vpaddd          0x120-0x100(%rcx),$xd2,$xd2
1433         vpaddd          0x130-0x100(%rcx),$xd3,$xd3
1434
1435         vpunpckldq      $xd1,$xd0,$xt2
1436         vpunpckldq      $xd3,$xd2,$xt3
1437         vpunpckhdq      $xd1,$xd0,$xd0
1438         vpunpckhdq      $xd3,$xd2,$xd2
1439         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1440         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1441         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1442         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1443 ___
1444         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1445         ($xa0,$xa1)=($xt2,$xt3);
1446 $code.=<<___;
1447         vmovdqa         0x00(%rsp),$xa0         # restore $xa0,1
1448         vmovdqa         0x10(%rsp),$xa1
1449
1450         cmp             \$64*4,$len
1451         jb              .Ltail4xop
1452
1453         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1454         vpxor           0x10($inp),$xb0,$xb0
1455         vpxor           0x20($inp),$xc0,$xc0
1456         vpxor           0x30($inp),$xd0,$xd0
1457         vpxor           0x40($inp),$xa1,$xa1
1458         vpxor           0x50($inp),$xb1,$xb1
1459         vpxor           0x60($inp),$xc1,$xc1
1460         vpxor           0x70($inp),$xd1,$xd1
1461         lea             0x80($inp),$inp         # size optimization
1462         vpxor           0x00($inp),$xa2,$xa2
1463         vpxor           0x10($inp),$xb2,$xb2
1464         vpxor           0x20($inp),$xc2,$xc2
1465         vpxor           0x30($inp),$xd2,$xd2
1466         vpxor           0x40($inp),$xa3,$xa3
1467         vpxor           0x50($inp),$xb3,$xb3
1468         vpxor           0x60($inp),$xc3,$xc3
1469         vpxor           0x70($inp),$xd3,$xd3
1470         lea             0x80($inp),$inp         # inp+=64*4
1471
1472         vmovdqu         $xa0,0x00($out)
1473         vmovdqu         $xb0,0x10($out)
1474         vmovdqu         $xc0,0x20($out)
1475         vmovdqu         $xd0,0x30($out)
1476         vmovdqu         $xa1,0x40($out)
1477         vmovdqu         $xb1,0x50($out)
1478         vmovdqu         $xc1,0x60($out)
1479         vmovdqu         $xd1,0x70($out)
1480         lea             0x80($out),$out         # size optimization
1481         vmovdqu         $xa2,0x00($out)
1482         vmovdqu         $xb2,0x10($out)
1483         vmovdqu         $xc2,0x20($out)
1484         vmovdqu         $xd2,0x30($out)
1485         vmovdqu         $xa3,0x40($out)
1486         vmovdqu         $xb3,0x50($out)
1487         vmovdqu         $xc3,0x60($out)
1488         vmovdqu         $xd3,0x70($out)
1489         lea             0x80($out),$out         # out+=64*4
1490
1491         sub             \$64*4,$len
1492         jnz             .Loop_outer4xop
1493
1494         jmp             .Ldone4xop
1495
1496 .align  32
1497 .Ltail4xop:
1498         cmp             \$192,$len
1499         jae             .L192_or_more4xop
1500         cmp             \$128,$len
1501         jae             .L128_or_more4xop
1502         cmp             \$64,$len
1503         jae             .L64_or_more4xop
1504
1505         xor             %r10,%r10
1506         vmovdqa         $xa0,0x00(%rsp)
1507         vmovdqa         $xb0,0x10(%rsp)
1508         vmovdqa         $xc0,0x20(%rsp)
1509         vmovdqa         $xd0,0x30(%rsp)
1510         jmp             .Loop_tail4xop
1511
1512 .align  32
1513 .L64_or_more4xop:
1514         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1515         vpxor           0x10($inp),$xb0,$xb0
1516         vpxor           0x20($inp),$xc0,$xc0
1517         vpxor           0x30($inp),$xd0,$xd0
1518         vmovdqu         $xa0,0x00($out)
1519         vmovdqu         $xb0,0x10($out)
1520         vmovdqu         $xc0,0x20($out)
1521         vmovdqu         $xd0,0x30($out)
1522         je              .Ldone4xop
1523
1524         lea             0x40($inp),$inp         # inp+=64*1
1525         vmovdqa         $xa1,0x00(%rsp)
1526         xor             %r10,%r10
1527         vmovdqa         $xb1,0x10(%rsp)
1528         lea             0x40($out),$out         # out+=64*1
1529         vmovdqa         $xc1,0x20(%rsp)
1530         sub             \$64,$len               # len-=64*1
1531         vmovdqa         $xd1,0x30(%rsp)
1532         jmp             .Loop_tail4xop
1533
1534 .align  32
1535 .L128_or_more4xop:
1536         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1537         vpxor           0x10($inp),$xb0,$xb0
1538         vpxor           0x20($inp),$xc0,$xc0
1539         vpxor           0x30($inp),$xd0,$xd0
1540         vpxor           0x40($inp),$xa1,$xa1
1541         vpxor           0x50($inp),$xb1,$xb1
1542         vpxor           0x60($inp),$xc1,$xc1
1543         vpxor           0x70($inp),$xd1,$xd1
1544
1545         vmovdqu         $xa0,0x00($out)
1546         vmovdqu         $xb0,0x10($out)
1547         vmovdqu         $xc0,0x20($out)
1548         vmovdqu         $xd0,0x30($out)
1549         vmovdqu         $xa1,0x40($out)
1550         vmovdqu         $xb1,0x50($out)
1551         vmovdqu         $xc1,0x60($out)
1552         vmovdqu         $xd1,0x70($out)
1553         je              .Ldone4xop
1554
1555         lea             0x80($inp),$inp         # inp+=64*2
1556         vmovdqa         $xa2,0x00(%rsp)
1557         xor             %r10,%r10
1558         vmovdqa         $xb2,0x10(%rsp)
1559         lea             0x80($out),$out         # out+=64*2
1560         vmovdqa         $xc2,0x20(%rsp)
1561         sub             \$128,$len              # len-=64*2
1562         vmovdqa         $xd2,0x30(%rsp)
1563         jmp             .Loop_tail4xop
1564
1565 .align  32
1566 .L192_or_more4xop:
1567         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1568         vpxor           0x10($inp),$xb0,$xb0
1569         vpxor           0x20($inp),$xc0,$xc0
1570         vpxor           0x30($inp),$xd0,$xd0
1571         vpxor           0x40($inp),$xa1,$xa1
1572         vpxor           0x50($inp),$xb1,$xb1
1573         vpxor           0x60($inp),$xc1,$xc1
1574         vpxor           0x70($inp),$xd1,$xd1
1575         lea             0x80($inp),$inp         # size optimization
1576         vpxor           0x00($inp),$xa2,$xa2
1577         vpxor           0x10($inp),$xb2,$xb2
1578         vpxor           0x20($inp),$xc2,$xc2
1579         vpxor           0x30($inp),$xd2,$xd2
1580
1581         vmovdqu         $xa0,0x00($out)
1582         vmovdqu         $xb0,0x10($out)
1583         vmovdqu         $xc0,0x20($out)
1584         vmovdqu         $xd0,0x30($out)
1585         vmovdqu         $xa1,0x40($out)
1586         vmovdqu         $xb1,0x50($out)
1587         vmovdqu         $xc1,0x60($out)
1588         vmovdqu         $xd1,0x70($out)
1589         lea             0x80($out),$out         # size optimization
1590         vmovdqu         $xa2,0x00($out)
1591         vmovdqu         $xb2,0x10($out)
1592         vmovdqu         $xc2,0x20($out)
1593         vmovdqu         $xd2,0x30($out)
1594         je              .Ldone4xop
1595
1596         lea             0x40($inp),$inp         # inp+=64*3
1597         vmovdqa         $xa3,0x00(%rsp)
1598         xor             %r10,%r10
1599         vmovdqa         $xb3,0x10(%rsp)
1600         lea             0x40($out),$out         # out+=64*3
1601         vmovdqa         $xc3,0x20(%rsp)
1602         sub             \$192,$len              # len-=64*3
1603         vmovdqa         $xd3,0x30(%rsp)
1604
1605 .Loop_tail4xop:
1606 	movzb		($inp,%r10),%eax	# xor remaining bytes with keystream stashed on stack
1607         movzb           (%rsp,%r10),%ecx
1608         lea             1(%r10),%r10
1609         xor             %ecx,%eax
1610         mov             %al,-1($out,%r10)
1611         dec             $len
1612         jnz             .Loop_tail4xop
1613
1614 .Ldone4xop:
1615         vzeroupper
1616 ___
1617 $code.=<<___    if ($win64);
1618         movaps          -0xa8(%r9),%xmm6
1619         movaps          -0x98(%r9),%xmm7
1620         movaps          -0x88(%r9),%xmm8
1621         movaps          -0x78(%r9),%xmm9
1622         movaps          -0x68(%r9),%xmm10
1623         movaps          -0x58(%r9),%xmm11
1624         movaps          -0x48(%r9),%xmm12
1625         movaps          -0x38(%r9),%xmm13
1626         movaps          -0x28(%r9),%xmm14
1627         movaps          -0x18(%r9),%xmm15
1628 ___
1629 $code.=<<___;
1630         lea             (%r9),%rsp
1631 .cfi_def_cfa_register   %rsp
1632 .L4xop_epilogue:
1633         ret
1634 .cfi_endproc
1635 .size   ChaCha20_4xop,.-ChaCha20_4xop
1636 ___
1637 }
1638
1639 ########################################################################
1640 # AVX2 code path
1641 if ($avx>1) {
1642 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1643     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1644 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1645         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);	# the 'c' rows have no register of their own, they live on the stack
1646
1647 sub AVX2_lane_ROUND {
1648 my ($a0,$b0,$c0,$d0)=@_;
1649 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1650 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1651 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1652 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1653 my @x=map("\"$_\"",@xx);
1654
1655 	# Consider the order in which variables are addressed by their
1656         # index:
1657         #
1658         #       a   b   c   d
1659         #
1660         #       0   4   8  12 < even round
1661         #       1   5   9  13
1662         #       2   6  10  14
1663         #       3   7  11  15
1664         #       0   5  10  15 < odd round
1665         #       1   6  11  12
1666         #       2   7   8  13
1667         #       3   4   9  14
1668         #
1669 	# 'a', 'b' and 'd's are permanently allocated in registers,
1670 	# @x[0..7,12..15], while 'c's are maintained in memory. If you
1671 	# observe the 'c' column, you'll notice that a pair of 'c's is
1672 	# invariant between rounds, so they only need to be reloaded
1673 	# once per round, in the middle. This is why a bunch of 'c'
1674 	# stores and loads appear in the middle, but none at the start
1675 	# or end. (A plain-Perl indexing sketch follows this sub.)
1676
1677         (
1678         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1679         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1680         "&vpshufb       (@x[$d0],@x[$d0],$t1)",
1681          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1682          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1683          "&vpshufb      (@x[$d1],@x[$d1],$t1)",
1684
1685         "&vpaddd        ($xc,$xc,@x[$d0])",
1686         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1687         "&vpslld        ($t0,@x[$b0],12)",
1688         "&vpsrld        (@x[$b0],@x[$b0],20)",
1689         "&vpor          (@x[$b0],$t0,@x[$b0])",
1690         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1691          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1692          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1693          "&vpslld       ($t1,@x[$b1],12)",
1694          "&vpsrld       (@x[$b1],@x[$b1],20)",
1695          "&vpor         (@x[$b1],$t1,@x[$b1])",
1696
1697         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
1698         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1699         "&vpshufb       (@x[$d0],@x[$d0],$t0)",
1700          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
1701          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1702          "&vpshufb      (@x[$d1],@x[$d1],$t0)",
1703
1704         "&vpaddd        ($xc,$xc,@x[$d0])",
1705         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1706         "&vpslld        ($t1,@x[$b0],7)",
1707         "&vpsrld        (@x[$b0],@x[$b0],25)",
1708         "&vpor          (@x[$b0],$t1,@x[$b0])",
1709         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1710          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1711          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1712          "&vpslld       ($t0,@x[$b1],7)",
1713          "&vpsrld       (@x[$b1],@x[$b1],25)",
1714          "&vpor         (@x[$b1],$t0,@x[$b1])",
1715
1716         "&vmovdqa       (\"`32*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
1717          "&vmovdqa      (\"`32*($c1-8)`(%rsp)\",$xc_)",
1718         "&vmovdqa       ($xc,\"`32*($c2-8)`(%rsp)\")",
1719          "&vmovdqa      ($xc_,\"`32*($c3-8)`(%rsp)\")",
1720
1721         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1722         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1723         "&vpshufb       (@x[$d2],@x[$d2],$t1)",
1724          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1725          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1726          "&vpshufb      (@x[$d3],@x[$d3],$t1)",
1727
1728         "&vpaddd        ($xc,$xc,@x[$d2])",
1729         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1730         "&vpslld        ($t0,@x[$b2],12)",
1731         "&vpsrld        (@x[$b2],@x[$b2],20)",
1732         "&vpor          (@x[$b2],$t0,@x[$b2])",
1733         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1734          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1735          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1736          "&vpslld       ($t1,@x[$b3],12)",
1737          "&vpsrld       (@x[$b3],@x[$b3],20)",
1738          "&vpor         (@x[$b3],$t1,@x[$b3])",
1739
1740         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",
1741         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1742         "&vpshufb       (@x[$d2],@x[$d2],$t0)",
1743          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",
1744          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1745          "&vpshufb      (@x[$d3],@x[$d3],$t0)",
1746
1747         "&vpaddd        ($xc,$xc,@x[$d2])",
1748         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1749         "&vpslld        ($t1,@x[$b2],7)",
1750         "&vpsrld        (@x[$b2],@x[$b2],25)",
1751         "&vpor          (@x[$b2],$t1,@x[$b2])",
1752         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1753          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1754          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1755          "&vpslld       ($t0,@x[$b3],7)",
1756          "&vpsrld       (@x[$b3],@x[$b3],25)",
1757          "&vpor         (@x[$b3],$t0,@x[$b3])"
1758         );
1759 }
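# The index table in AVX2_lane_ROUND's comment can be reproduced with the
# same rotation formula the sub uses to derive its quarter-round operands.
# The helper below is a minimal stand-alone sketch for illustration only:
# it is never called by the generator and its name is not part of the
# original module.
sub AVX2_lane_indices_ref {
my @quads = ([ @_ ]);			# e.g. (0,4,8,12) even, (0,5,10,15) odd
push @quads, [ map { ($_&~3)+(($_+1)&3) } @{$quads[-1]} ] for (1..3);
return @quads;				# four (a,b,c,d) index quadruples
}
# AVX2_lane_indices_ref(0,4,8,12) yields [0,4,8,12],[1,5,9,13],[2,6,10,14],
# [3,7,11,15], matching the "even round" rows above; starting from
# (0,5,10,15) reproduces the "odd round" rows. In other words, even rounds
# walk the columns of the 4x4 state and odd rounds walk its diagonals.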
1760
1761 my $xframe = $win64 ? 0xa8 : 8;
1762
1763 $code.=<<___;
1764 .type   ChaCha20_8x,\@function,5
1765 .align  32
1766 ChaCha20_8x:
1767 .cfi_startproc
1768 .LChaCha20_8x:
1769         mov             %rsp,%r9                # frame register
1770 .cfi_def_cfa_register   %r9
1771         sub             \$0x280+$xframe,%rsp
1772         and             \$-32,%rsp
1773 ___
1774 $code.=<<___    if ($win64);
1775 	movaps		%xmm6,-0xa8(%r9)	# preserve non-volatile xmm6-15 (Win64 ABI)
1776         movaps          %xmm7,-0x98(%r9)
1777         movaps          %xmm8,-0x88(%r9)
1778         movaps          %xmm9,-0x78(%r9)
1779         movaps          %xmm10,-0x68(%r9)
1780         movaps          %xmm11,-0x58(%r9)
1781         movaps          %xmm12,-0x48(%r9)
1782         movaps          %xmm13,-0x38(%r9)
1783         movaps          %xmm14,-0x28(%r9)
1784         movaps          %xmm15,-0x18(%r9)
1785 .L8x_body:
1786 ___
1787 $code.=<<___;
1788         vzeroupper
1789
1790         ################ stack layout
1791 	# +0x00		SIMD equivalent of @x[8-11]
1792         # ...
1793         # +0x80         constant copy of key[0-2] smashed by lanes
1794         # ...
1795         # +0x200        SIMD counters (with nonce smashed by lanes)
1796         # ...
1797         # +0x280
1798
1799         vbroadcasti128  .Lsigma(%rip),$xa3      # key[0]
1800         vbroadcasti128  ($key),$xb3             # key[1]
1801         vbroadcasti128  16($key),$xt3           # key[2]
1802         vbroadcasti128  ($counter),$xd3         # key[3]
1803         lea             0x100(%rsp),%rcx        # size optimization
1804         lea             0x200(%rsp),%rax        # size optimization
1805         lea             .Lrot16(%rip),%r10
1806         lea             .Lrot24(%rip),%r11
1807
1808         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1809         vpshufd         \$0x55,$xa3,$xa1
1810         vmovdqa         $xa0,0x80-0x100(%rcx)   # ... and offload
1811         vpshufd         \$0xaa,$xa3,$xa2
1812         vmovdqa         $xa1,0xa0-0x100(%rcx)
1813         vpshufd         \$0xff,$xa3,$xa3
1814         vmovdqa         $xa2,0xc0-0x100(%rcx)
1815         vmovdqa         $xa3,0xe0-0x100(%rcx)
1816
1817         vpshufd         \$0x00,$xb3,$xb0
1818         vpshufd         \$0x55,$xb3,$xb1
1819         vmovdqa         $xb0,0x100-0x100(%rcx)
1820         vpshufd         \$0xaa,$xb3,$xb2
1821         vmovdqa         $xb1,0x120-0x100(%rcx)
1822         vpshufd         \$0xff,$xb3,$xb3
1823         vmovdqa         $xb2,0x140-0x100(%rcx)
1824         vmovdqa         $xb3,0x160-0x100(%rcx)
1825
1826         vpshufd         \$0x00,$xt3,$xt0        # "xc0"
1827         vpshufd         \$0x55,$xt3,$xt1        # "xc1"
1828         vmovdqa         $xt0,0x180-0x200(%rax)
1829         vpshufd         \$0xaa,$xt3,$xt2        # "xc2"
1830         vmovdqa         $xt1,0x1a0-0x200(%rax)
1831         vpshufd         \$0xff,$xt3,$xt3        # "xc3"
1832         vmovdqa         $xt2,0x1c0-0x200(%rax)
1833         vmovdqa         $xt3,0x1e0-0x200(%rax)
1834
1835         vpshufd         \$0x00,$xd3,$xd0
1836         vpshufd         \$0x55,$xd3,$xd1
1837         vpaddd          .Lincy(%rip),$xd0,$xd0  # don't save counters yet
1838         vpshufd         \$0xaa,$xd3,$xd2
1839         vmovdqa         $xd1,0x220-0x200(%rax)
1840         vpshufd         \$0xff,$xd3,$xd3
1841         vmovdqa         $xd2,0x240-0x200(%rax)
1842         vmovdqa         $xd3,0x260-0x200(%rax)
1843
1844         jmp             .Loop_enter8x
1845
1846 .align  32
1847 .Loop_outer8x:
1848         vmovdqa         0x80-0x100(%rcx),$xa0   # re-load smashed key
1849         vmovdqa         0xa0-0x100(%rcx),$xa1
1850         vmovdqa         0xc0-0x100(%rcx),$xa2
1851         vmovdqa         0xe0-0x100(%rcx),$xa3
1852         vmovdqa         0x100-0x100(%rcx),$xb0
1853         vmovdqa         0x120-0x100(%rcx),$xb1
1854         vmovdqa         0x140-0x100(%rcx),$xb2
1855         vmovdqa         0x160-0x100(%rcx),$xb3
1856         vmovdqa         0x180-0x200(%rax),$xt0  # "xc0"
1857         vmovdqa         0x1a0-0x200(%rax),$xt1  # "xc1"
1858         vmovdqa         0x1c0-0x200(%rax),$xt2  # "xc2"
1859         vmovdqa         0x1e0-0x200(%rax),$xt3  # "xc3"
1860         vmovdqa         0x200-0x200(%rax),$xd0
1861         vmovdqa         0x220-0x200(%rax),$xd1
1862         vmovdqa         0x240-0x200(%rax),$xd2
1863         vmovdqa         0x260-0x200(%rax),$xd3
1864         vpaddd          .Leight(%rip),$xd0,$xd0 # next SIMD counters
1865
1866 .Loop_enter8x:
1867         vmovdqa         $xt2,0x40(%rsp)         # SIMD equivalent of "@x[10]"
1868         vmovdqa         $xt3,0x60(%rsp)         # SIMD equivalent of "@x[11]"
1869         vbroadcasti128  (%r10),$xt3
1870         vmovdqa         $xd0,0x200-0x200(%rax)  # save SIMD counters
1871         mov             \$10,%eax
1872         jmp             .Loop8x
1873
1874 .align  32
1875 .Loop8x:
1876 ___
1877         foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1878         foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1879 $code.=<<___;
1880         dec             %eax
1881         jnz             .Loop8x
1882
1883         lea             0x200(%rsp),%rax        # size optimization
1884         vpaddd          0x80-0x100(%rcx),$xa0,$xa0      # accumulate key
1885         vpaddd          0xa0-0x100(%rcx),$xa1,$xa1
1886         vpaddd          0xc0-0x100(%rcx),$xa2,$xa2
1887         vpaddd          0xe0-0x100(%rcx),$xa3,$xa3
1888
1889         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1890         vpunpckldq      $xa3,$xa2,$xt3
1891         vpunpckhdq      $xa1,$xa0,$xa0
1892         vpunpckhdq      $xa3,$xa2,$xa2
1893         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1894         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1895         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1896         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1897 ___
1898         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1899 $code.=<<___;
1900         vpaddd          0x100-0x100(%rcx),$xb0,$xb0
1901         vpaddd          0x120-0x100(%rcx),$xb1,$xb1
1902         vpaddd          0x140-0x100(%rcx),$xb2,$xb2
1903         vpaddd          0x160-0x100(%rcx),$xb3,$xb3
1904
1905         vpunpckldq      $xb1,$xb0,$xt2
1906         vpunpckldq      $xb3,$xb2,$xt3
1907         vpunpckhdq      $xb1,$xb0,$xb0
1908         vpunpckhdq      $xb3,$xb2,$xb2
1909         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1910         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1911         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1912         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1913 ___
1914         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1915 $code.=<<___;
1916         vperm2i128      \$0x20,$xb0,$xa0,$xt3   # "de-interlace" further
1917         vperm2i128      \$0x31,$xb0,$xa0,$xb0
1918         vperm2i128      \$0x20,$xb1,$xa1,$xa0
1919         vperm2i128      \$0x31,$xb1,$xa1,$xb1
1920         vperm2i128      \$0x20,$xb2,$xa2,$xa1
1921         vperm2i128      \$0x31,$xb2,$xa2,$xb2
1922         vperm2i128      \$0x20,$xb3,$xa3,$xa2
1923         vperm2i128      \$0x31,$xb3,$xa3,$xb3
1924 ___
1925         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1926         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1927 $code.=<<___;
1928         vmovdqa         $xa0,0x00(%rsp)         # offload $xaN
1929         vmovdqa         $xa1,0x20(%rsp)
1930         vmovdqa         0x40(%rsp),$xc2         # $xa0
1931         vmovdqa         0x60(%rsp),$xc3         # $xa1
1932
1933         vpaddd          0x180-0x200(%rax),$xc0,$xc0
1934         vpaddd          0x1a0-0x200(%rax),$xc1,$xc1
1935         vpaddd          0x1c0-0x200(%rax),$xc2,$xc2
1936         vpaddd          0x1e0-0x200(%rax),$xc3,$xc3
1937
1938         vpunpckldq      $xc1,$xc0,$xt2
1939         vpunpckldq      $xc3,$xc2,$xt3
1940         vpunpckhdq      $xc1,$xc0,$xc0
1941         vpunpckhdq      $xc3,$xc2,$xc2
1942         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1943         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1944         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1945         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1946 ___
1947         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1948 $code.=<<___;
1949         vpaddd          0x200-0x200(%rax),$xd0,$xd0
1950         vpaddd          0x220-0x200(%rax),$xd1,$xd1
1951         vpaddd          0x240-0x200(%rax),$xd2,$xd2
1952         vpaddd          0x260-0x200(%rax),$xd3,$xd3
1953
1954         vpunpckldq      $xd1,$xd0,$xt2
1955         vpunpckldq      $xd3,$xd2,$xt3
1956         vpunpckhdq      $xd1,$xd0,$xd0
1957         vpunpckhdq      $xd3,$xd2,$xd2
1958         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1959         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1960         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1961         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1962 ___
1963         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1964 $code.=<<___;
1965         vperm2i128      \$0x20,$xd0,$xc0,$xt3   # "de-interlace" further
1966         vperm2i128      \$0x31,$xd0,$xc0,$xd0
1967         vperm2i128      \$0x20,$xd1,$xc1,$xc0
1968         vperm2i128      \$0x31,$xd1,$xc1,$xd1
1969         vperm2i128      \$0x20,$xd2,$xc2,$xc1
1970         vperm2i128      \$0x31,$xd2,$xc2,$xd2
1971         vperm2i128      \$0x20,$xd3,$xc3,$xc2
1972         vperm2i128      \$0x31,$xd3,$xc3,$xd3
1973 ___
1974         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1975         ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1976         ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1977         ($xa0,$xa1)=($xt2,$xt3);
1978 $code.=<<___;
1979         vmovdqa         0x00(%rsp),$xa0         # $xaN was offloaded, remember?
1980         vmovdqa         0x20(%rsp),$xa1
1981
1982         cmp             \$64*8,$len
1983         jb              .Ltail8x
1984
1985         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1986         vpxor           0x20($inp),$xb0,$xb0
1987         vpxor           0x40($inp),$xc0,$xc0
1988         vpxor           0x60($inp),$xd0,$xd0
1989         lea             0x80($inp),$inp         # size optimization
1990         vmovdqu         $xa0,0x00($out)
1991         vmovdqu         $xb0,0x20($out)
1992         vmovdqu         $xc0,0x40($out)
1993         vmovdqu         $xd0,0x60($out)
1994         lea             0x80($out),$out         # size optimization
1995
1996         vpxor           0x00($inp),$xa1,$xa1
1997         vpxor           0x20($inp),$xb1,$xb1
1998         vpxor           0x40($inp),$xc1,$xc1
1999         vpxor           0x60($inp),$xd1,$xd1
2000         lea             0x80($inp),$inp         # size optimization
2001         vmovdqu         $xa1,0x00($out)
2002         vmovdqu         $xb1,0x20($out)
2003         vmovdqu         $xc1,0x40($out)
2004         vmovdqu         $xd1,0x60($out)
2005         lea             0x80($out),$out         # size optimization
2006
2007         vpxor           0x00($inp),$xa2,$xa2
2008         vpxor           0x20($inp),$xb2,$xb2
2009         vpxor           0x40($inp),$xc2,$xc2
2010         vpxor           0x60($inp),$xd2,$xd2
2011         lea             0x80($inp),$inp         # size optimization
2012         vmovdqu         $xa2,0x00($out)
2013         vmovdqu         $xb2,0x20($out)
2014         vmovdqu         $xc2,0x40($out)
2015         vmovdqu         $xd2,0x60($out)
2016         lea             0x80($out),$out         # size optimization
2017
2018         vpxor           0x00($inp),$xa3,$xa3
2019         vpxor           0x20($inp),$xb3,$xb3
2020         vpxor           0x40($inp),$xc3,$xc3
2021         vpxor           0x60($inp),$xd3,$xd3
2022         lea             0x80($inp),$inp         # size optimization
2023         vmovdqu         $xa3,0x00($out)
2024         vmovdqu         $xb3,0x20($out)
2025         vmovdqu         $xc3,0x40($out)
2026         vmovdqu         $xd3,0x60($out)
2027         lea             0x80($out),$out         # size optimization
2028
2029         sub             \$64*8,$len
2030         jnz             .Loop_outer8x
2031
2032         jmp             .Ldone8x
2033
2034 .Ltail8x:
2035         cmp             \$448,$len
2036         jae             .L448_or_more8x
2037         cmp             \$384,$len
2038         jae             .L384_or_more8x
2039         cmp             \$320,$len
2040         jae             .L320_or_more8x
2041         cmp             \$256,$len
2042         jae             .L256_or_more8x
2043         cmp             \$192,$len
2044         jae             .L192_or_more8x
2045         cmp             \$128,$len
2046         jae             .L128_or_more8x
2047         cmp             \$64,$len
2048         jae             .L64_or_more8x
2049
2050         xor             %r10,%r10
2051         vmovdqa         $xa0,0x00(%rsp)
2052         vmovdqa         $xb0,0x20(%rsp)
2053         jmp             .Loop_tail8x
2054
2055 .align  32
2056 .L64_or_more8x:
2057         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2058         vpxor           0x20($inp),$xb0,$xb0
2059         vmovdqu         $xa0,0x00($out)
2060         vmovdqu         $xb0,0x20($out)
2061         je              .Ldone8x
2062
2063         lea             0x40($inp),$inp         # inp+=64*1
2064         xor             %r10,%r10
2065         vmovdqa         $xc0,0x00(%rsp)
2066         lea             0x40($out),$out         # out+=64*1
2067         sub             \$64,$len               # len-=64*1
2068         vmovdqa         $xd0,0x20(%rsp)
2069         jmp             .Loop_tail8x
2070
2071 .align  32
2072 .L128_or_more8x:
2073         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2074         vpxor           0x20($inp),$xb0,$xb0
2075         vpxor           0x40($inp),$xc0,$xc0
2076         vpxor           0x60($inp),$xd0,$xd0
2077         vmovdqu         $xa0,0x00($out)
2078         vmovdqu         $xb0,0x20($out)
2079         vmovdqu         $xc0,0x40($out)
2080         vmovdqu         $xd0,0x60($out)
2081         je              .Ldone8x
2082
2083         lea             0x80($inp),$inp         # inp+=64*2
2084         xor             %r10,%r10
2085         vmovdqa         $xa1,0x00(%rsp)
2086         lea             0x80($out),$out         # out+=64*2
2087         sub             \$128,$len              # len-=64*2
2088         vmovdqa         $xb1,0x20(%rsp)
2089         jmp             .Loop_tail8x
2090
2091 .align  32
2092 .L192_or_more8x:
2093         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2094         vpxor           0x20($inp),$xb0,$xb0
2095         vpxor           0x40($inp),$xc0,$xc0
2096         vpxor           0x60($inp),$xd0,$xd0
2097         vpxor           0x80($inp),$xa1,$xa1
2098         vpxor           0xa0($inp),$xb1,$xb1
2099         vmovdqu         $xa0,0x00($out)
2100         vmovdqu         $xb0,0x20($out)
2101         vmovdqu         $xc0,0x40($out)
2102         vmovdqu         $xd0,0x60($out)
2103         vmovdqu         $xa1,0x80($out)
2104         vmovdqu         $xb1,0xa0($out)
2105         je              .Ldone8x
2106
2107         lea             0xc0($inp),$inp         # inp+=64*3
2108         xor             %r10,%r10
2109         vmovdqa         $xc1,0x00(%rsp)
2110         lea             0xc0($out),$out         # out+=64*3
2111         sub             \$192,$len              # len-=64*3
2112         vmovdqa         $xd1,0x20(%rsp)
2113         jmp             .Loop_tail8x
2114
2115 .align  32
2116 .L256_or_more8x:
2117         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2118         vpxor           0x20($inp),$xb0,$xb0
2119         vpxor           0x40($inp),$xc0,$xc0
2120         vpxor           0x60($inp),$xd0,$xd0
2121         vpxor           0x80($inp),$xa1,$xa1
2122         vpxor           0xa0($inp),$xb1,$xb1
2123         vpxor           0xc0($inp),$xc1,$xc1
2124         vpxor           0xe0($inp),$xd1,$xd1
2125         vmovdqu         $xa0,0x00($out)
2126         vmovdqu         $xb0,0x20($out)
2127         vmovdqu         $xc0,0x40($out)
2128         vmovdqu         $xd0,0x60($out)
2129         vmovdqu         $xa1,0x80($out)
2130         vmovdqu         $xb1,0xa0($out)
2131         vmovdqu         $xc1,0xc0($out)
2132         vmovdqu         $xd1,0xe0($out)
2133         je              .Ldone8x
2134
2135         lea             0x100($inp),$inp        # inp+=64*4
2136         xor             %r10,%r10
2137         vmovdqa         $xa2,0x00(%rsp)
2138         lea             0x100($out),$out        # out+=64*4
2139         sub             \$256,$len              # len-=64*4
2140         vmovdqa         $xb2,0x20(%rsp)
2141         jmp             .Loop_tail8x
2142
2143 .align  32
2144 .L320_or_more8x:
2145         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2146         vpxor           0x20($inp),$xb0,$xb0
2147         vpxor           0x40($inp),$xc0,$xc0
2148         vpxor           0x60($inp),$xd0,$xd0
2149         vpxor           0x80($inp),$xa1,$xa1
2150         vpxor           0xa0($inp),$xb1,$xb1
2151         vpxor           0xc0($inp),$xc1,$xc1
2152         vpxor           0xe0($inp),$xd1,$xd1
2153         vpxor           0x100($inp),$xa2,$xa2
2154         vpxor           0x120($inp),$xb2,$xb2
2155         vmovdqu         $xa0,0x00($out)
2156         vmovdqu         $xb0,0x20($out)
2157         vmovdqu         $xc0,0x40($out)
2158         vmovdqu         $xd0,0x60($out)
2159         vmovdqu         $xa1,0x80($out)
2160         vmovdqu         $xb1,0xa0($out)
2161         vmovdqu         $xc1,0xc0($out)
2162         vmovdqu         $xd1,0xe0($out)
2163         vmovdqu         $xa2,0x100($out)
2164         vmovdqu         $xb2,0x120($out)
2165         je              .Ldone8x
2166
2167         lea             0x140($inp),$inp        # inp+=64*5
2168         xor             %r10,%r10
2169         vmovdqa         $xc2,0x00(%rsp)
2170         lea             0x140($out),$out        # out+=64*5
2171         sub             \$320,$len              # len-=64*5
2172         vmovdqa         $xd2,0x20(%rsp)
2173         jmp             .Loop_tail8x
2174
2175 .align  32
2176 .L384_or_more8x:
2177         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2178         vpxor           0x20($inp),$xb0,$xb0
2179         vpxor           0x40($inp),$xc0,$xc0
2180         vpxor           0x60($inp),$xd0,$xd0
2181         vpxor           0x80($inp),$xa1,$xa1
2182         vpxor           0xa0($inp),$xb1,$xb1
2183         vpxor           0xc0($inp),$xc1,$xc1
2184         vpxor           0xe0($inp),$xd1,$xd1
2185         vpxor           0x100($inp),$xa2,$xa2
2186         vpxor           0x120($inp),$xb2,$xb2
2187         vpxor           0x140($inp),$xc2,$xc2
2188         vpxor           0x160($inp),$xd2,$xd2
2189         vmovdqu         $xa0,0x00($out)
2190         vmovdqu         $xb0,0x20($out)
2191         vmovdqu         $xc0,0x40($out)
2192         vmovdqu         $xd0,0x60($out)
2193         vmovdqu         $xa1,0x80($out)
2194         vmovdqu         $xb1,0xa0($out)
2195         vmovdqu         $xc1,0xc0($out)
2196         vmovdqu         $xd1,0xe0($out)
2197         vmovdqu         $xa2,0x100($out)
2198         vmovdqu         $xb2,0x120($out)
2199         vmovdqu         $xc2,0x140($out)
2200         vmovdqu         $xd2,0x160($out)
2201         je              .Ldone8x
2202
2203         lea             0x180($inp),$inp        # inp+=64*6
2204         xor             %r10,%r10
2205         vmovdqa         $xa3,0x00(%rsp)
2206         lea             0x180($out),$out        # out+=64*6
2207         sub             \$384,$len              # len-=64*6
2208         vmovdqa         $xb3,0x20(%rsp)
2209         jmp             .Loop_tail8x
2210
2211 .align  32
2212 .L448_or_more8x:
2213         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2214         vpxor           0x20($inp),$xb0,$xb0
2215         vpxor           0x40($inp),$xc0,$xc0
2216         vpxor           0x60($inp),$xd0,$xd0
2217         vpxor           0x80($inp),$xa1,$xa1
2218         vpxor           0xa0($inp),$xb1,$xb1
2219         vpxor           0xc0($inp),$xc1,$xc1
2220         vpxor           0xe0($inp),$xd1,$xd1
2221         vpxor           0x100($inp),$xa2,$xa2
2222         vpxor           0x120($inp),$xb2,$xb2
2223         vpxor           0x140($inp),$xc2,$xc2
2224         vpxor           0x160($inp),$xd2,$xd2
2225         vpxor           0x180($inp),$xa3,$xa3
2226         vpxor           0x1a0($inp),$xb3,$xb3
2227         vmovdqu         $xa0,0x00($out)
2228         vmovdqu         $xb0,0x20($out)
2229         vmovdqu         $xc0,0x40($out)
2230         vmovdqu         $xd0,0x60($out)
2231         vmovdqu         $xa1,0x80($out)
2232         vmovdqu         $xb1,0xa0($out)
2233         vmovdqu         $xc1,0xc0($out)
2234         vmovdqu         $xd1,0xe0($out)
2235         vmovdqu         $xa2,0x100($out)
2236         vmovdqu         $xb2,0x120($out)
2237         vmovdqu         $xc2,0x140($out)
2238         vmovdqu         $xd2,0x160($out)
2239         vmovdqu         $xa3,0x180($out)
2240         vmovdqu         $xb3,0x1a0($out)
2241         je              .Ldone8x
2242
2243         lea             0x1c0($inp),$inp        # inp+=64*7
2244         xor             %r10,%r10
2245         vmovdqa         $xc3,0x00(%rsp)
2246         lea             0x1c0($out),$out        # out+=64*7
2247         sub             \$448,$len              # len-=64*7
2248         vmovdqa         $xd3,0x20(%rsp)
2249
2250 .Loop_tail8x:
2251 	movzb		($inp,%r10),%eax	# xor remaining bytes with keystream stashed on stack
2252         movzb           (%rsp,%r10),%ecx
2253         lea             1(%r10),%r10
2254         xor             %ecx,%eax
2255         mov             %al,-1($out,%r10)
2256         dec             $len
2257         jnz             .Loop_tail8x
2258
2259 .Ldone8x:
2260         vzeroall
2261 ___
2262 $code.=<<___    if ($win64);
2263         movaps          -0xa8(%r9),%xmm6
2264         movaps          -0x98(%r9),%xmm7
2265         movaps          -0x88(%r9),%xmm8
2266         movaps          -0x78(%r9),%xmm9
2267         movaps          -0x68(%r9),%xmm10
2268         movaps          -0x58(%r9),%xmm11
2269         movaps          -0x48(%r9),%xmm12
2270         movaps          -0x38(%r9),%xmm13
2271         movaps          -0x28(%r9),%xmm14
2272         movaps          -0x18(%r9),%xmm15
2273 ___
2274 $code.=<<___;
2275         lea             (%r9),%rsp
2276 .cfi_def_cfa_register   %rsp
2277 .L8x_epilogue:
2278         ret
2279 .cfi_endproc
2280 .size   ChaCha20_8x,.-ChaCha20_8x
2281 ___
2282 }
2283
2284 ########################################################################
2285 # AVX512 code paths
2286 if ($avx>2) {
2287 # This one handles shorter inputs...
2288
2289 my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
2290 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
2291
2292 sub AVX512ROUND {       # critical path is 14 "SIMD ticks" per round
2293         &vpaddd ($a,$a,$b);
2294         &vpxord ($d,$d,$a);
2295         &vprold ($d,$d,16);
2296
2297         &vpaddd ($c,$c,$d);
2298         &vpxord ($b,$b,$c);
2299         &vprold ($b,$b,12);
2300
2301         &vpaddd ($a,$a,$b);
2302         &vpxord ($d,$d,$a);
2303         &vprold ($d,$d,8);
2304
2305         &vpaddd ($c,$c,$d);
2306         &vpxord ($b,$b,$c);
2307         &vprold ($b,$b,7);
2308 }
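# For reference, one 32-bit lane of AVX512ROUND computes the standard ChaCha
# quarter-round. The scalar sketch below mirrors the add/xor/rotate-by-
# 16,12,8,7 sequence above; it is illustration only, never called by the
# generator, and its name is not part of the original module.
sub chacha_quarter_round_ref {
my ($a,$b,$c,$d) = @_;			# four 32-bit state words
my $rotl = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff; };
$a = ($a+$b) & 0xffffffff;	$d = $rotl->($d^$a,16);
$c = ($c+$d) & 0xffffffff;	$b = $rotl->($b^$c,12);
$a = ($a+$b) & 0xffffffff;	$d = $rotl->($d^$a, 8);
$c = ($c+$d) & 0xffffffff;	$b = $rotl->($b^$c, 7);
return ($a,$b,$c,$d);
}
# ChaCha20_avx512 above runs this on four independent blocks at once, one
# per 128-bit lane of the zmm registers, with vprold doing each rotate in
# a single instruction.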
2309
2310 my $xframe = $win64 ? 32+8 : 8;
2311
2312 $code.=<<___;
2313 .type   ChaCha20_avx512,\@function,5
2314 .align  32
2315 ChaCha20_avx512:
2316 .cfi_startproc
2317 .LChaCha20_avx512:
2318         mov     %rsp,%r9                # frame pointer
2319 .cfi_def_cfa_register   %r9
2320         cmp     \$512,$len
2321         ja      .LChaCha20_16x
2322
2323         sub     \$64+$xframe,%rsp
2324 ___
2325 $code.=<<___    if ($win64);
2326         movaps  %xmm6,-0x28(%r9)
2327         movaps  %xmm7,-0x18(%r9)
2328 .Lavx512_body:
2329 ___
2330 $code.=<<___;
2331         vbroadcasti32x4 .Lsigma(%rip),$a
2332         vbroadcasti32x4 ($key),$b
2333         vbroadcasti32x4 16($key),$c
2334         vbroadcasti32x4 ($counter),$d
2335
2336         vmovdqa32       $a,$a_
2337         vmovdqa32       $b,$b_
2338         vmovdqa32       $c,$c_
2339         vpaddd          .Lzeroz(%rip),$d,$d
2340         vmovdqa32       .Lfourz(%rip),$fourz
2341         mov             \$10,$counter   # reuse $counter
2342         vmovdqa32       $d,$d_
2343         jmp             .Loop_avx512
2344
2345 .align  16
2346 .Loop_outer_avx512:
2347         vmovdqa32       $a_,$a
2348         vmovdqa32       $b_,$b
2349         vmovdqa32       $c_,$c
2350         vpaddd          $fourz,$d_,$d
2351         mov             \$10,$counter
2352         vmovdqa32       $d,$d_
2353         jmp             .Loop_avx512
2354
2355 .align  32
2356 .Loop_avx512:
2357 ___
2358         &AVX512ROUND();
2359         &vpshufd        ($c,$c,0b01001110);
2360         &vpshufd        ($b,$b,0b00111001);
2361         &vpshufd        ($d,$d,0b10010011);
2362
2363         &AVX512ROUND();
2364         &vpshufd        ($c,$c,0b01001110);
2365         &vpshufd        ($b,$b,0b10010011);
2366         &vpshufd        ($d,$d,0b00111001);
2367
2368         &dec            ($counter);
2369         &jnz            (".Loop_avx512");
2370
2371 $code.=<<___;
2372         vpaddd          $a_,$a,$a
2373         vpaddd          $b_,$b,$b
2374         vpaddd          $c_,$c,$c
2375         vpaddd          $d_,$d,$d
2376
2377         sub             \$64,$len
2378         jb              .Ltail64_avx512
2379
2380         vpxor           0x00($inp),%x#$a,$t0    # xor with input
2381         vpxor           0x10($inp),%x#$b,$t1
2382         vpxor           0x20($inp),%x#$c,$t2
2383         vpxor           0x30($inp),%x#$d,$t3
2384         lea             0x40($inp),$inp         # inp+=64
2385
2386         vmovdqu         $t0,0x00($out)          # write output
2387         vmovdqu         $t1,0x10($out)
2388         vmovdqu         $t2,0x20($out)
2389         vmovdqu         $t3,0x30($out)
2390         lea             0x40($out),$out         # out+=64
2391
2392         jz              .Ldone_avx512
2393
2394         vextracti32x4   \$1,$a,$t0
2395         vextracti32x4   \$1,$b,$t1
2396         vextracti32x4   \$1,$c,$t2
2397         vextracti32x4   \$1,$d,$t3
2398
2399         sub             \$64,$len
2400         jb              .Ltail_avx512
2401
2402         vpxor           0x00($inp),$t0,$t0      # xor with input
2403         vpxor           0x10($inp),$t1,$t1
2404         vpxor           0x20($inp),$t2,$t2
2405         vpxor           0x30($inp),$t3,$t3
2406         lea             0x40($inp),$inp         # inp+=64
2407
2408         vmovdqu         $t0,0x00($out)          # write output
2409         vmovdqu         $t1,0x10($out)
2410         vmovdqu         $t2,0x20($out)
2411         vmovdqu         $t3,0x30($out)
2412         lea             0x40($out),$out         # out+=64
2413
2414         jz              .Ldone_avx512
2415
2416         vextracti32x4   \$2,$a,$t0
2417         vextracti32x4   \$2,$b,$t1
2418         vextracti32x4   \$2,$c,$t2
2419         vextracti32x4   \$2,$d,$t3
2420
2421         sub             \$64,$len
2422         jb              .Ltail_avx512
2423
2424         vpxor           0x00($inp),$t0,$t0      # xor with input
2425         vpxor           0x10($inp),$t1,$t1
2426         vpxor           0x20($inp),$t2,$t2
2427         vpxor           0x30($inp),$t3,$t3
2428         lea             0x40($inp),$inp         # inp+=64
2429
2430         vmovdqu         $t0,0x00($out)          # write output
2431         vmovdqu         $t1,0x10($out)
2432         vmovdqu         $t2,0x20($out)
2433         vmovdqu         $t3,0x30($out)
2434         lea             0x40($out),$out         # out+=64
2435
2436         jz              .Ldone_avx512
2437
2438         vextracti32x4   \$3,$a,$t0
2439         vextracti32x4   \$3,$b,$t1
2440         vextracti32x4   \$3,$c,$t2
2441         vextracti32x4   \$3,$d,$t3
2442
2443         sub             \$64,$len
2444         jb              .Ltail_avx512
2445
2446         vpxor           0x00($inp),$t0,$t0      # xor with input
2447         vpxor           0x10($inp),$t1,$t1
2448         vpxor           0x20($inp),$t2,$t2
2449         vpxor           0x30($inp),$t3,$t3
2450         lea             0x40($inp),$inp         # inp+=64
2451
2452         vmovdqu         $t0,0x00($out)          # write output
2453         vmovdqu         $t1,0x10($out)
2454         vmovdqu         $t2,0x20($out)
2455         vmovdqu         $t3,0x30($out)
2456         lea             0x40($out),$out         # out+=64
2457
2458         jnz             .Loop_outer_avx512
2459
2460         jmp             .Ldone_avx512
2461
2462 .align  16
2463 .Ltail64_avx512:
2464         vmovdqa         %x#$a,0x00(%rsp)
2465         vmovdqa         %x#$b,0x10(%rsp)
2466         vmovdqa         %x#$c,0x20(%rsp)
2467         vmovdqa         %x#$d,0x30(%rsp)
2468         add             \$64,$len
2469         jmp             .Loop_tail_avx512
2470
2471 .align  16
2472 .Ltail_avx512:
2473         vmovdqa         $t0,0x00(%rsp)
2474         vmovdqa         $t1,0x10(%rsp)
2475         vmovdqa         $t2,0x20(%rsp)
2476         vmovdqa         $t3,0x30(%rsp)
2477         add             \$64,$len
2478
2479 .Loop_tail_avx512:
2480 	movzb		($inp,$counter),%eax	# xor remaining bytes with keystream stashed on stack
2481         movzb           (%rsp,$counter),%ecx
2482         lea             1($counter),$counter
2483         xor             %ecx,%eax
2484         mov             %al,-1($out,$counter)
2485         dec             $len
2486         jnz             .Loop_tail_avx512
2487
2488 	vmovdqa32	$a_,0x00(%rsp)		# overwrite keystream block left on the stack
2489
2490 .Ldone_avx512:
2491         vzeroall
2492 ___
2493 $code.=<<___    if ($win64);
2494         movaps  -0x28(%r9),%xmm6
2495         movaps  -0x18(%r9),%xmm7
2496 ___
2497 $code.=<<___;
2498         lea     (%r9),%rsp
2499 .cfi_def_cfa_register   %rsp
2500 .Lavx512_epilogue:
2501         ret
2502 .cfi_endproc
2503 .size   ChaCha20_avx512,.-ChaCha20_avx512
2504 ___
2505 }
2506 if ($avx>2) {
2507 # This one handles longer inputs...
2508
2509 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2510     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2511 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2512          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2513 my @key=map("%zmm$_",(16..31));
2514 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2515
2516 sub AVX512_lane_ROUND {
2517 my ($a0,$b0,$c0,$d0)=@_;
2518 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2519 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2520 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
2521 my @x=map("\"$_\"",@xx);
2522
2523         (
2524         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
2525          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
2526           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
2527            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
2528         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2529          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2530           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2531            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2532         "&vprold        (@x[$d0],@x[$d0],16)",
2533          "&vprold       (@x[$d1],@x[$d1],16)",
2534           "&vprold      (@x[$d2],@x[$d2],16)",
2535            "&vprold     (@x[$d3],@x[$d3],16)",
2536
2537         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2538          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2539           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2540            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2541         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2542          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2543           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2544            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2545         "&vprold        (@x[$b0],@x[$b0],12)",
2546          "&vprold       (@x[$b1],@x[$b1],12)",
2547           "&vprold      (@x[$b2],@x[$b2],12)",
2548            "&vprold     (@x[$b3],@x[$b3],12)",
2549
2550         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
2551          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
2552           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
2553            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
2554         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2555          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2556           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2557            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2558         "&vprold        (@x[$d0],@x[$d0],8)",
2559          "&vprold       (@x[$d1],@x[$d1],8)",
2560           "&vprold      (@x[$d2],@x[$d2],8)",
2561            "&vprold     (@x[$d3],@x[$d3],8)",
2562
2563         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2564          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2565           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2566            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2567         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2568          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2569           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2570            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2571         "&vprold        (@x[$b0],@x[$b0],7)",
2572          "&vprold       (@x[$b1],@x[$b1],7)",
2573           "&vprold      (@x[$b2],@x[$b2],7)",
2574            "&vprold     (@x[$b3],@x[$b3],7)"
2575         );
2576 }
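# Each of the 16 zmm lanes above carries one independent ChaCha block. As a
# plain-Perl model (illustration only, never called by the generator; it
# relies on the chacha_quarter_round_ref sketch defined earlier in this
# file), a double round applies the quarter-round to the four columns and
# then to the four diagonals of the 4x4 word state:
sub chacha_double_round_ref {
my @x = @_;				# 16 32-bit state words
foreach my $q ([0,4,8,12],[1,5,9,13],[2,6,10,14],[3,7,11,15],	# even round
	       [0,5,10,15],[1,6,11,12],[2,7,8,13],[3,4,9,14]) {	# odd round
	@x[@$q] = chacha_quarter_round_ref(@x[@$q]);
}
return @x;
}
# Ten such double rounds (the "mov \$10,%eax" / dec / jnz loop above) give
# the 20 rounds of ChaCha20 before the key material is accumulated back in.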
2577
2578 my $xframe = $win64 ? 0xa8 : 8;
2579
2580 $code.=<<___;
2581 .type   ChaCha20_16x,\@function,5
2582 .align  32
2583 ChaCha20_16x:
2584 .cfi_startproc
2585 .LChaCha20_16x:
2586         mov             %rsp,%r9                # frame register
2587 .cfi_def_cfa_register   %r9
2588         sub             \$64+$xframe,%rsp
2589         and             \$-64,%rsp
2590 ___
2591 $code.=<<___    if ($win64);
2592 	movaps		%xmm6,-0xa8(%r9)	# preserve non-volatile xmm6-15 (Win64 ABI)
2593         movaps          %xmm7,-0x98(%r9)
2594         movaps          %xmm8,-0x88(%r9)
2595         movaps          %xmm9,-0x78(%r9)
2596         movaps          %xmm10,-0x68(%r9)
2597         movaps          %xmm11,-0x58(%r9)
2598         movaps          %xmm12,-0x48(%r9)
2599         movaps          %xmm13,-0x38(%r9)
2600         movaps          %xmm14,-0x28(%r9)
2601         movaps          %xmm15,-0x18(%r9)
2602 .L16x_body:
2603 ___
2604 $code.=<<___;
2605         vzeroupper
2606
2607         lea             .Lsigma(%rip),%r10
2608         vbroadcasti32x4 (%r10),$xa3             # key[0]
2609         vbroadcasti32x4 ($key),$xb3             # key[1]
2610         vbroadcasti32x4 16($key),$xc3           # key[2]
2611         vbroadcasti32x4 ($counter),$xd3         # key[3]
2612
2613         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
2614         vpshufd         \$0x55,$xa3,$xa1
2615         vpshufd         \$0xaa,$xa3,$xa2
2616         vpshufd         \$0xff,$xa3,$xa3
2617         vmovdqa64       $xa0,@key[0]
2618         vmovdqa64       $xa1,@key[1]
2619         vmovdqa64       $xa2,@key[2]
2620         vmovdqa64       $xa3,@key[3]
2621
2622         vpshufd         \$0x00,$xb3,$xb0
2623         vpshufd         \$0x55,$xb3,$xb1
2624         vpshufd         \$0xaa,$xb3,$xb2
2625         vpshufd         \$0xff,$xb3,$xb3
2626         vmovdqa64       $xb0,@key[4]
2627         vmovdqa64       $xb1,@key[5]
2628         vmovdqa64       $xb2,@key[6]
2629         vmovdqa64       $xb3,@key[7]
2630
2631         vpshufd         \$0x00,$xc3,$xc0
2632         vpshufd         \$0x55,$xc3,$xc1
2633         vpshufd         \$0xaa,$xc3,$xc2
2634         vpshufd         \$0xff,$xc3,$xc3
2635         vmovdqa64       $xc0,@key[8]
2636         vmovdqa64       $xc1,@key[9]
2637         vmovdqa64       $xc2,@key[10]
2638         vmovdqa64       $xc3,@key[11]
2639
2640         vpshufd         \$0x00,$xd3,$xd0
2641         vpshufd         \$0x55,$xd3,$xd1
2642         vpshufd         \$0xaa,$xd3,$xd2
2643         vpshufd         \$0xff,$xd3,$xd3
2644         vpaddd          .Lincz(%rip),$xd0,$xd0  # don't save counters yet
2645         vmovdqa64       $xd0,@key[12]
2646         vmovdqa64       $xd1,@key[13]
2647         vmovdqa64       $xd2,@key[14]
2648         vmovdqa64       $xd3,@key[15]
2649
2650         mov             \$10,%eax
2651         jmp             .Loop16x
2652
2653 .align  32
2654 .Loop_outer16x:
2655         vpbroadcastd    0(%r10),$xa0            # reload key
2656         vpbroadcastd    4(%r10),$xa1
2657         vpbroadcastd    8(%r10),$xa2
2658         vpbroadcastd    12(%r10),$xa3
2659         vpaddd          .Lsixteen(%rip),@key[12],@key[12]       # next SIMD counters
2660         vmovdqa64       @key[4],$xb0
2661         vmovdqa64       @key[5],$xb1
2662         vmovdqa64       @key[6],$xb2
2663         vmovdqa64       @key[7],$xb3
2664         vmovdqa64       @key[8],$xc0
2665         vmovdqa64       @key[9],$xc1
2666         vmovdqa64       @key[10],$xc2
2667         vmovdqa64       @key[11],$xc3
2668         vmovdqa64       @key[12],$xd0
2669         vmovdqa64       @key[13],$xd1
2670         vmovdqa64       @key[14],$xd2
2671         vmovdqa64       @key[15],$xd3
2672
2673         vmovdqa64       $xa0,@key[0]
2674         vmovdqa64       $xa1,@key[1]
2675         vmovdqa64       $xa2,@key[2]
2676         vmovdqa64       $xa3,@key[3]
2677
2678         mov             \$10,%eax
2679         jmp             .Loop16x
2680
2681 .align  32
2682 .Loop16x:
2683 ___
2684         foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
2685         foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
2686 $code.=<<___;
2687         dec             %eax
2688         jnz             .Loop16x
2689
2690         vpaddd          @key[0],$xa0,$xa0       # accumulate key
2691         vpaddd          @key[1],$xa1,$xa1
2692         vpaddd          @key[2],$xa2,$xa2
2693         vpaddd          @key[3],$xa3,$xa3
2694
2695         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
2696         vpunpckldq      $xa3,$xa2,$xt3
2697         vpunpckhdq      $xa1,$xa0,$xa0
2698         vpunpckhdq      $xa3,$xa2,$xa2
2699         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
2700         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
2701         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
2702         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
2703 ___
2704         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
2705 $code.=<<___;
2706         vpaddd          @key[4],$xb0,$xb0
2707         vpaddd          @key[5],$xb1,$xb1
2708         vpaddd          @key[6],$xb2,$xb2
2709         vpaddd          @key[7],$xb3,$xb3
2710
2711         vpunpckldq      $xb1,$xb0,$xt2
2712         vpunpckldq      $xb3,$xb2,$xt3
2713         vpunpckhdq      $xb1,$xb0,$xb0
2714         vpunpckhdq      $xb3,$xb2,$xb2
2715         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
2716         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
2717         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
2718         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
2719 ___
2720         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
2721 $code.=<<___;
2722         vshufi32x4      \$0x44,$xb0,$xa0,$xt3   # "de-interlace" further
2723         vshufi32x4      \$0xee,$xb0,$xa0,$xb0
2724         vshufi32x4      \$0x44,$xb1,$xa1,$xa0
2725         vshufi32x4      \$0xee,$xb1,$xa1,$xb1
2726         vshufi32x4      \$0x44,$xb2,$xa2,$xa1
2727         vshufi32x4      \$0xee,$xb2,$xa2,$xb2
2728         vshufi32x4      \$0x44,$xb3,$xa3,$xa2
2729         vshufi32x4      \$0xee,$xb3,$xa3,$xb3
2730 ___
2731         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
2732 $code.=<<___;
2733         vpaddd          @key[8],$xc0,$xc0
2734         vpaddd          @key[9],$xc1,$xc1
2735         vpaddd          @key[10],$xc2,$xc2
2736         vpaddd          @key[11],$xc3,$xc3
2737
2738         vpunpckldq      $xc1,$xc0,$xt2
2739         vpunpckldq      $xc3,$xc2,$xt3
2740         vpunpckhdq      $xc1,$xc0,$xc0
2741         vpunpckhdq      $xc3,$xc2,$xc2
2742         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
2743         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
2744         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
2745         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
2746 ___
2747         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
2748 $code.=<<___;
2749         vpaddd          @key[12],$xd0,$xd0
2750         vpaddd          @key[13],$xd1,$xd1
2751         vpaddd          @key[14],$xd2,$xd2
2752         vpaddd          @key[15],$xd3,$xd3
2753
2754         vpunpckldq      $xd1,$xd0,$xt2
2755         vpunpckldq      $xd3,$xd2,$xt3
2756         vpunpckhdq      $xd1,$xd0,$xd0
2757         vpunpckhdq      $xd3,$xd2,$xd2
2758         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
2759         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
2760         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
2761         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
2762 ___
2763         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
2764 $code.=<<___;
2765         vshufi32x4      \$0x44,$xd0,$xc0,$xt3   # "de-interlace" further
2766         vshufi32x4      \$0xee,$xd0,$xc0,$xd0
2767         vshufi32x4      \$0x44,$xd1,$xc1,$xc0
2768         vshufi32x4      \$0xee,$xd1,$xc1,$xd1
2769         vshufi32x4      \$0x44,$xd2,$xc2,$xc1
2770         vshufi32x4      \$0xee,$xd2,$xc2,$xd2
2771         vshufi32x4      \$0x44,$xd3,$xc3,$xc2
2772         vshufi32x4      \$0xee,$xd3,$xc3,$xd3
2773 ___
2774         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
2775 $code.=<<___;
2776         vshufi32x4      \$0x88,$xc0,$xa0,$xt0   # "de-interlace" further
2777         vshufi32x4      \$0xdd,$xc0,$xa0,$xa0
2778          vshufi32x4     \$0x88,$xd0,$xb0,$xc0
2779          vshufi32x4     \$0xdd,$xd0,$xb0,$xd0
2780         vshufi32x4      \$0x88,$xc1,$xa1,$xt1
2781         vshufi32x4      \$0xdd,$xc1,$xa1,$xa1
2782          vshufi32x4     \$0x88,$xd1,$xb1,$xc1
2783          vshufi32x4     \$0xdd,$xd1,$xb1,$xd1
2784         vshufi32x4      \$0x88,$xc2,$xa2,$xt2
2785         vshufi32x4      \$0xdd,$xc2,$xa2,$xa2
2786          vshufi32x4     \$0x88,$xd2,$xb2,$xc2
2787          vshufi32x4     \$0xdd,$xd2,$xb2,$xd2
2788         vshufi32x4      \$0x88,$xc3,$xa3,$xt3
2789         vshufi32x4      \$0xdd,$xc3,$xa3,$xa3
2790          vshufi32x4     \$0x88,$xd3,$xb3,$xc3
2791          vshufi32x4     \$0xdd,$xd3,$xb3,$xd3
2792 ___
2793         ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
2794         ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
2795
2796         ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
2797          $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
2798         ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2799          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
$code.=<<___;
	cmp		\$64*16,$len
	jb		.Ltail16x

	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out

	sub		\$64*16,$len
	jnz		.Loop_outer16x

	jmp		.Ldone16x

.align	32
.Ltail16x:
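#	Fewer than 16 64-byte blocks remain.  The out-inp distance is kept
#	in the output register so complete blocks can be addressed as
#	(out,inp); they are xor'ed and stored one at a time, with the next
#	key-stream block copied into the "a0" register at each step so the
#	final partial block always ends up there.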
	xor		%r10,%r10
	sub		$inp,$out
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

.Less_than_64_16x:
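#	Key stream for the final partial block is spilled to the stack and
#	xor'ed with the input one byte at a time, indexed by %r10.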
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
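# On Win64 the non-volatile %xmm6-%xmm15 were spilled at negative offsets
# from %r9 (the preserved stack pointer) in the prologue; reload them before
# %rsp is restored from %r9.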
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_16x,.-ChaCha20_16x
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

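# se_handler covers the integer-only ChaCha20_ctr32 path and restores the six
# general-purpose registers pushed in its prologue.  ssse3_handler and
# full_handler cover the SIMD paths: driven by the prologue/epilogue labels in
# HandlerData[], they recover the original %rsp from the saved %r9 and copy
# the spilled non-volatile xmm registers (two for the ssse3/avx512 frames, ten
# for the larger frames) back into the CONTEXT record.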
$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lctr32_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lno_data
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi	# saved %xmm6,%xmm7
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx		# 4 quadwords = 2 xmm registers
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi	# saved %xmm6..%xmm15
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# 20 quadwords = 10 xmm registers
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

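# Each .pdata triple below registers a function's begin/end RVAs together
# with the RVA of its unwind descriptor in .xdata; each descriptor names the
# handler and, where applicable, the prologue/epilogue labels the handler
# reads as HandlerData[].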
.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4xop_body,.L4xop_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
___
}

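# Final pass over the generated code: evaluate the `...` expressions embedded
# in the templates and rewrite "%x#%ymmN"/"%x#%zmmN" register annotations to
# plain "%xmmN", so down-shifted operations use the 128-bit register names.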
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"

	print $_,"\n";
}

close STDOUT;