chacha/asm/chacha-x86_64.pl: fix sporadic crash in AVX512 code path.
1 #! /usr/bin/env perl
2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the OpenSSL license (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # November 2014
18 #
19 # ChaCha20 for x86_64.
20 #
21 # December 2016
22 #
23 # Add AVX512F code path.
24 #
25 # Performance in cycles per byte out of large buffer.
26 #
27 #               IALU/gcc 4.8(i) 1xSSSE3/SSE2    4xSSSE3     NxAVX(v)
28 #
29 # P4            9.48/+99%       -/22.7(ii)      -
30 # Core2         7.83/+55%       7.90/8.08       4.35
31 # Westmere      7.19/+50%       5.60/6.70       3.00
32 # Sandy Bridge  8.31/+42%       5.45/6.76       2.72
33 # Ivy Bridge    6.71/+46%       5.40/6.49       2.41
34 # Haswell       5.92/+43%       5.20/6.45       2.42        1.23
35 # Skylake[-X]   5.87/+39%       4.70/-          2.31        1.19[0.57]
36 # Silvermont    12.0/+33%       7.75/7.40       7.03(iii)
37 # Knights L     11.7/-          -               9.60(iii)   0.80
38 # Goldmont      10.6/+17%       5.10/-          3.28
39 # Sledgehammer  7.28/+52%       -/14.2(ii)      -
40 # Bulldozer     9.66/+28%       9.85/11.1       3.06(iv)
41 # Ryzen         5.96/+50%       5.19/-          2.40        2.09
42 # VIA Nano      10.5/+46%       6.72/8.60       6.05
43 #
44 # (i)   compared to older gcc 3.x one can observe >2x improvement on
45 #       most platforms;
46 # (ii)  as can be seen, SSE2 performance is too low on legacy
47 #       processors; NxSSE2 results are naturally better, but not
48 #       impressively better than IALU ones, which is why you won't
49 #       find SSE2 code below;
50 # (iii) this is not an optimal result for Atom because of MSROM
51 #       limitations; SSE2 can do better, but the gain is considered too
52 #       low to justify the [maintenance] effort;
53 # (iv)  Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
54 # (v)   8xAVX2 or 16xAVX-512, whichever is best applicable;
55
56 $flavour = shift;
57 $output  = shift;
58 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
59
60 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
61
62 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
63 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
64 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
65 die "can't locate x86_64-xlate.pl";
66
67 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
68                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
69         $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
70 }
71
72 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
73            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
74         $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
75         $avx += 1 if ($1==2.11 && $2>=8);
76 }
77
78 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
79            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
80         $avx = ($1>=10) + ($1>=11);
81 }
82
83 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
84         $avx = ($2>=3.0) + ($2>3.0);
85 }
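
# $avx now holds the highest extension tier the toolchain can assemble:
# a value of 1 or more enables the AVX/XOP paths below, 2 or more the
# AVX2 (8x) path, and 3 the AVX512 path; see the "if ($avx...)" guards.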
86
87 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
88 *STDOUT=*OUT;
89
90 # input parameter block
91 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
92
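# A note on the constant pool emitted below: .Lsigma is the standard
# "expand 32-byte k" input, .Lrot16/.Lrot24 are pshufb masks for the 16-
# and 8-bit rotations, and the .Lone/.Linc/.Lfour values (with the wider
# .Lincy/.Leight/.Lzeroz/.Lfourz/.Lincz/.Lsixteen variants, presumably for
# the AVX2/AVX512 paths further down) provide the per-lane counter offsets
# and per-iteration counter increments.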
93 $code.=<<___;
94 .text
95
96 .extern OPENSSL_ia32cap_P
97
98 .align  64
99 .Lzero:
100 .long   0,0,0,0
101 .Lone:
102 .long   1,0,0,0
103 .Linc:
104 .long   0,1,2,3
105 .Lfour:
106 .long   4,4,4,4
107 .Lincy:
108 .long   0,2,4,6,1,3,5,7
109 .Leight:
110 .long   8,8,8,8,8,8,8,8
111 .Lrot16:
112 .byte   0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
113 .Lrot24:
114 .byte   0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
115 .Lsigma:
116 .asciz  "expand 32-byte k"
117 .align  64
118 .Lzeroz:
119 .long   0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
120 .Lfourz:
121 .long   4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
122 .Lincz:
123 .long   0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
124 .Lsixteen:
125 .long   16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
126 .asciz  "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
127 ___
128
129 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
130 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
131   my $arg = pop;
132     $arg = "\$$arg" if ($arg*1 eq $arg);
133     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
134 }
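
# For illustration (hypothetical call, not part of the module's flow):
# &rol("%eax",16) appends "\trol\t\$16,%eax\n" to $code, i.e. the popped
# last argument becomes the first operand (numeric values get a '$'
# immediate prefix) and the remaining arguments are reversed into AT&T
# operand order. This is how the instruction strings eval'ed out of
# &ROUND() below get expanded.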
135
136 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
137     "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
138 @t=("%esi","%edi");
139
140 sub ROUND {                     # critical path is 24 cycles per round
141 my ($a0,$b0,$c0,$d0)=@_;
142 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
143 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
144 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
145 my ($xc,$xc_)=map("\"$_\"",@t);
146 my @x=map("\"$_\"",@x);
147
148         # Consider order in which variables are addressed by their
149         # index:
150         #
151         #       a   b   c   d
152         #
153         #       0   4   8  12 < even round
154         #       1   5   9  13
155         #       2   6  10  14
156         #       3   7  11  15
157         #       0   5  10  15 < odd round
158         #       1   6  11  12
159         #       2   7   8  13
160         #       3   4   9  14
161         #
162         # 'a', 'b' and 'd' values are permanently allocated in
163         # registers, @x[0..7,12..15], while the 'c's are maintained in
164         # memory. If you observe the 'c' column, you'll notice that a
165         # pair of 'c's stays invariant between rounds. This means we
166         # only have to reload them once per round, in the middle. This
167         # is why you'll see a bunch of 'c' stores and loads in the
168         # middle, but none at the beginning or end.
169
170         # Normally instructions would be interleaved to favour in-order
171         # execution. Generally out-of-order cores manage it gracefully,
172         # but not this time for some reason. As in-order execution
173         # cores are a dying breed, with old Atom being the only one
174         # around, the instructions are left uninterleaved. Besides,
175         # Atom is better off executing the 1xSSSE3 code anyway...
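
        # Each (a,b,c,d) quadruple below is one standard ChaCha
        # quarter-round (add/xor/rotate by 16, 12, 8 and 7), with Q1/Q2
        # interleaved before the 'c' reload and Q3/Q4 after it.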
176
177         (
178         "&add   (@x[$a0],@x[$b0])",     # Q1
179         "&xor   (@x[$d0],@x[$a0])",
180         "&rol   (@x[$d0],16)",
181          "&add  (@x[$a1],@x[$b1])",     # Q2
182          "&xor  (@x[$d1],@x[$a1])",
183          "&rol  (@x[$d1],16)",
184
185         "&add   ($xc,@x[$d0])",
186         "&xor   (@x[$b0],$xc)",
187         "&rol   (@x[$b0],12)",
188          "&add  ($xc_,@x[$d1])",
189          "&xor  (@x[$b1],$xc_)",
190          "&rol  (@x[$b1],12)",
191
192         "&add   (@x[$a0],@x[$b0])",
193         "&xor   (@x[$d0],@x[$a0])",
194         "&rol   (@x[$d0],8)",
195          "&add  (@x[$a1],@x[$b1])",
196          "&xor  (@x[$d1],@x[$a1])",
197          "&rol  (@x[$d1],8)",
198
199         "&add   ($xc,@x[$d0])",
200         "&xor   (@x[$b0],$xc)",
201         "&rol   (@x[$b0],7)",
202          "&add  ($xc_,@x[$d1])",
203          "&xor  (@x[$b1],$xc_)",
204          "&rol  (@x[$b1],7)",
205
206         "&mov   (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
207          "&mov  (\"4*$c1(%rsp)\",$xc_)",
208         "&mov   ($xc,\"4*$c2(%rsp)\")",
209          "&mov  ($xc_,\"4*$c3(%rsp)\")",
210
211         "&add   (@x[$a2],@x[$b2])",     # Q3
212         "&xor   (@x[$d2],@x[$a2])",
213         "&rol   (@x[$d2],16)",
214          "&add  (@x[$a3],@x[$b3])",     # Q4
215          "&xor  (@x[$d3],@x[$a3])",
216          "&rol  (@x[$d3],16)",
217
218         "&add   ($xc,@x[$d2])",
219         "&xor   (@x[$b2],$xc)",
220         "&rol   (@x[$b2],12)",
221          "&add  ($xc_,@x[$d3])",
222          "&xor  (@x[$b3],$xc_)",
223          "&rol  (@x[$b3],12)",
224
225         "&add   (@x[$a2],@x[$b2])",
226         "&xor   (@x[$d2],@x[$a2])",
227         "&rol   (@x[$d2],8)",
228          "&add  (@x[$a3],@x[$b3])",
229          "&xor  (@x[$d3],@x[$a3])",
230          "&rol  (@x[$d3],8)",
231
232         "&add   ($xc,@x[$d2])",
233         "&xor   (@x[$b2],$xc)",
234         "&rol   (@x[$b2],7)",
235          "&add  ($xc_,@x[$d3])",
236          "&xor  (@x[$b3],$xc_)",
237          "&rol  (@x[$b3],7)"
238         );
239 }
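
# For cross-checking only: a scalar Perl sketch of the quarter-round the
# quadruples above implement (illustrative; never called by the generator).
sub chacha_quarter_round_ref {
my ($a,$b,$c,$d)=@_;
my $rotl = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n)))&0xffffffff };
$a=($a+$b)&0xffffffff; $d=$rotl->($d^$a,16);    # a += b; d ^= a; d <<<= 16
$c=($c+$d)&0xffffffff; $b=$rotl->($b^$c,12);    # c += d; b ^= c; b <<<= 12
$a=($a+$b)&0xffffffff; $d=$rotl->($d^$a,8);     # a += b; d ^= a; d <<<= 8
$c=($c+$d)&0xffffffff; $b=$rotl->($b^$c,7);     # c += d; b ^= c; b <<<= 7
return ($a,$b,$c,$d);
}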
240
241 ########################################################################
242 # Generic code path that handles all lengths on pre-SSSE3 processors.
243 $code.=<<___;
244 .globl  ChaCha20_ctr32
245 .type   ChaCha20_ctr32,\@function,5
246 .align  64
247 ChaCha20_ctr32:
248 .cfi_startproc
249         cmp     \$0,$len
250         je      .Lno_data
251         mov     OPENSSL_ia32cap_P+4(%rip),%r10
252 ___
253 $code.=<<___    if ($avx>2);
254         bt      \$48,%r10               # check for AVX512F
255         jc      .LChaCha20_avx512
256 ___
257 $code.=<<___;
258         test    \$`1<<(41-32)`,%r10d
259         jnz     .LChaCha20_ssse3
260
261         push    %rbx
262 .cfi_push       %rbx
263         push    %rbp
264 .cfi_push       %rbp
265         push    %r12
266 .cfi_push       %r12
267         push    %r13
268 .cfi_push       %r13
269         push    %r14
270 .cfi_push       %r14
271         push    %r15
272 .cfi_push       %r15
273         sub     \$64+24,%rsp
274 .cfi_adjust_cfa_offset  64+24
275 .Lctr32_body:
276
277         #movdqa .Lsigma(%rip),%xmm0
278         movdqu  ($key),%xmm1
279         movdqu  16($key),%xmm2
280         movdqu  ($counter),%xmm3
281         movdqa  .Lone(%rip),%xmm4
282
283         #movdqa %xmm0,4*0(%rsp)         # key[0]
284         movdqa  %xmm1,4*4(%rsp)         # key[1]
285         movdqa  %xmm2,4*8(%rsp)         # key[2]
286         movdqa  %xmm3,4*12(%rsp)        # key[3]
287         mov     $len,%rbp               # reassign $len
288         jmp     .Loop_outer
289
290 .align  32
291 .Loop_outer:
292         mov     \$0x61707865,@x[0]      # 'expa'
293         mov     \$0x3320646e,@x[1]      # 'nd 3'
294         mov     \$0x79622d32,@x[2]      # '2-by'
295         mov     \$0x6b206574,@x[3]      # 'te k'
296         mov     4*4(%rsp),@x[4]
297         mov     4*5(%rsp),@x[5]
298         mov     4*6(%rsp),@x[6]
299         mov     4*7(%rsp),@x[7]
300         movd    %xmm3,@x[12]
301         mov     4*13(%rsp),@x[13]
302         mov     4*14(%rsp),@x[14]
303         mov     4*15(%rsp),@x[15]
304
305         mov     %rbp,64+0(%rsp)         # save len
306         mov     \$10,%ebp
307         mov     $inp,64+8(%rsp)         # save inp
308         movq    %xmm2,%rsi              # "@x[8]"
309         mov     $out,64+16(%rsp)        # save out
310         mov     %rsi,%rdi
311         shr     \$32,%rdi               # "@x[9]"
312         jmp     .Loop
313
314 .align  32
315 .Loop:
316 ___
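        # ROUND(0,4,8,12) is the "even" (column) round and ROUND(0,5,10,15)
        # the "odd" (diagonal) round; %ebp counts ten such double-rounds,
        # i.e. ChaCha20's 20 rounds.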
317         foreach (&ROUND (0, 4, 8,12)) { eval; }
318         foreach (&ROUND (0, 5,10,15)) { eval; }
319         &dec    ("%ebp");
320         &jnz    (".Loop");
321
322 $code.=<<___;
323         mov     @t[1],4*9(%rsp)         # modulo-scheduled
324         mov     @t[0],4*8(%rsp)
325         mov     64(%rsp),%rbp           # load len
326         movdqa  %xmm2,%xmm1
327         mov     64+8(%rsp),$inp         # load inp
328         paddd   %xmm4,%xmm3             # increment counter
329         mov     64+16(%rsp),$out        # load out
330
331         add     \$0x61707865,@x[0]      # 'expa'
332         add     \$0x3320646e,@x[1]      # 'nd 3'
333         add     \$0x79622d32,@x[2]      # '2-by'
334         add     \$0x6b206574,@x[3]      # 'te k'
335         add     4*4(%rsp),@x[4]
336         add     4*5(%rsp),@x[5]
337         add     4*6(%rsp),@x[6]
338         add     4*7(%rsp),@x[7]
339         add     4*12(%rsp),@x[12]
340         add     4*13(%rsp),@x[13]
341         add     4*14(%rsp),@x[14]
342         add     4*15(%rsp),@x[15]
343         paddd   4*8(%rsp),%xmm1
344
345         cmp     \$64,%rbp
346         jb      .Ltail
347
348         xor     4*0($inp),@x[0]         # xor with input
349         xor     4*1($inp),@x[1]
350         xor     4*2($inp),@x[2]
351         xor     4*3($inp),@x[3]
352         xor     4*4($inp),@x[4]
353         xor     4*5($inp),@x[5]
354         xor     4*6($inp),@x[6]
355         xor     4*7($inp),@x[7]
356         movdqu  4*8($inp),%xmm0
357         xor     4*12($inp),@x[12]
358         xor     4*13($inp),@x[13]
359         xor     4*14($inp),@x[14]
360         xor     4*15($inp),@x[15]
361         lea     4*16($inp),$inp         # inp+=64
362         pxor    %xmm1,%xmm0
363
364         movdqa  %xmm2,4*8(%rsp)
365         movd    %xmm3,4*12(%rsp)
366
367         mov     @x[0],4*0($out)         # write output
368         mov     @x[1],4*1($out)
369         mov     @x[2],4*2($out)
370         mov     @x[3],4*3($out)
371         mov     @x[4],4*4($out)
372         mov     @x[5],4*5($out)
373         mov     @x[6],4*6($out)
374         mov     @x[7],4*7($out)
375         movdqu  %xmm0,4*8($out)
376         mov     @x[12],4*12($out)
377         mov     @x[13],4*13($out)
378         mov     @x[14],4*14($out)
379         mov     @x[15],4*15($out)
380         lea     4*16($out),$out         # out+=64
381
382         sub     \$64,%rbp
383         jnz     .Loop_outer
384
385         jmp     .Ldone
386
387 .align  16
388 .Ltail:
389         mov     @x[0],4*0(%rsp)
390         mov     @x[1],4*1(%rsp)
391         xor     %rbx,%rbx
392         mov     @x[2],4*2(%rsp)
393         mov     @x[3],4*3(%rsp)
394         mov     @x[4],4*4(%rsp)
395         mov     @x[5],4*5(%rsp)
396         mov     @x[6],4*6(%rsp)
397         mov     @x[7],4*7(%rsp)
398         movdqa  %xmm1,4*8(%rsp)
399         mov     @x[12],4*12(%rsp)
400         mov     @x[13],4*13(%rsp)
401         mov     @x[14],4*14(%rsp)
402         mov     @x[15],4*15(%rsp)
403
404 .Loop_tail:
405         movzb   ($inp,%rbx),%eax
406         movzb   (%rsp,%rbx),%edx
407         lea     1(%rbx),%rbx
408         xor     %edx,%eax
409         mov     %al,-1($out,%rbx)
410         dec     %rbp
411         jnz     .Loop_tail
412
413 .Ldone:
414         lea     64+24+48(%rsp),%rsi
415 .cfi_def_cfa    %rsi,8
416         mov     -48(%rsi),%r15
417 .cfi_restore    %r15
418         mov     -40(%rsi),%r14
419 .cfi_restore    %r14
420         mov     -32(%rsi),%r13
421 .cfi_restore    %r13
422         mov     -24(%rsi),%r12
423 .cfi_restore    %r12
424         mov     -16(%rsi),%rbp
425 .cfi_restore    %rbp
426         mov     -8(%rsi),%rbx
427 .cfi_restore    %rbx
428         lea     (%rsi),%rsp
429 .cfi_def_cfa_register   %rsp
430 .Lno_data:
431         ret
432 .cfi_endproc
433 .size   ChaCha20_ctr32,.-ChaCha20_ctr32
434 ___
435
436 ########################################################################
437 # SSSE3 code path that handles shorter lengths
438 {
439 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
440
441 sub SSSE3ROUND {        # critical path is 20 "SIMD ticks" per round
442         &paddd  ($a,$b);
443         &pxor   ($d,$a);
444         &pshufb ($d,$rot16);
445
446         &paddd  ($c,$d);
447         &pxor   ($b,$c);
448         &movdqa ($t,$b);
449         &psrld  ($b,20);
450         &pslld  ($t,12);
451         &por    ($b,$t);
452
453         &paddd  ($a,$b);
454         &pxor   ($d,$a);
455         &pshufb ($d,$rot24);
456
457         &paddd  ($c,$d);
458         &pxor   ($b,$c);
459         &movdqa ($t,$b);
460         &psrld  ($b,25);
461         &pslld  ($t,7);
462         &por    ($b,$t);
463 }
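
# SSE/SSSE3 has no vector rotate instruction: the by-16 and by-8 rotations
# above are done with pshufb byte shuffles ($rot16/$rot24 hold .Lrot16 and
# .Lrot24), while the by-12 and by-7 rotations are synthesized with
# pslld+psrld+por.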
464
465 my $xframe = $win64 ? 32+8 : 8;
466
467 $code.=<<___;
468 .type   ChaCha20_ssse3,\@function,5
469 .align  32
470 ChaCha20_ssse3:
471 .cfi_startproc
472 .LChaCha20_ssse3:
473         mov     %rsp,%r9                # frame pointer
474 .cfi_def_cfa_register   %r9
475 ___
476 $code.=<<___    if ($avx);
477         test    \$`1<<(43-32)`,%r10d
478         jnz     .LChaCha20_4xop         # XOP is fastest even if we use 1/4
479 ___
480 $code.=<<___;
481         cmp     \$128,$len              # we might throw away some data,
482         ja      .LChaCha20_4x           # but overall it won't be slower
483
484 .Ldo_sse3_after_all:
485         sub     \$64+$xframe,%rsp
486 ___
487 $code.=<<___    if ($win64);
488         movaps  %xmm6,-0x28(%r9)
489         movaps  %xmm7,-0x18(%r9)
490 .Lssse3_body:
491 ___
492 $code.=<<___;
493         movdqa  .Lsigma(%rip),$a
494         movdqu  ($key),$b
495         movdqu  16($key),$c
496         movdqu  ($counter),$d
497         movdqa  .Lrot16(%rip),$rot16
498         movdqa  .Lrot24(%rip),$rot24
499
500         movdqa  $a,0x00(%rsp)
501         movdqa  $b,0x10(%rsp)
502         movdqa  $c,0x20(%rsp)
503         movdqa  $d,0x30(%rsp)
504         mov     \$10,$counter           # reuse $counter
505         jmp     .Loop_ssse3
506
507 .align  32
508 .Loop_outer_ssse3:
509         movdqa  .Lone(%rip),$d
510         movdqa  0x00(%rsp),$a
511         movdqa  0x10(%rsp),$b
512         movdqa  0x20(%rsp),$c
513         paddd   0x30(%rsp),$d
514         mov     \$10,$counter
515         movdqa  $d,0x30(%rsp)
516         jmp     .Loop_ssse3
517
518 .align  32
519 .Loop_ssse3:
520 ___
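        # SSSE3ROUND performs the four column quarter-rounds at once on
        # whole rows; the pshufd shuffles rotate the lanes of 'b', 'c' and
        # 'd' so that the second call works on the diagonals, and the
        # mirrored shuffles after it restore column order.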
521         &SSSE3ROUND();
522         &pshufd ($c,$c,0b01001110);
523         &pshufd ($b,$b,0b00111001);
524         &pshufd ($d,$d,0b10010011);
525         &nop    ();
526
527         &SSSE3ROUND();
528         &pshufd ($c,$c,0b01001110);
529         &pshufd ($b,$b,0b10010011);
530         &pshufd ($d,$d,0b00111001);
531
532         &dec    ($counter);
533         &jnz    (".Loop_ssse3");
534
535 $code.=<<___;
536         paddd   0x00(%rsp),$a
537         paddd   0x10(%rsp),$b
538         paddd   0x20(%rsp),$c
539         paddd   0x30(%rsp),$d
540
541         cmp     \$64,$len
542         jb      .Ltail_ssse3
543
544         movdqu  0x00($inp),$t
545         movdqu  0x10($inp),$t1
546         pxor    $t,$a                   # xor with input
547         movdqu  0x20($inp),$t
548         pxor    $t1,$b
549         movdqu  0x30($inp),$t1
550         lea     0x40($inp),$inp         # inp+=64
551         pxor    $t,$c
552         pxor    $t1,$d
553
554         movdqu  $a,0x00($out)           # write output
555         movdqu  $b,0x10($out)
556         movdqu  $c,0x20($out)
557         movdqu  $d,0x30($out)
558         lea     0x40($out),$out         # out+=64
559
560         sub     \$64,$len
561         jnz     .Loop_outer_ssse3
562
563         jmp     .Ldone_ssse3
564
565 .align  16
566 .Ltail_ssse3:
567         movdqa  $a,0x00(%rsp)
568         movdqa  $b,0x10(%rsp)
569         movdqa  $c,0x20(%rsp)
570         movdqa  $d,0x30(%rsp)
571         xor     $counter,$counter
572
573 .Loop_tail_ssse3:
574         movzb   ($inp,$counter),%eax
575         movzb   (%rsp,$counter),%ecx
576         lea     1($counter),$counter
577         xor     %ecx,%eax
578         mov     %al,-1($out,$counter)
579         dec     $len
580         jnz     .Loop_tail_ssse3
581
582 .Ldone_ssse3:
583 ___
584 $code.=<<___    if ($win64);
585         movaps  -0x28(%r9),%xmm6
586         movaps  -0x18(%r9),%xmm7
587 ___
588 $code.=<<___;
589         lea     (%r9),%rsp
590 .cfi_def_cfa_register   %rsp
591 .Lssse3_epilogue:
592         ret
593 .cfi_endproc
594 .size   ChaCha20_ssse3,.-ChaCha20_ssse3
595 ___
596 }
597
598 ########################################################################
599 # SSSE3 code path that handles longer messages.
600 {
601 # assign variables to favor Atom front-end
602 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
603     $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
604 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
605         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
606
607 sub SSSE3_lane_ROUND {
608 my ($a0,$b0,$c0,$d0)=@_;
609 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
610 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
611 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
612 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
613 my @x=map("\"$_\"",@xx);
614
615         # Consider order in which variables are addressed by their
616         # index:
617         #
618         #       a   b   c   d
619         #
620         #       0   4   8  12 < even round
621         #       1   5   9  13
622         #       2   6  10  14
623         #       3   7  11  15
624         #       0   5  10  15 < odd round
625         #       1   6  11  12
626         #       2   7   8  13
627         #       3   4   9  14
628         #
629         # 'a', 'b' and 'd' values are permanently allocated in
630         # registers, @x[0..7,12..15], while the 'c's are maintained in
631         # memory. If you observe the 'c' column, you'll notice that a
632         # pair of 'c's stays invariant between rounds. This means we
633         # only have to reload them once per round, in the middle. This
634         # is why you'll see a bunch of 'c' stores and loads in the
635         # middle, but none at the beginning or end.
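
        # Unlike the scalar version above, here each state word is a whole
        # xmm vector whose four lanes carry four independent blocks, so
        # every operation processes four blocks at once. As in the scalar
        # path, the 'c' vectors are spilled to the stack, and the
        # rot16/rot24 pshufb masks are fetched from (%r10)/(%r11).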
636
637         (
638         "&paddd         (@x[$a0],@x[$b0])",     # Q1
639          "&paddd        (@x[$a1],@x[$b1])",     # Q2
640         "&pxor          (@x[$d0],@x[$a0])",
641          "&pxor         (@x[$d1],@x[$a1])",
642         "&pshufb        (@x[$d0],$t1)",
643          "&pshufb       (@x[$d1],$t1)",
644
645         "&paddd         ($xc,@x[$d0])",
646          "&paddd        ($xc_,@x[$d1])",
647         "&pxor          (@x[$b0],$xc)",
648          "&pxor         (@x[$b1],$xc_)",
649         "&movdqa        ($t0,@x[$b0])",
650         "&pslld         (@x[$b0],12)",
651         "&psrld         ($t0,20)",
652          "&movdqa       ($t1,@x[$b1])",
653          "&pslld        (@x[$b1],12)",
654         "&por           (@x[$b0],$t0)",
655          "&psrld        ($t1,20)",
656         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
657          "&por          (@x[$b1],$t1)",
658
659         "&paddd         (@x[$a0],@x[$b0])",
660          "&paddd        (@x[$a1],@x[$b1])",
661         "&pxor          (@x[$d0],@x[$a0])",
662          "&pxor         (@x[$d1],@x[$a1])",
663         "&pshufb        (@x[$d0],$t0)",
664          "&pshufb       (@x[$d1],$t0)",
665
666         "&paddd         ($xc,@x[$d0])",
667          "&paddd        ($xc_,@x[$d1])",
668         "&pxor          (@x[$b0],$xc)",
669          "&pxor         (@x[$b1],$xc_)",
670         "&movdqa        ($t1,@x[$b0])",
671         "&pslld         (@x[$b0],7)",
672         "&psrld         ($t1,25)",
673          "&movdqa       ($t0,@x[$b1])",
674          "&pslld        (@x[$b1],7)",
675         "&por           (@x[$b0],$t1)",
676          "&psrld        ($t0,25)",
677         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
678          "&por          (@x[$b1],$t0)",
679
680         "&movdqa        (\"`16*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
681          "&movdqa       (\"`16*($c1-8)`(%rsp)\",$xc_)",
682         "&movdqa        ($xc,\"`16*($c2-8)`(%rsp)\")",
683          "&movdqa       ($xc_,\"`16*($c3-8)`(%rsp)\")",
684
685         "&paddd         (@x[$a2],@x[$b2])",     # Q3
686          "&paddd        (@x[$a3],@x[$b3])",     # Q4
687         "&pxor          (@x[$d2],@x[$a2])",
688          "&pxor         (@x[$d3],@x[$a3])",
689         "&pshufb        (@x[$d2],$t1)",
690          "&pshufb       (@x[$d3],$t1)",
691
692         "&paddd         ($xc,@x[$d2])",
693          "&paddd        ($xc_,@x[$d3])",
694         "&pxor          (@x[$b2],$xc)",
695          "&pxor         (@x[$b3],$xc_)",
696         "&movdqa        ($t0,@x[$b2])",
697         "&pslld         (@x[$b2],12)",
698         "&psrld         ($t0,20)",
699          "&movdqa       ($t1,@x[$b3])",
700          "&pslld        (@x[$b3],12)",
701         "&por           (@x[$b2],$t0)",
702          "&psrld        ($t1,20)",
703         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
704          "&por          (@x[$b3],$t1)",
705
706         "&paddd         (@x[$a2],@x[$b2])",
707          "&paddd        (@x[$a3],@x[$b3])",
708         "&pxor          (@x[$d2],@x[$a2])",
709          "&pxor         (@x[$d3],@x[$a3])",
710         "&pshufb        (@x[$d2],$t0)",
711          "&pshufb       (@x[$d3],$t0)",
712
713         "&paddd         ($xc,@x[$d2])",
714          "&paddd        ($xc_,@x[$d3])",
715         "&pxor          (@x[$b2],$xc)",
716          "&pxor         (@x[$b3],$xc_)",
717         "&movdqa        ($t1,@x[$b2])",
718         "&pslld         (@x[$b2],7)",
719         "&psrld         ($t1,25)",
720          "&movdqa       ($t0,@x[$b3])",
721          "&pslld        (@x[$b3],7)",
722         "&por           (@x[$b2],$t1)",
723          "&psrld        ($t0,25)",
724         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
725          "&por          (@x[$b3],$t0)"
726         );
727 }
728
729 my $xframe = $win64 ? 0xa8 : 8;
730
731 $code.=<<___;
732 .type   ChaCha20_4x,\@function,5
733 .align  32
734 ChaCha20_4x:
735 .cfi_startproc
736 .LChaCha20_4x:
737         mov             %rsp,%r9                # frame pointer
738 .cfi_def_cfa_register   %r9
739         mov             %r10,%r11
740 ___
741 $code.=<<___    if ($avx>1);
742         shr             \$32,%r10               # OPENSSL_ia32cap_P+8
743         test            \$`1<<5`,%r10           # test AVX2
744         jnz             .LChaCha20_8x
745 ___
746 $code.=<<___;
747         cmp             \$192,$len
748         ja              .Lproceed4x
749
750         and             \$`1<<26|1<<22`,%r11    # isolate XSAVE+MOVBE
751         cmp             \$`1<<22`,%r11          # check for MOVBE without XSAVE
752         je              .Ldo_sse3_after_all     # to detect Atom
753
754 .Lproceed4x:
755         sub             \$0x140+$xframe,%rsp
756 ___
757         ################ stack layout
758         # +0x00         SIMD equivalent of @x[8-12]
759         # ...
760         # +0x40         constant copy of key[0-2] smashed by lanes
761         # ...
762         # +0x100        SIMD counters (with nonce smashed by lanes)
763         # ...
764         # +0x140
765 $code.=<<___    if ($win64);
766         movaps          %xmm6,-0xa8(%r9)
767         movaps          %xmm7,-0x98(%r9)
768         movaps          %xmm8,-0x88(%r9)
769         movaps          %xmm9,-0x78(%r9)
770         movaps          %xmm10,-0x68(%r9)
771         movaps          %xmm11,-0x58(%r9)
772         movaps          %xmm12,-0x48(%r9)
773         movaps          %xmm13,-0x38(%r9)
774         movaps          %xmm14,-0x28(%r9)
775         movaps          %xmm15,-0x18(%r9)
776 .L4x_body:
777 ___
778 $code.=<<___;
779         movdqa          .Lsigma(%rip),$xa3      # key[0]
780         movdqu          ($key),$xb3             # key[1]
781         movdqu          16($key),$xt3           # key[2]
782         movdqu          ($counter),$xd3         # key[3]
783         lea             0x100(%rsp),%rcx        # size optimization
784         lea             .Lrot16(%rip),%r10
785         lea             .Lrot24(%rip),%r11
786
787         pshufd          \$0x00,$xa3,$xa0        # smash key by lanes...
788         pshufd          \$0x55,$xa3,$xa1
789         movdqa          $xa0,0x40(%rsp)         # ... and offload
790         pshufd          \$0xaa,$xa3,$xa2
791         movdqa          $xa1,0x50(%rsp)
792         pshufd          \$0xff,$xa3,$xa3
793         movdqa          $xa2,0x60(%rsp)
794         movdqa          $xa3,0x70(%rsp)
795
796         pshufd          \$0x00,$xb3,$xb0
797         pshufd          \$0x55,$xb3,$xb1
798         movdqa          $xb0,0x80-0x100(%rcx)
799         pshufd          \$0xaa,$xb3,$xb2
800         movdqa          $xb1,0x90-0x100(%rcx)
801         pshufd          \$0xff,$xb3,$xb3
802         movdqa          $xb2,0xa0-0x100(%rcx)
803         movdqa          $xb3,0xb0-0x100(%rcx)
804
805         pshufd          \$0x00,$xt3,$xt0        # "$xc0"
806         pshufd          \$0x55,$xt3,$xt1        # "$xc1"
807         movdqa          $xt0,0xc0-0x100(%rcx)
808         pshufd          \$0xaa,$xt3,$xt2        # "$xc2"
809         movdqa          $xt1,0xd0-0x100(%rcx)
810         pshufd          \$0xff,$xt3,$xt3        # "$xc3"
811         movdqa          $xt2,0xe0-0x100(%rcx)
812         movdqa          $xt3,0xf0-0x100(%rcx)
813
814         pshufd          \$0x00,$xd3,$xd0
815         pshufd          \$0x55,$xd3,$xd1
816         paddd           .Linc(%rip),$xd0        # don't save counters yet
817         pshufd          \$0xaa,$xd3,$xd2
818         movdqa          $xd1,0x110-0x100(%rcx)
819         pshufd          \$0xff,$xd3,$xd3
820         movdqa          $xd2,0x120-0x100(%rcx)
821         movdqa          $xd3,0x130-0x100(%rcx)
822
823         jmp             .Loop_enter4x
824
825 .align  32
826 .Loop_outer4x:
827         movdqa          0x40(%rsp),$xa0         # re-load smashed key
828         movdqa          0x50(%rsp),$xa1
829         movdqa          0x60(%rsp),$xa2
830         movdqa          0x70(%rsp),$xa3
831         movdqa          0x80-0x100(%rcx),$xb0
832         movdqa          0x90-0x100(%rcx),$xb1
833         movdqa          0xa0-0x100(%rcx),$xb2
834         movdqa          0xb0-0x100(%rcx),$xb3
835         movdqa          0xc0-0x100(%rcx),$xt0   # "$xc0"
836         movdqa          0xd0-0x100(%rcx),$xt1   # "$xc1"
837         movdqa          0xe0-0x100(%rcx),$xt2   # "$xc2"
838         movdqa          0xf0-0x100(%rcx),$xt3   # "$xc3"
839         movdqa          0x100-0x100(%rcx),$xd0
840         movdqa          0x110-0x100(%rcx),$xd1
841         movdqa          0x120-0x100(%rcx),$xd2
842         movdqa          0x130-0x100(%rcx),$xd3
843         paddd           .Lfour(%rip),$xd0       # next SIMD counters
844
845 .Loop_enter4x:
846         movdqa          $xt2,0x20(%rsp)         # SIMD equivalent of "@x[10]"
847         movdqa          $xt3,0x30(%rsp)         # SIMD equivalent of "@x[11]"
848         movdqa          (%r10),$xt3             # .Lrot16(%rip)
849         mov             \$10,%eax
850         movdqa          $xd0,0x100-0x100(%rcx)  # save SIMD counters
851         jmp             .Loop4x
852
853 .align  32
854 .Loop4x:
855 ___
856         foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
857         foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
858 $code.=<<___;
859         dec             %eax
860         jnz             .Loop4x
861
862         paddd           0x40(%rsp),$xa0         # accumulate key material
863         paddd           0x50(%rsp),$xa1
864         paddd           0x60(%rsp),$xa2
865         paddd           0x70(%rsp),$xa3
866
867         movdqa          $xa0,$xt2               # "de-interlace" data
868         punpckldq       $xa1,$xa0
869         movdqa          $xa2,$xt3
870         punpckldq       $xa3,$xa2
871         punpckhdq       $xa1,$xt2
872         punpckhdq       $xa3,$xt3
873         movdqa          $xa0,$xa1
874         punpcklqdq      $xa2,$xa0               # "a0"
875         movdqa          $xt2,$xa3
876         punpcklqdq      $xt3,$xt2               # "a2"
877         punpckhqdq      $xa2,$xa1               # "a1"
878         punpckhqdq      $xt3,$xa3               # "a3"
879 ___
880         ($xa2,$xt2)=($xt2,$xa2);
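        # Before the transpose each register held one state word across the
        # four lanes (i.e. four blocks); the punpck{l,h}dq/punpck{l,h}qdq
        # sequence above turns that 4x4 dword matrix around so that "aN"
        # holds words 0-3 of block N. The Perl-level swap above merely
        # renames variables so that $xa2 refers to the register now holding
        # "a2"; the same is done for the b/c/d groups below.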
881 $code.=<<___;
882         paddd           0x80-0x100(%rcx),$xb0
883         paddd           0x90-0x100(%rcx),$xb1
884         paddd           0xa0-0x100(%rcx),$xb2
885         paddd           0xb0-0x100(%rcx),$xb3
886
887         movdqa          $xa0,0x00(%rsp)         # offload $xaN
888         movdqa          $xa1,0x10(%rsp)
889         movdqa          0x20(%rsp),$xa0         # "xc2"
890         movdqa          0x30(%rsp),$xa1         # "xc3"
891
892         movdqa          $xb0,$xt2
893         punpckldq       $xb1,$xb0
894         movdqa          $xb2,$xt3
895         punpckldq       $xb3,$xb2
896         punpckhdq       $xb1,$xt2
897         punpckhdq       $xb3,$xt3
898         movdqa          $xb0,$xb1
899         punpcklqdq      $xb2,$xb0               # "b0"
900         movdqa          $xt2,$xb3
901         punpcklqdq      $xt3,$xt2               # "b2"
902         punpckhqdq      $xb2,$xb1               # "b1"
903         punpckhqdq      $xt3,$xb3               # "b3"
904 ___
905         ($xb2,$xt2)=($xt2,$xb2);
906         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
907 $code.=<<___;
908         paddd           0xc0-0x100(%rcx),$xc0
909         paddd           0xd0-0x100(%rcx),$xc1
910         paddd           0xe0-0x100(%rcx),$xc2
911         paddd           0xf0-0x100(%rcx),$xc3
912
913         movdqa          $xa2,0x20(%rsp)         # keep offloading $xaN
914         movdqa          $xa3,0x30(%rsp)
915
916         movdqa          $xc0,$xt2
917         punpckldq       $xc1,$xc0
918         movdqa          $xc2,$xt3
919         punpckldq       $xc3,$xc2
920         punpckhdq       $xc1,$xt2
921         punpckhdq       $xc3,$xt3
922         movdqa          $xc0,$xc1
923         punpcklqdq      $xc2,$xc0               # "c0"
924         movdqa          $xt2,$xc3
925         punpcklqdq      $xt3,$xt2               # "c2"
926         punpckhqdq      $xc2,$xc1               # "c1"
927         punpckhqdq      $xt3,$xc3               # "c3"
928 ___
929         ($xc2,$xt2)=($xt2,$xc2);
930         ($xt0,$xt1)=($xa2,$xa3);                # use $xaN as temporary
931 $code.=<<___;
932         paddd           0x100-0x100(%rcx),$xd0
933         paddd           0x110-0x100(%rcx),$xd1
934         paddd           0x120-0x100(%rcx),$xd2
935         paddd           0x130-0x100(%rcx),$xd3
936
937         movdqa          $xd0,$xt2
938         punpckldq       $xd1,$xd0
939         movdqa          $xd2,$xt3
940         punpckldq       $xd3,$xd2
941         punpckhdq       $xd1,$xt2
942         punpckhdq       $xd3,$xt3
943         movdqa          $xd0,$xd1
944         punpcklqdq      $xd2,$xd0               # "d0"
945         movdqa          $xt2,$xd3
946         punpcklqdq      $xt3,$xt2               # "d2"
947         punpckhqdq      $xd2,$xd1               # "d1"
948         punpckhqdq      $xt3,$xd3               # "d3"
949 ___
950         ($xd2,$xt2)=($xt2,$xd2);
951 $code.=<<___;
952         cmp             \$64*4,$len
953         jb              .Ltail4x
954
955         movdqu          0x00($inp),$xt0         # xor with input
956         movdqu          0x10($inp),$xt1
957         movdqu          0x20($inp),$xt2
958         movdqu          0x30($inp),$xt3
959         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
960         pxor            $xb0,$xt1
961         pxor            $xc0,$xt2
962         pxor            $xd0,$xt3
963
964          movdqu         $xt0,0x00($out)
965         movdqu          0x40($inp),$xt0
966          movdqu         $xt1,0x10($out)
967         movdqu          0x50($inp),$xt1
968          movdqu         $xt2,0x20($out)
969         movdqu          0x60($inp),$xt2
970          movdqu         $xt3,0x30($out)
971         movdqu          0x70($inp),$xt3
972         lea             0x80($inp),$inp         # size optimization
973         pxor            0x10(%rsp),$xt0
974         pxor            $xb1,$xt1
975         pxor            $xc1,$xt2
976         pxor            $xd1,$xt3
977
978          movdqu         $xt0,0x40($out)
979         movdqu          0x00($inp),$xt0
980          movdqu         $xt1,0x50($out)
981         movdqu          0x10($inp),$xt1
982          movdqu         $xt2,0x60($out)
983         movdqu          0x20($inp),$xt2
984          movdqu         $xt3,0x70($out)
985          lea            0x80($out),$out         # size optimization
986         movdqu          0x30($inp),$xt3
987         pxor            0x20(%rsp),$xt0
988         pxor            $xb2,$xt1
989         pxor            $xc2,$xt2
990         pxor            $xd2,$xt3
991
992          movdqu         $xt0,0x00($out)
993         movdqu          0x40($inp),$xt0
994          movdqu         $xt1,0x10($out)
995         movdqu          0x50($inp),$xt1
996          movdqu         $xt2,0x20($out)
997         movdqu          0x60($inp),$xt2
998          movdqu         $xt3,0x30($out)
999         movdqu          0x70($inp),$xt3
1000         lea             0x80($inp),$inp         # inp+=64*4
1001         pxor            0x30(%rsp),$xt0
1002         pxor            $xb3,$xt1
1003         pxor            $xc3,$xt2
1004         pxor            $xd3,$xt3
1005         movdqu          $xt0,0x40($out)
1006         movdqu          $xt1,0x50($out)
1007         movdqu          $xt2,0x60($out)
1008         movdqu          $xt3,0x70($out)
1009         lea             0x80($out),$out         # out+=64*4
1010
1011         sub             \$64*4,$len
1012         jnz             .Loop_outer4x
1013
1014         jmp             .Ldone4x
1015
1016 .Ltail4x:
1017         cmp             \$192,$len
1018         jae             .L192_or_more4x
1019         cmp             \$128,$len
1020         jae             .L128_or_more4x
1021         cmp             \$64,$len
1022         jae             .L64_or_more4x
1023
1024         #movdqa         0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1025         xor             %r10,%r10
1026         #movdqa         $xt0,0x00(%rsp)
1027         movdqa          $xb0,0x10(%rsp)
1028         movdqa          $xc0,0x20(%rsp)
1029         movdqa          $xd0,0x30(%rsp)
1030         jmp             .Loop_tail4x
1031
1032 .align  32
1033 .L64_or_more4x:
1034         movdqu          0x00($inp),$xt0         # xor with input
1035         movdqu          0x10($inp),$xt1
1036         movdqu          0x20($inp),$xt2
1037         movdqu          0x30($inp),$xt3
1038         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1039         pxor            $xb0,$xt1
1040         pxor            $xc0,$xt2
1041         pxor            $xd0,$xt3
1042         movdqu          $xt0,0x00($out)
1043         movdqu          $xt1,0x10($out)
1044         movdqu          $xt2,0x20($out)
1045         movdqu          $xt3,0x30($out)
1046         je              .Ldone4x
1047
1048         movdqa          0x10(%rsp),$xt0         # $xaN is offloaded, remember?
1049         lea             0x40($inp),$inp         # inp+=64*1
1050         xor             %r10,%r10
1051         movdqa          $xt0,0x00(%rsp)
1052         movdqa          $xb1,0x10(%rsp)
1053         lea             0x40($out),$out         # out+=64*1
1054         movdqa          $xc1,0x20(%rsp)
1055         sub             \$64,$len               # len-=64*1
1056         movdqa          $xd1,0x30(%rsp)
1057         jmp             .Loop_tail4x
1058
1059 .align  32
1060 .L128_or_more4x:
1061         movdqu          0x00($inp),$xt0         # xor with input
1062         movdqu          0x10($inp),$xt1
1063         movdqu          0x20($inp),$xt2
1064         movdqu          0x30($inp),$xt3
1065         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1066         pxor            $xb0,$xt1
1067         pxor            $xc0,$xt2
1068         pxor            $xd0,$xt3
1069
1070          movdqu         $xt0,0x00($out)
1071         movdqu          0x40($inp),$xt0
1072          movdqu         $xt1,0x10($out)
1073         movdqu          0x50($inp),$xt1
1074          movdqu         $xt2,0x20($out)
1075         movdqu          0x60($inp),$xt2
1076          movdqu         $xt3,0x30($out)
1077         movdqu          0x70($inp),$xt3
1078         pxor            0x10(%rsp),$xt0
1079         pxor            $xb1,$xt1
1080         pxor            $xc1,$xt2
1081         pxor            $xd1,$xt3
1082         movdqu          $xt0,0x40($out)
1083         movdqu          $xt1,0x50($out)
1084         movdqu          $xt2,0x60($out)
1085         movdqu          $xt3,0x70($out)
1086         je              .Ldone4x
1087
1088         movdqa          0x20(%rsp),$xt0         # $xaN is offloaded, remember?
1089         lea             0x80($inp),$inp         # inp+=64*2
1090         xor             %r10,%r10
1091         movdqa          $xt0,0x00(%rsp)
1092         movdqa          $xb2,0x10(%rsp)
1093         lea             0x80($out),$out         # out+=64*2
1094         movdqa          $xc2,0x20(%rsp)
1095         sub             \$128,$len              # len-=64*2
1096         movdqa          $xd2,0x30(%rsp)
1097         jmp             .Loop_tail4x
1098
1099 .align  32
1100 .L192_or_more4x:
1101         movdqu          0x00($inp),$xt0         # xor with input
1102         movdqu          0x10($inp),$xt1
1103         movdqu          0x20($inp),$xt2
1104         movdqu          0x30($inp),$xt3
1105         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1106         pxor            $xb0,$xt1
1107         pxor            $xc0,$xt2
1108         pxor            $xd0,$xt3
1109
1110          movdqu         $xt0,0x00($out)
1111         movdqu          0x40($inp),$xt0
1112          movdqu         $xt1,0x10($out)
1113         movdqu          0x50($inp),$xt1
1114          movdqu         $xt2,0x20($out)
1115         movdqu          0x60($inp),$xt2
1116          movdqu         $xt3,0x30($out)
1117         movdqu          0x70($inp),$xt3
1118         lea             0x80($inp),$inp         # size optimization
1119         pxor            0x10(%rsp),$xt0
1120         pxor            $xb1,$xt1
1121         pxor            $xc1,$xt2
1122         pxor            $xd1,$xt3
1123
1124          movdqu         $xt0,0x40($out)
1125         movdqu          0x00($inp),$xt0
1126          movdqu         $xt1,0x50($out)
1127         movdqu          0x10($inp),$xt1
1128          movdqu         $xt2,0x60($out)
1129         movdqu          0x20($inp),$xt2
1130          movdqu         $xt3,0x70($out)
1131          lea            0x80($out),$out         # size optimization
1132         movdqu          0x30($inp),$xt3
1133         pxor            0x20(%rsp),$xt0
1134         pxor            $xb2,$xt1
1135         pxor            $xc2,$xt2
1136         pxor            $xd2,$xt3
1137         movdqu          $xt0,0x00($out)
1138         movdqu          $xt1,0x10($out)
1139         movdqu          $xt2,0x20($out)
1140         movdqu          $xt3,0x30($out)
1141         je              .Ldone4x
1142
1143         movdqa          0x30(%rsp),$xt0         # $xaN is offloaded, remember?
1144         lea             0x40($inp),$inp         # inp+=64*3
1145         xor             %r10,%r10
1146         movdqa          $xt0,0x00(%rsp)
1147         movdqa          $xb3,0x10(%rsp)
1148         lea             0x40($out),$out         # out+=64*3
1149         movdqa          $xc3,0x20(%rsp)
1150         sub             \$192,$len              # len-=64*3
1151         movdqa          $xd3,0x30(%rsp)
1152
1153 .Loop_tail4x:
1154         movzb           ($inp,%r10),%eax
1155         movzb           (%rsp,%r10),%ecx
1156         lea             1(%r10),%r10
1157         xor             %ecx,%eax
1158         mov             %al,-1($out,%r10)
1159         dec             $len
1160         jnz             .Loop_tail4x
1161
1162 .Ldone4x:
1163 ___
1164 $code.=<<___    if ($win64);
1165         movaps          -0xa8(%r9),%xmm6
1166         movaps          -0x98(%r9),%xmm7
1167         movaps          -0x88(%r9),%xmm8
1168         movaps          -0x78(%r9),%xmm9
1169         movaps          -0x68(%r9),%xmm10
1170         movaps          -0x58(%r9),%xmm11
1171         movaps          -0x48(%r9),%xmm12
1172         movaps          -0x38(%r9),%xmm13
1173         movaps          -0x28(%r9),%xmm14
1174         movaps          -0x18(%r9),%xmm15
1175 ___
1176 $code.=<<___;
1177         lea             (%r9),%rsp
1178 .cfi_def_cfa_register   %rsp
1179 .L4x_epilogue:
1180         ret
1181 .cfi_endproc
1182 .size   ChaCha20_4x,.-ChaCha20_4x
1183 ___
1184 }
1185
1186 ########################################################################
1187 # XOP code path that handles all lengths.
1188 if ($avx) {
1189 # There is some "anomaly" observed depending on instruction size or
1190 # alignment. If you look closely at the code below, you'll notice that
1191 # the argument order sometimes varies. The order affects instruction
1192 # encoding by making it larger, and such fiddling gives a 5% performance
1193 # improvement. This is on FX-4100...
1194
1195 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1196     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1197 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1198          $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1199
1200 sub XOP_lane_ROUND {
1201 my ($a0,$b0,$c0,$d0)=@_;
1202 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1203 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1204 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1205 my @x=map("\"$_\"",@xx);
1206
1207         (
1208         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1209          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1210           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1211            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1212         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1213          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1214           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1215            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1216         "&vprotd        (@x[$d0],@x[$d0],16)",
1217          "&vprotd       (@x[$d1],@x[$d1],16)",
1218           "&vprotd      (@x[$d2],@x[$d2],16)",
1219            "&vprotd     (@x[$d3],@x[$d3],16)",
1220
1221         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1222          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1223           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1224            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1225         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1226          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1227           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1228            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1229         "&vprotd        (@x[$b0],@x[$b0],12)",
1230          "&vprotd       (@x[$b1],@x[$b1],12)",
1231           "&vprotd      (@x[$b2],@x[$b2],12)",
1232            "&vprotd     (@x[$b3],@x[$b3],12)",
1233
1234         "&vpaddd        (@x[$a0],@x[$b0],@x[$a0])",     # flip
1235          "&vpaddd       (@x[$a1],@x[$b1],@x[$a1])",     # flip
1236           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
1237            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
1238         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1239          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1240           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1241            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1242         "&vprotd        (@x[$d0],@x[$d0],8)",
1243          "&vprotd       (@x[$d1],@x[$d1],8)",
1244           "&vprotd      (@x[$d2],@x[$d2],8)",
1245            "&vprotd     (@x[$d3],@x[$d3],8)",
1246
1247         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1248          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1249           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1250            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1251         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1252          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1253           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1254            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1255         "&vprotd        (@x[$b0],@x[$b0],7)",
1256          "&vprotd       (@x[$b1],@x[$b1],7)",
1257           "&vprotd      (@x[$b2],@x[$b2],7)",
1258            "&vprotd     (@x[$b3],@x[$b3],7)"
1259         );
1260 }
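
# XOP's vprotd rotates all four dwords by an immediate in a single
# instruction, so neither the pshufb masks nor the pslld/psrld/por triplets
# of the SSSE3 path are needed; together with the non-destructive 3-operand
# forms this lets all sixteen state words live in %xmm0-15 (note the lack
# of "%nox" placeholders in @xx above).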
1261
1262 my $xframe = $win64 ? 0xa8 : 8;
1263
1264 $code.=<<___;
1265 .type   ChaCha20_4xop,\@function,5
1266 .align  32
1267 ChaCha20_4xop:
1268 .cfi_startproc
1269 .LChaCha20_4xop:
1270         mov             %rsp,%r9                # frame pointer
1271 .cfi_def_cfa_register   %r9
1272         sub             \$0x140+$xframe,%rsp
1273 ___
1274         ################ stack layout
1275         # +0x00         SIMD equivalent of @x[8-12]
1276         # ...
1277         # +0x40         constant copy of key[0-2] smashed by lanes
1278         # ...
1279         # +0x100        SIMD counters (with nonce smashed by lanes)
1280         # ...
1281         # +0x140
1282 $code.=<<___    if ($win64);
1283         movaps          %xmm6,-0xa8(%r9)
1284         movaps          %xmm7,-0x98(%r9)
1285         movaps          %xmm8,-0x88(%r9)
1286         movaps          %xmm9,-0x78(%r9)
1287         movaps          %xmm10,-0x68(%r9)
1288         movaps          %xmm11,-0x58(%r9)
1289         movaps          %xmm12,-0x48(%r9)
1290         movaps          %xmm13,-0x38(%r9)
1291         movaps          %xmm14,-0x28(%r9)
1292         movaps          %xmm15,-0x18(%r9)
1293 .L4xop_body:
1294 ___
1295 $code.=<<___;
1296         vzeroupper
1297
1298         vmovdqa         .Lsigma(%rip),$xa3      # key[0]
1299         vmovdqu         ($key),$xb3             # key[1]
1300         vmovdqu         16($key),$xt3           # key[2]
1301         vmovdqu         ($counter),$xd3         # key[3]
1302         lea             0x100(%rsp),%rcx        # size optimization
1303
1304         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1305         vpshufd         \$0x55,$xa3,$xa1
1306         vmovdqa         $xa0,0x40(%rsp)         # ... and offload
1307         vpshufd         \$0xaa,$xa3,$xa2
1308         vmovdqa         $xa1,0x50(%rsp)
1309         vpshufd         \$0xff,$xa3,$xa3
1310         vmovdqa         $xa2,0x60(%rsp)
1311         vmovdqa         $xa3,0x70(%rsp)
1312
1313         vpshufd         \$0x00,$xb3,$xb0
1314         vpshufd         \$0x55,$xb3,$xb1
1315         vmovdqa         $xb0,0x80-0x100(%rcx)
1316         vpshufd         \$0xaa,$xb3,$xb2
1317         vmovdqa         $xb1,0x90-0x100(%rcx)
1318         vpshufd         \$0xff,$xb3,$xb3
1319         vmovdqa         $xb2,0xa0-0x100(%rcx)
1320         vmovdqa         $xb3,0xb0-0x100(%rcx)
1321
1322         vpshufd         \$0x00,$xt3,$xt0        # "$xc0"
1323         vpshufd         \$0x55,$xt3,$xt1        # "$xc1"
1324         vmovdqa         $xt0,0xc0-0x100(%rcx)
1325         vpshufd         \$0xaa,$xt3,$xt2        # "$xc2"
1326         vmovdqa         $xt1,0xd0-0x100(%rcx)
1327         vpshufd         \$0xff,$xt3,$xt3        # "$xc3"
1328         vmovdqa         $xt2,0xe0-0x100(%rcx)
1329         vmovdqa         $xt3,0xf0-0x100(%rcx)
1330
1331         vpshufd         \$0x00,$xd3,$xd0
1332         vpshufd         \$0x55,$xd3,$xd1
1333         vpaddd          .Linc(%rip),$xd0,$xd0   # don't save counters yet
1334         vpshufd         \$0xaa,$xd3,$xd2
1335         vmovdqa         $xd1,0x110-0x100(%rcx)
1336         vpshufd         \$0xff,$xd3,$xd3
1337         vmovdqa         $xd2,0x120-0x100(%rcx)
1338         vmovdqa         $xd3,0x130-0x100(%rcx)
1339
1340         jmp             .Loop_enter4xop
1341
1342 .align  32
1343 .Loop_outer4xop:
1344         vmovdqa         0x40(%rsp),$xa0         # re-load smashed key
1345         vmovdqa         0x50(%rsp),$xa1
1346         vmovdqa         0x60(%rsp),$xa2
1347         vmovdqa         0x70(%rsp),$xa3
1348         vmovdqa         0x80-0x100(%rcx),$xb0
1349         vmovdqa         0x90-0x100(%rcx),$xb1
1350         vmovdqa         0xa0-0x100(%rcx),$xb2
1351         vmovdqa         0xb0-0x100(%rcx),$xb3
1352         vmovdqa         0xc0-0x100(%rcx),$xt0   # "$xc0"
1353         vmovdqa         0xd0-0x100(%rcx),$xt1   # "$xc1"
1354         vmovdqa         0xe0-0x100(%rcx),$xt2   # "$xc2"
1355         vmovdqa         0xf0-0x100(%rcx),$xt3   # "$xc3"
1356         vmovdqa         0x100-0x100(%rcx),$xd0
1357         vmovdqa         0x110-0x100(%rcx),$xd1
1358         vmovdqa         0x120-0x100(%rcx),$xd2
1359         vmovdqa         0x130-0x100(%rcx),$xd3
1360         vpaddd          .Lfour(%rip),$xd0,$xd0  # next SIMD counters
1361
1362 .Loop_enter4xop:
1363         mov             \$10,%eax
1364         vmovdqa         $xd0,0x100-0x100(%rcx)  # save SIMD counters
1365         jmp             .Loop4xop
1366
1367 .align  32
1368 .Loop4xop:
1369 ___
1370         foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1371         foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1372 $code.=<<___;
1373         dec             %eax
1374         jnz             .Loop4xop
1375
1376         vpaddd          0x40(%rsp),$xa0,$xa0    # accumulate key material
1377         vpaddd          0x50(%rsp),$xa1,$xa1
1378         vpaddd          0x60(%rsp),$xa2,$xa2
1379         vpaddd          0x70(%rsp),$xa3,$xa3
1380
1381         vmovdqa         $xt2,0x20(%rsp)         # offload $xc2,3
1382         vmovdqa         $xt3,0x30(%rsp)
1383
1384         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1385         vpunpckldq      $xa3,$xa2,$xt3
1386         vpunpckhdq      $xa1,$xa0,$xa0
1387         vpunpckhdq      $xa3,$xa2,$xa2
1388         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1389         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1390         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1391         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1392 ___
1393         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1394 $code.=<<___;
1395         vpaddd          0x80-0x100(%rcx),$xb0,$xb0
1396         vpaddd          0x90-0x100(%rcx),$xb1,$xb1
1397         vpaddd          0xa0-0x100(%rcx),$xb2,$xb2
1398         vpaddd          0xb0-0x100(%rcx),$xb3,$xb3
1399
1400         vmovdqa         $xa0,0x00(%rsp)         # offload $xa0,1
1401         vmovdqa         $xa1,0x10(%rsp)
1402         vmovdqa         0x20(%rsp),$xa0         # "xc2"
1403         vmovdqa         0x30(%rsp),$xa1         # "xc3"
1404
1405         vpunpckldq      $xb1,$xb0,$xt2
1406         vpunpckldq      $xb3,$xb2,$xt3
1407         vpunpckhdq      $xb1,$xb0,$xb0
1408         vpunpckhdq      $xb3,$xb2,$xb2
1409         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1410         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1411         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1412         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1413 ___
1414         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1415         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1416 $code.=<<___;
1417         vpaddd          0xc0-0x100(%rcx),$xc0,$xc0
1418         vpaddd          0xd0-0x100(%rcx),$xc1,$xc1
1419         vpaddd          0xe0-0x100(%rcx),$xc2,$xc2
1420         vpaddd          0xf0-0x100(%rcx),$xc3,$xc3
1421
1422         vpunpckldq      $xc1,$xc0,$xt2
1423         vpunpckldq      $xc3,$xc2,$xt3
1424         vpunpckhdq      $xc1,$xc0,$xc0
1425         vpunpckhdq      $xc3,$xc2,$xc2
1426         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1427         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1428         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1429         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1430 ___
1431         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1432 $code.=<<___;
1433         vpaddd          0x100-0x100(%rcx),$xd0,$xd0
1434         vpaddd          0x110-0x100(%rcx),$xd1,$xd1
1435         vpaddd          0x120-0x100(%rcx),$xd2,$xd2
1436         vpaddd          0x130-0x100(%rcx),$xd3,$xd3
1437
1438         vpunpckldq      $xd1,$xd0,$xt2
1439         vpunpckldq      $xd3,$xd2,$xt3
1440         vpunpckhdq      $xd1,$xd0,$xd0
1441         vpunpckhdq      $xd3,$xd2,$xd2
1442         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1443         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1444         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1445         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1446 ___
1447         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1448         ($xa0,$xa1)=($xt2,$xt3);
1449 $code.=<<___;
1450         vmovdqa         0x00(%rsp),$xa0         # restore $xa0,1
1451         vmovdqa         0x10(%rsp),$xa1
1452
1453         cmp             \$64*4,$len
1454         jb              .Ltail4xop
1455
1456         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1457         vpxor           0x10($inp),$xb0,$xb0
1458         vpxor           0x20($inp),$xc0,$xc0
1459         vpxor           0x30($inp),$xd0,$xd0
1460         vpxor           0x40($inp),$xa1,$xa1
1461         vpxor           0x50($inp),$xb1,$xb1
1462         vpxor           0x60($inp),$xc1,$xc1
1463         vpxor           0x70($inp),$xd1,$xd1
1464         lea             0x80($inp),$inp         # size optimization
1465         vpxor           0x00($inp),$xa2,$xa2
1466         vpxor           0x10($inp),$xb2,$xb2
1467         vpxor           0x20($inp),$xc2,$xc2
1468         vpxor           0x30($inp),$xd2,$xd2
1469         vpxor           0x40($inp),$xa3,$xa3
1470         vpxor           0x50($inp),$xb3,$xb3
1471         vpxor           0x60($inp),$xc3,$xc3
1472         vpxor           0x70($inp),$xd3,$xd3
1473         lea             0x80($inp),$inp         # inp+=64*4
1474
1475         vmovdqu         $xa0,0x00($out)
1476         vmovdqu         $xb0,0x10($out)
1477         vmovdqu         $xc0,0x20($out)
1478         vmovdqu         $xd0,0x30($out)
1479         vmovdqu         $xa1,0x40($out)
1480         vmovdqu         $xb1,0x50($out)
1481         vmovdqu         $xc1,0x60($out)
1482         vmovdqu         $xd1,0x70($out)
1483         lea             0x80($out),$out         # size optimization
1484         vmovdqu         $xa2,0x00($out)
1485         vmovdqu         $xb2,0x10($out)
1486         vmovdqu         $xc2,0x20($out)
1487         vmovdqu         $xd2,0x30($out)
1488         vmovdqu         $xa3,0x40($out)
1489         vmovdqu         $xb3,0x50($out)
1490         vmovdqu         $xc3,0x60($out)
1491         vmovdqu         $xd3,0x70($out)
1492         lea             0x80($out),$out         # out+=64*4
1493
1494         sub             \$64*4,$len
1495         jnz             .Loop_outer4xop
1496
1497         jmp             .Ldone4xop
1498
1499 .align  32
1500 .Ltail4xop:
1501         cmp             \$192,$len
1502         jae             .L192_or_more4xop
1503         cmp             \$128,$len
1504         jae             .L128_or_more4xop
1505         cmp             \$64,$len
1506         jae             .L64_or_more4xop
1507
1508         xor             %r10,%r10
1509         vmovdqa         $xa0,0x00(%rsp)
1510         vmovdqa         $xb0,0x10(%rsp)
1511         vmovdqa         $xc0,0x20(%rsp)
1512         vmovdqa         $xd0,0x30(%rsp)
1513         jmp             .Loop_tail4xop
1514
1515 .align  32
1516 .L64_or_more4xop:
1517         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1518         vpxor           0x10($inp),$xb0,$xb0
1519         vpxor           0x20($inp),$xc0,$xc0
1520         vpxor           0x30($inp),$xd0,$xd0
1521         vmovdqu         $xa0,0x00($out)
1522         vmovdqu         $xb0,0x10($out)
1523         vmovdqu         $xc0,0x20($out)
1524         vmovdqu         $xd0,0x30($out)
1525         je              .Ldone4xop
1526
1527         lea             0x40($inp),$inp         # inp+=64*1
1528         vmovdqa         $xa1,0x00(%rsp)
1529         xor             %r10,%r10
1530         vmovdqa         $xb1,0x10(%rsp)
1531         lea             0x40($out),$out         # out+=64*1
1532         vmovdqa         $xc1,0x20(%rsp)
1533         sub             \$64,$len               # len-=64*1
1534         vmovdqa         $xd1,0x30(%rsp)
1535         jmp             .Loop_tail4xop
1536
1537 .align  32
1538 .L128_or_more4xop:
1539         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1540         vpxor           0x10($inp),$xb0,$xb0
1541         vpxor           0x20($inp),$xc0,$xc0
1542         vpxor           0x30($inp),$xd0,$xd0
1543         vpxor           0x40($inp),$xa1,$xa1
1544         vpxor           0x50($inp),$xb1,$xb1
1545         vpxor           0x60($inp),$xc1,$xc1
1546         vpxor           0x70($inp),$xd1,$xd1
1547
1548         vmovdqu         $xa0,0x00($out)
1549         vmovdqu         $xb0,0x10($out)
1550         vmovdqu         $xc0,0x20($out)
1551         vmovdqu         $xd0,0x30($out)
1552         vmovdqu         $xa1,0x40($out)
1553         vmovdqu         $xb1,0x50($out)
1554         vmovdqu         $xc1,0x60($out)
1555         vmovdqu         $xd1,0x70($out)
1556         je              .Ldone4xop
1557
1558         lea             0x80($inp),$inp         # inp+=64*2
1559         vmovdqa         $xa2,0x00(%rsp)
1560         xor             %r10,%r10
1561         vmovdqa         $xb2,0x10(%rsp)
1562         lea             0x80($out),$out         # out+=64*2
1563         vmovdqa         $xc2,0x20(%rsp)
1564         sub             \$128,$len              # len-=64*2
1565         vmovdqa         $xd2,0x30(%rsp)
1566         jmp             .Loop_tail4xop
1567
1568 .align  32
1569 .L192_or_more4xop:
1570         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1571         vpxor           0x10($inp),$xb0,$xb0
1572         vpxor           0x20($inp),$xc0,$xc0
1573         vpxor           0x30($inp),$xd0,$xd0
1574         vpxor           0x40($inp),$xa1,$xa1
1575         vpxor           0x50($inp),$xb1,$xb1
1576         vpxor           0x60($inp),$xc1,$xc1
1577         vpxor           0x70($inp),$xd1,$xd1
1578         lea             0x80($inp),$inp         # size optimization
1579         vpxor           0x00($inp),$xa2,$xa2
1580         vpxor           0x10($inp),$xb2,$xb2
1581         vpxor           0x20($inp),$xc2,$xc2
1582         vpxor           0x30($inp),$xd2,$xd2
1583
1584         vmovdqu         $xa0,0x00($out)
1585         vmovdqu         $xb0,0x10($out)
1586         vmovdqu         $xc0,0x20($out)
1587         vmovdqu         $xd0,0x30($out)
1588         vmovdqu         $xa1,0x40($out)
1589         vmovdqu         $xb1,0x50($out)
1590         vmovdqu         $xc1,0x60($out)
1591         vmovdqu         $xd1,0x70($out)
1592         lea             0x80($out),$out         # size optimization
1593         vmovdqu         $xa2,0x00($out)
1594         vmovdqu         $xb2,0x10($out)
1595         vmovdqu         $xc2,0x20($out)
1596         vmovdqu         $xd2,0x30($out)
1597         je              .Ldone4xop
1598
1599         lea             0x40($inp),$inp         # inp+=64*3
1600         vmovdqa         $xa3,0x00(%rsp)
1601         xor             %r10,%r10
1602         vmovdqa         $xb3,0x10(%rsp)
1603         lea             0x40($out),$out         # out+=64*3
1604         vmovdqa         $xc3,0x20(%rsp)
1605         sub             \$192,$len              # len-=64*3
1606         vmovdqa         $xd3,0x30(%rsp)
1607
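        # Byte-by-byte tail: the 64-byte keystream block for the remaining
        # partial block was spilled to (%rsp) above; XOR it against the input
        # one byte at a time, with %r10 as the byte index.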
1608 .Loop_tail4xop:
1609         movzb           ($inp,%r10),%eax
1610         movzb           (%rsp,%r10),%ecx
1611         lea             1(%r10),%r10
1612         xor             %ecx,%eax
1613         mov             %al,-1($out,%r10)
1614         dec             $len
1615         jnz             .Loop_tail4xop
1616
1617 .Ldone4xop:
1618         vzeroupper
1619 ___
1620 $code.=<<___    if ($win64);
1621         movaps          -0xa8(%r9),%xmm6
1622         movaps          -0x98(%r9),%xmm7
1623         movaps          -0x88(%r9),%xmm8
1624         movaps          -0x78(%r9),%xmm9
1625         movaps          -0x68(%r9),%xmm10
1626         movaps          -0x58(%r9),%xmm11
1627         movaps          -0x48(%r9),%xmm12
1628         movaps          -0x38(%r9),%xmm13
1629         movaps          -0x28(%r9),%xmm14
1630         movaps          -0x18(%r9),%xmm15
1631 ___
1632 $code.=<<___;
1633         lea             (%r9),%rsp
1634 .cfi_def_cfa_register   %rsp
1635 .L4xop_epilogue:
1636         ret
1637 .cfi_endproc
1638 .size   ChaCha20_4xop,.-ChaCha20_4xop
1639 ___
1640 }
1641
1642 ########################################################################
1643 # AVX2 code path
1644 if ($avx>1) {
1645 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1646     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1647 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1648         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1649
1650 sub AVX2_lane_ROUND {
1651 my ($a0,$b0,$c0,$d0)=@_;
1652 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1653 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1654 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1655 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1656 my @x=map("\"$_\"",@xx);
1657
1658         # Consider the order in which variables are addressed by their
1659         # index:
1660         #
1661         #       a   b   c   d
1662         #
1663         #       0   4   8  12 < even round
1664         #       1   5   9  13
1665         #       2   6  10  14
1666         #       3   7  11  15
1667         #       0   5  10  15 < odd round
1668         #       1   6  11  12
1669         #       2   7   8  13
1670         #       3   4   9  14
1671         #
1672         # The 'a', 'b' and 'd' values are permanently allocated in
1673         # registers, @x[0..7,12..15], while the 'c' values are maintained
1674         # in memory. If you observe the 'c' column, you'll notice that
1675         # each pair of 'c's is invariant between rounds. This means that
1676         # we only have to reload them once per round, in the middle. This
1677         # is why you'll see a bunch of 'c' stores and loads in the middle,
1678         # but none at the beginning or end.
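        # As a worked example, the odd-round rows above are exactly what the
        # map() expressions at the top of this sub produce: ($_&~3)+(($_+1)&3)
        # keeps an index within its a/b/c/d group and advances its position in
        # the group by one (mod 4), so starting from &AVX2_lane_ROUND(0,5,10,15)
        #
        #       (0,5,10,15) -> (1,6,11,12) -> (2,7,8,13) -> (3,4,9,14)
        #
        # and likewise (0,4,8,12) -> (1,5,9,13) -> ... for the even round.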
1679
1680         (
1681         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1682         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1683         "&vpshufb       (@x[$d0],@x[$d0],$t1)",
1684          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1685          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1686          "&vpshufb      (@x[$d1],@x[$d1],$t1)",
1687
1688         "&vpaddd        ($xc,$xc,@x[$d0])",
1689         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1690         "&vpslld        ($t0,@x[$b0],12)",
1691         "&vpsrld        (@x[$b0],@x[$b0],20)",
1692         "&vpor          (@x[$b0],$t0,@x[$b0])",
1693         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1694          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1695          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1696          "&vpslld       ($t1,@x[$b1],12)",
1697          "&vpsrld       (@x[$b1],@x[$b1],20)",
1698          "&vpor         (@x[$b1],$t1,@x[$b1])",
1699
1700         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
1701         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1702         "&vpshufb       (@x[$d0],@x[$d0],$t0)",
1703          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
1704          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1705          "&vpshufb      (@x[$d1],@x[$d1],$t0)",
1706
1707         "&vpaddd        ($xc,$xc,@x[$d0])",
1708         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1709         "&vpslld        ($t1,@x[$b0],7)",
1710         "&vpsrld        (@x[$b0],@x[$b0],25)",
1711         "&vpor          (@x[$b0],$t1,@x[$b0])",
1712         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1713          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1714          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1715          "&vpslld       ($t0,@x[$b1],7)",
1716          "&vpsrld       (@x[$b1],@x[$b1],25)",
1717          "&vpor         (@x[$b1],$t0,@x[$b1])",
1718
1719         "&vmovdqa       (\"`32*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
1720          "&vmovdqa      (\"`32*($c1-8)`(%rsp)\",$xc_)",
1721         "&vmovdqa       ($xc,\"`32*($c2-8)`(%rsp)\")",
1722          "&vmovdqa      ($xc_,\"`32*($c3-8)`(%rsp)\")",
1723
1724         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1725         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1726         "&vpshufb       (@x[$d2],@x[$d2],$t1)",
1727          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1728          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1729          "&vpshufb      (@x[$d3],@x[$d3],$t1)",
1730
1731         "&vpaddd        ($xc,$xc,@x[$d2])",
1732         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1733         "&vpslld        ($t0,@x[$b2],12)",
1734         "&vpsrld        (@x[$b2],@x[$b2],20)",
1735         "&vpor          (@x[$b2],$t0,@x[$b2])",
1736         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1737          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1738          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1739          "&vpslld       ($t1,@x[$b3],12)",
1740          "&vpsrld       (@x[$b3],@x[$b3],20)",
1741          "&vpor         (@x[$b3],$t1,@x[$b3])",
1742
1743         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",
1744         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1745         "&vpshufb       (@x[$d2],@x[$d2],$t0)",
1746          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",
1747          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1748          "&vpshufb      (@x[$d3],@x[$d3],$t0)",
1749
1750         "&vpaddd        ($xc,$xc,@x[$d2])",
1751         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1752         "&vpslld        ($t1,@x[$b2],7)",
1753         "&vpsrld        (@x[$b2],@x[$b2],25)",
1754         "&vpor          (@x[$b2],$t1,@x[$b2])",
1755         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1756          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1757          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1758          "&vpslld       ($t0,@x[$b3],7)",
1759          "&vpsrld       (@x[$b3],@x[$b3],25)",
1760          "&vpor         (@x[$b3],$t0,@x[$b3])"
1761         );
1762 }
1763
1764 my $xframe = $win64 ? 0xa8 : 8;
1765
1766 $code.=<<___;
1767 .type   ChaCha20_8x,\@function,5
1768 .align  32
1769 ChaCha20_8x:
1770 .cfi_startproc
1771 .LChaCha20_8x:
1772         mov             %rsp,%r9                # frame register
1773 .cfi_def_cfa_register   %r9
1774         sub             \$0x280+$xframe,%rsp
1775         and             \$-32,%rsp
1776 ___
1777 $code.=<<___    if ($win64);
1778         movaps          %xmm6,-0xa8(%r9)
1779         movaps          %xmm7,-0x98(%r9)
1780         movaps          %xmm8,-0x88(%r9)
1781         movaps          %xmm9,-0x78(%r9)
1782         movaps          %xmm10,-0x68(%r9)
1783         movaps          %xmm11,-0x58(%r9)
1784         movaps          %xmm12,-0x48(%r9)
1785         movaps          %xmm13,-0x38(%r9)
1786         movaps          %xmm14,-0x28(%r9)
1787         movaps          %xmm15,-0x18(%r9)
1788 .L8x_body:
1789 ___
1790 $code.=<<___;
1791         vzeroupper
1792
1793         ################ stack layout
1794         # +0x00         SIMD equivalent of @x[8-11]
1795         # ...
1796         # +0x80         constant copy of key[0-2] smashed by lanes
1797         # ...
1798         # +0x200        SIMD counters (with nonce smashed by lanes)
1799         # ...
1800         # +0x280
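        # (the 'c' rows live at 32*(c-8)(%rsp) for c = 8..11, i.e. at
        #  0x00/0x20/0x40/0x60, which is where AVX2_lane_ROUND spills and
        #  reloads them)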
1801
1802         vbroadcasti128  .Lsigma(%rip),$xa3      # key[0]
1803         vbroadcasti128  ($key),$xb3             # key[1]
1804         vbroadcasti128  16($key),$xt3           # key[2]
1805         vbroadcasti128  ($counter),$xd3         # key[3]
1806         lea             0x100(%rsp),%rcx        # size optimization
1807         lea             0x200(%rsp),%rax        # size optimization
1808         lea             .Lrot16(%rip),%r10
1809         lea             .Lrot24(%rip),%r11
1810
1811         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1812         vpshufd         \$0x55,$xa3,$xa1
1813         vmovdqa         $xa0,0x80-0x100(%rcx)   # ... and offload
1814         vpshufd         \$0xaa,$xa3,$xa2
1815         vmovdqa         $xa1,0xa0-0x100(%rcx)
1816         vpshufd         \$0xff,$xa3,$xa3
1817         vmovdqa         $xa2,0xc0-0x100(%rcx)
1818         vmovdqa         $xa3,0xe0-0x100(%rcx)
1819
1820         vpshufd         \$0x00,$xb3,$xb0
1821         vpshufd         \$0x55,$xb3,$xb1
1822         vmovdqa         $xb0,0x100-0x100(%rcx)
1823         vpshufd         \$0xaa,$xb3,$xb2
1824         vmovdqa         $xb1,0x120-0x100(%rcx)
1825         vpshufd         \$0xff,$xb3,$xb3
1826         vmovdqa         $xb2,0x140-0x100(%rcx)
1827         vmovdqa         $xb3,0x160-0x100(%rcx)
1828
1829         vpshufd         \$0x00,$xt3,$xt0        # "xc0"
1830         vpshufd         \$0x55,$xt3,$xt1        # "xc1"
1831         vmovdqa         $xt0,0x180-0x200(%rax)
1832         vpshufd         \$0xaa,$xt3,$xt2        # "xc2"
1833         vmovdqa         $xt1,0x1a0-0x200(%rax)
1834         vpshufd         \$0xff,$xt3,$xt3        # "xc3"
1835         vmovdqa         $xt2,0x1c0-0x200(%rax)
1836         vmovdqa         $xt3,0x1e0-0x200(%rax)
1837
1838         vpshufd         \$0x00,$xd3,$xd0
1839         vpshufd         \$0x55,$xd3,$xd1
1840         vpaddd          .Lincy(%rip),$xd0,$xd0  # don't save counters yet
1841         vpshufd         \$0xaa,$xd3,$xd2
1842         vmovdqa         $xd1,0x220-0x200(%rax)
1843         vpshufd         \$0xff,$xd3,$xd3
1844         vmovdqa         $xd2,0x240-0x200(%rax)
1845         vmovdqa         $xd3,0x260-0x200(%rax)
1846
1847         jmp             .Loop_enter8x
1848
1849 .align  32
1850 .Loop_outer8x:
1851         vmovdqa         0x80-0x100(%rcx),$xa0   # re-load smashed key
1852         vmovdqa         0xa0-0x100(%rcx),$xa1
1853         vmovdqa         0xc0-0x100(%rcx),$xa2
1854         vmovdqa         0xe0-0x100(%rcx),$xa3
1855         vmovdqa         0x100-0x100(%rcx),$xb0
1856         vmovdqa         0x120-0x100(%rcx),$xb1
1857         vmovdqa         0x140-0x100(%rcx),$xb2
1858         vmovdqa         0x160-0x100(%rcx),$xb3
1859         vmovdqa         0x180-0x200(%rax),$xt0  # "xc0"
1860         vmovdqa         0x1a0-0x200(%rax),$xt1  # "xc1"
1861         vmovdqa         0x1c0-0x200(%rax),$xt2  # "xc2"
1862         vmovdqa         0x1e0-0x200(%rax),$xt3  # "xc3"
1863         vmovdqa         0x200-0x200(%rax),$xd0
1864         vmovdqa         0x220-0x200(%rax),$xd1
1865         vmovdqa         0x240-0x200(%rax),$xd2
1866         vmovdqa         0x260-0x200(%rax),$xd3
1867         vpaddd          .Leight(%rip),$xd0,$xd0 # next SIMD counters
1868
1869 .Loop_enter8x:
1870         vmovdqa         $xt2,0x40(%rsp)         # SIMD equivalent of "@x[10]"
1871         vmovdqa         $xt3,0x60(%rsp)         # SIMD equivalent of "@x[11]"
1872         vbroadcasti128  (%r10),$xt3
1873         vmovdqa         $xd0,0x200-0x200(%rax)  # save SIMD counters
1874         mov             \$10,%eax
1875         jmp             .Loop8x
1876
1877 .align  32
1878 .Loop8x:
1879 ___
1880         foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1881         foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
1882 $code.=<<___;
1883         dec             %eax
1884         jnz             .Loop8x
1885
1886         lea             0x200(%rsp),%rax        # size optimization
1887         vpaddd          0x80-0x100(%rcx),$xa0,$xa0      # accumulate key
1888         vpaddd          0xa0-0x100(%rcx),$xa1,$xa1
1889         vpaddd          0xc0-0x100(%rcx),$xa2,$xa2
1890         vpaddd          0xe0-0x100(%rcx),$xa3,$xa3
1891
1892         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1893         vpunpckldq      $xa3,$xa2,$xt3
1894         vpunpckhdq      $xa1,$xa0,$xa0
1895         vpunpckhdq      $xa3,$xa2,$xa2
1896         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1897         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1898         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1899         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1900 ___
1901         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1902 $code.=<<___;
1903         vpaddd          0x100-0x100(%rcx),$xb0,$xb0
1904         vpaddd          0x120-0x100(%rcx),$xb1,$xb1
1905         vpaddd          0x140-0x100(%rcx),$xb2,$xb2
1906         vpaddd          0x160-0x100(%rcx),$xb3,$xb3
1907
1908         vpunpckldq      $xb1,$xb0,$xt2
1909         vpunpckldq      $xb3,$xb2,$xt3
1910         vpunpckhdq      $xb1,$xb0,$xb0
1911         vpunpckhdq      $xb3,$xb2,$xb2
1912         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1913         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1914         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1915         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1916 ___
1917         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1918 $code.=<<___;
1919         vperm2i128      \$0x20,$xb0,$xa0,$xt3   # "de-interlace" further
1920         vperm2i128      \$0x31,$xb0,$xa0,$xb0
1921         vperm2i128      \$0x20,$xb1,$xa1,$xa0
1922         vperm2i128      \$0x31,$xb1,$xa1,$xb1
1923         vperm2i128      \$0x20,$xb2,$xa2,$xa1
1924         vperm2i128      \$0x31,$xb2,$xa2,$xb2
1925         vperm2i128      \$0x20,$xb3,$xa3,$xa2
1926         vperm2i128      \$0x31,$xb3,$xa3,$xb3
1927 ___
1928         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1929         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1930 $code.=<<___;
1931         vmovdqa         $xa0,0x00(%rsp)         # offload $xaN
1932         vmovdqa         $xa1,0x20(%rsp)
1933         vmovdqa         0x40(%rsp),$xc2         # $xa0
1934         vmovdqa         0x60(%rsp),$xc3         # $xa1
1935
1936         vpaddd          0x180-0x200(%rax),$xc0,$xc0
1937         vpaddd          0x1a0-0x200(%rax),$xc1,$xc1
1938         vpaddd          0x1c0-0x200(%rax),$xc2,$xc2
1939         vpaddd          0x1e0-0x200(%rax),$xc3,$xc3
1940
1941         vpunpckldq      $xc1,$xc0,$xt2
1942         vpunpckldq      $xc3,$xc2,$xt3
1943         vpunpckhdq      $xc1,$xc0,$xc0
1944         vpunpckhdq      $xc3,$xc2,$xc2
1945         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1946         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1947         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1948         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1949 ___
1950         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1951 $code.=<<___;
1952         vpaddd          0x200-0x200(%rax),$xd0,$xd0
1953         vpaddd          0x220-0x200(%rax),$xd1,$xd1
1954         vpaddd          0x240-0x200(%rax),$xd2,$xd2
1955         vpaddd          0x260-0x200(%rax),$xd3,$xd3
1956
1957         vpunpckldq      $xd1,$xd0,$xt2
1958         vpunpckldq      $xd3,$xd2,$xt3
1959         vpunpckhdq      $xd1,$xd0,$xd0
1960         vpunpckhdq      $xd3,$xd2,$xd2
1961         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1962         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1963         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1964         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1965 ___
1966         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1967 $code.=<<___;
1968         vperm2i128      \$0x20,$xd0,$xc0,$xt3   # "de-interlace" further
1969         vperm2i128      \$0x31,$xd0,$xc0,$xd0
1970         vperm2i128      \$0x20,$xd1,$xc1,$xc0
1971         vperm2i128      \$0x31,$xd1,$xc1,$xd1
1972         vperm2i128      \$0x20,$xd2,$xc2,$xc1
1973         vperm2i128      \$0x31,$xd2,$xc2,$xd2
1974         vperm2i128      \$0x20,$xd3,$xc3,$xc2
1975         vperm2i128      \$0x31,$xd3,$xc3,$xd3
1976 ___
1977         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1978         ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1979         ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1980         ($xa0,$xa1)=($xt2,$xt3);
1981 $code.=<<___;
1982         vmovdqa         0x00(%rsp),$xa0         # $xaN was offloaded, remember?
1983         vmovdqa         0x20(%rsp),$xa1
1984
1985         cmp             \$64*8,$len
1986         jb              .Ltail8x
1987
1988         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1989         vpxor           0x20($inp),$xb0,$xb0
1990         vpxor           0x40($inp),$xc0,$xc0
1991         vpxor           0x60($inp),$xd0,$xd0
1992         lea             0x80($inp),$inp         # size optimization
1993         vmovdqu         $xa0,0x00($out)
1994         vmovdqu         $xb0,0x20($out)
1995         vmovdqu         $xc0,0x40($out)
1996         vmovdqu         $xd0,0x60($out)
1997         lea             0x80($out),$out         # size optimization
1998
1999         vpxor           0x00($inp),$xa1,$xa1
2000         vpxor           0x20($inp),$xb1,$xb1
2001         vpxor           0x40($inp),$xc1,$xc1
2002         vpxor           0x60($inp),$xd1,$xd1
2003         lea             0x80($inp),$inp         # size optimization
2004         vmovdqu         $xa1,0x00($out)
2005         vmovdqu         $xb1,0x20($out)
2006         vmovdqu         $xc1,0x40($out)
2007         vmovdqu         $xd1,0x60($out)
2008         lea             0x80($out),$out         # size optimization
2009
2010         vpxor           0x00($inp),$xa2,$xa2
2011         vpxor           0x20($inp),$xb2,$xb2
2012         vpxor           0x40($inp),$xc2,$xc2
2013         vpxor           0x60($inp),$xd2,$xd2
2014         lea             0x80($inp),$inp         # size optimization
2015         vmovdqu         $xa2,0x00($out)
2016         vmovdqu         $xb2,0x20($out)
2017         vmovdqu         $xc2,0x40($out)
2018         vmovdqu         $xd2,0x60($out)
2019         lea             0x80($out),$out         # size optimization
2020
2021         vpxor           0x00($inp),$xa3,$xa3
2022         vpxor           0x20($inp),$xb3,$xb3
2023         vpxor           0x40($inp),$xc3,$xc3
2024         vpxor           0x60($inp),$xd3,$xd3
2025         lea             0x80($inp),$inp         # size optimization
2026         vmovdqu         $xa3,0x00($out)
2027         vmovdqu         $xb3,0x20($out)
2028         vmovdqu         $xc3,0x40($out)
2029         vmovdqu         $xd3,0x60($out)
2030         lea             0x80($out),$out         # size optimization
2031
2032         sub             \$64*8,$len
2033         jnz             .Loop_outer8x
2034
2035         jmp             .Ldone8x
2036
2037 .Ltail8x:
2038         cmp             \$448,$len
2039         jae             .L448_or_more8x
2040         cmp             \$384,$len
2041         jae             .L384_or_more8x
2042         cmp             \$320,$len
2043         jae             .L320_or_more8x
2044         cmp             \$256,$len
2045         jae             .L256_or_more8x
2046         cmp             \$192,$len
2047         jae             .L192_or_more8x
2048         cmp             \$128,$len
2049         jae             .L128_or_more8x
2050         cmp             \$64,$len
2051         jae             .L64_or_more8x
2052
2053         xor             %r10,%r10
2054         vmovdqa         $xa0,0x00(%rsp)
2055         vmovdqa         $xb0,0x20(%rsp)
2056         jmp             .Loop_tail8x
2057
2058 .align  32
2059 .L64_or_more8x:
2060         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2061         vpxor           0x20($inp),$xb0,$xb0
2062         vmovdqu         $xa0,0x00($out)
2063         vmovdqu         $xb0,0x20($out)
2064         je              .Ldone8x
2065
2066         lea             0x40($inp),$inp         # inp+=64*1
2067         xor             %r10,%r10
2068         vmovdqa         $xc0,0x00(%rsp)
2069         lea             0x40($out),$out         # out+=64*1
2070         sub             \$64,$len               # len-=64*1
2071         vmovdqa         $xd0,0x20(%rsp)
2072         jmp             .Loop_tail8x
2073
2074 .align  32
2075 .L128_or_more8x:
2076         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2077         vpxor           0x20($inp),$xb0,$xb0
2078         vpxor           0x40($inp),$xc0,$xc0
2079         vpxor           0x60($inp),$xd0,$xd0
2080         vmovdqu         $xa0,0x00($out)
2081         vmovdqu         $xb0,0x20($out)
2082         vmovdqu         $xc0,0x40($out)
2083         vmovdqu         $xd0,0x60($out)
2084         je              .Ldone8x
2085
2086         lea             0x80($inp),$inp         # inp+=64*2
2087         xor             %r10,%r10
2088         vmovdqa         $xa1,0x00(%rsp)
2089         lea             0x80($out),$out         # out+=64*2
2090         sub             \$128,$len              # len-=64*2
2091         vmovdqa         $xb1,0x20(%rsp)
2092         jmp             .Loop_tail8x
2093
2094 .align  32
2095 .L192_or_more8x:
2096         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2097         vpxor           0x20($inp),$xb0,$xb0
2098         vpxor           0x40($inp),$xc0,$xc0
2099         vpxor           0x60($inp),$xd0,$xd0
2100         vpxor           0x80($inp),$xa1,$xa1
2101         vpxor           0xa0($inp),$xb1,$xb1
2102         vmovdqu         $xa0,0x00($out)
2103         vmovdqu         $xb0,0x20($out)
2104         vmovdqu         $xc0,0x40($out)
2105         vmovdqu         $xd0,0x60($out)
2106         vmovdqu         $xa1,0x80($out)
2107         vmovdqu         $xb1,0xa0($out)
2108         je              .Ldone8x
2109
2110         lea             0xc0($inp),$inp         # inp+=64*3
2111         xor             %r10,%r10
2112         vmovdqa         $xc1,0x00(%rsp)
2113         lea             0xc0($out),$out         # out+=64*3
2114         sub             \$192,$len              # len-=64*3
2115         vmovdqa         $xd1,0x20(%rsp)
2116         jmp             .Loop_tail8x
2117
2118 .align  32
2119 .L256_or_more8x:
2120         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2121         vpxor           0x20($inp),$xb0,$xb0
2122         vpxor           0x40($inp),$xc0,$xc0
2123         vpxor           0x60($inp),$xd0,$xd0
2124         vpxor           0x80($inp),$xa1,$xa1
2125         vpxor           0xa0($inp),$xb1,$xb1
2126         vpxor           0xc0($inp),$xc1,$xc1
2127         vpxor           0xe0($inp),$xd1,$xd1
2128         vmovdqu         $xa0,0x00($out)
2129         vmovdqu         $xb0,0x20($out)
2130         vmovdqu         $xc0,0x40($out)
2131         vmovdqu         $xd0,0x60($out)
2132         vmovdqu         $xa1,0x80($out)
2133         vmovdqu         $xb1,0xa0($out)
2134         vmovdqu         $xc1,0xc0($out)
2135         vmovdqu         $xd1,0xe0($out)
2136         je              .Ldone8x
2137
2138         lea             0x100($inp),$inp        # inp+=64*4
2139         xor             %r10,%r10
2140         vmovdqa         $xa2,0x00(%rsp)
2141         lea             0x100($out),$out        # out+=64*4
2142         sub             \$256,$len              # len-=64*4
2143         vmovdqa         $xb2,0x20(%rsp)
2144         jmp             .Loop_tail8x
2145
2146 .align  32
2147 .L320_or_more8x:
2148         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2149         vpxor           0x20($inp),$xb0,$xb0
2150         vpxor           0x40($inp),$xc0,$xc0
2151         vpxor           0x60($inp),$xd0,$xd0
2152         vpxor           0x80($inp),$xa1,$xa1
2153         vpxor           0xa0($inp),$xb1,$xb1
2154         vpxor           0xc0($inp),$xc1,$xc1
2155         vpxor           0xe0($inp),$xd1,$xd1
2156         vpxor           0x100($inp),$xa2,$xa2
2157         vpxor           0x120($inp),$xb2,$xb2
2158         vmovdqu         $xa0,0x00($out)
2159         vmovdqu         $xb0,0x20($out)
2160         vmovdqu         $xc0,0x40($out)
2161         vmovdqu         $xd0,0x60($out)
2162         vmovdqu         $xa1,0x80($out)
2163         vmovdqu         $xb1,0xa0($out)
2164         vmovdqu         $xc1,0xc0($out)
2165         vmovdqu         $xd1,0xe0($out)
2166         vmovdqu         $xa2,0x100($out)
2167         vmovdqu         $xb2,0x120($out)
2168         je              .Ldone8x
2169
2170         lea             0x140($inp),$inp        # inp+=64*5
2171         xor             %r10,%r10
2172         vmovdqa         $xc2,0x00(%rsp)
2173         lea             0x140($out),$out        # out+=64*5
2174         sub             \$320,$len              # len-=64*5
2175         vmovdqa         $xd2,0x20(%rsp)
2176         jmp             .Loop_tail8x
2177
2178 .align  32
2179 .L384_or_more8x:
2180         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2181         vpxor           0x20($inp),$xb0,$xb0
2182         vpxor           0x40($inp),$xc0,$xc0
2183         vpxor           0x60($inp),$xd0,$xd0
2184         vpxor           0x80($inp),$xa1,$xa1
2185         vpxor           0xa0($inp),$xb1,$xb1
2186         vpxor           0xc0($inp),$xc1,$xc1
2187         vpxor           0xe0($inp),$xd1,$xd1
2188         vpxor           0x100($inp),$xa2,$xa2
2189         vpxor           0x120($inp),$xb2,$xb2
2190         vpxor           0x140($inp),$xc2,$xc2
2191         vpxor           0x160($inp),$xd2,$xd2
2192         vmovdqu         $xa0,0x00($out)
2193         vmovdqu         $xb0,0x20($out)
2194         vmovdqu         $xc0,0x40($out)
2195         vmovdqu         $xd0,0x60($out)
2196         vmovdqu         $xa1,0x80($out)
2197         vmovdqu         $xb1,0xa0($out)
2198         vmovdqu         $xc1,0xc0($out)
2199         vmovdqu         $xd1,0xe0($out)
2200         vmovdqu         $xa2,0x100($out)
2201         vmovdqu         $xb2,0x120($out)
2202         vmovdqu         $xc2,0x140($out)
2203         vmovdqu         $xd2,0x160($out)
2204         je              .Ldone8x
2205
2206         lea             0x180($inp),$inp        # inp+=64*6
2207         xor             %r10,%r10
2208         vmovdqa         $xa3,0x00(%rsp)
2209         lea             0x180($out),$out        # out+=64*6
2210         sub             \$384,$len              # len-=64*6
2211         vmovdqa         $xb3,0x20(%rsp)
2212         jmp             .Loop_tail8x
2213
2214 .align  32
2215 .L448_or_more8x:
2216         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2217         vpxor           0x20($inp),$xb0,$xb0
2218         vpxor           0x40($inp),$xc0,$xc0
2219         vpxor           0x60($inp),$xd0,$xd0
2220         vpxor           0x80($inp),$xa1,$xa1
2221         vpxor           0xa0($inp),$xb1,$xb1
2222         vpxor           0xc0($inp),$xc1,$xc1
2223         vpxor           0xe0($inp),$xd1,$xd1
2224         vpxor           0x100($inp),$xa2,$xa2
2225         vpxor           0x120($inp),$xb2,$xb2
2226         vpxor           0x140($inp),$xc2,$xc2
2227         vpxor           0x160($inp),$xd2,$xd2
2228         vpxor           0x180($inp),$xa3,$xa3
2229         vpxor           0x1a0($inp),$xb3,$xb3
2230         vmovdqu         $xa0,0x00($out)
2231         vmovdqu         $xb0,0x20($out)
2232         vmovdqu         $xc0,0x40($out)
2233         vmovdqu         $xd0,0x60($out)
2234         vmovdqu         $xa1,0x80($out)
2235         vmovdqu         $xb1,0xa0($out)
2236         vmovdqu         $xc1,0xc0($out)
2237         vmovdqu         $xd1,0xe0($out)
2238         vmovdqu         $xa2,0x100($out)
2239         vmovdqu         $xb2,0x120($out)
2240         vmovdqu         $xc2,0x140($out)
2241         vmovdqu         $xd2,0x160($out)
2242         vmovdqu         $xa3,0x180($out)
2243         vmovdqu         $xb3,0x1a0($out)
2244         je              .Ldone8x
2245
2246         lea             0x1c0($inp),$inp        # inp+=64*7
2247         xor             %r10,%r10
2248         vmovdqa         $xc3,0x00(%rsp)
2249         lea             0x1c0($out),$out        # out+=64*7
2250         sub             \$448,$len              # len-=64*7
2251         vmovdqa         $xd3,0x20(%rsp)
2252
2253 .Loop_tail8x:
2254         movzb           ($inp,%r10),%eax
2255         movzb           (%rsp,%r10),%ecx
2256         lea             1(%r10),%r10
2257         xor             %ecx,%eax
2258         mov             %al,-1($out,%r10)
2259         dec             $len
2260         jnz             .Loop_tail8x
2261
2262 .Ldone8x:
2263         vzeroall
2264 ___
2265 $code.=<<___    if ($win64);
2266         movaps          -0xa8(%r9),%xmm6
2267         movaps          -0x98(%r9),%xmm7
2268         movaps          -0x88(%r9),%xmm8
2269         movaps          -0x78(%r9),%xmm9
2270         movaps          -0x68(%r9),%xmm10
2271         movaps          -0x58(%r9),%xmm11
2272         movaps          -0x48(%r9),%xmm12
2273         movaps          -0x38(%r9),%xmm13
2274         movaps          -0x28(%r9),%xmm14
2275         movaps          -0x18(%r9),%xmm15
2276 ___
2277 $code.=<<___;
2278         lea             (%r9),%rsp
2279 .cfi_def_cfa_register   %rsp
2280 .L8x_epilogue:
2281         ret
2282 .cfi_endproc
2283 .size   ChaCha20_8x,.-ChaCha20_8x
2284 ___
2285 }
2286
2287 ########################################################################
2288 # AVX512 code paths
2289 if ($avx>2) {
2290 # This one handles shorter inputs, up to 512 bytes; anything longer is
2290 # handed off to ChaCha20_16x below...
2291
2292 my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
2293 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
2294
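# The textbook ChaCha quarter-round (a+=b; d^=a; d<<<=16;  c+=d; b^=c;
# b<<<=12;  a+=b; d^=a; d<<<=8;  c+=d; b^=c; b<<<=7), vectorized over the
# four 128-bit lanes of each zmm register (one block per lane), with the
# AVX512 vprold rotate replacing the vpslld/vpsrld/vpor sequences used in
# the SSE/AVX2 paths.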
2295 sub AVX512ROUND {       # critical path is 14 "SIMD ticks" per round
2296         &vpaddd ($a,$a,$b);
2297         &vpxord ($d,$d,$a);
2298         &vprold ($d,$d,16);
2299
2300         &vpaddd ($c,$c,$d);
2301         &vpxord ($b,$b,$c);
2302         &vprold ($b,$b,12);
2303
2304         &vpaddd ($a,$a,$b);
2305         &vpxord ($d,$d,$a);
2306         &vprold ($d,$d,8);
2307
2308         &vpaddd ($c,$c,$d);
2309         &vpxord ($b,$b,$c);
2310         &vprold ($b,$b,7);
2311 }
2312
2313 my $xframe = $win64 ? 32+8 : 8;
2314
2315 $code.=<<___;
2316 .type   ChaCha20_avx512,\@function,5
2317 .align  32
2318 ChaCha20_avx512:
2319 .cfi_startproc
2320 .LChaCha20_avx512:
2321         mov     %rsp,%r9                # frame pointer
2322 .cfi_def_cfa_register   %r9
2323         cmp     \$512,$len
2324         ja      .LChaCha20_16x
2325
2326         sub     \$64+$xframe,%rsp
2327 ___
2328 $code.=<<___    if ($win64);
2329         movaps  %xmm6,-0x28(%r9)
2330         movaps  %xmm7,-0x18(%r9)
2331 .Lavx512_body:
2332 ___
2333 $code.=<<___;
2334         vbroadcasti32x4 .Lsigma(%rip),$a
2335         vbroadcasti32x4 ($key),$b
2336         vbroadcasti32x4 16($key),$c
2337         vbroadcasti32x4 ($counter),$d
2338
2339         vmovdqa32       $a,$a_
2340         vmovdqa32       $b,$b_
2341         vmovdqa32       $c,$c_
2342         vpaddd          .Lzeroz(%rip),$d,$d
2343         vmovdqa32       .Lfourz(%rip),$fourz
2344         mov             \$10,$counter   # reuse $counter
2345         vmovdqa32       $d,$d_
2346         jmp             .Loop_avx512
2347
2348 .align  16
2349 .Loop_outer_avx512:
2350         vmovdqa32       $a_,$a
2351         vmovdqa32       $b_,$b
2352         vmovdqa32       $c_,$c
2353         vpaddd          $fourz,$d_,$d
2354         mov             \$10,$counter
2355         vmovdqa32       $d,$d_
2356         jmp             .Loop_avx512
2357
2358 .align  32
2359 .Loop_avx512:
2360 ___
2361         &AVX512ROUND();
2362         &vpshufd        ($c,$c,0b01001110);
2363         &vpshufd        ($b,$b,0b00111001);
2364         &vpshufd        ($d,$d,0b10010011);
2365
2366         &AVX512ROUND();
2367         &vpshufd        ($c,$c,0b01001110);
2368         &vpshufd        ($b,$b,0b10010011);
2369         &vpshufd        ($d,$d,0b00111001);
2370
2371         &dec            ($counter);
2372         &jnz            (".Loop_avx512");
2373
2374 $code.=<<___;
2375         vpaddd          $a_,$a,$a
2376         vpaddd          $b_,$b,$b
2377         vpaddd          $c_,$c,$c
2378         vpaddd          $d_,$d,$d
2379
2380         sub             \$64,$len
2381         jb              .Ltail64_avx512
2382
2383         vpxor           0x00($inp),%x#$a,$t0    # xor with input
2384         vpxor           0x10($inp),%x#$b,$t1
2385         vpxor           0x20($inp),%x#$c,$t2
2386         vpxor           0x30($inp),%x#$d,$t3
2387         lea             0x40($inp),$inp         # inp+=64
2388
2389         vmovdqu         $t0,0x00($out)          # write output
2390         vmovdqu         $t1,0x10($out)
2391         vmovdqu         $t2,0x20($out)
2392         vmovdqu         $t3,0x30($out)
2393         lea             0x40($out),$out         # out+=64
2394
2395         jz              .Ldone_avx512
2396
2397         vextracti32x4   \$1,$a,$t0
2398         vextracti32x4   \$1,$b,$t1
2399         vextracti32x4   \$1,$c,$t2
2400         vextracti32x4   \$1,$d,$t3
2401
2402         sub             \$64,$len
2403         jb              .Ltail_avx512
2404
2405         vpxor           0x00($inp),$t0,$t0      # xor with input
2406         vpxor           0x10($inp),$t1,$t1
2407         vpxor           0x20($inp),$t2,$t2
2408         vpxor           0x30($inp),$t3,$t3
2409         lea             0x40($inp),$inp         # inp+=64
2410
2411         vmovdqu         $t0,0x00($out)          # write output
2412         vmovdqu         $t1,0x10($out)
2413         vmovdqu         $t2,0x20($out)
2414         vmovdqu         $t3,0x30($out)
2415         lea             0x40($out),$out         # out+=64
2416
2417         jz              .Ldone_avx512
2418
2419         vextracti32x4   \$2,$a,$t0
2420         vextracti32x4   \$2,$b,$t1
2421         vextracti32x4   \$2,$c,$t2
2422         vextracti32x4   \$2,$d,$t3
2423
2424         sub             \$64,$len
2425         jb              .Ltail_avx512
2426
2427         vpxor           0x00($inp),$t0,$t0      # xor with input
2428         vpxor           0x10($inp),$t1,$t1
2429         vpxor           0x20($inp),$t2,$t2
2430         vpxor           0x30($inp),$t3,$t3
2431         lea             0x40($inp),$inp         # inp+=64
2432
2433         vmovdqu         $t0,0x00($out)          # write output
2434         vmovdqu         $t1,0x10($out)
2435         vmovdqu         $t2,0x20($out)
2436         vmovdqu         $t3,0x30($out)
2437         lea             0x40($out),$out         # out+=64
2438
2439         jz              .Ldone_avx512
2440
2441         vextracti32x4   \$3,$a,$t0
2442         vextracti32x4   \$3,$b,$t1
2443         vextracti32x4   \$3,$c,$t2
2444         vextracti32x4   \$3,$d,$t3
2445
2446         sub             \$64,$len
2447         jb              .Ltail_avx512
2448
2449         vpxor           0x00($inp),$t0,$t0      # xor with input
2450         vpxor           0x10($inp),$t1,$t1
2451         vpxor           0x20($inp),$t2,$t2
2452         vpxor           0x30($inp),$t3,$t3
2453         lea             0x40($inp),$inp         # inp+=64
2454
2455         vmovdqu         $t0,0x00($out)          # write output
2456         vmovdqu         $t1,0x10($out)
2457         vmovdqu         $t2,0x20($out)
2458         vmovdqu         $t3,0x30($out)
2459         lea             0x40($out),$out         # out+=64
2460
2461         jnz             .Loop_outer_avx512
2462
2463         jmp             .Ldone_avx512
2464
2465 .align  16
2466 .Ltail64_avx512:
2467         vmovdqa         %x#$a,0x00(%rsp)
2468         vmovdqa         %x#$b,0x10(%rsp)
2469         vmovdqa         %x#$c,0x20(%rsp)
2470         vmovdqa         %x#$d,0x30(%rsp)
2471         add             \$64,$len
2472         jmp             .Loop_tail_avx512
2473
2474 .align  16
2475 .Ltail_avx512:
2476         vmovdqa         $t0,0x00(%rsp)
2477         vmovdqa         $t1,0x10(%rsp)
2478         vmovdqa         $t2,0x20(%rsp)
2479         vmovdqa         $t3,0x30(%rsp)
2480         add             \$64,$len
2481
2482 .Loop_tail_avx512:
2483         movzb           ($inp,$counter),%eax
2484         movzb           (%rsp,$counter),%ecx
2485         lea             1($counter),$counter
2486         xor             %ecx,%eax
2487         mov             %al,-1($out,$counter)
2488         dec             $len
2489         jnz             .Loop_tail_avx512
2490
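        # Overwrite the keystream block left at (%rsp) with the non-secret
        # sigma constant; note the unaligned vmovdqu32, as %rsp is only
        # guaranteed to be 16-byte aligned in this function.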
2491         vmovdqu32       $a_,0x00(%rsp)
2492
2493 .Ldone_avx512:
2494         vzeroall
2495 ___
2496 $code.=<<___    if ($win64);
2497         movaps  -0x28(%r9),%xmm6
2498         movaps  -0x18(%r9),%xmm7
2499 ___
2500 $code.=<<___;
2501         lea     (%r9),%rsp
2502 .cfi_def_cfa_register   %rsp
2503 .Lavx512_epilogue:
2504         ret
2505 .cfi_endproc
2506 .size   ChaCha20_avx512,.-ChaCha20_avx512
2507 ___
2508 }
2509 if ($avx>2) {
2510 # This one handles longer inputs...
2511
2512 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2513     $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2514 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2515          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2516 my @key=map("%zmm$_",(16..31));
2517 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2518
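# Unlike the AVX2 path, the whole 16-row state sits in %zmm0-15 and the
# lane-smashed key/counter material in %zmm16-31, so the round function
# below never needs to spill 'c' values to the stack.  $xt0..$xt3 alias
# @key[0..3] (the sigma row), which is safe because that row is consumed
# before the temporaries are clobbered and is re-broadcast from .Lsigma
# at the top of every outer iteration.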
2519 sub AVX512_lane_ROUND {
2520 my ($a0,$b0,$c0,$d0)=@_;
2521 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2522 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2523 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
2524 my @x=map("\"$_\"",@xx);
2525
2526         (
2527         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
2528          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
2529           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
2530            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
2531         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2532          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2533           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2534            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2535         "&vprold        (@x[$d0],@x[$d0],16)",
2536          "&vprold       (@x[$d1],@x[$d1],16)",
2537           "&vprold      (@x[$d2],@x[$d2],16)",
2538            "&vprold     (@x[$d3],@x[$d3],16)",
2539
2540         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2541          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2542           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2543            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2544         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2545          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2546           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2547            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2548         "&vprold        (@x[$b0],@x[$b0],12)",
2549          "&vprold       (@x[$b1],@x[$b1],12)",
2550           "&vprold      (@x[$b2],@x[$b2],12)",
2551            "&vprold     (@x[$b3],@x[$b3],12)",
2552
2553         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
2554          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
2555           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
2556            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
2557         "&vpxord        (@x[$d0],@x[$d0],@x[$a0])",
2558          "&vpxord       (@x[$d1],@x[$d1],@x[$a1])",
2559           "&vpxord      (@x[$d2],@x[$d2],@x[$a2])",
2560            "&vpxord     (@x[$d3],@x[$d3],@x[$a3])",
2561         "&vprold        (@x[$d0],@x[$d0],8)",
2562          "&vprold       (@x[$d1],@x[$d1],8)",
2563           "&vprold      (@x[$d2],@x[$d2],8)",
2564            "&vprold     (@x[$d3],@x[$d3],8)",
2565
2566         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
2567          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
2568           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
2569            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
2570         "&vpxord        (@x[$b0],@x[$b0],@x[$c0])",
2571          "&vpxord       (@x[$b1],@x[$b1],@x[$c1])",
2572           "&vpxord      (@x[$b2],@x[$b2],@x[$c2])",
2573            "&vpxord     (@x[$b3],@x[$b3],@x[$c3])",
2574         "&vprold        (@x[$b0],@x[$b0],7)",
2575          "&vprold       (@x[$b1],@x[$b1],7)",
2576           "&vprold      (@x[$b2],@x[$b2],7)",
2577            "&vprold     (@x[$b3],@x[$b3],7)"
2578         );
2579 }
2580
2581 my $xframe = $win64 ? 0xa8 : 8;
2582
2583 $code.=<<___;
2584 .type   ChaCha20_16x,\@function,5
2585 .align  32
2586 ChaCha20_16x:
2587 .cfi_startproc
2588 .LChaCha20_16x:
2589         mov             %rsp,%r9                # frame register
2590 .cfi_def_cfa_register   %r9
2591         sub             \$64+$xframe,%rsp
2592         and             \$-64,%rsp
2593 ___
2594 $code.=<<___    if ($win64);
2595         movaps          %xmm6,-0xa8(%r9)
2596         movaps          %xmm7,-0x98(%r9)
2597         movaps          %xmm8,-0x88(%r9)
2598         movaps          %xmm9,-0x78(%r9)
2599         movaps          %xmm10,-0x68(%r9)
2600         movaps          %xmm11,-0x58(%r9)
2601         movaps          %xmm12,-0x48(%r9)
2602         movaps          %xmm13,-0x38(%r9)
2603         movaps          %xmm14,-0x28(%r9)
2604         movaps          %xmm15,-0x18(%r9)
2605 .L16x_body:
2606 ___
2607 $code.=<<___;
2608         vzeroupper
2609
2610         lea             .Lsigma(%rip),%r10
2611         vbroadcasti32x4 (%r10),$xa3             # key[0]
2612         vbroadcasti32x4 ($key),$xb3             # key[1]
2613         vbroadcasti32x4 16($key),$xc3           # key[2]
2614         vbroadcasti32x4 ($counter),$xd3         # key[3]
2615
2616         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
2617         vpshufd         \$0x55,$xa3,$xa1
2618         vpshufd         \$0xaa,$xa3,$xa2
2619         vpshufd         \$0xff,$xa3,$xa3
2620         vmovdqa64       $xa0,@key[0]
2621         vmovdqa64       $xa1,@key[1]
2622         vmovdqa64       $xa2,@key[2]
2623         vmovdqa64       $xa3,@key[3]
2624
2625         vpshufd         \$0x00,$xb3,$xb0
2626         vpshufd         \$0x55,$xb3,$xb1
2627         vpshufd         \$0xaa,$xb3,$xb2
2628         vpshufd         \$0xff,$xb3,$xb3
2629         vmovdqa64       $xb0,@key[4]
2630         vmovdqa64       $xb1,@key[5]
2631         vmovdqa64       $xb2,@key[6]
2632         vmovdqa64       $xb3,@key[7]
2633
2634         vpshufd         \$0x00,$xc3,$xc0
2635         vpshufd         \$0x55,$xc3,$xc1
2636         vpshufd         \$0xaa,$xc3,$xc2
2637         vpshufd         \$0xff,$xc3,$xc3
2638         vmovdqa64       $xc0,@key[8]
2639         vmovdqa64       $xc1,@key[9]
2640         vmovdqa64       $xc2,@key[10]
2641         vmovdqa64       $xc3,@key[11]
2642
2643         vpshufd         \$0x00,$xd3,$xd0
2644         vpshufd         \$0x55,$xd3,$xd1
2645         vpshufd         \$0xaa,$xd3,$xd2
2646         vpshufd         \$0xff,$xd3,$xd3
2647         vpaddd          .Lincz(%rip),$xd0,$xd0  # don't save counters yet
2648         vmovdqa64       $xd0,@key[12]
2649         vmovdqa64       $xd1,@key[13]
2650         vmovdqa64       $xd2,@key[14]
2651         vmovdqa64       $xd3,@key[15]
2652
2653         mov             \$10,%eax
2654         jmp             .Loop16x
2655
2656 .align  32
2657 .Loop_outer16x:
2658         vpbroadcastd    0(%r10),$xa0            # reload key
2659         vpbroadcastd    4(%r10),$xa1
2660         vpbroadcastd    8(%r10),$xa2
2661         vpbroadcastd    12(%r10),$xa3
2662         vpaddd          .Lsixteen(%rip),@key[12],@key[12]       # next SIMD counters
2663         vmovdqa64       @key[4],$xb0
2664         vmovdqa64       @key[5],$xb1
2665         vmovdqa64       @key[6],$xb2
2666         vmovdqa64       @key[7],$xb3
2667         vmovdqa64       @key[8],$xc0
2668         vmovdqa64       @key[9],$xc1
2669         vmovdqa64       @key[10],$xc2
2670         vmovdqa64       @key[11],$xc3
2671         vmovdqa64       @key[12],$xd0
2672         vmovdqa64       @key[13],$xd1
2673         vmovdqa64       @key[14],$xd2
2674         vmovdqa64       @key[15],$xd3
2675
2676         vmovdqa64       $xa0,@key[0]
2677         vmovdqa64       $xa1,@key[1]
2678         vmovdqa64       $xa2,@key[2]
2679         vmovdqa64       $xa3,@key[3]
2680
2681         mov             \$10,%eax
2682         jmp             .Loop16x
2683
2684 .align  32
2685 .Loop16x:
2686 ___
2687         foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
2688         foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
2689 $code.=<<___;
2690         dec             %eax
2691         jnz             .Loop16x
2692
2693         vpaddd          @key[0],$xa0,$xa0       # accumulate key
2694         vpaddd          @key[1],$xa1,$xa1
2695         vpaddd          @key[2],$xa2,$xa2
2696         vpaddd          @key[3],$xa3,$xa3
2697
2698         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
2699         vpunpckldq      $xa3,$xa2,$xt3
2700         vpunpckhdq      $xa1,$xa0,$xa0
2701         vpunpckhdq      $xa3,$xa2,$xa2
2702         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
2703         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
2704         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
2705         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
2706 ___
2707         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
2708 $code.=<<___;
2709         vpaddd          @key[4],$xb0,$xb0
2710         vpaddd          @key[5],$xb1,$xb1
2711         vpaddd          @key[6],$xb2,$xb2
2712         vpaddd          @key[7],$xb3,$xb3
2713
2714         vpunpckldq      $xb1,$xb0,$xt2
2715         vpunpckldq      $xb3,$xb2,$xt3
2716         vpunpckhdq      $xb1,$xb0,$xb0
2717         vpunpckhdq      $xb3,$xb2,$xb2
2718         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
2719         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
2720         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
2721         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
2722 ___
2723         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
2724 $code.=<<___;
2725         vshufi32x4      \$0x44,$xb0,$xa0,$xt3   # "de-interlace" further
2726         vshufi32x4      \$0xee,$xb0,$xa0,$xb0
2727         vshufi32x4      \$0x44,$xb1,$xa1,$xa0
2728         vshufi32x4      \$0xee,$xb1,$xa1,$xb1
2729         vshufi32x4      \$0x44,$xb2,$xa2,$xa1
2730         vshufi32x4      \$0xee,$xb2,$xa2,$xb2
2731         vshufi32x4      \$0x44,$xb3,$xa3,$xa2
2732         vshufi32x4      \$0xee,$xb3,$xa3,$xb3
2733 ___
2734         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
2735 $code.=<<___;
2736         vpaddd          @key[8],$xc0,$xc0
2737         vpaddd          @key[9],$xc1,$xc1
2738         vpaddd          @key[10],$xc2,$xc2
2739         vpaddd          @key[11],$xc3,$xc3
2740
2741         vpunpckldq      $xc1,$xc0,$xt2
2742         vpunpckldq      $xc3,$xc2,$xt3
2743         vpunpckhdq      $xc1,$xc0,$xc0
2744         vpunpckhdq      $xc3,$xc2,$xc2
2745         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
2746         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
2747         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
2748         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
2749 ___
2750         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
2751 $code.=<<___;
2752         vpaddd          @key[12],$xd0,$xd0
2753         vpaddd          @key[13],$xd1,$xd1
2754         vpaddd          @key[14],$xd2,$xd2
2755         vpaddd          @key[15],$xd3,$xd3
2756
2757         vpunpckldq      $xd1,$xd0,$xt2
2758         vpunpckldq      $xd3,$xd2,$xt3
2759         vpunpckhdq      $xd1,$xd0,$xd0
2760         vpunpckhdq      $xd3,$xd2,$xd2
2761         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
2762         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
2763         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
2764         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
2765 ___
2766         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
2767 $code.=<<___;
2768         vshufi32x4      \$0x44,$xd0,$xc0,$xt3   # "de-interlace" further
2769         vshufi32x4      \$0xee,$xd0,$xc0,$xd0
2770         vshufi32x4      \$0x44,$xd1,$xc1,$xc0
2771         vshufi32x4      \$0xee,$xd1,$xc1,$xd1
2772         vshufi32x4      \$0x44,$xd2,$xc2,$xc1
2773         vshufi32x4      \$0xee,$xd2,$xc2,$xd2
2774         vshufi32x4      \$0x44,$xd3,$xc3,$xc2
2775         vshufi32x4      \$0xee,$xd3,$xc3,$xd3
2776 ___
2777         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
2778 $code.=<<___;
2779         vshufi32x4      \$0x88,$xc0,$xa0,$xt0   # "de-interlace" further
2780         vshufi32x4      \$0xdd,$xc0,$xa0,$xa0
2781          vshufi32x4     \$0x88,$xd0,$xb0,$xc0
2782          vshufi32x4     \$0xdd,$xd0,$xb0,$xd0
2783         vshufi32x4      \$0x88,$xc1,$xa1,$xt1
2784         vshufi32x4      \$0xdd,$xc1,$xa1,$xa1
2785          vshufi32x4     \$0x88,$xd1,$xb1,$xc1
2786          vshufi32x4     \$0xdd,$xd1,$xb1,$xd1
2787         vshufi32x4      \$0x88,$xc2,$xa2,$xt2
2788         vshufi32x4      \$0xdd,$xc2,$xa2,$xa2
2789          vshufi32x4     \$0x88,$xd2,$xb2,$xc2
2790          vshufi32x4     \$0xdd,$xd2,$xb2,$xd2
2791         vshufi32x4      \$0x88,$xc3,$xa3,$xt3
2792         vshufi32x4      \$0xdd,$xc3,$xa3,$xa3
2793          vshufi32x4     \$0x88,$xd3,$xb3,$xc3
2794          vshufi32x4     \$0xdd,$xd3,$xb3,$xd3
2795 ___
2796         ($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
2797         ($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);
2798
2799         ($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
2800          $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
2801         ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2802          $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
$code.=<<___;
	cmp		\$64*16,$len
	jb		.Ltail16x

	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out

	sub		\$64*16,$len
	jnz		.Loop_outer16x

	jmp		.Ldone16x

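# Tail handling: \$out is rebased to \$out-\$inp so that (\$out,\$inp)
# keeps addressing the output while only \$inp advances.  Whole 64-byte
# blocks are consumed one register at a time, the next block being
# copied into \$xa0 before falling through to the sub-64-byte case.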
.align	32
.Ltail16x:
	xor		%r10,%r10
	sub		$inp,$out
	cmp		\$64*1,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb0,$xa0
	lea		64($inp),$inp

	cmp		\$64*2,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc0,$xa0
	lea		64($inp),$inp

	cmp		\$64*3,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd0,$xa0
	lea		64($inp),$inp

	cmp		\$64*4,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa1,$xa0
	lea		64($inp),$inp

	cmp		\$64*5,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb1,$xa0
	lea		64($inp),$inp

	cmp		\$64*6,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc1,$xa0
	lea		64($inp),$inp

	cmp		\$64*7,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd1,$xa0
	lea		64($inp),$inp

	cmp		\$64*8,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa2,$xa0
	lea		64($inp),$inp

	cmp		\$64*9,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb2,$xa0
	lea		64($inp),$inp

	cmp		\$64*10,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc2,$xa0
	lea		64($inp),$inp

	cmp		\$64*11,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd2,$xa0
	lea		64($inp),$inp

	cmp		\$64*12,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xa3,$xa0
	lea		64($inp),$inp

	cmp		\$64*13,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xb3,$xa0
	lea		64($inp),$inp

	cmp		\$64*14,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xc3,$xa0
	lea		64($inp),$inp

	cmp		\$64*15,$len
	jb		.Less_than_64_16x
	vpxord		($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
	je		.Ldone16x
	vmovdqa32	$xd3,$xa0
	lea		64($inp),$inp

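# Fewer than 64 bytes remain: stash the current key-stream block on the
# stack and XOR it into the input byte by byte.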
.Less_than_64_16x:
	vmovdqa32	$xa0,0x00(%rsp)
	lea		($out,$inp),$out
	and		\$63,$len

.Loop_tail16x:
	movzb		($inp,%r10),%eax
	movzb		(%rsp,%r10),%ecx
	lea		1(%r10),%r10
	xor		%ecx,%eax
	mov		%al,-1($out,%r10)
	dec		$len
	jnz		.Loop_tail16x

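	# wipe the key-stream block staged on the stack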
	vpxord		$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)

.Ldone16x:
	vzeroall
___
$code.=<<___	if ($win64);
	movaps		-0xa8(%r9),%xmm6
	movaps		-0x98(%r9),%xmm7
	movaps		-0x88(%r9),%xmm8
	movaps		-0x78(%r9),%xmm9
	movaps		-0x68(%r9),%xmm10
	movaps		-0x58(%r9),%xmm11
	movaps		-0x48(%r9),%xmm12
	movaps		-0x38(%r9),%xmm13
	movaps		-0x28(%r9),%xmm14
	movaps		-0x18(%r9),%xmm15
___
$code.=<<___;
	lea		(%r9),%rsp
.cfi_def_cfa_register	%rsp
.L16x_epilogue:
	ret
.cfi_endproc
.size	ChaCha20_16x,.-ChaCha20_16x
___
}

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
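# The handlers below follow the usual perlasm SEH pattern: compare
# context->Rip against the function's prologue/epilogue labels, and if
# the fault lies inside the body, recover the caller's stack pointer
# (and, for the SIMD paths, the stashed non-volatile XMM registers)
# from the frame and copy them back into the CONTEXT before handing
# the rest of the unwind to RtlVirtualUnwind.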
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lctr32_body
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lno_data
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

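# ssse3_handler serves the code paths that stash only %xmm6-%xmm7:
# the frame pointer saved in context->R9 locates the 4-quadword save
# area 0x28 bytes below it.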
.type	ssse3_handler,\@abi-omnipotent
.align	16
ssse3_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler

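# full_handler is identical except that it recovers the full
# %xmm6-%xmm15 set (20 quadwords) saved 0xa8 bytes below the frame
# pointer in context->R9, matching the wider prologues.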
.type	full_handler,\@abi-omnipotent
.align	16
full_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler

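# SEH directory: one .pdata record (begin/end/unwind-info RVAs) per
# function, pointing at the UNWIND_INFO structures emitted into
# .xdata below.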
.section	.pdata
.align	4
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop
___
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x
___
$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
___
$code.=<<___;
.section	.xdata
.align	8
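# Each UNWIND_INFO below is ".byte 9,0,0,0", i.e. version 1 with
# UNW_FLAG_EHANDLER set and no unwind codes, followed by the handler's
# RVA and, where applicable, its HandlerData (prologue/epilogue labels).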
.LSEH_info_ChaCha20_ctr32:
	.byte	9,0,0,0
	.rva	se_handler

.LSEH_info_ChaCha20_ssse3:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4x_body,.L4x_epilogue
___
$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L4xop_body,.L4xop_epilogue		# HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L8x_body,.L8x_epilogue			# HandlerData[]
___
$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.byte	9,0,0,0
	.rva	ssse3_handler
	.rva	.Lavx512_body,.Lavx512_epilogue		# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.byte	9,0,0,0
	.rva	full_handler
	.rva	.L16x_body,.L16x_epilogue		# HandlerData[]
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;	# evaluate back-ticked Perl expressions

	s/%x#%[yz]/%x/g;	# "down-shift": "%x#%ymmN"/"%x#%zmmN" -> "%xmmN"

	print $_,"\n";
}

close STDOUT;