chacha/asm/chacha-*.pl: fix typos in tail processing.
1 #!/usr/bin/env perl
2 #
3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
9 #
10 # November 2014
11 #
12 # ChaCha20 for x86_64.
13 #
14 # Performance in cycles per byte out of large buffer.
15 #
16 #               IALU/gcc 4.8(i) 1xSSSE3/SSE2    4xSSSE3     8xAVX2
17 #
18 # P4            9.48/+99%       -/22.7(ii)      -
19 # Core2         7.83/+55%       7.90/8.08       4.35
20 # Westmere      7.19/+50%       5.60/6.70       3.00
21 # Sandy Bridge  8.31/+42%       5.45/6.76       2.72
22 # Ivy Bridge    6.71/+46%       5.40/6.49       2.41
23 # Haswell       5.92/+43%       5.20/6.45       2.42        1.23
24 # Silvermont    12.0/+33%       7.75/7.40       7.03(iii)
25 # Sledgehammer  7.28/+52%       -/14.2(ii)      -
26 # Bulldozer     9.66/+28%       9.85/11.1       3.06(iv)
27 # VIA Nano      10.5/+46%       6.72/8.60       6.05
28 #
29 # (i)   compared to older gcc 3.x, one can observe a >2x improvement on
30 #       most platforms;
31 # (ii)  as can be seen, SSE2 performance is too low on legacy
32 #       processors; NxSSE2 results are naturally better, but not
33 #       impressively better than IALU ones, which is why you won't
34 #       find SSE2 code below;
35 # (iii) this is not an optimal result for Atom because of MSROM
36 #       limitations; SSE2 can do better, but the gain is considered too
37 #       low to justify the [maintenance] effort;
38 # (iv)  Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
39
40 $flavour = shift;
41 $output  = shift;
42 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
43
44 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
45
46 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
47 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
48 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
49 die "can't locate x86_64-xlate.pl";
50
51 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
52                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
53         $avx = ($1>=2.19) + ($1>=2.22);
54 }
55
56 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
57            `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
58         $avx = ($1>=2.09) + ($1>=2.10);
59 }
60
61 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
62            `ml64 2>&1` =~ /Version ([0-9]+)\./) {
63         $avx = ($1>=10) + ($1>=11);
64 }
65
66 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
67         $avx = ($2>=3.0) + ($2>3.0);
68 }
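# At this point $avx encodes what the assembler is able to handle:
# 0 - only the scalar and SSSE3 paths are emitted, 1 - the XOP (4xop)
# path is added, 2 - the AVX2 (8x) path as well.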
69
70 open OUT,"| \"$^X\" $xlate $flavour $output";
71 *STDOUT=*OUT;
72
73 # input parameter block
74 ($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
75
76 $code.=<<___;
77 .text
78
79 .extern OPENSSL_ia32cap_P
80
81 .align  64
82 .Lzero:
83 .long   0,0,0,0
84 .Lone:
85 .long   1,0,0,0
86 .Linc:
87 .long   0,1,2,3
88 .Lfour:
89 .long   4,4,4,4
90 .Lincy:
91 .long   0,2,4,6,1,3,5,7
92 .Leight:
93 .long   8,8,8,8,8,8,8,8
94 .Lrot16:
95 .byte   0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
96 .Lrot24:
97 .byte   0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
98 .Lsigma:
99 .asciz  "expand 32-byte k"
100 .asciz  "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
101 ___
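# A note on the tables above: .Lsigma is the standard "expand 32-byte k"
# constant, .Lone/.Linc/.Lfour (and .Lincy/.Leight for the 8x path) step
# the block counter, and .Lrot16/.Lrot24 are pshufb masks that rotate
# every 32-bit lane left by 16 and by 8 bits (a left rotation by 8 being
# the same as a right rotation by 24). The byte-aligned rotations of the
# quarter-round use these masks in the SIMD paths, while the rotations
# by 12 and 7 are done with shift-and-or.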
102
103 sub AUTOLOAD()          # thunk [simplified] 32-bit style perlasm
104 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
105   my $arg = pop;
106     $arg = "\$$arg" if ($arg*1 eq $arg);
107     $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
108 }
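# The AUTOLOAD thunk turns any otherwise-undefined sub call into emitted
# assembly: the sub name becomes the mnemonic, a bare number becomes an
# immediate, and operands are printed last-argument-first (AT&T order).
# For instance, &rol("%eax",16) appends "\trol\t\$16,%eax\n" to $code.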
109
110 @x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
111     "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
112 @t=("%esi","%edi");
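# The four "%nox" entries are deliberate placeholders: @x[8..11], the 'c'
# words, never get general-purpose registers and live at 4*8(%rsp) through
# 4*11(%rsp) instead, while @t carries whichever pair of 'c's is currently
# being worked on (see the ROUND comments below).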
113
114 sub ROUND {                     # critical path is 24 cycles per round
115 my ($a0,$b0,$c0,$d0)=@_;
116 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
117 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
118 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
119 my ($xc,$xc_)=map("\"$_\"",@t);
120 my @x=map("\"$_\"",@x);
121
122         # Consider the order in which variables are addressed by their
123         # index:
124         #
125         #       a   b   c   d
126         #
127         #       0   4   8  12 < even round
128         #       1   5   9  13
129         #       2   6  10  14
130         #       3   7  11  15
131         #       0   5  10  15 < odd round
132         #       1   6  11  12
133         #       2   7   8  13
134         #       3   4   9  14
135         #
136         # 'a', 'b' and 'd' are permanently allocated in registers,
137         # @x[0..7,12..15], while the 'c's are maintained in memory. If
138         # you observe the 'c' column, you'll notice that a pair of 'c's
139         # is invariant between rounds. This means that we only have to
140         # reload them once per round, in the middle. This is why you'll
141         # see a bunch of 'c' stores and loads in the middle, but none
142         # at the beginning or end.
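        # As an illustration, the index map ($_&~3)+(($_+1)&3) used above
        # keeps the 4-aligned base of an index and steps within its group
        # of four, so successive derivations reproduce exactly these rows:
        # (0,4,8,12)->(1,5,9,13)->(2,6,10,14)->(3,7,11,15) for the even
        # round and (0,5,10,15)->(1,6,11,12)->(2,7,8,13)->(3,4,9,14) for
        # the odd one.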
143
144         # Normally instructions would be interleaved to favour in-order
145         # execution. Generally out-of-order cores manage it gracefully,
146         # but not this time for some reason. As in-order execution
147         # cores are a dying breed, old Atom being the only one around,
148         # the instructions are left uninterleaved. Besides, Atom is
149         # better off executing the 1xSSSE3 code anyway...
150
151         (
152         "&add   (@x[$a0],@x[$b0])",     # Q1
153         "&xor   (@x[$d0],@x[$a0])",
154         "&rol   (@x[$d0],16)",
155          "&add  (@x[$a1],@x[$b1])",     # Q2
156          "&xor  (@x[$d1],@x[$a1])",
157          "&rol  (@x[$d1],16)",
158
159         "&add   ($xc,@x[$d0])",
160         "&xor   (@x[$b0],$xc)",
161         "&rol   (@x[$b0],12)",
162          "&add  ($xc_,@x[$d1])",
163          "&xor  (@x[$b1],$xc_)",
164          "&rol  (@x[$b1],12)",
165
166         "&add   (@x[$a0],@x[$b0])",
167         "&xor   (@x[$d0],@x[$a0])",
168         "&rol   (@x[$d0],8)",
169          "&add  (@x[$a1],@x[$b1])",
170          "&xor  (@x[$d1],@x[$a1])",
171          "&rol  (@x[$d1],8)",
172
173         "&add   ($xc,@x[$d0])",
174         "&xor   (@x[$b0],$xc)",
175         "&rol   (@x[$b0],7)",
176          "&add  ($xc_,@x[$d1])",
177          "&xor  (@x[$b1],$xc_)",
178          "&rol  (@x[$b1],7)",
179
180         "&mov   (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
181          "&mov  (\"4*$c1(%rsp)\",$xc_)",
182         "&mov   ($xc,\"4*$c2(%rsp)\")",
183          "&mov  ($xc_,\"4*$c3(%rsp)\")",
184
185         "&add   (@x[$a2],@x[$b2])",     # Q3
186         "&xor   (@x[$d2],@x[$a2])",
187         "&rol   (@x[$d2],16)",
188          "&add  (@x[$a3],@x[$b3])",     # Q4
189          "&xor  (@x[$d3],@x[$a3])",
190          "&rol  (@x[$d3],16)",
191
192         "&add   ($xc,@x[$d2])",
193         "&xor   (@x[$b2],$xc)",
194         "&rol   (@x[$b2],12)",
195          "&add  ($xc_,@x[$d3])",
196          "&xor  (@x[$b3],$xc_)",
197          "&rol  (@x[$b3],12)",
198
199         "&add   (@x[$a2],@x[$b2])",
200         "&xor   (@x[$d2],@x[$a2])",
201         "&rol   (@x[$d2],8)",
202          "&add  (@x[$a3],@x[$b3])",
203          "&xor  (@x[$d3],@x[$a3])",
204          "&rol  (@x[$d3],8)",
205
206         "&add   ($xc,@x[$d2])",
207         "&xor   (@x[$b2],$xc)",
208         "&rol   (@x[$b2],7)",
209          "&add  ($xc_,@x[$d3])",
210          "&xor  (@x[$b3],$xc_)",
211          "&rol  (@x[$b3],7)"
212         );
213 }
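# For reference, a plain-Perl model of the ChaCha quarter-round that the
# instruction strings above implement; ROUND emits four of them (Q1..Q4)
# per call, on the columns or diagonals selected by its arguments. This
# helper is an illustration only: it is never called by the generator
# and its name is made up here.
sub __chacha_qr_ref {
my ($va,$vb,$vc,$vd)=@_;
my $rol32 = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n)))&0xffffffff };
	$va=($va+$vb)&0xffffffff; $vd=$rol32->($vd^$va,16);
	$vc=($vc+$vd)&0xffffffff; $vb=$rol32->($vb^$vc,12);
	$va=($va+$vb)&0xffffffff; $vd=$rol32->($vd^$va,8);
	$vc=($vc+$vd)&0xffffffff; $vb=$rol32->($vb^$vc,7);
	($va,$vb,$vc,$vd);
}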
214
215 ########################################################################
216 # Generic code path that handles all lengths on pre-SSSE3 processors.
217 $code.=<<___;
218 .globl  ChaCha20_ctr32
219 .type   ChaCha20_ctr32,\@function,5
220 .align  64
221 ChaCha20_ctr32:
222         cmp     \$0,$len
223         je      .Lno_data
224         mov     OPENSSL_ia32cap_P+4(%rip),%r10
225         test    \$`1<<(41-32)`,%r10d
226         jnz     .LChaCha20_ssse3
227
228         push    %rbx
229         push    %rbp
230         push    %r12
231         push    %r13
232         push    %r14
233         push    %r15
234         sub     \$64+24,%rsp
235
236         #movdqa .Lsigma(%rip),%xmm0
237         movdqu  ($key),%xmm1
238         movdqu  16($key),%xmm2
239         movdqu  ($counter),%xmm3
240         movdqa  .Lone(%rip),%xmm4
241
242         #movdqa %xmm0,4*0(%rsp)         # key[0]
243         movdqa  %xmm1,4*4(%rsp)         # key[1]
244         movdqa  %xmm2,4*8(%rsp)         # key[2]
245         movdqa  %xmm3,4*12(%rsp)        # key[3]
246         mov     $len,%rbp               # reassign $len
247         jmp     .Loop_outer
248
249 .align  32
250 .Loop_outer:
251         mov     \$0x61707865,@x[0]      # 'expa'
252         mov     \$0x3320646e,@x[1]      # 'nd 3'
253         mov     \$0x79622d32,@x[2]      # '2-by'
254         mov     \$0x6b206574,@x[3]      # 'te k'
255         mov     4*4(%rsp),@x[4]
256         mov     4*5(%rsp),@x[5]
257         mov     4*6(%rsp),@x[6]
258         mov     4*7(%rsp),@x[7]
259         movd    %xmm3,@x[12]
260         mov     4*13(%rsp),@x[13]
261         mov     4*14(%rsp),@x[14]
262         mov     4*15(%rsp),@x[15]
263
264         mov     %rbp,64+0(%rsp)         # save len
265         mov     \$10,%ebp
266         mov     $inp,64+8(%rsp)         # save inp
267         movq    %xmm2,%rsi              # "@x[8]"
268         mov     $out,64+16(%rsp)        # save out
269         mov     %rsi,%rdi
270         shr     \$32,%rdi               # "@x[9]"
271         jmp     .Loop
272
273 .align  32
274 .Loop:
275 ___
276         foreach (&ROUND (0, 4, 8,12)) { eval; }
277         foreach (&ROUND (0, 5,10,15)) { eval; }
278         &dec    ("%ebp");
279         &jnz    (".Loop");
280
281 $code.=<<___;
282         mov     @t[1],4*9(%rsp)         # modulo-scheduled
283         mov     @t[0],4*8(%rsp)
284         mov     64(%rsp),%rbp           # load len
285         movdqa  %xmm2,%xmm1
286         mov     64+8(%rsp),$inp         # load inp
287         paddd   %xmm4,%xmm3             # increment counter
288         mov     64+16(%rsp),$out        # load out
289
290         add     \$0x61707865,@x[0]      # 'expa'
291         add     \$0x3320646e,@x[1]      # 'nd 3'
292         add     \$0x79622d32,@x[2]      # '2-by'
293         add     \$0x6b206574,@x[3]      # 'te k'
294         add     4*4(%rsp),@x[4]
295         add     4*5(%rsp),@x[5]
296         add     4*6(%rsp),@x[6]
297         add     4*7(%rsp),@x[7]
298         add     4*12(%rsp),@x[12]
299         add     4*13(%rsp),@x[13]
300         add     4*14(%rsp),@x[14]
301         add     4*15(%rsp),@x[15]
302         paddd   4*8(%rsp),%xmm1
303
304         cmp     \$64,%rbp
305         jb      .Ltail
306
307         xor     4*0($inp),@x[0]         # xor with input
308         xor     4*1($inp),@x[1]
309         xor     4*2($inp),@x[2]
310         xor     4*3($inp),@x[3]
311         xor     4*4($inp),@x[4]
312         xor     4*5($inp),@x[5]
313         xor     4*6($inp),@x[6]
314         xor     4*7($inp),@x[7]
315         movdqu  4*8($inp),%xmm0
316         xor     4*12($inp),@x[12]
317         xor     4*13($inp),@x[13]
318         xor     4*14($inp),@x[14]
319         xor     4*15($inp),@x[15]
320         lea     4*16($inp),$inp         # inp+=64
321         pxor    %xmm1,%xmm0
322
323         movdqa  %xmm2,4*8(%rsp)
324         movd    %xmm3,4*12(%rsp)
325
326         mov     @x[0],4*0($out)         # write output
327         mov     @x[1],4*1($out)
328         mov     @x[2],4*2($out)
329         mov     @x[3],4*3($out)
330         mov     @x[4],4*4($out)
331         mov     @x[5],4*5($out)
332         mov     @x[6],4*6($out)
333         mov     @x[7],4*7($out)
334         movdqu  %xmm0,4*8($out)
335         mov     @x[12],4*12($out)
336         mov     @x[13],4*13($out)
337         mov     @x[14],4*14($out)
338         mov     @x[15],4*15($out)
339         lea     4*16($out),$out         # out+=64
340
341         sub     \$64,%rbp
342         jnz     .Loop_outer
343
344         jmp     .Ldone
345
346 .align  16
347 .Ltail:
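        # Tail processing: fewer than 64 bytes remain, so spill the whole
        # keystream block to the stack and xor it into the input one byte
        # at a time (remaining length in %rbp, byte index in %rbx).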
348         mov     @x[0],4*0(%rsp)
349         mov     @x[1],4*1(%rsp)
350         xor     %rbx,%rbx
351         mov     @x[2],4*2(%rsp)
352         mov     @x[3],4*3(%rsp)
353         mov     @x[4],4*4(%rsp)
354         mov     @x[5],4*5(%rsp)
355         mov     @x[6],4*6(%rsp)
356         mov     @x[7],4*7(%rsp)
357         movdqa  %xmm1,4*8(%rsp)
358         mov     @x[12],4*12(%rsp)
359         mov     @x[13],4*13(%rsp)
360         mov     @x[14],4*14(%rsp)
361         mov     @x[15],4*15(%rsp)
362
363 .Loop_tail:
364         movzb   ($inp,%rbx),%eax
365         movzb   (%rsp,%rbx),%edx
366         lea     1(%rbx),%rbx
367         xor     %edx,%eax
368         mov     %al,-1($out,%rbx)
369         dec     %rbp
370         jnz     .Loop_tail
371
372 .Ldone:
373         add     \$64+24,%rsp
374         pop     %r15
375         pop     %r14
376         pop     %r13
377         pop     %r12
378         pop     %rbp
379         pop     %rbx
380 .Lno_data:
381         ret
382 .size   ChaCha20_ctr32,.-ChaCha20_ctr32
383 ___
384
385 ########################################################################
386 # SSSE3 code path that handles shorter lengths
387 {
388 my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
389
390 sub SSSE3ROUND {        # critical path is 20 "SIMD ticks" per round
391         &paddd  ($a,$b);
392         &pxor   ($d,$a);
393         &pshufb ($d,$rot16);
394
395         &paddd  ($c,$d);
396         &pxor   ($b,$c);
397         &movdqa ($t,$b);
398         &psrld  ($b,20);
399         &pslld  ($t,12);
400         &por    ($b,$t);
401
402         &paddd  ($a,$b);
403         &pxor   ($d,$a);
404         &pshufb ($d,$rot24);
405
406         &paddd  ($c,$d);
407         &pxor   ($b,$c);
408         &movdqa ($t,$b);
409         &psrld  ($b,25);
410         &pslld  ($t,7);
411         &por    ($b,$t);
412 }
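# The shift pairs above implement rotations that SSE lacks natively:
# psrld 20 + pslld 12 + por computes rol(x,12), and psrld 25 + pslld 7
# computes rol(x,7); the byte-aligned rotations by 16 and 8 are done
# with the pshufb masks in $rot16 and $rot24.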
413
414 my $xframe = $win64 ? 32+32+8 : 24;
415
416 $code.=<<___;
417 .type   ChaCha20_ssse3,\@function,5
418 .align  32
419 ChaCha20_ssse3:
420 .LChaCha20_ssse3:
421 ___
422 $code.=<<___    if ($avx);
423         test    \$`1<<(43-32)`,%r10d
424         jnz     .LChaCha20_4xop         # XOP is fastest even if we use 1/4
425 ___
426 $code.=<<___;
427         cmp     \$128,$len              # we might throw away some data,
428         ja      .LChaCha20_4x           # but overall it won't be slower
429
430 .Ldo_sse3_after_all:
431         push    %rbx
432         push    %rbp
433         push    %r12
434         push    %r13
435         push    %r14
436         push    %r15
437
438         sub     \$64+$xframe,%rsp
439 ___
440 $code.=<<___    if ($win64);
441         movaps  %xmm6,64+32(%rsp)
442         movaps  %xmm7,64+48(%rsp)
443 ___
444 $code.=<<___;
445         movdqa  .Lsigma(%rip),$a
446         movdqu  ($key),$b
447         movdqu  16($key),$c
448         movdqu  ($counter),$d
449         movdqa  .Lrot16(%rip),$rot16
450         movdqa  .Lrot24(%rip),$rot24
451
452         movdqa  $a,0x00(%rsp)
453         movdqa  $b,0x10(%rsp)
454         movdqa  $c,0x20(%rsp)
455         movdqa  $d,0x30(%rsp)
456         mov     \$10,%ebp
457         jmp     .Loop_ssse3
458
459 .align  32
460 .Loop_outer_ssse3:
461         movdqa  .Lone(%rip),$d
462         movdqa  0x00(%rsp),$a
463         movdqa  0x10(%rsp),$b
464         movdqa  0x20(%rsp),$c
465         paddd   0x30(%rsp),$d
466         mov     \$10,%ebp
467         movdqa  $d,0x30(%rsp)
468         jmp     .Loop_ssse3
469
470 .align  32
471 .Loop_ssse3:
472 ___
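# Between the two quarter-round passes the b, c and d rows are rotated
# within their xmm registers by one, two and three 32-bit words
# respectively (pshufd with 0b00111001, 0b01001110 and 0b10010011), so
# the same column-oriented SSSE3ROUND processes the diagonals on the
# second pass; the second set of shuffles rotates them back.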
473         &SSSE3ROUND();
474         &pshufd ($c,$c,0b01001110);
475         &pshufd ($b,$b,0b00111001);
476         &pshufd ($d,$d,0b10010011);
477         &nop    ();
478
479         &SSSE3ROUND();
480         &pshufd ($c,$c,0b01001110);
481         &pshufd ($b,$b,0b10010011);
482         &pshufd ($d,$d,0b00111001);
483
484         &dec    ("%ebp");
485         &jnz    (".Loop_ssse3");
486
487 $code.=<<___;
488         paddd   0x00(%rsp),$a
489         paddd   0x10(%rsp),$b
490         paddd   0x20(%rsp),$c
491         paddd   0x30(%rsp),$d
492
493         cmp     \$64,$len
494         jb      .Ltail_ssse3
495
496         movdqu  0x00($inp),$t
497         movdqu  0x10($inp),$t1
498         pxor    $t,$a                   # xor with input
499         movdqu  0x20($inp),$t
500         pxor    $t1,$b
501         movdqu  0x30($inp),$t1
502         lea     0x40($inp),$inp         # inp+=64
503         pxor    $t,$c
504         pxor    $t1,$d
505
506         movdqu  $a,0x00($out)           # write output
507         movdqu  $b,0x10($out)
508         movdqu  $c,0x20($out)
509         movdqu  $d,0x30($out)
510         lea     0x40($out),$out         # out+=64
511
512         sub     \$64,$len
513         jnz     .Loop_outer_ssse3
514
515         jmp     .Ldone_ssse3
516
517 .align  16
518 .Ltail_ssse3:
519         movdqa  $a,0x00(%rsp)
520         movdqa  $b,0x10(%rsp)
521         movdqa  $c,0x20(%rsp)
522         movdqa  $d,0x30(%rsp)
523         xor     %rbx,%rbx
524
525 .Loop_tail_ssse3:
526         movzb   ($inp,%rbx),%eax
527         movzb   (%rsp,%rbx),%ecx
528         lea     1(%rbx),%rbx
529         xor     %ecx,%eax
530         mov     %al,-1($out,%rbx)
531         dec     $len
532         jnz     .Loop_tail_ssse3
533
534 .Ldone_ssse3:
535 ___
536 $code.=<<___    if ($win64);
537         movaps  64+32(%rsp),%xmm6
538         movaps  64+48(%rsp),%xmm7
539 ___
540 $code.=<<___;
541         add     \$64+$xframe,%rsp
542         pop     %r15
543         pop     %r14
544         pop     %r13
545         pop     %r12
546         pop     %rbp
547         pop     %rbx
548         ret
549 .size   ChaCha20_ssse3,.-ChaCha20_ssse3
550 ___
551 }
552
553 ########################################################################
554 # SSSE3 code path that handles longer messages.
555 {
556 # assign variables to favor Atom front-end
557 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
558     $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
559 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
560         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
561
562 sub SSSE3_lane_ROUND {
563 my ($a0,$b0,$c0,$d0)=@_;
564 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
565 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
566 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
567 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
568 my @x=map("\"$_\"",@xx);
569
570         # Consider the order in which variables are addressed by their
571         # index:
572         #
573         #       a   b   c   d
574         #
575         #       0   4   8  12 < even round
576         #       1   5   9  13
577         #       2   6  10  14
578         #       3   7  11  15
579         #       0   5  10  15 < odd round
580         #       1   6  11  12
581         #       2   7   8  13
582         #       3   4   9  14
583         #
584         # 'a', 'b' and 'd' are permanently allocated in registers,
585         # @x[0..7,12..15], while the 'c's are maintained in memory. If
586         # you observe the 'c' column, you'll notice that a pair of 'c's
587         # is invariant between rounds. This means that we only have to
588         # reload them once per round, in the middle. This is why you'll
589         # see a bunch of 'c' stores and loads in the middle, but none
590         # at the beginning or end.
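        # Unlike the scalar ROUND above, every @x entry here is an xmm
        # register holding the same state word for four independent
        # 64-byte blocks (the key material is smashed by lanes), the xt
        # registers double as the in-flight 'c' pair and as rotate
        # temporaries, and %r10/%r11 point at the .Lrot16/.Lrot24 pshufb
        # masks.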
591
592         (
593         "&paddd         (@x[$a0],@x[$b0])",     # Q1
594          "&paddd        (@x[$a1],@x[$b1])",     # Q2
595         "&pxor          (@x[$d0],@x[$a0])",
596          "&pxor         (@x[$d1],@x[$a1])",
597         "&pshufb        (@x[$d0],$t1)",
598          "&pshufb       (@x[$d1],$t1)",
599
600         "&paddd         ($xc,@x[$d0])",
601          "&paddd        ($xc_,@x[$d1])",
602         "&pxor          (@x[$b0],$xc)",
603          "&pxor         (@x[$b1],$xc_)",
604         "&movdqa        ($t0,@x[$b0])",
605         "&pslld         (@x[$b0],12)",
606         "&psrld         ($t0,20)",
607          "&movdqa       ($t1,@x[$b1])",
608          "&pslld        (@x[$b1],12)",
609         "&por           (@x[$b0],$t0)",
610          "&psrld        ($t1,20)",
611         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
612          "&por          (@x[$b1],$t1)",
613
614         "&paddd         (@x[$a0],@x[$b0])",
615          "&paddd        (@x[$a1],@x[$b1])",
616         "&pxor          (@x[$d0],@x[$a0])",
617          "&pxor         (@x[$d1],@x[$a1])",
618         "&pshufb        (@x[$d0],$t0)",
619          "&pshufb       (@x[$d1],$t0)",
620
621         "&paddd         ($xc,@x[$d0])",
622          "&paddd        ($xc_,@x[$d1])",
623         "&pxor          (@x[$b0],$xc)",
624          "&pxor         (@x[$b1],$xc_)",
625         "&movdqa        ($t1,@x[$b0])",
626         "&pslld         (@x[$b0],7)",
627         "&psrld         ($t1,25)",
628          "&movdqa       ($t0,@x[$b1])",
629          "&pslld        (@x[$b1],7)",
630         "&por           (@x[$b0],$t1)",
631          "&psrld        ($t0,25)",
632         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
633          "&por          (@x[$b1],$t0)",
634
635         "&movdqa        (\"`16*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
636          "&movdqa       (\"`16*($c1-8)`(%rsp)\",$xc_)",
637         "&movdqa        ($xc,\"`16*($c2-8)`(%rsp)\")",
638          "&movdqa       ($xc_,\"`16*($c3-8)`(%rsp)\")",
639
640         "&paddd         (@x[$a2],@x[$b2])",     # Q3
641          "&paddd        (@x[$a3],@x[$b3])",     # Q4
642         "&pxor          (@x[$d2],@x[$a2])",
643          "&pxor         (@x[$d3],@x[$a3])",
644         "&pshufb        (@x[$d2],$t1)",
645          "&pshufb       (@x[$d3],$t1)",
646
647         "&paddd         ($xc,@x[$d2])",
648          "&paddd        ($xc_,@x[$d3])",
649         "&pxor          (@x[$b2],$xc)",
650          "&pxor         (@x[$b3],$xc_)",
651         "&movdqa        ($t0,@x[$b2])",
652         "&pslld         (@x[$b2],12)",
653         "&psrld         ($t0,20)",
654          "&movdqa       ($t1,@x[$b3])",
655          "&pslld        (@x[$b3],12)",
656         "&por           (@x[$b2],$t0)",
657          "&psrld        ($t1,20)",
658         "&movdqa        ($t0,'(%r11)')",        # .Lrot24(%rip)
659          "&por          (@x[$b3],$t1)",
660
661         "&paddd         (@x[$a2],@x[$b2])",
662          "&paddd        (@x[$a3],@x[$b3])",
663         "&pxor          (@x[$d2],@x[$a2])",
664          "&pxor         (@x[$d3],@x[$a3])",
665         "&pshufb        (@x[$d2],$t0)",
666          "&pshufb       (@x[$d3],$t0)",
667
668         "&paddd         ($xc,@x[$d2])",
669          "&paddd        ($xc_,@x[$d3])",
670         "&pxor          (@x[$b2],$xc)",
671          "&pxor         (@x[$b3],$xc_)",
672         "&movdqa        ($t1,@x[$b2])",
673         "&pslld         (@x[$b2],7)",
674         "&psrld         ($t1,25)",
675          "&movdqa       ($t0,@x[$b3])",
676          "&pslld        (@x[$b3],7)",
677         "&por           (@x[$b2],$t1)",
678          "&psrld        ($t0,25)",
679         "&movdqa        ($t1,'(%r10)')",        # .Lrot16(%rip)
680          "&por          (@x[$b3],$t0)"
681         );
682 }
683
684 my $xframe = $win64 ? 0xa0 : 0;
685
686 $code.=<<___;
687 .type   ChaCha20_4x,\@function,5
688 .align  32
689 ChaCha20_4x:
690 .LChaCha20_4x:
691         mov             %r10,%r11
692 ___
693 $code.=<<___    if ($avx>1);
694         shr             \$32,%r10               # OPENSSL_ia32cap_P+8
695         test            \$`1<<5`,%r10           # test AVX2
696         jnz             .LChaCha20_8x
697 ___
698 $code.=<<___;
699         cmp             \$192,$len
700         ja              .Lproceed4x
701
702         and             \$`1<<26|1<<22`,%r11    # isolate XSAVE+MOVBE
703         cmp             \$`1<<22`,%r11          # check for MOVBE without XSAVE
704         je              .Ldo_sse3_after_all     # to detect Atom
705
706 .Lproceed4x:
707         lea             -0x78(%rsp),%r11
708         sub             \$0x148+$xframe,%rsp
709 ___
710         ################ stack layout
711         # +0x00         SIMD equivalent of @x[8-11]
712         # ...
713         # +0x40         constant copy of key[0-2] smashed by lanes
714         # ...
715         # +0x100        SIMD counters (with nonce smashed by lanes)
716         # ...
717         # +0x140
718 $code.=<<___    if ($win64);
719         movaps          %xmm6,-0x30(%r11)
720         movaps          %xmm7,-0x20(%r11)
721         movaps          %xmm8,-0x10(%r11)
722         movaps          %xmm9,0x00(%r11)
723         movaps          %xmm10,0x10(%r11)
724         movaps          %xmm11,0x20(%r11)
725         movaps          %xmm12,0x30(%r11)
726         movaps          %xmm13,0x40(%r11)
727         movaps          %xmm14,0x50(%r11)
728         movaps          %xmm15,0x60(%r11)
729 ___
730 $code.=<<___;
731         movdqa          .Lsigma(%rip),$xa3      # key[0]
732         movdqu          ($key),$xb3             # key[1]
733         movdqu          16($key),$xt3           # key[2]
734         movdqu          ($counter),$xd3         # key[3]
735         lea             0x100(%rsp),%rcx        # size optimization
736         lea             .Lrot16(%rip),%r10
737         lea             .Lrot24(%rip),%r11
738
739         pshufd          \$0x00,$xa3,$xa0        # smash key by lanes...
740         pshufd          \$0x55,$xa3,$xa1
741         movdqa          $xa0,0x40(%rsp)         # ... and offload
742         pshufd          \$0xaa,$xa3,$xa2
743         movdqa          $xa1,0x50(%rsp)
744         pshufd          \$0xff,$xa3,$xa3
745         movdqa          $xa2,0x60(%rsp)
746         movdqa          $xa3,0x70(%rsp)
747
748         pshufd          \$0x00,$xb3,$xb0
749         pshufd          \$0x55,$xb3,$xb1
750         movdqa          $xb0,0x80-0x100(%rcx)
751         pshufd          \$0xaa,$xb3,$xb2
752         movdqa          $xb1,0x90-0x100(%rcx)
753         pshufd          \$0xff,$xb3,$xb3
754         movdqa          $xb2,0xa0-0x100(%rcx)
755         movdqa          $xb3,0xb0-0x100(%rcx)
756
757         pshufd          \$0x00,$xt3,$xt0        # "$xc0"
758         pshufd          \$0x55,$xt3,$xt1        # "$xc1"
759         movdqa          $xt0,0xc0-0x100(%rcx)
760         pshufd          \$0xaa,$xt3,$xt2        # "$xc2"
761         movdqa          $xt1,0xd0-0x100(%rcx)
762         pshufd          \$0xff,$xt3,$xt3        # "$xc3"
763         movdqa          $xt2,0xe0-0x100(%rcx)
764         movdqa          $xt3,0xf0-0x100(%rcx)
765
766         pshufd          \$0x00,$xd3,$xd0
767         pshufd          \$0x55,$xd3,$xd1
768         paddd           .Linc(%rip),$xd0        # don't save counters yet
769         pshufd          \$0xaa,$xd3,$xd2
770         movdqa          $xd1,0x110-0x100(%rcx)
771         pshufd          \$0xff,$xd3,$xd3
772         movdqa          $xd2,0x120-0x100(%rcx)
773         movdqa          $xd3,0x130-0x100(%rcx)
774
775         jmp             .Loop_enter4x
776
777 .align  32
778 .Loop_outer4x:
779         movdqa          0x40(%rsp),$xa0         # re-load smashed key
780         movdqa          0x50(%rsp),$xa1
781         movdqa          0x60(%rsp),$xa2
782         movdqa          0x70(%rsp),$xa3
783         movdqa          0x80-0x100(%rcx),$xb0
784         movdqa          0x90-0x100(%rcx),$xb1
785         movdqa          0xa0-0x100(%rcx),$xb2
786         movdqa          0xb0-0x100(%rcx),$xb3
787         movdqa          0xc0-0x100(%rcx),$xt0   # "$xc0"
788         movdqa          0xd0-0x100(%rcx),$xt1   # "$xc1"
789         movdqa          0xe0-0x100(%rcx),$xt2   # "$xc2"
790         movdqa          0xf0-0x100(%rcx),$xt3   # "$xc3"
791         movdqa          0x100-0x100(%rcx),$xd0
792         movdqa          0x110-0x100(%rcx),$xd1
793         movdqa          0x120-0x100(%rcx),$xd2
794         movdqa          0x130-0x100(%rcx),$xd3
795         paddd           .Lfour(%rip),$xd0       # next SIMD counters
796
797 .Loop_enter4x:
798         movdqa          $xt2,0x20(%rsp)         # SIMD equivalent of "@x[10]"
799         movdqa          $xt3,0x30(%rsp)         # SIMD equivalent of "@x[11]"
800         movdqa          (%r10),$xt3             # .Lrot16(%rip)
801         mov             \$10,%eax
802         movdqa          $xd0,0x100-0x100(%rcx)  # save SIMD counters
803         jmp             .Loop4x
804
805 .align  32
806 .Loop4x:
807 ___
808         foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
809         foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
810 $code.=<<___;
811         dec             %eax
812         jnz             .Loop4x
813
814         paddd           0x40(%rsp),$xa0         # accumulate key material
815         paddd           0x50(%rsp),$xa1
816         paddd           0x60(%rsp),$xa2
817         paddd           0x70(%rsp),$xa3
818
819         movdqa          $xa0,$xt2               # "de-interlace" data
820         punpckldq       $xa1,$xa0
821         movdqa          $xa2,$xt3
822         punpckldq       $xa3,$xa2
823         punpckhdq       $xa1,$xt2
824         punpckhdq       $xa3,$xt3
825         movdqa          $xa0,$xa1
826         punpcklqdq      $xa2,$xa0               # "a0"
827         movdqa          $xt2,$xa3
828         punpcklqdq      $xt3,$xt2               # "a2"
829         punpckhqdq      $xa2,$xa1               # "a1"
830         punpckhqdq      $xt3,$xa3               # "a3"
831 ___
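# The punpck{l,h}dq/punpck{l,h}qdq sequence above is a 4x4 transpose of
# 32-bit words: on entry each register holds one state word for all four
# lanes (blocks), on exit "a0".."a3" each hold four consecutive state
# words of a single block, so the keystream can be xored against the
# input linearly. The same pattern repeats for the b, c and d groups.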
832         ($xa2,$xt2)=($xt2,$xa2);
833 $code.=<<___;
834         paddd           0x80-0x100(%rcx),$xb0
835         paddd           0x90-0x100(%rcx),$xb1
836         paddd           0xa0-0x100(%rcx),$xb2
837         paddd           0xb0-0x100(%rcx),$xb3
838
839         movdqa          $xa0,0x00(%rsp)         # offload $xaN
840         movdqa          $xa1,0x10(%rsp)
841         movdqa          0x20(%rsp),$xa0         # "xc2"
842         movdqa          0x30(%rsp),$xa1         # "xc3"
843
844         movdqa          $xb0,$xt2
845         punpckldq       $xb1,$xb0
846         movdqa          $xb2,$xt3
847         punpckldq       $xb3,$xb2
848         punpckhdq       $xb1,$xt2
849         punpckhdq       $xb3,$xt3
850         movdqa          $xb0,$xb1
851         punpcklqdq      $xb2,$xb0               # "b0"
852         movdqa          $xt2,$xb3
853         punpcklqdq      $xt3,$xt2               # "b2"
854         punpckhqdq      $xb2,$xb1               # "b1"
855         punpckhqdq      $xt3,$xb3               # "b3"
856 ___
857         ($xb2,$xt2)=($xt2,$xb2);
858         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
859 $code.=<<___;
860         paddd           0xc0-0x100(%rcx),$xc0
861         paddd           0xd0-0x100(%rcx),$xc1
862         paddd           0xe0-0x100(%rcx),$xc2
863         paddd           0xf0-0x100(%rcx),$xc3
864
865         movdqa          $xa2,0x20(%rsp)         # keep offloading $xaN
866         movdqa          $xa3,0x30(%rsp)
867
868         movdqa          $xc0,$xt2
869         punpckldq       $xc1,$xc0
870         movdqa          $xc2,$xt3
871         punpckldq       $xc3,$xc2
872         punpckhdq       $xc1,$xt2
873         punpckhdq       $xc3,$xt3
874         movdqa          $xc0,$xc1
875         punpcklqdq      $xc2,$xc0               # "c0"
876         movdqa          $xt2,$xc3
877         punpcklqdq      $xt3,$xt2               # "c2"
878         punpckhqdq      $xc2,$xc1               # "c1"
879         punpckhqdq      $xt3,$xc3               # "c3"
880 ___
881         ($xc2,$xt2)=($xt2,$xc2);
882         ($xt0,$xt1)=($xa2,$xa3);                # use $xaN as temporary
883 $code.=<<___;
884         paddd           0x100-0x100(%rcx),$xd0
885         paddd           0x110-0x100(%rcx),$xd1
886         paddd           0x120-0x100(%rcx),$xd2
887         paddd           0x130-0x100(%rcx),$xd3
888
889         movdqa          $xd0,$xt2
890         punpckldq       $xd1,$xd0
891         movdqa          $xd2,$xt3
892         punpckldq       $xd3,$xd2
893         punpckhdq       $xd1,$xt2
894         punpckhdq       $xd3,$xt3
895         movdqa          $xd0,$xd1
896         punpcklqdq      $xd2,$xd0               # "d0"
897         movdqa          $xt2,$xd3
898         punpcklqdq      $xt3,$xt2               # "d2"
899         punpckhqdq      $xd2,$xd1               # "d1"
900         punpckhqdq      $xt3,$xd3               # "d3"
901 ___
902         ($xd2,$xt2)=($xt2,$xd2);
903 $code.=<<___;
904         cmp             \$64*4,$len
905         jb              .Ltail4x
906
907         movdqu          0x00($inp),$xt0         # xor with input
908         movdqu          0x10($inp),$xt1
909         movdqu          0x20($inp),$xt2
910         movdqu          0x30($inp),$xt3
911         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
912         pxor            $xb0,$xt1
913         pxor            $xc0,$xt2
914         pxor            $xd0,$xt3
915
916          movdqu         $xt0,0x00($out)
917         movdqu          0x40($inp),$xt0
918          movdqu         $xt1,0x10($out)
919         movdqu          0x50($inp),$xt1
920          movdqu         $xt2,0x20($out)
921         movdqu          0x60($inp),$xt2
922          movdqu         $xt3,0x30($out)
923         movdqu          0x70($inp),$xt3
924         lea             0x80($inp),$inp         # size optimization
925         pxor            0x10(%rsp),$xt0
926         pxor            $xb1,$xt1
927         pxor            $xc1,$xt2
928         pxor            $xd1,$xt3
929
930          movdqu         $xt0,0x40($out)
931         movdqu          0x00($inp),$xt0
932          movdqu         $xt1,0x50($out)
933         movdqu          0x10($inp),$xt1
934          movdqu         $xt2,0x60($out)
935         movdqu          0x20($inp),$xt2
936          movdqu         $xt3,0x70($out)
937          lea            0x80($out),$out         # size optimization
938         movdqu          0x30($inp),$xt3
939         pxor            0x20(%rsp),$xt0
940         pxor            $xb2,$xt1
941         pxor            $xc2,$xt2
942         pxor            $xd2,$xt3
943
944          movdqu         $xt0,0x00($out)
945         movdqu          0x40($inp),$xt0
946          movdqu         $xt1,0x10($out)
947         movdqu          0x50($inp),$xt1
948          movdqu         $xt2,0x20($out)
949         movdqu          0x60($inp),$xt2
950          movdqu         $xt3,0x30($out)
951         movdqu          0x70($inp),$xt3
952         lea             0x80($inp),$inp         # inp+=64*4
953         pxor            0x30(%rsp),$xt0
954         pxor            $xb3,$xt1
955         pxor            $xc3,$xt2
956         pxor            $xd3,$xt3
957         movdqu          $xt0,0x40($out)
958         movdqu          $xt1,0x50($out)
959         movdqu          $xt2,0x60($out)
960         movdqu          $xt3,0x70($out)
961         lea             0x80($out),$out         # out+=64*4
962
963         sub             \$64*4,$len
964         jnz             .Loop_outer4x
965
966         jmp             .Ldone4x
967
968 .Ltail4x:
969         cmp             \$192,$len
970         jae             .L192_or_more4x
971         cmp             \$128,$len
972         jae             .L128_or_more4x
973         cmp             \$64,$len
974         jae             .L64_or_more4x
975
976         #movdqa         0x00(%rsp),$xt0         # $xaN is offloaded, remember?
977         xor             %r10,%r10
978         #movdqa         $xt0,0x00(%rsp)
979         movdqa          $xb0,0x10(%rsp)
980         movdqa          $xc0,0x20(%rsp)
981         movdqa          $xd0,0x30(%rsp)
982         jmp             .Loop_tail4x
983
984 .align  32
985 .L64_or_more4x:
986         movdqu          0x00($inp),$xt0         # xor with input
987         movdqu          0x10($inp),$xt1
988         movdqu          0x20($inp),$xt2
989         movdqu          0x30($inp),$xt3
990         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
991         pxor            $xb0,$xt1
992         pxor            $xc0,$xt2
993         pxor            $xd0,$xt3
994         movdqu          $xt0,0x00($out)
995         movdqu          $xt1,0x10($out)
996         movdqu          $xt2,0x20($out)
997         movdqu          $xt3,0x30($out)
998         je              .Ldone4x
999
1000         movdqa          0x10(%rsp),$xt0         # $xaN is offloaded, remember?
1001         lea             0x40($inp),$inp         # inp+=64*1
1002         xor             %r10,%r10
1003         movdqa          $xt0,0x00(%rsp)
1004         movdqa          $xb1,0x10(%rsp)
1005         lea             0x40($out),$out         # out+=64*1
1006         movdqa          $xc1,0x20(%rsp)
1007         sub             \$64,$len               # len-=64*1
1008         movdqa          $xd1,0x30(%rsp)
1009         jmp             .Loop_tail4x
1010
1011 .align  32
1012 .L128_or_more4x:
1013         movdqu          0x00($inp),$xt0         # xor with input
1014         movdqu          0x10($inp),$xt1
1015         movdqu          0x20($inp),$xt2
1016         movdqu          0x30($inp),$xt3
1017         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1018         pxor            $xb0,$xt1
1019         pxor            $xc0,$xt2
1020         pxor            $xd0,$xt3
1021
1022          movdqu         $xt0,0x00($out)
1023         movdqu          0x40($inp),$xt0
1024          movdqu         $xt1,0x10($out)
1025         movdqu          0x50($inp),$xt1
1026          movdqu         $xt2,0x20($out)
1027         movdqu          0x60($inp),$xt2
1028          movdqu         $xt3,0x30($out)
1029         movdqu          0x70($inp),$xt3
1030         pxor            0x10(%rsp),$xt0
1031         pxor            $xb1,$xt1
1032         pxor            $xc1,$xt2
1033         pxor            $xd1,$xt3
1034         movdqu          $xt0,0x40($out)
1035         movdqu          $xt1,0x50($out)
1036         movdqu          $xt2,0x60($out)
1037         movdqu          $xt3,0x70($out)
1038         je              .Ldone4x
1039
1040         movdqa          0x20(%rsp),$xt0         # $xaN is offloaded, remember?
1041         lea             0x80($inp),$inp         # inp+=64*2
1042         xor             %r10,%r10
1043         movdqa          $xt0,0x00(%rsp)
1044         movdqa          $xb2,0x10(%rsp)
1045         lea             0x80($out),$out         # out+=64*2
1046         movdqa          $xc2,0x20(%rsp)
1047         sub             \$128,$len              # len-=64*2
1048         movdqa          $xd2,0x30(%rsp)
1049         jmp             .Loop_tail4x
1050
1051 .align  32
1052 .L192_or_more4x:
1053         movdqu          0x00($inp),$xt0         # xor with input
1054         movdqu          0x10($inp),$xt1
1055         movdqu          0x20($inp),$xt2
1056         movdqu          0x30($inp),$xt3
1057         pxor            0x00(%rsp),$xt0         # $xaN is offloaded, remember?
1058         pxor            $xb0,$xt1
1059         pxor            $xc0,$xt2
1060         pxor            $xd0,$xt3
1061
1062          movdqu         $xt0,0x00($out)
1063         movdqu          0x40($inp),$xt0
1064          movdqu         $xt1,0x10($out)
1065         movdqu          0x50($inp),$xt1
1066          movdqu         $xt2,0x20($out)
1067         movdqu          0x60($inp),$xt2
1068          movdqu         $xt3,0x30($out)
1069         movdqu          0x70($inp),$xt3
1070         lea             0x80($inp),$inp         # size optimization
1071         pxor            0x10(%rsp),$xt0
1072         pxor            $xb1,$xt1
1073         pxor            $xc1,$xt2
1074         pxor            $xd1,$xt3
1075
1076          movdqu         $xt0,0x40($out)
1077         movdqu          0x00($inp),$xt0
1078          movdqu         $xt1,0x50($out)
1079         movdqu          0x10($inp),$xt1
1080          movdqu         $xt2,0x60($out)
1081         movdqu          0x20($inp),$xt2
1082          movdqu         $xt3,0x70($out)
1083          lea            0x80($out),$out         # size optimization
1084         movdqu          0x30($inp),$xt3
1085         pxor            0x20(%rsp),$xt0
1086         pxor            $xb2,$xt1
1087         pxor            $xc2,$xt2
1088         pxor            $xd2,$xt3
1089         movdqu          $xt0,0x00($out)
1090         movdqu          $xt1,0x10($out)
1091         movdqu          $xt2,0x20($out)
1092         movdqu          $xt3,0x30($out)
1093         je              .Ldone4x
1094
1095         movdqa          0x30(%rsp),$xt0         # $xaN is offloaded, remember?
1096         lea             0x40($inp),$inp         # inp+=64*3
1097         xor             %r10,%r10
1098         movdqa          $xt0,0x00(%rsp)
1099         movdqa          $xb3,0x10(%rsp)
1100         lea             0x40($out),$out         # out+=64*3
1101         movdqa          $xc3,0x20(%rsp)
1102         sub             \$192,$len              # len-=64*3
1103         movdqa          $xd3,0x30(%rsp)
1104
1105 .Loop_tail4x:
1106         movzb           ($inp,%r10),%eax
1107         movzb           (%rsp,%r10),%ecx
1108         lea             1(%r10),%r10
1109         xor             %ecx,%eax
1110         mov             %al,-1($out,%r10)
1111         dec             $len
1112         jnz             .Loop_tail4x
1113
1114 .Ldone4x:
1115 ___
1116 $code.=<<___    if ($win64);
1117         lea             0x140+0x30(%rsp),%r11
1118         movaps          -0x30(%r11),%xmm6
1119         movaps          -0x20(%r11),%xmm7
1120         movaps          -0x10(%r11),%xmm8
1121         movaps          0x00(%r11),%xmm9
1122         movaps          0x10(%r11),%xmm10
1123         movaps          0x20(%r11),%xmm11
1124         movaps          0x30(%r11),%xmm12
1125         movaps          0x40(%r11),%xmm13
1126         movaps          0x50(%r11),%xmm14
1127         movaps          0x60(%r11),%xmm15
1128 ___
1129 $code.=<<___;
1130         add             \$0x148+$xframe,%rsp
1131         ret
1132 .size   ChaCha20_4x,.-ChaCha20_4x
1133 ___
1134 }
1135
1136 ########################################################################
1137 # XOP code path that handles all lengths.
1138 if ($avx) {
1139 # There is some "anomaly" observed depending on instruction size or
1140 # alignment. If you look closely at the code below, you'll notice that
1141 # the argument order sometimes varies. The order affects instruction
1142 # encoding by making it larger, and such fiddling gives a 5% performance
1143 # improvement. This is on FX-4100...
1144
1145 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1146     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
1147 my  @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1148          $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
1149
1150 sub XOP_lane_ROUND {
1151 my ($a0,$b0,$c0,$d0)=@_;
1152 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1153 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1154 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1155 my @x=map("\"$_\"",@xx);
1156
1157         (
1158         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1159          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1160           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1161            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1162         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1163          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1164           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1165            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1166         "&vprotd        (@x[$d0],@x[$d0],16)",
1167          "&vprotd       (@x[$d1],@x[$d1],16)",
1168           "&vprotd      (@x[$d2],@x[$d2],16)",
1169            "&vprotd     (@x[$d3],@x[$d3],16)",
1170
1171         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1172          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1173           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1174            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1175         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1176          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1177           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1178            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1179         "&vprotd        (@x[$b0],@x[$b0],12)",
1180          "&vprotd       (@x[$b1],@x[$b1],12)",
1181           "&vprotd      (@x[$b2],@x[$b2],12)",
1182            "&vprotd     (@x[$b3],@x[$b3],12)",
1183
1184         "&vpaddd        (@x[$a0],@x[$b0],@x[$a0])",     # flip
1185          "&vpaddd       (@x[$a1],@x[$b1],@x[$a1])",     # flip
1186           "&vpaddd      (@x[$a2],@x[$a2],@x[$b2])",
1187            "&vpaddd     (@x[$a3],@x[$a3],@x[$b3])",
1188         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1189          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1190           "&vpxor       (@x[$d2],@x[$a2],@x[$d2])",
1191            "&vpxor      (@x[$d3],@x[$a3],@x[$d3])",
1192         "&vprotd        (@x[$d0],@x[$d0],8)",
1193          "&vprotd       (@x[$d1],@x[$d1],8)",
1194           "&vprotd      (@x[$d2],@x[$d2],8)",
1195            "&vprotd     (@x[$d3],@x[$d3],8)",
1196
1197         "&vpaddd        (@x[$c0],@x[$c0],@x[$d0])",
1198          "&vpaddd       (@x[$c1],@x[$c1],@x[$d1])",
1199           "&vpaddd      (@x[$c2],@x[$c2],@x[$d2])",
1200            "&vpaddd     (@x[$c3],@x[$c3],@x[$d3])",
1201         "&vpxor         (@x[$b0],@x[$c0],@x[$b0])",
1202          "&vpxor        (@x[$b1],@x[$c1],@x[$b1])",
1203           "&vpxor       (@x[$b2],@x[$b2],@x[$c2])",     # flip
1204            "&vpxor      (@x[$b3],@x[$b3],@x[$c3])",     # flip
1205         "&vprotd        (@x[$b0],@x[$b0],7)",
1206          "&vprotd       (@x[$b1],@x[$b1],7)",
1207           "&vprotd      (@x[$b2],@x[$b2],7)",
1208            "&vprotd     (@x[$b3],@x[$b3],7)"
1209         );
1210 }
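# Note: XOP's vprotd is a true packed rotate, which is why this path
# needs neither the shift-and-or pairs nor the pshufb masks used by the
# SSSE3 code above; all four rotation amounts (16, 12, 8, 7) are handled
# uniformly.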
1211
1212 my $xframe = $win64 ? 0xa0 : 0;
1213
1214 $code.=<<___;
1215 .type   ChaCha20_4xop,\@function,5
1216 .align  32
1217 ChaCha20_4xop:
1218 .LChaCha20_4xop:
1219         lea             -0x78(%rsp),%r11
1220         sub             \$0x148+$xframe,%rsp
1221 ___
1222         ################ stack layout
1223         # +0x00         SIMD equivalent of @x[8-11]
1224         # ...
1225         # +0x40         constant copy of key[0-2] smashed by lanes
1226         # ...
1227         # +0x100        SIMD counters (with nonce smashed by lanes)
1228         # ...
1229         # +0x140
1230 $code.=<<___    if ($win64);
1231         movaps          %xmm6,-0x30(%r11)
1232         movaps          %xmm7,-0x20(%r11)
1233         movaps          %xmm8,-0x10(%r11)
1234         movaps          %xmm9,0x00(%r11)
1235         movaps          %xmm10,0x10(%r11)
1236         movaps          %xmm11,0x20(%r11)
1237         movaps          %xmm12,0x30(%r11)
1238         movaps          %xmm13,0x40(%r11)
1239         movaps          %xmm14,0x50(%r11)
1240         movaps          %xmm15,0x60(%r11)
1241 ___
1242 $code.=<<___;
1243         vzeroupper
1244
1245         vmovdqa         .Lsigma(%rip),$xa3      # key[0]
1246         vmovdqu         ($key),$xb3             # key[1]
1247         vmovdqu         16($key),$xt3           # key[2]
1248         vmovdqu         ($counter),$xd3         # key[3]
1249         lea             0x100(%rsp),%rcx        # size optimization
1250
1251         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1252         vpshufd         \$0x55,$xa3,$xa1
1253         vmovdqa         $xa0,0x40(%rsp)         # ... and offload
1254         vpshufd         \$0xaa,$xa3,$xa2
1255         vmovdqa         $xa1,0x50(%rsp)
1256         vpshufd         \$0xff,$xa3,$xa3
1257         vmovdqa         $xa2,0x60(%rsp)
1258         vmovdqa         $xa3,0x70(%rsp)
1259
1260         vpshufd         \$0x00,$xb3,$xb0
1261         vpshufd         \$0x55,$xb3,$xb1
1262         vmovdqa         $xb0,0x80-0x100(%rcx)
1263         vpshufd         \$0xaa,$xb3,$xb2
1264         vmovdqa         $xb1,0x90-0x100(%rcx)
1265         vpshufd         \$0xff,$xb3,$xb3
1266         vmovdqa         $xb2,0xa0-0x100(%rcx)
1267         vmovdqa         $xb3,0xb0-0x100(%rcx)
1268
1269         vpshufd         \$0x00,$xt3,$xt0        # "$xc0"
1270         vpshufd         \$0x55,$xt3,$xt1        # "$xc1"
1271         vmovdqa         $xt0,0xc0-0x100(%rcx)
1272         vpshufd         \$0xaa,$xt3,$xt2        # "$xc2"
1273         vmovdqa         $xt1,0xd0-0x100(%rcx)
1274         vpshufd         \$0xff,$xt3,$xt3        # "$xc3"
1275         vmovdqa         $xt2,0xe0-0x100(%rcx)
1276         vmovdqa         $xt3,0xf0-0x100(%rcx)
1277
1278         vpshufd         \$0x00,$xd3,$xd0
1279         vpshufd         \$0x55,$xd3,$xd1
1280         vpaddd          .Linc(%rip),$xd0,$xd0   # don't save counters yet
1281         vpshufd         \$0xaa,$xd3,$xd2
1282         vmovdqa         $xd1,0x110-0x100(%rcx)
1283         vpshufd         \$0xff,$xd3,$xd3
1284         vmovdqa         $xd2,0x120-0x100(%rcx)
1285         vmovdqa         $xd3,0x130-0x100(%rcx)
1286
1287         jmp             .Loop_enter4xop
1288
1289 .align  32
1290 .Loop_outer4xop:
1291         vmovdqa         0x40(%rsp),$xa0         # re-load smashed key
1292         vmovdqa         0x50(%rsp),$xa1
1293         vmovdqa         0x60(%rsp),$xa2
1294         vmovdqa         0x70(%rsp),$xa3
1295         vmovdqa         0x80-0x100(%rcx),$xb0
1296         vmovdqa         0x90-0x100(%rcx),$xb1
1297         vmovdqa         0xa0-0x100(%rcx),$xb2
1298         vmovdqa         0xb0-0x100(%rcx),$xb3
1299         vmovdqa         0xc0-0x100(%rcx),$xt0   # "$xc0"
1300         vmovdqa         0xd0-0x100(%rcx),$xt1   # "$xc1"
1301         vmovdqa         0xe0-0x100(%rcx),$xt2   # "$xc2"
1302         vmovdqa         0xf0-0x100(%rcx),$xt3   # "$xc3"
1303         vmovdqa         0x100-0x100(%rcx),$xd0
1304         vmovdqa         0x110-0x100(%rcx),$xd1
1305         vmovdqa         0x120-0x100(%rcx),$xd2
1306         vmovdqa         0x130-0x100(%rcx),$xd3
1307         vpaddd          .Lfour(%rip),$xd0,$xd0  # next SIMD counters
1308
1309 .Loop_enter4xop:
1310         mov             \$10,%eax
1311         vmovdqa         $xd0,0x100-0x100(%rcx)  # save SIMD counters
1312         jmp             .Loop4xop
1313
1314 .align  32
1315 .Loop4xop:
1316 ___
1317         foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
1318         foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
1319 $code.=<<___;
1320         dec             %eax
1321         jnz             .Loop4xop
1322
1323         vpaddd          0x40(%rsp),$xa0,$xa0    # accumulate key material
1324         vpaddd          0x50(%rsp),$xa1,$xa1
1325         vpaddd          0x60(%rsp),$xa2,$xa2
1326         vpaddd          0x70(%rsp),$xa3,$xa3
1327
1328         vmovdqa         $xt2,0x20(%rsp)         # offload $xc2,3
1329         vmovdqa         $xt3,0x30(%rsp)
1330
1331         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1332         vpunpckldq      $xa3,$xa2,$xt3
1333         vpunpckhdq      $xa1,$xa0,$xa0
1334         vpunpckhdq      $xa3,$xa2,$xa2
1335         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1336         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1337         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1338         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1339 ___
1340         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1341 $code.=<<___;
1342         vpaddd          0x80-0x100(%rcx),$xb0,$xb0
1343         vpaddd          0x90-0x100(%rcx),$xb1,$xb1
1344         vpaddd          0xa0-0x100(%rcx),$xb2,$xb2
1345         vpaddd          0xb0-0x100(%rcx),$xb3,$xb3
1346
1347         vmovdqa         $xa0,0x00(%rsp)         # offload $xa0,1
1348         vmovdqa         $xa1,0x10(%rsp)
1349         vmovdqa         0x20(%rsp),$xa0         # "xc2"
1350         vmovdqa         0x30(%rsp),$xa1         # "xc3"
1351
1352         vpunpckldq      $xb1,$xb0,$xt2
1353         vpunpckldq      $xb3,$xb2,$xt3
1354         vpunpckhdq      $xb1,$xb0,$xb0
1355         vpunpckhdq      $xb3,$xb2,$xb2
1356         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1357         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1358         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1359         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1360 ___
1361         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1362         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1363 $code.=<<___;
1364         vpaddd          0xc0-0x100(%rcx),$xc0,$xc0
1365         vpaddd          0xd0-0x100(%rcx),$xc1,$xc1
1366         vpaddd          0xe0-0x100(%rcx),$xc2,$xc2
1367         vpaddd          0xf0-0x100(%rcx),$xc3,$xc3
1368
1369         vpunpckldq      $xc1,$xc0,$xt2
1370         vpunpckldq      $xc3,$xc2,$xt3
1371         vpunpckhdq      $xc1,$xc0,$xc0
1372         vpunpckhdq      $xc3,$xc2,$xc2
1373         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1374         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1375         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1376         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1377 ___
1378         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1379 $code.=<<___;
1380         vpaddd          0x100-0x100(%rcx),$xd0,$xd0
1381         vpaddd          0x110-0x100(%rcx),$xd1,$xd1
1382         vpaddd          0x120-0x100(%rcx),$xd2,$xd2
1383         vpaddd          0x130-0x100(%rcx),$xd3,$xd3
1384
1385         vpunpckldq      $xd1,$xd0,$xt2
1386         vpunpckldq      $xd3,$xd2,$xt3
1387         vpunpckhdq      $xd1,$xd0,$xd0
1388         vpunpckhdq      $xd3,$xd2,$xd2
1389         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1390         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1391         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1392         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1393 ___
1394         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1395         ($xa0,$xa1)=($xt2,$xt3);
1396 $code.=<<___;
1397         vmovdqa         0x00(%rsp),$xa0         # restore $xa0,1
1398         vmovdqa         0x10(%rsp),$xa1
1399
1400         cmp             \$64*4,$len
1401         jb              .Ltail4xop
1402
1403         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1404         vpxor           0x10($inp),$xb0,$xb0
1405         vpxor           0x20($inp),$xc0,$xc0
1406         vpxor           0x30($inp),$xd0,$xd0
1407         vpxor           0x40($inp),$xa1,$xa1
1408         vpxor           0x50($inp),$xb1,$xb1
1409         vpxor           0x60($inp),$xc1,$xc1
1410         vpxor           0x70($inp),$xd1,$xd1
1411         lea             0x80($inp),$inp         # size optimization
1412         vpxor           0x00($inp),$xa2,$xa2
1413         vpxor           0x10($inp),$xb2,$xb2
1414         vpxor           0x20($inp),$xc2,$xc2
1415         vpxor           0x30($inp),$xd2,$xd2
1416         vpxor           0x40($inp),$xa3,$xa3
1417         vpxor           0x50($inp),$xb3,$xb3
1418         vpxor           0x60($inp),$xc3,$xc3
1419         vpxor           0x70($inp),$xd3,$xd3
1420         lea             0x80($inp),$inp         # inp+=64*4
1421
1422         vmovdqu         $xa0,0x00($out)
1423         vmovdqu         $xb0,0x10($out)
1424         vmovdqu         $xc0,0x20($out)
1425         vmovdqu         $xd0,0x30($out)
1426         vmovdqu         $xa1,0x40($out)
1427         vmovdqu         $xb1,0x50($out)
1428         vmovdqu         $xc1,0x60($out)
1429         vmovdqu         $xd1,0x70($out)
1430         lea             0x80($out),$out         # size optimization
1431         vmovdqu         $xa2,0x00($out)
1432         vmovdqu         $xb2,0x10($out)
1433         vmovdqu         $xc2,0x20($out)
1434         vmovdqu         $xd2,0x30($out)
1435         vmovdqu         $xa3,0x40($out)
1436         vmovdqu         $xb3,0x50($out)
1437         vmovdqu         $xc3,0x60($out)
1438         vmovdqu         $xd3,0x70($out)
1439         lea             0x80($out),$out         # out+=64*4
1440
1441         sub             \$64*4,$len
1442         jnz             .Loop_outer4xop
1443
1444         jmp             .Ldone4xop
1445
1446 .align  32
1447 .Ltail4xop:
1448         cmp             \$192,$len
1449         jae             .L192_or_more4xop
1450         cmp             \$128,$len
1451         jae             .L128_or_more4xop
1452         cmp             \$64,$len
1453         jae             .L64_or_more4xop
1454
1455         xor             %r10,%r10
1456         vmovdqa         $xa0,0x00(%rsp)
1457         vmovdqa         $xb0,0x10(%rsp)
1458         vmovdqa         $xc0,0x20(%rsp)
1459         vmovdqa         $xd0,0x30(%rsp)
1460         jmp             .Loop_tail4xop
1461
1462 .align  32
1463 .L64_or_more4xop:
1464         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1465         vpxor           0x10($inp),$xb0,$xb0
1466         vpxor           0x20($inp),$xc0,$xc0
1467         vpxor           0x30($inp),$xd0,$xd0
1468         vmovdqu         $xa0,0x00($out)
1469         vmovdqu         $xb0,0x10($out)
1470         vmovdqu         $xc0,0x20($out)
1471         vmovdqu         $xd0,0x30($out)
1472         je              .Ldone4xop
1473
1474         lea             0x40($inp),$inp         # inp+=64*1
1475         vmovdqa         $xa1,0x00(%rsp)
1476         xor             %r10,%r10
1477         vmovdqa         $xb1,0x10(%rsp)
1478         lea             0x40($out),$out         # out+=64*1
1479         vmovdqa         $xc1,0x20(%rsp)
1480         sub             \$64,$len               # len-=64*1
1481         vmovdqa         $xd1,0x30(%rsp)
1482         jmp             .Loop_tail4xop
1483
1484 .align  32
1485 .L128_or_more4xop:
1486         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1487         vpxor           0x10($inp),$xb0,$xb0
1488         vpxor           0x20($inp),$xc0,$xc0
1489         vpxor           0x30($inp),$xd0,$xd0
1490         vpxor           0x40($inp),$xa1,$xa1
1491         vpxor           0x50($inp),$xb1,$xb1
1492         vpxor           0x60($inp),$xc1,$xc1
1493         vpxor           0x70($inp),$xd1,$xd1
1494
1495         vmovdqu         $xa0,0x00($out)
1496         vmovdqu         $xb0,0x10($out)
1497         vmovdqu         $xc0,0x20($out)
1498         vmovdqu         $xd0,0x30($out)
1499         vmovdqu         $xa1,0x40($out)
1500         vmovdqu         $xb1,0x50($out)
1501         vmovdqu         $xc1,0x60($out)
1502         vmovdqu         $xd1,0x70($out)
1503         je              .Ldone4xop
1504
1505         lea             0x80($inp),$inp         # inp+=64*2
1506         vmovdqa         $xa2,0x00(%rsp)
1507         xor             %r10,%r10
1508         vmovdqa         $xb2,0x10(%rsp)
1509         lea             0x80($out),$out         # out+=64*2
1510         vmovdqa         $xc2,0x20(%rsp)
1511         sub             \$128,$len              # len-=64*2
1512         vmovdqa         $xd2,0x30(%rsp)
1513         jmp             .Loop_tail4xop
1514
1515 .align  32
1516 .L192_or_more4xop:
1517         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1518         vpxor           0x10($inp),$xb0,$xb0
1519         vpxor           0x20($inp),$xc0,$xc0
1520         vpxor           0x30($inp),$xd0,$xd0
1521         vpxor           0x40($inp),$xa1,$xa1
1522         vpxor           0x50($inp),$xb1,$xb1
1523         vpxor           0x60($inp),$xc1,$xc1
1524         vpxor           0x70($inp),$xd1,$xd1
1525         lea             0x80($inp),$inp         # size optimization
1526         vpxor           0x00($inp),$xa2,$xa2
1527         vpxor           0x10($inp),$xb2,$xb2
1528         vpxor           0x20($inp),$xc2,$xc2
1529         vpxor           0x30($inp),$xd2,$xd2
1530
1531         vmovdqu         $xa0,0x00($out)
1532         vmovdqu         $xb0,0x10($out)
1533         vmovdqu         $xc0,0x20($out)
1534         vmovdqu         $xd0,0x30($out)
1535         vmovdqu         $xa1,0x40($out)
1536         vmovdqu         $xb1,0x50($out)
1537         vmovdqu         $xc1,0x60($out)
1538         vmovdqu         $xd1,0x70($out)
1539         lea             0x80($out),$out         # size optimization
1540         vmovdqu         $xa2,0x00($out)
1541         vmovdqu         $xb2,0x10($out)
1542         vmovdqu         $xc2,0x20($out)
1543         vmovdqu         $xd2,0x30($out)
1544         je              .Ldone4xop
1545
1546         lea             0x40($inp),$inp         # inp+=64*3
1547         vmovdqa         $xa3,0x00(%rsp)
1548         xor             %r10,%r10
1549         vmovdqa         $xb3,0x10(%rsp)
1550         lea             0x40($out),$out         # out+=64*3
1551         vmovdqa         $xc3,0x20(%rsp)
1552         sub             \$192,$len              # len-=64*3
1553         vmovdqa         $xd3,0x30(%rsp)
1554
1555 .Loop_tail4xop:
1556         movzb           ($inp,%r10),%eax
1557         movzb           (%rsp,%r10),%ecx
1558         lea             1(%r10),%r10
1559         xor             %ecx,%eax
1560         mov             %al,-1($out,%r10)
1561         dec             $len
1562         jnz             .Loop_tail4xop
1563
1564 .Ldone4xop:
1565         vzeroupper
1566 ___
1567 $code.=<<___    if ($win64);
1568         lea             0x140+0x30(%rsp),%r11
1569         movaps          -0x30(%r11),%xmm6
1570         movaps          -0x20(%r11),%xmm7
1571         movaps          -0x10(%r11),%xmm8
1572         movaps          0x00(%r11),%xmm9
1573         movaps          0x10(%r11),%xmm10
1574         movaps          0x20(%r11),%xmm11
1575         movaps          0x30(%r11),%xmm12
1576         movaps          0x40(%r11),%xmm13
1577         movaps          0x50(%r11),%xmm14
1578         movaps          0x60(%r11),%xmm15
1579 ___
1580 $code.=<<___;
1581         add             \$0x148+$xframe,%rsp
1582         ret
1583 .size   ChaCha20_4xop,.-ChaCha20_4xop
1584 ___
1585 }
1586
1587 ########################################################################
1588 # AVX2 code path
1589 if ($avx>1) {
1590 my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
1591     $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
1592 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
1593         "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
1594
1595 sub AVX2_lane_ROUND {
1596 my ($a0,$b0,$c0,$d0)=@_;
1597 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
1598 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
1599 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
1600 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
1601 my @x=map("\"$_\"",@xx);
1602
1603         # Consider the order in which variables are addressed by their
1604         # index:
1605         #
1606         #       a   b   c   d
1607         #
1608         #       0   4   8  12 < even round
1609         #       1   5   9  13
1610         #       2   6  10  14
1611         #       3   7  11  15
1612         #       0   5  10  15 < odd round
1613         #       1   6  11  12
1614         #       2   7   8  13
1615         #       3   4   9  14
1616         #
1617         # 'a', 'b' and 'd's are permanently allocated in registers,
1618         # @x[0..7,12..15], while 'c's are maintained in memory. If
1619         # you observe the 'c' column, you'll notice that the pair of
1620         # 'c's used at the end of one round is the same pair used at
1621         # the start of the next, so they need reloading only once per
1622         # round, in the middle. This is why you'll see a bunch of 'c'
1623         # stores and loads in the middle, but none at the beginning or end.
1624
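        # As an illustration (an editor's sketch, not code that is emitted),
        # the index rotation used above to derive Q2..Q4 from Q1 can be
        # reproduced standalone:
        #
        #       my @q = (0,4,8,12);                     # the even round's Q1
        #       for (1..3) {
        #               @q = map(($_&~3)+(($_+1)&3), @q);
        #               print "@q\n";                   # 1 5 9 13
        #       }                                       # 2 6 10 14
        #                                               # 3 7 11 15
        #
        # Seeding with (0,5,10,15) instead reproduces the odd-round rows of
        # the table above.
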
1625         (
1626         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",     # Q1
1627         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1628         "&vpshufb       (@x[$d0],@x[$d0],$t1)",
1629          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",     # Q2
1630          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1631          "&vpshufb      (@x[$d1],@x[$d1],$t1)",
1632
1633         "&vpaddd        ($xc,$xc,@x[$d0])",
1634         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1635         "&vpslld        ($t0,@x[$b0],12)",
1636         "&vpsrld        (@x[$b0],@x[$b0],20)",
1637         "&vpor          (@x[$b0],$t0,@x[$b0])",
1638         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1639          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1640          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1641          "&vpslld       ($t1,@x[$b1],12)",
1642          "&vpsrld       (@x[$b1],@x[$b1],20)",
1643          "&vpor         (@x[$b1],$t1,@x[$b1])",
1644
1645         "&vpaddd        (@x[$a0],@x[$a0],@x[$b0])",
1646         "&vpxor         (@x[$d0],@x[$a0],@x[$d0])",
1647         "&vpshufb       (@x[$d0],@x[$d0],$t0)",
1648          "&vpaddd       (@x[$a1],@x[$a1],@x[$b1])",
1649          "&vpxor        (@x[$d1],@x[$a1],@x[$d1])",
1650          "&vpshufb      (@x[$d1],@x[$d1],$t0)",
1651
1652         "&vpaddd        ($xc,$xc,@x[$d0])",
1653         "&vpxor         (@x[$b0],$xc,@x[$b0])",
1654         "&vpslld        ($t1,@x[$b0],7)",
1655         "&vpsrld        (@x[$b0],@x[$b0],25)",
1656         "&vpor          (@x[$b0],$t1,@x[$b0])",
1657         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1658          "&vpaddd       ($xc_,$xc_,@x[$d1])",
1659          "&vpxor        (@x[$b1],$xc_,@x[$b1])",
1660          "&vpslld       ($t0,@x[$b1],7)",
1661          "&vpsrld       (@x[$b1],@x[$b1],25)",
1662          "&vpor         (@x[$b1],$t0,@x[$b1])",
1663
1664         "&vmovdqa       (\"`32*($c0-8)`(%rsp)\",$xc)",  # reload pair of 'c's
1665          "&vmovdqa      (\"`32*($c1-8)`(%rsp)\",$xc_)",
1666         "&vmovdqa       ($xc,\"`32*($c2-8)`(%rsp)\")",
1667          "&vmovdqa      ($xc_,\"`32*($c3-8)`(%rsp)\")",
1668
1669         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",     # Q3
1670         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1671         "&vpshufb       (@x[$d2],@x[$d2],$t1)",
1672          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",     # Q4
1673          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1674          "&vpshufb      (@x[$d3],@x[$d3],$t1)",
1675
1676         "&vpaddd        ($xc,$xc,@x[$d2])",
1677         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1678         "&vpslld        ($t0,@x[$b2],12)",
1679         "&vpsrld        (@x[$b2],@x[$b2],20)",
1680         "&vpor          (@x[$b2],$t0,@x[$b2])",
1681         "&vbroadcasti128($t0,'(%r11)')",                # .Lrot24(%rip)
1682          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1683          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1684          "&vpslld       ($t1,@x[$b3],12)",
1685          "&vpsrld       (@x[$b3],@x[$b3],20)",
1686          "&vpor         (@x[$b3],$t1,@x[$b3])",
1687
1688         "&vpaddd        (@x[$a2],@x[$a2],@x[$b2])",
1689         "&vpxor         (@x[$d2],@x[$a2],@x[$d2])",
1690         "&vpshufb       (@x[$d2],@x[$d2],$t0)",
1691          "&vpaddd       (@x[$a3],@x[$a3],@x[$b3])",
1692          "&vpxor        (@x[$d3],@x[$a3],@x[$d3])",
1693          "&vpshufb      (@x[$d3],@x[$d3],$t0)",
1694
1695         "&vpaddd        ($xc,$xc,@x[$d2])",
1696         "&vpxor         (@x[$b2],$xc,@x[$b2])",
1697         "&vpslld        ($t1,@x[$b2],7)",
1698         "&vpsrld        (@x[$b2],@x[$b2],25)",
1699         "&vpor          (@x[$b2],$t1,@x[$b2])",
1700         "&vbroadcasti128($t1,'(%r10)')",                # .Lrot16(%rip)
1701          "&vpaddd       ($xc_,$xc_,@x[$d3])",
1702          "&vpxor        (@x[$b3],$xc_,@x[$b3])",
1703          "&vpslld       ($t0,@x[$b3],7)",
1704          "&vpsrld       (@x[$b3],@x[$b3],25)",
1705          "&vpor         (@x[$b3],$t0,@x[$b3])"
1706         );
1707 }
1708
1709 my $xframe = $win64 ? 0xb0 : 8;
1710
1711 $code.=<<___;
1712 .type   ChaCha20_8x,\@function,5
1713 .align  32
1714 ChaCha20_8x:
1715 .LChaCha20_8x:
1716         mov             %rsp,%r10
1717         sub             \$0x280+$xframe,%rsp
1718         and             \$-32,%rsp
1719 ___
1720 $code.=<<___    if ($win64);
1721         lea             0x290+0x30(%rsp),%r11
1722         movaps          %xmm6,-0x30(%r11)
1723         movaps          %xmm7,-0x20(%r11)
1724         movaps          %xmm8,-0x10(%r11)
1725         movaps          %xmm9,0x00(%r11)
1726         movaps          %xmm10,0x10(%r11)
1727         movaps          %xmm11,0x20(%r11)
1728         movaps          %xmm12,0x30(%r11)
1729         movaps          %xmm13,0x40(%r11)
1730         movaps          %xmm14,0x50(%r11)
1731         movaps          %xmm15,0x60(%r11)
1732 ___
1733 $code.=<<___;
1734         vzeroupper
1735         mov             %r10,0x280(%rsp)
1736
1737         ################ stack layout
1738         # +0x00         SIMD equivalent of @x[8-12]
1739         # +0x00         SIMD equivalent of @x[8-11]
1740         # +0x80         constant copy of key[0-2] smashed by lanes
1741         # ...
1742         # +0x200        SIMD counters (with nonce smashed by lanes)
1743         # ...
1744         # +0x280        saved %rsp
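        # (each row above is one 32-byte %ymm register: 4 rows of working
        #  'c' values at +0x00, 12 rows of smashed key at +0x80 and 4 rows
        #  of counters/nonce at +0x200, i.e. 0x80+0x180+0x80 = 0x280 bytes)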
1745
1746         vbroadcasti128  .Lsigma(%rip),$xa3      # key[0]
1747         vbroadcasti128  ($key),$xb3             # key[1]
1748         vbroadcasti128  16($key),$xt3           # key[2]
1749         vbroadcasti128  ($counter),$xd3         # key[3]
1750         lea             0x100(%rsp),%rcx        # size optimization
1751         lea             0x200(%rsp),%rax        # size optimization
1752         lea             .Lrot16(%rip),%r10
1753         lea             .Lrot24(%rip),%r11
1754
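        # "Smashing by lanes": each vpshufd below replicates one 32-bit
        # state word into all four dwords of both 128-bit halves of a
        # %ymm register (the halves are already identical thanks to
        # vbroadcasti128 above), so every register carries the same word
        # for all eight blocks processed in parallel.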
1755         vpshufd         \$0x00,$xa3,$xa0        # smash key by lanes...
1756         vpshufd         \$0x55,$xa3,$xa1
1757         vmovdqa         $xa0,0x80-0x100(%rcx)   # ... and offload
1758         vpshufd         \$0xaa,$xa3,$xa2
1759         vmovdqa         $xa1,0xa0-0x100(%rcx)
1760         vpshufd         \$0xff,$xa3,$xa3
1761         vmovdqa         $xa2,0xc0-0x100(%rcx)
1762         vmovdqa         $xa3,0xe0-0x100(%rcx)
1763
1764         vpshufd         \$0x00,$xb3,$xb0
1765         vpshufd         \$0x55,$xb3,$xb1
1766         vmovdqa         $xb0,0x100-0x100(%rcx)
1767         vpshufd         \$0xaa,$xb3,$xb2
1768         vmovdqa         $xb1,0x120-0x100(%rcx)
1769         vpshufd         \$0xff,$xb3,$xb3
1770         vmovdqa         $xb2,0x140-0x100(%rcx)
1771         vmovdqa         $xb3,0x160-0x100(%rcx)
1772
1773         vpshufd         \$0x00,$xt3,$xt0        # "xc0"
1774         vpshufd         \$0x55,$xt3,$xt1        # "xc1"
1775         vmovdqa         $xt0,0x180-0x200(%rax)
1776         vpshufd         \$0xaa,$xt3,$xt2        # "xc2"
1777         vmovdqa         $xt1,0x1a0-0x200(%rax)
1778         vpshufd         \$0xff,$xt3,$xt3        # "xc3"
1779         vmovdqa         $xt2,0x1c0-0x200(%rax)
1780         vmovdqa         $xt3,0x1e0-0x200(%rax)
1781
1782         vpshufd         \$0x00,$xd3,$xd0
1783         vpshufd         \$0x55,$xd3,$xd1
1784         vpaddd          .Lincy(%rip),$xd0,$xd0  # don't save counters yet
1785         vpshufd         \$0xaa,$xd3,$xd2
1786         vmovdqa         $xd1,0x220-0x200(%rax)
1787         vpshufd         \$0xff,$xd3,$xd3
1788         vmovdqa         $xd2,0x240-0x200(%rax)
1789         vmovdqa         $xd3,0x260-0x200(%rax)
1790
1791         jmp             .Loop_enter8x
1792
1793 .align  32
1794 .Loop_outer8x:
1795         vmovdqa         0x80-0x100(%rcx),$xa0   # re-load smashed key
1796         vmovdqa         0xa0-0x100(%rcx),$xa1
1797         vmovdqa         0xc0-0x100(%rcx),$xa2
1798         vmovdqa         0xe0-0x100(%rcx),$xa3
1799         vmovdqa         0x100-0x100(%rcx),$xb0
1800         vmovdqa         0x120-0x100(%rcx),$xb1
1801         vmovdqa         0x140-0x100(%rcx),$xb2
1802         vmovdqa         0x160-0x100(%rcx),$xb3
1803         vmovdqa         0x180-0x200(%rax),$xt0  # "xc0"
1804         vmovdqa         0x1a0-0x200(%rax),$xt1  # "xc1"
1805         vmovdqa         0x1c0-0x200(%rax),$xt2  # "xc2"
1806         vmovdqa         0x1e0-0x200(%rax),$xt3  # "xc3"
1807         vmovdqa         0x200-0x200(%rax),$xd0
1808         vmovdqa         0x220-0x200(%rax),$xd1
1809         vmovdqa         0x240-0x200(%rax),$xd2
1810         vmovdqa         0x260-0x200(%rax),$xd3
1811         vpaddd          .Leight(%rip),$xd0,$xd0 # next SIMD counters
1812
1813 .Loop_enter8x:
1814         vmovdqa         $xt2,0x40(%rsp)         # SIMD equivalent of "@x[10]"
1815         vmovdqa         $xt3,0x60(%rsp)         # SIMD equivalent of "@x[11]"
1816         vbroadcasti128  (%r10),$xt3
1817         vmovdqa         $xd0,0x200-0x200(%rax)  # save SIMD counters
1818         mov             \$10,%eax
1819         jmp             .Loop8x
1820
1821 .align  32
1822 .Loop8x:
1823 ___
1824         foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
1825         foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
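        # The two calls above emit one "column" round (indices 0,4,8,12) and
        # one "diagonal" round (0,5,10,15); %eax is loaded with 10 at
        # .Loop_enter8x, so .Loop8x runs 10 such double rounds, i.e. the 20
        # rounds of ChaCha20.  For reference, each quarter-round amounts to
        # the following per 32-bit lane (an editor's scalar sketch, not code
        # that is emitted here):
        #
        #       a += b; d ^= a; d = rotl32(d,16);
        #       c += d; b ^= c; b = rotl32(b,12);
        #       a += b; d ^= a; d = rotl32(d, 8);
        #       c += d; b ^= c; b = rotl32(b, 7);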
1826 $code.=<<___;
1827         dec             %eax
1828         jnz             .Loop8x
1829
1830         lea             0x200(%rsp),%rax        # size optimization
1831         vpaddd          0x80-0x100(%rcx),$xa0,$xa0      # accumulate key
1832         vpaddd          0xa0-0x100(%rcx),$xa1,$xa1
1833         vpaddd          0xc0-0x100(%rcx),$xa2,$xa2
1834         vpaddd          0xe0-0x100(%rcx),$xa3,$xa3
1835
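        # Each row of the accumulated state still holds a single state word
        # spread across the 8 lanes.  The punpck{l,h}dq/punpck{l,h}qdq pairs
        # and the vperm2i128 steps further down transpose them so that each
        # register ends up with a contiguous 32-byte half of one of the
        # eight 64-byte keystream blocks, ready to be XORed with the input.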
1836         vpunpckldq      $xa1,$xa0,$xt2          # "de-interlace" data
1837         vpunpckldq      $xa3,$xa2,$xt3
1838         vpunpckhdq      $xa1,$xa0,$xa0
1839         vpunpckhdq      $xa3,$xa2,$xa2
1840         vpunpcklqdq     $xt3,$xt2,$xa1          # "a0"
1841         vpunpckhqdq     $xt3,$xt2,$xt2          # "a1"
1842         vpunpcklqdq     $xa2,$xa0,$xa3          # "a2"
1843         vpunpckhqdq     $xa2,$xa0,$xa0          # "a3"
1844 ___
1845         ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
1846 $code.=<<___;
1847         vpaddd          0x100-0x100(%rcx),$xb0,$xb0
1848         vpaddd          0x120-0x100(%rcx),$xb1,$xb1
1849         vpaddd          0x140-0x100(%rcx),$xb2,$xb2
1850         vpaddd          0x160-0x100(%rcx),$xb3,$xb3
1851
1852         vpunpckldq      $xb1,$xb0,$xt2
1853         vpunpckldq      $xb3,$xb2,$xt3
1854         vpunpckhdq      $xb1,$xb0,$xb0
1855         vpunpckhdq      $xb3,$xb2,$xb2
1856         vpunpcklqdq     $xt3,$xt2,$xb1          # "b0"
1857         vpunpckhqdq     $xt3,$xt2,$xt2          # "b1"
1858         vpunpcklqdq     $xb2,$xb0,$xb3          # "b2"
1859         vpunpckhqdq     $xb2,$xb0,$xb0          # "b3"
1860 ___
1861         ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
1862 $code.=<<___;
1863         vperm2i128      \$0x20,$xb0,$xa0,$xt3   # "de-interlace" further
1864         vperm2i128      \$0x31,$xb0,$xa0,$xb0
1865         vperm2i128      \$0x20,$xb1,$xa1,$xa0
1866         vperm2i128      \$0x31,$xb1,$xa1,$xb1
1867         vperm2i128      \$0x20,$xb2,$xa2,$xa1
1868         vperm2i128      \$0x31,$xb2,$xa2,$xb2
1869         vperm2i128      \$0x20,$xb3,$xa3,$xa2
1870         vperm2i128      \$0x31,$xb3,$xa3,$xb3
1871 ___
1872         ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
1873         my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
1874 $code.=<<___;
1875         vmovdqa         $xa0,0x00(%rsp)         # offload $xaN
1876         vmovdqa         $xa1,0x20(%rsp)
1877         vmovdqa         0x40(%rsp),$xc2         # $xa0
1878         vmovdqa         0x60(%rsp),$xc3         # $xa1
1879
1880         vpaddd          0x180-0x200(%rax),$xc0,$xc0
1881         vpaddd          0x1a0-0x200(%rax),$xc1,$xc1
1882         vpaddd          0x1c0-0x200(%rax),$xc2,$xc2
1883         vpaddd          0x1e0-0x200(%rax),$xc3,$xc3
1884
1885         vpunpckldq      $xc1,$xc0,$xt2
1886         vpunpckldq      $xc3,$xc2,$xt3
1887         vpunpckhdq      $xc1,$xc0,$xc0
1888         vpunpckhdq      $xc3,$xc2,$xc2
1889         vpunpcklqdq     $xt3,$xt2,$xc1          # "c0"
1890         vpunpckhqdq     $xt3,$xt2,$xt2          # "c1"
1891         vpunpcklqdq     $xc2,$xc0,$xc3          # "c2"
1892         vpunpckhqdq     $xc2,$xc0,$xc0          # "c3"
1893 ___
1894         ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
1895 $code.=<<___;
1896         vpaddd          0x200-0x200(%rax),$xd0,$xd0
1897         vpaddd          0x220-0x200(%rax),$xd1,$xd1
1898         vpaddd          0x240-0x200(%rax),$xd2,$xd2
1899         vpaddd          0x260-0x200(%rax),$xd3,$xd3
1900
1901         vpunpckldq      $xd1,$xd0,$xt2
1902         vpunpckldq      $xd3,$xd2,$xt3
1903         vpunpckhdq      $xd1,$xd0,$xd0
1904         vpunpckhdq      $xd3,$xd2,$xd2
1905         vpunpcklqdq     $xt3,$xt2,$xd1          # "d0"
1906         vpunpckhqdq     $xt3,$xt2,$xt2          # "d1"
1907         vpunpcklqdq     $xd2,$xd0,$xd3          # "d2"
1908         vpunpckhqdq     $xd2,$xd0,$xd0          # "d3"
1909 ___
1910         ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
1911 $code.=<<___;
1912         vperm2i128      \$0x20,$xd0,$xc0,$xt3   # "de-interlace" further
1913         vperm2i128      \$0x31,$xd0,$xc0,$xd0
1914         vperm2i128      \$0x20,$xd1,$xc1,$xc0
1915         vperm2i128      \$0x31,$xd1,$xc1,$xd1
1916         vperm2i128      \$0x20,$xd2,$xc2,$xc1
1917         vperm2i128      \$0x31,$xd2,$xc2,$xd2
1918         vperm2i128      \$0x20,$xd3,$xc3,$xc2
1919         vperm2i128      \$0x31,$xd3,$xc3,$xd3
1920 ___
1921         ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
1922         ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
1923         ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
1924         ($xa0,$xa1)=($xt2,$xt3);
1925 $code.=<<___;
1926         vmovdqa         0x00(%rsp),$xa0         # $xaN was offloaded, remember?
1927         vmovdqa         0x20(%rsp),$xa1
1928
1929         cmp             \$64*8,$len
1930         jb              .Ltail8x
1931
1932         vpxor           0x00($inp),$xa0,$xa0    # xor with input
1933         vpxor           0x20($inp),$xb0,$xb0
1934         vpxor           0x40($inp),$xc0,$xc0
1935         vpxor           0x60($inp),$xd0,$xd0
1936         lea             0x80($inp),$inp         # size optimization
1937         vmovdqu         $xa0,0x00($out)
1938         vmovdqu         $xb0,0x20($out)
1939         vmovdqu         $xc0,0x40($out)
1940         vmovdqu         $xd0,0x60($out)
1941         lea             0x80($out),$out         # size optimization
1942
1943         vpxor           0x00($inp),$xa1,$xa1
1944         vpxor           0x20($inp),$xb1,$xb1
1945         vpxor           0x40($inp),$xc1,$xc1
1946         vpxor           0x60($inp),$xd1,$xd1
1947         lea             0x80($inp),$inp         # size optimization
1948         vmovdqu         $xa1,0x00($out)
1949         vmovdqu         $xb1,0x20($out)
1950         vmovdqu         $xc1,0x40($out)
1951         vmovdqu         $xd1,0x60($out)
1952         lea             0x80($out),$out         # size optimization
1953
1954         vpxor           0x00($inp),$xa2,$xa2
1955         vpxor           0x20($inp),$xb2,$xb2
1956         vpxor           0x40($inp),$xc2,$xc2
1957         vpxor           0x60($inp),$xd2,$xd2
1958         lea             0x80($inp),$inp         # size optimization
1959         vmovdqu         $xa2,0x00($out)
1960         vmovdqu         $xb2,0x20($out)
1961         vmovdqu         $xc2,0x40($out)
1962         vmovdqu         $xd2,0x60($out)
1963         lea             0x80($out),$out         # size optimization
1964
1965         vpxor           0x00($inp),$xa3,$xa3
1966         vpxor           0x20($inp),$xb3,$xb3
1967         vpxor           0x40($inp),$xc3,$xc3
1968         vpxor           0x60($inp),$xd3,$xd3
1969         lea             0x80($inp),$inp         # size optimization
1970         vmovdqu         $xa3,0x00($out)
1971         vmovdqu         $xb3,0x20($out)
1972         vmovdqu         $xc3,0x40($out)
1973         vmovdqu         $xd3,0x60($out)
1974         lea             0x80($out),$out         # size optimization
1975
1976         sub             \$64*8,$len
1977         jnz             .Loop_outer8x
1978
1979         jmp             .Ldone8x
1980
1981 .Ltail8x:
1982         cmp             \$448,$len
1983         jae             .L448_or_more8x
1984         cmp             \$384,$len
1985         jae             .L384_or_more8x
1986         cmp             \$320,$len
1987         jae             .L320_or_more8x
1988         cmp             \$256,$len
1989         jae             .L256_or_more8x
1990         cmp             \$192,$len
1991         jae             .L192_or_more8x
1992         cmp             \$128,$len
1993         jae             .L128_or_more8x
1994         cmp             \$64,$len
1995         jae             .L64_or_more8x
1996
1997         xor             %r10,%r10
1998         vmovdqa         $xa0,0x00(%rsp)
1999         vmovdqa         $xb0,0x20(%rsp)
2000         jmp             .Loop_tail8x
2001
2002 .align  32
2003 .L64_or_more8x:
2004         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2005         vpxor           0x20($inp),$xb0,$xb0
2006         vmovdqu         $xa0,0x00($out)
2007         vmovdqu         $xb0,0x20($out)
2008         je              .Ldone8x
2009
2010         lea             0x40($inp),$inp         # inp+=64*1
2011         xor             %r10,%r10
2012         vmovdqa         $xc0,0x00(%rsp)
2013         lea             0x40($out),$out         # out+=64*1
2014         sub             \$64,$len               # len-=64*1
2015         vmovdqa         $xd0,0x20(%rsp)
2016         jmp             .Loop_tail8x
2017
2018 .align  32
2019 .L128_or_more8x:
2020         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2021         vpxor           0x20($inp),$xb0,$xb0
2022         vpxor           0x40($inp),$xc0,$xc0
2023         vpxor           0x60($inp),$xd0,$xd0
2024         vmovdqu         $xa0,0x00($out)
2025         vmovdqu         $xb0,0x20($out)
2026         vmovdqu         $xc0,0x40($out)
2027         vmovdqu         $xd0,0x60($out)
2028         je              .Ldone8x
2029
2030         lea             0x80($inp),$inp         # inp+=64*2
2031         xor             %r10,%r10
2032         vmovdqa         $xa1,0x00(%rsp)
2033         lea             0x80($out),$out         # out+=64*2
2034         sub             \$128,$len              # len-=64*2
2035         vmovdqa         $xb1,0x20(%rsp)
2036         jmp             .Loop_tail8x
2037
2038 .align  32
2039 .L192_or_more8x:
2040         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2041         vpxor           0x20($inp),$xb0,$xb0
2042         vpxor           0x40($inp),$xc0,$xc0
2043         vpxor           0x60($inp),$xd0,$xd0
2044         vpxor           0x80($inp),$xa1,$xa1
2045         vpxor           0xa0($inp),$xb1,$xb1
2046         vmovdqu         $xa0,0x00($out)
2047         vmovdqu         $xb0,0x20($out)
2048         vmovdqu         $xc0,0x40($out)
2049         vmovdqu         $xd0,0x60($out)
2050         vmovdqu         $xa1,0x80($out)
2051         vmovdqu         $xb1,0xa0($out)
2052         je              .Ldone8x
2053
2054         lea             0xc0($inp),$inp         # inp+=64*3
2055         xor             %r10,%r10
2056         vmovdqa         $xc1,0x00(%rsp)
2057         lea             0xc0($out),$out         # out+=64*3
2058         sub             \$192,$len              # len-=64*3
2059         vmovdqa         $xd1,0x20(%rsp)
2060         jmp             .Loop_tail8x
2061
2062 .align  32
2063 .L256_or_more8x:
2064         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2065         vpxor           0x20($inp),$xb0,$xb0
2066         vpxor           0x40($inp),$xc0,$xc0
2067         vpxor           0x60($inp),$xd0,$xd0
2068         vpxor           0x80($inp),$xa1,$xa1
2069         vpxor           0xa0($inp),$xb1,$xb1
2070         vpxor           0xc0($inp),$xc1,$xc1
2071         vpxor           0xe0($inp),$xd1,$xd1
2072         vmovdqu         $xa0,0x00($out)
2073         vmovdqu         $xb0,0x20($out)
2074         vmovdqu         $xc0,0x40($out)
2075         vmovdqu         $xd0,0x60($out)
2076         vmovdqu         $xa1,0x80($out)
2077         vmovdqu         $xb1,0xa0($out)
2078         vmovdqu         $xc1,0xc0($out)
2079         vmovdqu         $xd1,0xe0($out)
2080         je              .Ldone8x
2081
2082         lea             0x100($inp),$inp        # inp+=64*4
2083         xor             %r10,%r10
2084         vmovdqa         $xa2,0x00(%rsp)
2085         lea             0x100($out),$out        # out+=64*4
2086         sub             \$256,$len              # len-=64*4
2087         vmovdqa         $xb2,0x20(%rsp)
2088         jmp             .Loop_tail8x
2089
2090 .align  32
2091 .L320_or_more8x:
2092         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2093         vpxor           0x20($inp),$xb0,$xb0
2094         vpxor           0x40($inp),$xc0,$xc0
2095         vpxor           0x60($inp),$xd0,$xd0
2096         vpxor           0x80($inp),$xa1,$xa1
2097         vpxor           0xa0($inp),$xb1,$xb1
2098         vpxor           0xc0($inp),$xc1,$xc1
2099         vpxor           0xe0($inp),$xd1,$xd1
2100         vpxor           0x100($inp),$xa2,$xa2
2101         vpxor           0x120($inp),$xb2,$xb2
2102         vmovdqu         $xa0,0x00($out)
2103         vmovdqu         $xb0,0x20($out)
2104         vmovdqu         $xc0,0x40($out)
2105         vmovdqu         $xd0,0x60($out)
2106         vmovdqu         $xa1,0x80($out)
2107         vmovdqu         $xb1,0xa0($out)
2108         vmovdqu         $xc1,0xc0($out)
2109         vmovdqu         $xd1,0xe0($out)
2110         vmovdqu         $xa2,0x100($out)
2111         vmovdqu         $xb2,0x120($out)
2112         je              .Ldone8x
2113
2114         lea             0x140($inp),$inp        # inp+=64*5
2115         xor             %r10,%r10
2116         vmovdqa         $xc2,0x00(%rsp)
2117         lea             0x140($out),$out        # out+=64*5
2118         sub             \$320,$len              # len-=64*5
2119         vmovdqa         $xd2,0x20(%rsp)
2120         jmp             .Loop_tail8x
2121
2122 .align  32
2123 .L384_or_more8x:
2124         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2125         vpxor           0x20($inp),$xb0,$xb0
2126         vpxor           0x40($inp),$xc0,$xc0
2127         vpxor           0x60($inp),$xd0,$xd0
2128         vpxor           0x80($inp),$xa1,$xa1
2129         vpxor           0xa0($inp),$xb1,$xb1
2130         vpxor           0xc0($inp),$xc1,$xc1
2131         vpxor           0xe0($inp),$xd1,$xd1
2132         vpxor           0x100($inp),$xa2,$xa2
2133         vpxor           0x120($inp),$xb2,$xb2
2134         vpxor           0x140($inp),$xc2,$xc2
2135         vpxor           0x160($inp),$xd2,$xd2
2136         vmovdqu         $xa0,0x00($out)
2137         vmovdqu         $xb0,0x20($out)
2138         vmovdqu         $xc0,0x40($out)
2139         vmovdqu         $xd0,0x60($out)
2140         vmovdqu         $xa1,0x80($out)
2141         vmovdqu         $xb1,0xa0($out)
2142         vmovdqu         $xc1,0xc0($out)
2143         vmovdqu         $xd1,0xe0($out)
2144         vmovdqu         $xa2,0x100($out)
2145         vmovdqu         $xb2,0x120($out)
2146         vmovdqu         $xc2,0x140($out)
2147         vmovdqu         $xd2,0x160($out)
2148         je              .Ldone8x
2149
2150         lea             0x180($inp),$inp        # inp+=64*6
2151         xor             %r10,%r10
2152         vmovdqa         $xa3,0x00(%rsp)
2153         lea             0x180($out),$out        # out+=64*6
2154         sub             \$384,$len              # len-=64*6
2155         vmovdqa         $xb3,0x20(%rsp)
2156         jmp             .Loop_tail8x
2157
2158 .align  32
2159 .L448_or_more8x:
2160         vpxor           0x00($inp),$xa0,$xa0    # xor with input
2161         vpxor           0x20($inp),$xb0,$xb0
2162         vpxor           0x40($inp),$xc0,$xc0
2163         vpxor           0x60($inp),$xd0,$xd0
2164         vpxor           0x80($inp),$xa1,$xa1
2165         vpxor           0xa0($inp),$xb1,$xb1
2166         vpxor           0xc0($inp),$xc1,$xc1
2167         vpxor           0xe0($inp),$xd1,$xd1
2168         vpxor           0x100($inp),$xa2,$xa2
2169         vpxor           0x120($inp),$xb2,$xb2
2170         vpxor           0x140($inp),$xc2,$xc2
2171         vpxor           0x160($inp),$xd2,$xd2
2172         vpxor           0x180($inp),$xa3,$xa3
2173         vpxor           0x1a0($inp),$xb3,$xb3
2174         vmovdqu         $xa0,0x00($out)
2175         vmovdqu         $xb0,0x20($out)
2176         vmovdqu         $xc0,0x40($out)
2177         vmovdqu         $xd0,0x60($out)
2178         vmovdqu         $xa1,0x80($out)
2179         vmovdqu         $xb1,0xa0($out)
2180         vmovdqu         $xc1,0xc0($out)
2181         vmovdqu         $xd1,0xe0($out)
2182         vmovdqu         $xa2,0x100($out)
2183         vmovdqu         $xb2,0x120($out)
2184         vmovdqu         $xc2,0x140($out)
2185         vmovdqu         $xd2,0x160($out)
2186         vmovdqu         $xa3,0x180($out)
2187         vmovdqu         $xb3,0x1a0($out)
2188         je              .Ldone8x
2189
2190         lea             0x1c0($inp),$inp        # inp+=64*7
2191         xor             %r10,%r10
2192         vmovdqa         $xc3,0x00(%rsp)
2193         lea             0x1c0($out),$out        # out+=64*7
2194         sub             \$448,$len              # len-=64*7
2195         vmovdqa         $xd3,0x20(%rsp)
2196
2197 .Loop_tail8x:
2198         movzb           ($inp,%r10),%eax
2199         movzb           (%rsp,%r10),%ecx
2200         lea             1(%r10),%r10
2201         xor             %ecx,%eax
2202         mov             %al,-1($out,%r10)
2203         dec             $len
2204         jnz             .Loop_tail8x
2205
2206 .Ldone8x:
2207         vzeroall
2208 ___
2209 $code.=<<___    if ($win64);
2210         lea             0x290+0x30(%rsp),%r11
2211         movaps          -0x30(%r11),%xmm6
2212         movaps          -0x20(%r11),%xmm7
2213         movaps          -0x10(%r11),%xmm8
2214         movaps          0x00(%r11),%xmm9
2215         movaps          0x10(%r11),%xmm10
2216         movaps          0x20(%r11),%xmm11
2217         movaps          0x30(%r11),%xmm12
2218         movaps          0x40(%r11),%xmm13
2219         movaps          0x50(%r11),%xmm14
2220         movaps          0x60(%r11),%xmm15
2221 ___
2222 $code.=<<___;
2223         mov             0x280(%rsp),%rsp
2224         ret
2225 .size   ChaCha20_8x,.-ChaCha20_8x
2226 ___
2227 }
2228
2229 foreach (split("\n",$code)) {
2230         s/\`([^\`]*)\`/eval $1/geo;
2231
2232         s/%x#%y/%x/go;
2233
2234         print $_,"\n";
2235 }
2236
2237 close STDOUT;